/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lustre/obdfilter/filter_io_26.c
 *
 * Author: Peter Braam <braam@clusterfs.com>
 * Author: Andreas Dilger <adilger@clusterfs.com>
 * Author: Phil Schwan <phil@clusterfs.com>
 */

#ifndef AUTOCONF_INCLUDED
#include <linux/config.h>
#endif
#include <linux/module.h>
#include <linux/pagemap.h> // XXX kill me soon
#include <linux/version.h>
#include <linux/buffer_head.h>

#define DEBUG_SUBSYSTEM S_FILTER

#include <obd_class.h>
#include <lustre_fsfilt.h>
#include <lustre_quota.h>
#include "filter_internal.h"

/* 512-byte block minimum */
#define MAX_BLOCKS_PER_PAGE (CFS_PAGE_SIZE / 512)

struct filter_iobuf {
        cfs_atomic_t       dr_numreqs;  /* number of reqs being processed */
        cfs_waitq_t        dr_wait;
        int                dr_max_pages;
        int                dr_npages;
        int                dr_error;
        struct page      **dr_pages;
        unsigned long     *dr_blocks;
        unsigned int       dr_ignore_quota:1;
        struct filter_obd *dr_filter;
};
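
/* Account one bio submission against the iobuf and update the filter-wide
 * and per-export in-flight counters and brw_stats histograms.  The matching
 * record_finish_io() below drops the counters again when the bio completes. */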
static void record_start_io(struct filter_iobuf *iobuf, int rw, int size,
                            struct obd_export *exp)
{
        struct filter_obd *filter = iobuf->dr_filter;

        cfs_atomic_inc(&iobuf->dr_numreqs);

        if (rw == OBD_BRW_READ) {
                cfs_atomic_inc(&filter->fo_r_in_flight);
                lprocfs_oh_tally(&filter->fo_filter_stats.hist[BRW_R_RPC_HIST],
                                 cfs_atomic_read(&filter->fo_r_in_flight));
                lprocfs_oh_tally_log2(&filter->
                                       fo_filter_stats.hist[BRW_R_DISK_IOSIZE],
                                      size);
                if (exp->exp_nid_stats && exp->exp_nid_stats->nid_brw_stats) {
                        lprocfs_oh_tally(&exp->exp_nid_stats->nid_brw_stats->
                                          hist[BRW_R_RPC_HIST],
                                         cfs_atomic_read(&filter->
                                                         fo_r_in_flight));
                        lprocfs_oh_tally_log2(&exp->exp_nid_stats->
                                        nid_brw_stats->hist[BRW_R_DISK_IOSIZE],
                                              size);
                }
        } else {
                cfs_atomic_inc(&filter->fo_w_in_flight);
                lprocfs_oh_tally(&filter->fo_filter_stats.hist[BRW_W_RPC_HIST],
                                 cfs_atomic_read(&filter->fo_w_in_flight));
                lprocfs_oh_tally_log2(&filter->
                                       fo_filter_stats.hist[BRW_W_DISK_IOSIZE],
                                      size);
                if (exp->exp_nid_stats && exp->exp_nid_stats->nid_brw_stats) {
                        lprocfs_oh_tally(&exp->exp_nid_stats->nid_brw_stats->
                                          hist[BRW_W_RPC_HIST],
                                         cfs_atomic_read(&filter->
                                                         fo_w_in_flight));
                        lprocfs_oh_tally_log2(&exp->exp_nid_stats->
                                        nid_brw_stats->hist[BRW_W_DISK_IOSIZE],
                                              size);
                }
        }
}

static void record_finish_io(struct filter_iobuf *iobuf, int rw, int rc)
{
        struct filter_obd *filter = iobuf->dr_filter;

        /* CAVEAT EMPTOR: possibly in IRQ context
         * DO NOT record procfs stats here!!! */

        if (rw == OBD_BRW_READ)
                cfs_atomic_dec(&filter->fo_r_in_flight);
        else
                cfs_atomic_dec(&filter->fo_w_in_flight);

        if (cfs_atomic_dec_and_test(&iobuf->dr_numreqs))
                cfs_waitq_signal(&iobuf->dr_wait);
}
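
/* bio completion callback, installed as bi_end_io on every bio built by
 * filter_do_bio().  May run in interrupt context: it only updates page
 * flags, records the first real error in dr_error and wakes the waiter
 * through record_finish_io(). */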
static int dio_complete_routine(struct bio *bio, unsigned int done, int error)
{
        struct filter_iobuf *iobuf = bio->bi_private;
        struct bio_vec *bvl;
        int i;

        /* CAVEAT EMPTOR: possibly in IRQ context
         * DO NOT record procfs stats here!!! */

#ifdef HAVE_BIO_ENDIO_2ARG
        /* The "bi_size" check was needed for kernels < 2.6.24 in order to
         * handle the case where a SCSI request error caused this callback
         * to be called before all of the biovecs had been processed.
         * Without this check the server thread will hang.  In newer kernels
         * the bio_end_io routine is never called for partial completions,
         * so this check is no longer needed. */
        if (bio->bi_size)                       /* Not complete */
                return 0;
#endif

        if (unlikely(iobuf == NULL)) {
                CERROR("***** bio->bi_private is NULL! This should never "
                       "happen. Normally, I would crash here, but instead I "
                       "will dump the bio contents to the console. Please "
                       "report this to <http://bugzilla.lustre.org/>, along "
                       "with any interesting messages leading up to this point "
                       "(like SCSI errors, perhaps). Because bi_private is "
                       "NULL, I can't wake up the thread that initiated this "
                       "IO - you will probably have to reboot this node.\n");
                CERROR("bi_next: %p, bi_flags: %lx, bi_rw: %lu, bi_vcnt: %d, "
                       "bi_idx: %d, bi->size: %d, bi_end_io: %p, bi_cnt: %d, "
                       "bi_private: %p\n", bio->bi_next, bio->bi_flags,
                       bio->bi_rw, bio->bi_vcnt, bio->bi_idx, bio->bi_size,
                       bio->bi_end_io, cfs_atomic_read(&bio->bi_cnt),
                       bio->bi_private);
                return 0;
        }

        /* the check is outside of the cycle for performance reasons -bzzz */
        if (!cfs_test_bit(BIO_RW, &bio->bi_rw)) {
                bio_for_each_segment(bvl, bio, i) {
                        if (likely(error == 0))
                                SetPageUptodate(bvl->bv_page);
                        LASSERT(PageLocked(bvl->bv_page));
                        ClearPageConstant(bvl->bv_page);
                }
                record_finish_io(iobuf, OBD_BRW_READ, error);
        } else {
                if (mapping_cap_page_constant_write(iobuf->dr_pages[0]->mapping)) {
                        bio_for_each_segment(bvl, bio, i) {
                                ClearPageConstant(bvl->bv_page);
                        }
                }
                record_finish_io(iobuf, OBD_BRW_WRITE, error);
        }

        /* any real error is good enough -bzzz */
        if (error != 0 && iobuf->dr_error == 0)
                iobuf->dr_error = error;

        /* Completed bios used to be chained off iobuf->dr_bios and freed in
         * filter_clear_dreq().  It was then possible to exhaust the biovec-256
         * mempool when serious on-disk fragmentation was encountered,
         * deadlocking the OST.  The bios are now released as soon as complete
         * so the pool cannot be exhausted while IOs are competing.  bug 10076 */
        bio_put(bio);
        return 0;
}
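
/* Return 1 if @sector starts exactly where the data already queued in @bio
 * ends, i.e. the new fragment can simply be appended to the current bio. */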
static int can_be_merged(struct bio *bio, sector_t sector)
{
        unsigned int size = bio->bi_size >> 9;

        return bio->bi_sector + size == sector ? 1 : 0;
}
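
/* Allocate a filter_iobuf large enough to describe num_pages pages and the
 * matching on-disk block numbers (up to MAX_BLOCKS_PER_PAGE per page).
 * Returns ERR_PTR(-ENOMEM) if any allocation fails. */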
struct filter_iobuf *filter_alloc_iobuf(struct filter_obd *filter,
                                        int rw, int num_pages)
{
        struct filter_iobuf *iobuf;

        LASSERTF(rw == OBD_BRW_WRITE || rw == OBD_BRW_READ, "%x\n", rw);

        OBD_ALLOC(iobuf, sizeof(*iobuf));
        if (iobuf == NULL)
                goto failed_0;

        OBD_ALLOC(iobuf->dr_pages, num_pages * sizeof(*iobuf->dr_pages));
        if (iobuf->dr_pages == NULL)
                goto failed_1;

        OBD_ALLOC(iobuf->dr_blocks,
                  MAX_BLOCKS_PER_PAGE * num_pages * sizeof(*iobuf->dr_blocks));
        if (iobuf->dr_blocks == NULL)
                goto failed_2;

        iobuf->dr_filter = filter;
        cfs_waitq_init(&iobuf->dr_wait);
        cfs_atomic_set(&iobuf->dr_numreqs, 0);
        iobuf->dr_max_pages = num_pages;
        iobuf->dr_npages = 0;
        iobuf->dr_error = 0;

        RETURN(iobuf);

 failed_2:
        OBD_FREE(iobuf->dr_pages,
                 num_pages * sizeof(*iobuf->dr_pages));
 failed_1:
        OBD_FREE(iobuf, sizeof(*iobuf));
 failed_0:
        RETURN(ERR_PTR(-ENOMEM));
}

static void filter_clear_iobuf(struct filter_iobuf *iobuf)
{
        iobuf->dr_npages = 0;
        iobuf->dr_error = 0;
        cfs_atomic_set(&iobuf->dr_numreqs, 0);
}

void filter_free_iobuf(struct filter_iobuf *iobuf)
{
        int num_pages = iobuf->dr_max_pages;

        filter_clear_iobuf(iobuf);

        OBD_FREE(iobuf->dr_blocks,
                 MAX_BLOCKS_PER_PAGE * num_pages * sizeof(*iobuf->dr_blocks));
        OBD_FREE(iobuf->dr_pages,
                 num_pages * sizeof(*iobuf->dr_pages));
        OBD_FREE(iobuf, sizeof(*iobuf));
}
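
/* Hand an iobuf back after use: an iobuf owned by a service thread lives in
 * filter->fo_iobuf_pool[thread_id] and is only cleared for reuse, while an
 * iobuf obtained without a thread context is freed outright. */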
void filter_iobuf_put(struct filter_obd *filter, struct filter_iobuf *iobuf,
                      struct obd_trans_info *oti)
{
        int thread_id = (oti && oti->oti_thread) ?
                        oti->oti_thread->t_id : -1;

        if (unlikely(thread_id < 0)) {
                filter_free_iobuf(iobuf);
                return;
        }

        LASSERTF(filter->fo_iobuf_pool[thread_id] == iobuf,
                 "iobuf mismatch for thread %d: pool %p iobuf %p\n",
                 thread_id, filter->fo_iobuf_pool[thread_id], iobuf);
        filter_clear_iobuf(iobuf);
}

int filter_iobuf_add_page(struct obd_device *obd, struct filter_iobuf *iobuf,
                          struct inode *inode, struct page *page)
{
        LASSERT(iobuf->dr_npages < iobuf->dr_max_pages);
        iobuf->dr_pages[iobuf->dr_npages++] = page;

        return 0;
}
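
/* Build and submit bios for every page in the iobuf: walk the per-page block
 * numbers filled in by fsfilt_map_inode_pages(), merge contiguous fragments
 * into as few bios as possible, zero-fill read holes, then wait for all
 * submitted bios to complete and fold the results into the brw_stats. */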
int filter_do_bio(struct obd_export *exp, struct inode *inode,
                  struct filter_iobuf *iobuf, int rw)
{
        struct obd_device *obd = exp->exp_obd;
        int blocks_per_page = CFS_PAGE_SIZE >> inode->i_blkbits;
        struct page **pages = iobuf->dr_pages;
        int npages = iobuf->dr_npages;
        unsigned long *blocks = iobuf->dr_blocks;
        int total_blocks = npages * blocks_per_page;
        int sector_bits = inode->i_sb->s_blocksize_bits - 9;
        unsigned int blocksize = inode->i_sb->s_blocksize;
        struct bio *bio = NULL;
        struct page *page;
        unsigned long start_time = jiffies;
        unsigned int page_offset;
        sector_t sector;
        int nblocks, page_idx, block_idx, i;
        int frags = 0, rc = 0;
        ENTRY;

        LASSERT(iobuf->dr_npages == npages);
        LASSERT(total_blocks <= OBDFILTER_CREATED_SCRATCHPAD_ENTRIES);

        for (page_idx = 0, block_idx = 0;
             page_idx < npages;
             page_idx++, block_idx += blocks_per_page) {
                page = pages[page_idx];
                LASSERT(block_idx + blocks_per_page <= total_blocks);

                for (i = 0, page_offset = 0;
                     i < blocks_per_page;
                     i += nblocks, page_offset += blocksize * nblocks) {
                        nblocks = 1;

                        if (blocks[block_idx + i] == 0) {  /* hole */
                                LASSERT(rw == OBD_BRW_READ);
                                memset(kmap(page) + page_offset, 0, blocksize);
                                kunmap(page);
                                continue;
                        }

                        sector = (sector_t)blocks[block_idx + i] << sector_bits;

                        /* Additional contiguous file blocks? */
                        while (i + nblocks < blocks_per_page &&
                               (sector + (nblocks << sector_bits)) ==
                               ((sector_t)blocks[block_idx + i + nblocks] <<
                                 sector_bits))
                                nblocks++;

                        /* Set the page constant only if it is mapped to
                         * contiguous underlying disk blocks: that makes sure
                         * the corresponding RAID5 device cache is overwritten
                         * by this page as a whole. */
                        if ((rw == OBD_BRW_WRITE) &&
                            (nblocks == blocks_per_page) &&
                            mapping_cap_page_constant_write(inode->i_mapping))
                                SetPageConstant(page);

                        if (bio != NULL &&
                            can_be_merged(bio, sector) &&
                            bio_add_page(bio, page,
                                         blocksize * nblocks,
                                         page_offset) != 0)
                                continue;       /* added this frag OK */

                        if (bio != NULL) {
                                struct request_queue *q =
                                        bdev_get_queue(bio->bi_bdev);

                                /* Dang! I have to fragment this I/O */
                                CDEBUG(D_INODE, "bio++ sz %d vcnt %d(%d) "
                                       "sectors %d(%d) psg %d(%d) hsg %d(%d) "
                                       "sector %llu next %llu\n",
                                       bio->bi_size,
                                       bio->bi_vcnt, bio->bi_max_vecs,
                                       bio->bi_size >> 9, q->max_sectors,
                                       bio_phys_segments(q, bio),
                                       q->max_phys_segments,
                                       bio_hw_segments(q, bio),
                                       q->max_hw_segments,
                                       (unsigned long long)bio->bi_sector,
                                       (unsigned long long)sector);

                                record_start_io(iobuf, rw, bio->bi_size, exp);
                                rc = fsfilt_send_bio(rw, obd, inode, bio);
                                if (rc < 0) {
                                        CERROR("Can't send bio: %d\n", rc);
                                        record_finish_io(iobuf, rw, rc);
                                        goto out;
                                }
                        }

                        /* allocate new bio, limited by max BIO size, b=9945 */
                        bio = bio_alloc(GFP_NOIO, max(BIO_MAX_PAGES,
                                                      (npages - page_idx) *
                                                      blocks_per_page));
                        if (bio == NULL) {
                                CERROR("Can't allocate bio %u*%u = %u pages\n",
                                       (npages - page_idx), blocks_per_page,
                                       (npages - page_idx) * blocks_per_page);
                                rc = -ENOMEM;
                                goto out;
                        }
                        frags++;

                        bio->bi_bdev = inode->i_sb->s_bdev;
                        bio->bi_sector = sector;
                        bio->bi_end_io = dio_complete_routine;
                        bio->bi_private = iobuf;

                        rc = bio_add_page(bio, page,
                                          blocksize * nblocks, page_offset);
                        LASSERT(rc != 0);
                }
        }

        if (bio != NULL) {
                record_start_io(iobuf, rw, bio->bi_size, exp);
                rc = fsfilt_send_bio(rw, obd, inode, bio);
                if (rc < 0) {
                        CERROR("Can't send bio: %d\n", rc);
                        record_finish_io(iobuf, rw, rc);
                }
        }

 out:
        cfs_wait_event(iobuf->dr_wait,
                       cfs_atomic_read(&iobuf->dr_numreqs) == 0);

        if (rw == OBD_BRW_READ) {
                lprocfs_oh_tally(&obd->u.filter.fo_filter_stats.
                                  hist[BRW_R_DIO_FRAGS],
                                 frags);
                lprocfs_oh_tally_log2(&obd->u.filter.
                                       fo_filter_stats.hist[BRW_R_IO_TIME],
                                      jiffies - start_time);
                if (exp->exp_nid_stats && exp->exp_nid_stats->nid_brw_stats) {
                        lprocfs_oh_tally(&exp->exp_nid_stats->nid_brw_stats->
                                          hist[BRW_R_DIO_FRAGS],
                                         frags);
                        lprocfs_oh_tally_log2(&exp->exp_nid_stats->
                                        nid_brw_stats->hist[BRW_R_IO_TIME],
                                              jiffies - start_time);
                }
        } else {
                lprocfs_oh_tally(&obd->u.filter.fo_filter_stats.
                                  hist[BRW_W_DIO_FRAGS], frags);
                lprocfs_oh_tally_log2(&obd->u.filter.fo_filter_stats.
                                       hist[BRW_W_IO_TIME],
                                      jiffies - start_time);
                if (exp->exp_nid_stats && exp->exp_nid_stats->nid_brw_stats) {
                        lprocfs_oh_tally(&exp->exp_nid_stats->nid_brw_stats->
                                          hist[BRW_W_DIO_FRAGS],
                                         frags);
                        lprocfs_oh_tally_log2(&exp->exp_nid_stats->
                                        nid_brw_stats->hist[BRW_W_IO_TIME],
                                              jiffies - start_time);
                }
        }

        if (rc == 0)
                rc = iobuf->dr_error;
        RETURN(rc);
}

/* Must be called with i_mutex taken for writes; this will drop it */
int filter_direct_io(int rw, struct dentry *dchild, struct filter_iobuf *iobuf,
                     struct obd_export *exp, struct iattr *attr,
                     struct obd_trans_info *oti, void **wait_handle)
{
        struct obd_device *obd = exp->exp_obd;
        struct inode *inode = dchild->d_inode;
        int blocks_per_page = CFS_PAGE_SIZE >> inode->i_blkbits;
        int rc = 0, rc2 = 0, create;
        cfs_semaphore_t *sem;
        ENTRY;

        LASSERTF(iobuf->dr_npages <= iobuf->dr_max_pages, "%d,%d\n",
                 iobuf->dr_npages, iobuf->dr_max_pages);
        LASSERT(iobuf->dr_npages <= OBDFILTER_CREATED_SCRATCHPAD_ENTRIES);

        if (rw == OBD_BRW_READ) {
                if (iobuf->dr_npages == 0)
                        RETURN(0);
                create = 0;
                sem = NULL;
        } else {
                LASSERTF(rw == OBD_BRW_WRITE, "%x\n", rw);
                LASSERT(iobuf->dr_npages > 0);
                create = 1;
                sem = &obd->u.filter.fo_alloc_lock;

                lquota_enforce(filter_quota_interface_ref, obd,
                               iobuf->dr_ignore_quota);
        }

        rc = fsfilt_map_inode_pages(obd, inode, iobuf->dr_pages,
                                    iobuf->dr_npages, iobuf->dr_blocks,
                                    obdfilter_created_scratchpad, create, sem);

        if (rw == OBD_BRW_WRITE) {
                if (rc == 0) {
                        filter_tally(exp, iobuf->dr_pages,
                                     iobuf->dr_npages, iobuf->dr_blocks,
                                     blocks_per_page, 1);
                        if (attr->ia_size > i_size_read(inode))
                                attr->ia_valid |= ATTR_SIZE;
                        rc = fsfilt_setattr(obd, dchild,
                                            oti->oti_handle, attr, 0);
                }

                UNLOCK_INODE_MUTEX(inode);
                rc2 = filter_finish_transno(exp, inode, oti, 0, 0);
                if (rc2 != 0)
                        CERROR("can't close transaction: %d\n", rc2);
                if (wait_handle)
                        rc2 = fsfilt_commit_async(obd, inode, oti->oti_handle,
                                                  wait_handle);
                else
                        rc2 = fsfilt_commit(obd, inode, oti->oti_handle, 0);
                if (rc == 0)
                        rc = rc2;
                if (rc != 0)
                        RETURN(rc);
        } else if (rc == 0) {
                filter_tally(exp, iobuf->dr_pages, iobuf->dr_npages,
                             iobuf->dr_blocks, blocks_per_page, 0);
        }

        RETURN(filter_do_bio(exp, inode, iobuf, rw));
}

/* See if there are unallocated parts in the given file region */
static int filter_range_is_mapped(struct inode *inode, obd_size offset, int len)
{
        sector_t (*fs_bmap)(struct address_space *, sector_t) =
                inode->i_mapping->a_ops->bmap;
        int j;

        /* We can't know if we are overwriting or not */
        if (fs_bmap == NULL)
                return 0;

        offset >>= inode->i_blkbits;
        len >>= inode->i_blkbits;
        for (j = 0; j <= len; j++)
                if (fs_bmap(inode->i_mapping, offset + j) == 0)
                        return 0;
        return 1;
}

/*
 * Interesting use cases for how this code interacts with the VM:
 *
 * - VM writeout -- it shouldn't see our pages, as we don't mark them
 *   dirty.  However, the VM can find a partial page left dirty by
 *   truncate; in that case the usual writeout path is used, unless our
 *   write rewrites that page - then we drop PG_dirty with PG_lock held.
 */
int filter_commitrw_write(struct obd_export *exp, struct obdo *oa,
                          int objcount, struct obd_ioobj *obj,
                          struct niobuf_remote *nb, int niocount,
                          struct niobuf_local *res, struct obd_trans_info *oti,
                          int rc)
{
        struct niobuf_local *lnb;
        struct filter_iobuf *iobuf = NULL;
        struct lvfs_run_ctxt saved;
        struct fsfilt_objinfo fso;
        struct iattr iattr = { 0 };
        struct inode *inode = res->dentry->d_inode;
        unsigned long now = jiffies;
        int i, err, cleanup_phase = 0;
        struct obd_device *obd = exp->exp_obd;
        struct filter_obd *fo = &obd->u.filter;
        void *wait_handle = NULL;
        int total_size = 0;
        unsigned int qcids[MAXQUOTAS] = { oa->o_uid, oa->o_gid };
        int rec_pending[MAXQUOTAS] = { 0, 0 }, quota_pages = 0;
        int sync_journal_commit = obd->u.filter.fo_syncjournal;
        ENTRY;

        LASSERT(oti != NULL);
        LASSERT(objcount == 1);
        LASSERT(current->journal_info == NULL);

        iobuf = filter_iobuf_get(&obd->u.filter, oti);
        if (IS_ERR(iobuf))
                GOTO(cleanup, rc = PTR_ERR(iobuf));
        cleanup_phase = 1;

        fso.fso_dentry = res->dentry;
        fso.fso_bufcnt = obj->ioo_bufcnt;

        iobuf->dr_ignore_quota = 0;
        for (i = 0, lnb = res; i < niocount; i++, lnb++) {
                loff_t this_size;
                __u32 flags = lnb->flags;

                if (filter_range_is_mapped(inode, lnb->offset, lnb->len)) {
                        /* If overwriting an existing block,
                         * we don't need a grant */
                        if (!(flags & OBD_BRW_GRANTED) && lnb->rc == -ENOSPC)
                                lnb->rc = 0;
                }

                if (lnb->rc) { /* ENOSPC, network RPC error, etc. */
                        CDEBUG(D_INODE, "Skipping [%d] == %d\n", i, lnb->rc);
                        continue;
                }

                LASSERT(PageLocked(lnb->page));
                LASSERT(!PageWriteback(lnb->page));

                /* since write & truncate are serialized by the i_alloc_sem,
                 * even partial truncate should not leave dirty pages in
                 * the page cache */
                LASSERT(!PageDirty(lnb->page));

                SetPageUptodate(lnb->page);

                err = filter_iobuf_add_page(obd, iobuf, inode, lnb->page);
                LASSERT(err == 0);

                total_size += lnb->len;

                /* we expect these pages to be in offset order, but we'll
                 * be forgiving */
                this_size = lnb->offset + lnb->len;
                if (this_size > iattr.ia_size)
                        iattr.ia_size = this_size;

                /* if a page is a write-back page from the client cache and
                 * not from direct_io, or it is written by root, then mark
                 * the whole IO request as an ignore-quota request; a remote
                 * client can not bypass quota this way. */
                if (exp_connect_rmtclient(exp))
                        flags &= ~OBD_BRW_NOQUOTA;
                if ((flags & OBD_BRW_NOQUOTA) ||
                    (flags & (OBD_BRW_FROM_GRANT | OBD_BRW_SYNC)) ==
                     OBD_BRW_FROM_GRANT)
                        iobuf->dr_ignore_quota = 1;

                if (!(lnb->flags & OBD_BRW_ASYNC)) {
                        sync_journal_commit = 1;
                }
        }

        /* we try to get enough quota to write here, and let ldiskfs
         * decide if it is out of quota or not b=14783 */
        rc = lquota_chkquota(filter_quota_interface_ref, obd, exp, qcids,
                             rec_pending, quota_pages, oti, LQUOTA_FLAGS_BLK,
                             (void *)inode, obj->ioo_bufcnt);

        push_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
        cleanup_phase = 2;

        fsfilt_check_slow(obd, now, "quota init");

        LOCK_INODE_MUTEX(inode);
        fsfilt_check_slow(obd, now, "i_mutex");
        oti->oti_handle = fsfilt_brw_start(obd, objcount, &fso, niocount, res,
                                           oti);
        if (IS_ERR(oti->oti_handle)) {
                UNLOCK_INODE_MUTEX(inode);
                rc = PTR_ERR(oti->oti_handle);
                CDEBUG(rc == -ENOSPC ? D_INODE : D_ERROR,
                       "error starting transaction: rc = %d\n", rc);
                oti->oti_handle = NULL;
                GOTO(cleanup, rc);
        }
        /* have to call fsfilt_commit() from this point on */

        fsfilt_check_slow(obd, now, "brw_start");

        i = OBD_MD_FLATIME | OBD_MD_FLMTIME | OBD_MD_FLCTIME;

        /* If the inode still has SUID+SGID bits set (see filter_precreate())
         * then we will accept the UID+GID if sent by the client for
         * initializing the ownership of this inode.  We only allow this to
         * happen once (so clear these bits) and later only allow setattr. */
        if (inode->i_mode & S_ISUID)
                i |= OBD_MD_FLUID;
        if (inode->i_mode & S_ISGID)
                i |= OBD_MD_FLGID;

        iattr_from_obdo(&iattr, oa, i);
        if (iattr.ia_valid & (ATTR_UID | ATTR_GID)) {
                unsigned int save;

                CDEBUG(D_INODE, "update UID/GID to %lu/%lu\n",
                       (unsigned long)oa->o_uid, (unsigned long)oa->o_gid);

                cfs_cap_raise(CFS_CAP_SYS_RESOURCE);

                iattr.ia_valid |= ATTR_MODE;
                iattr.ia_mode = inode->i_mode;
                if (iattr.ia_valid & ATTR_UID)
                        iattr.ia_mode &= ~S_ISUID;
                if (iattr.ia_valid & ATTR_GID)
                        iattr.ia_mode &= ~S_ISGID;

                rc = filter_update_fidea(exp, inode, oti->oti_handle, oa);

                /* To avoid problems with quotas, UID and GID must be set
                 * in the inode before filter_direct_io() - see bug 10357. */
                save = iattr.ia_valid;
                iattr.ia_valid &= (ATTR_UID | ATTR_GID);
                rc = fsfilt_setattr(obd, res->dentry, oti->oti_handle, &iattr, 0);
                CDEBUG(D_QUOTA, "set uid(%u)/gid(%u) to ino(%lu). rc(%d)\n",
                       iattr.ia_uid, iattr.ia_gid, inode->i_ino, rc);
                iattr.ia_valid = save & ~(ATTR_UID | ATTR_GID);
        }

        /* filter_direct_io drops i_mutex */
        rc = filter_direct_io(OBD_BRW_WRITE, res->dentry, iobuf, exp, &iattr,
                              oti, sync_journal_commit ? &wait_handle : NULL);

        obdo_from_inode(oa, inode, NULL,
                        (rc == 0 ? FILTER_VALID_FLAGS : 0) |
                        OBD_MD_FLUID | OBD_MD_FLGID);

        lquota_getflag(filter_quota_interface_ref, obd, oa);

        fsfilt_check_slow(obd, now, "direct_io");

        err = fsfilt_commit_wait(obd, inode, wait_handle);
        if (err) {
                CERROR("Failure to commit OST transaction (%d)?\n", err);
                if (rc == 0)
                        rc = err;
        }

        if (obd->obd_replayable && !rc && wait_handle)
                LASSERTF(oti->oti_transno <= obd->obd_last_committed,
                         "oti_transno "LPU64" last_committed "LPU64"\n",
                         oti->oti_transno, obd->obd_last_committed);

        fsfilt_check_slow(obd, now, "commitrw commit");
        lquota_pending_commit(filter_quota_interface_ref, obd, qcids,
                              rec_pending, quota_pages);
        filter_grant_commit(exp, niocount, res);

 cleanup:
        switch (cleanup_phase) {
        case 2:
                pop_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
                LASSERT(current->journal_info == NULL);
        case 1:
                filter_iobuf_put(&obd->u.filter, iobuf, oti);
        case 0:
                /* lnb->page automatically returns back into the per-thread
                 * page pool */
                break;
        }

        /* trigger quota pre-acquire */
        err = lquota_adjust(filter_quota_interface_ref, obd, qcids, NULL, rc,
                            FSFILT_OP_CREATE);
        CDEBUG(err ? D_ERROR : D_QUOTA, "filter adjust qunit! "
               "(rc:%d, uid:%u, gid:%u)\n",
               err, qcids[USRQUOTA], qcids[GRPQUOTA]);
        if (qcids[USRQUOTA] != oa->o_uid || qcids[GRPQUOTA] != oa->o_gid) {
                qcids[USRQUOTA] = oa->o_uid;
                qcids[GRPQUOTA] = oa->o_gid;
                err = lquota_adjust(filter_quota_interface_ref, obd, qcids,
                                    NULL, rc, FSFILT_OP_CREATE);
                CDEBUG(err ? D_ERROR : D_QUOTA, "filter adjust qunit! "
                       "(rc:%d, uid:%u, gid:%u)\n",
                       err, qcids[USRQUOTA], qcids[GRPQUOTA]);
        }

        for (i = 0, lnb = res; i < niocount; i++, lnb++) {
                if (lnb->page == NULL)
                        continue;

                if (rc)
                        /* If the write has failed, the page cache may
                         * not be consistent with what is on disk, so
                         * force pages to be reread the next time they
                         * are accessed */
                        ClearPageUptodate(lnb->page);

                LASSERT(PageLocked(lnb->page));
                unlock_page(lnb->page);

                page_cache_release(lnb->page);
        }

        if (fo->fo_writethrough_cache == 0 ||
            i_size_read(inode) > fo->fo_readcache_max_filesize)
                filter_release_cache(obd, obj, nb, inode);
        up_read(&inode->i_alloc_sem);

        RETURN(rc);
}