/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * linux/fs/obdfilter/filter_io.c
 *
 * Copyright (c) 2001-2003 Cluster File Systems, Inc.
 * Author: Peter Braam <braam@clusterfs.com>
 * Author: Andreas Dilger <adilger@clusterfs.com>
 * Author: Phil Schwan <phil@clusterfs.com>
 *
 * This file is part of the Lustre file system, http://www.lustre.org
 * Lustre is a trademark of Cluster File Systems, Inc.
 *
 * You may have signed or agreed to another license before downloading
 * this software. If so, you are bound by the terms and conditions
 * of that agreement, and the following does not apply to you. See the
 * LICENSE file included with this distribution for more information.
 *
 * If you did not agree to a different license, then this copy of Lustre
 * is open source software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * In either case, Lustre is distributed in the hope that it will be
 * useful, but WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * license text for more details.
 */
#ifndef AUTOCONF_INCLUDED
#include <linux/config.h>
#endif
#include <linux/module.h>
#include <linux/pagemap.h> // XXX kill me soon
#include <linux/version.h>
#include <linux/buffer_head.h>

#define DEBUG_SUBSYSTEM S_FILTER

#include <obd_class.h>
#include <lustre_fsfilt.h>
#include <lustre_quota.h>
#include "filter_internal.h"
/* 512byte block min */
#define MAX_BLOCKS_PER_PAGE (CFS_PAGE_SIZE / 512)
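
/* Descriptor for one direct I/O request: the pages being transferred,
 * their on-disk block numbers, and the bookkeeping used to wait for all
 * of the submitted bios to complete. */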
struct filter_iobuf {
        atomic_t          dr_numreqs;  /* number of reqs being processed */
        wait_queue_head_t dr_wait;
        int               dr_max_pages;
        int               dr_npages;
        int               dr_error;
        struct page     **dr_pages;
        unsigned long    *dr_blocks;
        spinlock_t        dr_lock;     /* IRQ lock */
        unsigned int      dr_ignore_quota:1;
        struct filter_obd *dr_filter;
};
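
/* Account an I/O of 'size' bytes against the per-filter and per-export
 * brw statistics and bump the count of requests in flight; paired with
 * record_finish_io() called from bio completion. */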
static void record_start_io(struct filter_iobuf *iobuf, int rw, int size,
                            struct obd_export *exp)
{
        struct filter_obd *filter = iobuf->dr_filter;

        atomic_inc(&iobuf->dr_numreqs);

        if (rw == OBD_BRW_READ) {
                atomic_inc(&filter->fo_r_in_flight);
                lprocfs_oh_tally(&filter->fo_filter_stats.hist[BRW_R_RPC_HIST],
                                 atomic_read(&filter->fo_r_in_flight));
                lprocfs_oh_tally_log2(&filter->fo_filter_stats.hist[BRW_R_DISK_IOSIZE],
                                      size);
                lprocfs_oh_tally(&exp->exp_filter_data.fed_brw_stats.hist[BRW_R_RPC_HIST],
                                 atomic_read(&filter->fo_r_in_flight));
                lprocfs_oh_tally_log2(&exp->exp_filter_data.fed_brw_stats.hist[BRW_R_DISK_IOSIZE], size);
        } else {
                atomic_inc(&filter->fo_w_in_flight);
                lprocfs_oh_tally(&filter->fo_filter_stats.hist[BRW_W_RPC_HIST],
                                 atomic_read(&filter->fo_w_in_flight));
                lprocfs_oh_tally_log2(&filter->fo_filter_stats.hist[BRW_W_DISK_IOSIZE],
                                      size);
                lprocfs_oh_tally(&exp->exp_filter_data.fed_brw_stats.hist[BRW_W_RPC_HIST],
                                 atomic_read(&filter->fo_w_in_flight));
                lprocfs_oh_tally_log2(&exp->exp_filter_data.fed_brw_stats.hist[BRW_W_DISK_IOSIZE], size);
        }
}
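
/* Drop the in-flight counters taken in record_start_io() and wake the
 * submitting thread once the last outstanding request completes. */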
static void record_finish_io(struct filter_iobuf *iobuf, int rw, int rc)
{
        struct filter_obd *filter = iobuf->dr_filter;

        /* CAVEAT EMPTOR: possibly in IRQ context
         * DO NOT record procfs stats here!!! */

        if (rw == OBD_BRW_READ)
                atomic_dec(&filter->fo_r_in_flight);
        else
                atomic_dec(&filter->fo_w_in_flight);

        if (atomic_dec_and_test(&iobuf->dr_numreqs))
                wake_up(&iobuf->dr_wait);
}
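
/* bio completion callback: records the first error seen for the iobuf,
 * drops the in-flight accounting and releases the bio. */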
static int dio_complete_routine(struct bio *bio, unsigned int done, int error)
{
        struct filter_iobuf *iobuf = bio->bi_private;
        unsigned long        flags;

        /* CAVEAT EMPTOR: possibly in IRQ context
         * DO NOT record procfs stats here!!! */

        if (bio->bi_size)                       /* Not complete */
                return 1;

        if (unlikely(iobuf == NULL)) {
                CERROR("***** bio->bi_private is NULL! This should never "
                       "happen. Normally, I would crash here, but instead I "
                       "will dump the bio contents to the console. Please "
                       "report this to CFS, along with any interesting "
                       "messages leading up to this point (like SCSI errors, "
                       "perhaps). Because bi_private is NULL, I can't wake up "
                       "the thread that initiated this I/O -- so you will "
                       "probably have to reboot this node.\n");
                CERROR("bi_next: %p, bi_flags: %lx, bi_rw: %lu, bi_vcnt: %d, "
                       "bi_idx: %d, bi->size: %d, bi_end_io: %p, bi_cnt: %d, "
                       "bi_private: %p\n", bio->bi_next, bio->bi_flags,
                       bio->bi_rw, bio->bi_vcnt, bio->bi_idx, bio->bi_size,
                       bio->bi_end_io, atomic_read(&bio->bi_cnt),
                       bio->bi_private);
                return 0;
        }

        spin_lock_irqsave(&iobuf->dr_lock, flags);
        if (iobuf->dr_error == 0)
                iobuf->dr_error = error;
        spin_unlock_irqrestore(&iobuf->dr_lock, flags);

        record_finish_io(iobuf, test_bit(BIO_RW, &bio->bi_rw) ?
                         OBD_BRW_WRITE : OBD_BRW_READ, error);

        /* Completed bios used to be chained off iobuf->dr_bios and freed in
         * filter_clear_dreq(). It was then possible to exhaust the biovec-256
         * mempool when serious on-disk fragmentation was encountered,
         * deadlocking the OST. The bios are now released as soon as complete
         * so the pool cannot be exhausted while IOs are competing. bug 10076 */
        bio_put(bio);
        return 0;
}
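
/* Returns 1 if 'sector' directly follows the data already queued in
 * 'bio', so the next fragment can be merged into it. */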
static int can_be_merged(struct bio *bio, sector_t sector)
{
        unsigned int size;

        if (!bio)
                return 0;

        size = bio->bi_size >> 9;
        return bio->bi_sector + size == sector ? 1 : 0;
}
struct filter_iobuf *filter_alloc_iobuf(struct filter_obd *filter,
                                        int rw, int num_pages)
{
        struct filter_iobuf *iobuf;

        LASSERTF(rw == OBD_BRW_WRITE || rw == OBD_BRW_READ, "%x\n", rw);

        OBD_ALLOC(iobuf, sizeof(*iobuf));
        if (iobuf == NULL)
                goto failed_0;

        OBD_ALLOC(iobuf->dr_pages, num_pages * sizeof(*iobuf->dr_pages));
        if (iobuf->dr_pages == NULL)
                goto failed_1;

        OBD_ALLOC(iobuf->dr_blocks,
                  MAX_BLOCKS_PER_PAGE * num_pages * sizeof(*iobuf->dr_blocks));
        if (iobuf->dr_blocks == NULL)
                goto failed_2;

        iobuf->dr_filter = filter;
        init_waitqueue_head(&iobuf->dr_wait);
        atomic_set(&iobuf->dr_numreqs, 0);
        spin_lock_init(&iobuf->dr_lock);
        iobuf->dr_max_pages = num_pages;
        iobuf->dr_npages = 0;

        RETURN(iobuf);

 failed_2:
        OBD_FREE(iobuf->dr_pages,
                 num_pages * sizeof(*iobuf->dr_pages));
 failed_1:
        OBD_FREE(iobuf, sizeof(*iobuf));
 failed_0:
        RETURN(ERR_PTR(-ENOMEM));
}
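
/* Reset an iobuf for reuse; the page and block arrays stay allocated. */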
static void filter_clear_iobuf(struct filter_iobuf *iobuf)
{
        iobuf->dr_npages = 0;
        iobuf->dr_error = 0;
        atomic_set(&iobuf->dr_numreqs, 0);
}
void filter_free_iobuf(struct filter_iobuf *iobuf)
{
        int num_pages = iobuf->dr_max_pages;

        filter_clear_iobuf(iobuf);

        OBD_FREE(iobuf->dr_blocks,
                 MAX_BLOCKS_PER_PAGE * num_pages * sizeof(*iobuf->dr_blocks));
        OBD_FREE(iobuf->dr_pages,
                 num_pages * sizeof(*iobuf->dr_pages));
        OBD_FREE(iobuf, sizeof(*iobuf));
}
void filter_iobuf_put(struct filter_obd *filter, struct filter_iobuf *iobuf,
                      struct obd_trans_info *oti)
{
        int thread_id = oti ? oti->oti_thread_id : -1;

        if (unlikely(thread_id < 0)) {
                filter_free_iobuf(iobuf);
                return;
        }

        LASSERTF(filter->fo_iobuf_pool[thread_id] == iobuf,
                 "iobuf mismatch for thread %d: pool %p iobuf %p\n",
                 thread_id, filter->fo_iobuf_pool[thread_id], iobuf);
        filter_clear_iobuf(iobuf);
}
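
/* Queue one more page for the pending I/O; the matching block numbers
 * are filled in later by fsfilt_map_inode_pages(). */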
int filter_iobuf_add_page(struct obd_device *obd, struct filter_iobuf *iobuf,
                          struct inode *inode, struct page *page)
{
        LASSERT(iobuf->dr_npages < iobuf->dr_max_pages);
        iobuf->dr_pages[iobuf->dr_npages++] = page;

        return 0;
}
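
/* Map the iobuf pages onto as few bios as possible (merging physically
 * contiguous blocks), submit them, wait for completion and record the
 * fragment and timing statistics. */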
int filter_do_bio(struct obd_export *exp, struct inode *inode,
                  struct filter_iobuf *iobuf, int rw)
{
        struct obd_device *obd = exp->exp_obd;
        int blocks_per_page = CFS_PAGE_SIZE >> inode->i_blkbits;
        struct page **pages = iobuf->dr_pages;
        int npages = iobuf->dr_npages;
        unsigned long *blocks = iobuf->dr_blocks;
        int total_blocks = npages * blocks_per_page;
        int sector_bits = inode->i_sb->s_blocksize_bits - 9;
        unsigned int blocksize = inode->i_sb->s_blocksize;
        struct bio *bio = NULL;
        int frags = 0;
        unsigned long start_time = jiffies;
        struct page *page;
        unsigned int page_offset;
        sector_t sector;
        int nblocks;
        int block_idx, i;
        int page_idx;
        int rc = 0;
        ENTRY;

        LASSERT(iobuf->dr_npages == npages);
        LASSERT(total_blocks <= OBDFILTER_CREATED_SCRATCHPAD_ENTRIES);
        for (page_idx = 0, block_idx = 0;
             page_idx < npages;
             page_idx++, block_idx += blocks_per_page) {

                page = pages[page_idx];
                LASSERT(block_idx + blocks_per_page <= total_blocks);

                for (i = 0, page_offset = 0;
                     i < blocks_per_page;
                     i += nblocks, page_offset += blocksize * nblocks) {

                        nblocks = 1;

                        if (blocks[block_idx + i] == 0) {  /* hole */
                                LASSERT(rw == OBD_BRW_READ);
                                memset(kmap(page) + page_offset, 0, blocksize);
                                kunmap(page);
                                continue;
                        }

                        sector = (sector_t)blocks[block_idx + i] << sector_bits;

                        /* Additional contiguous file blocks? */
                        while (i + nblocks < blocks_per_page &&
                               (sector + (nblocks << sector_bits)) ==
                               ((sector_t)blocks[block_idx + i + nblocks] <<
                                sector_bits))
                                nblocks++;

                        if (bio != NULL &&
                            can_be_merged(bio, sector) &&
                            bio_add_page(bio, page,
                                         blocksize * nblocks, page_offset) != 0)
                                continue;       /* added this frag OK */
                        if (bio != NULL) {
                                struct request_queue *q =
                                        bdev_get_queue(bio->bi_bdev);

                                /* Dang! I have to fragment this I/O */
                                CDEBUG(D_INODE, "bio++ sz %d vcnt %d(%d) "
                                       "sectors %d(%d) psg %d(%d) hsg %d(%d)\n",
                                       bio->bi_size,
                                       bio->bi_vcnt, bio->bi_max_vecs,
                                       bio->bi_size >> 9, q->max_sectors,
                                       bio_phys_segments(q, bio),
                                       q->max_phys_segments,
                                       bio_hw_segments(q, bio),
                                       q->max_hw_segments);

                                record_start_io(iobuf, rw, bio->bi_size, exp);
                                rc = fsfilt_send_bio(rw, obd, inode, bio);
                                if (rc < 0) {
                                        CERROR("Can't send bio: %d\n", rc);
                                        record_finish_io(iobuf, rw, rc);
                                        goto out;
                                }
                                frags++;
                        }

                        /* allocate new bio */
                        bio = bio_alloc(GFP_NOIO,
                                        (npages - page_idx) * blocks_per_page);
                        if (bio == NULL) {
                                CERROR("Can't allocate bio %u*%u = %u pages\n",
                                       (npages - page_idx), blocks_per_page,
                                       (npages - page_idx) * blocks_per_page);
                                rc = -ENOMEM;
                                goto out;
                        }
                        bio->bi_bdev = inode->i_sb->s_bdev;
                        bio->bi_sector = sector;
                        bio->bi_end_io = dio_complete_routine;
                        bio->bi_private = iobuf;

                        rc = bio_add_page(bio, page,
                                          blocksize * nblocks, page_offset);
                        LASSERT(rc != 0);
                }
        }

        if (bio != NULL) {
                record_start_io(iobuf, rw, bio->bi_size, exp);
                rc = fsfilt_send_bio(rw, obd, inode, bio);
                if (rc >= 0) {
                        frags++;
                        rc = 0;
                } else {
                        CERROR("Can't send bio: %d\n", rc);
                        record_finish_io(iobuf, rw, rc);
                }
        }
 out:
        wait_event(iobuf->dr_wait, atomic_read(&iobuf->dr_numreqs) == 0);

        if (rw == OBD_BRW_READ) {
                lprocfs_oh_tally(&obd->u.filter.fo_filter_stats.hist[BRW_R_DIO_FRAGS],
                                 frags);
                lprocfs_oh_tally(&exp->exp_filter_data.fed_brw_stats.hist[BRW_R_DIO_FRAGS],
                                 frags);
                lprocfs_oh_tally_log2(&obd->u.filter.fo_filter_stats.hist[BRW_R_IO_TIME],
                                      jiffies - start_time);
                lprocfs_oh_tally_log2(&exp->exp_filter_data.fed_brw_stats.hist[BRW_R_IO_TIME], jiffies - start_time);
        } else {
                lprocfs_oh_tally(&obd->u.filter.fo_filter_stats.hist[BRW_W_DIO_FRAGS],
                                 frags);
                lprocfs_oh_tally(&exp->exp_filter_data.fed_brw_stats.hist[BRW_W_DIO_FRAGS],
                                 frags);
                lprocfs_oh_tally_log2(&obd->u.filter.fo_filter_stats.hist[BRW_W_IO_TIME],
                                      jiffies - start_time);
                lprocfs_oh_tally_log2(&exp->exp_filter_data.fed_brw_stats.hist[BRW_W_IO_TIME], jiffies - start_time);
        }

        if (rc == 0)
                rc = iobuf->dr_error;
        RETURN(rc);
}
/* These are our hacks to keep our directio/bh IO coherent with ext3's
 * page cache use. Most notably ext3 reads file data into the page
 * cache when it is zeroing the tail of partial-block truncates and
 * leaves it there, sometimes generating io from it at later truncates.
 * This removes the partial page and its buffers from the page cache,
 * so it should only ever cause a wait in rare cases, as otherwise we
 * always do full-page IO to the OST.
 *
 * The call to truncate_complete_page() will call journal_invalidatepage()
 * to free the buffers and drop the page from cache. The buffers should
 * not be dirty, because we already called fdatasync/fdatawait on them.
 */
static int filter_sync_inode_data(struct inode *inode, int locked)
{
        int rc = 0;

        /* This is nearly do_fsync(), without the waiting on the inode */
        /* XXX: in 2.6.16 (at least) we don't need to hold i_mutex over
         * filemap_fdatawrite() and filemap_fdatawait(), so we may no longer
         * need this lock here at all. */
        if (!locked)
                LOCK_INODE_MUTEX(inode);
        if (inode->i_mapping->nrpages) {
#ifdef PF_SYNCWRITE
                current->flags |= PF_SYNCWRITE;
#endif
                rc = filemap_fdatawrite(inode->i_mapping);
                if (rc == 0)
                        rc = filemap_fdatawait(inode->i_mapping);
#ifdef PF_SYNCWRITE
                current->flags &= ~PF_SYNCWRITE;
#endif
        }
        if (!locked)
                UNLOCK_INODE_MUTEX(inode);

        return rc;
}
/* Clear pages from the mapping before we do direct IO to that offset.
 * Now that the only source of such pages in the truncate path flushes
 * these pages to disk and then discards them, finding a cached page here
 * is an error condition. If the read cache is added back, this will
 * happen again. This check could be disabled until that time if we never
 * see the error below. */
static int filter_clear_page_cache(struct inode *inode,
                                   struct filter_iobuf *iobuf)
{
        struct page *page;
        int i, rc;

        rc = filter_sync_inode_data(inode, 0);
        if (rc != 0)
                RETURN(rc);

        /* be careful to call this after fsync_inode_data_buffers has waited
         * for IO to complete before we evict it from the cache */
        for (i = 0; i < iobuf->dr_npages; i++) {
                page = find_lock_page(inode->i_mapping,
                                      iobuf->dr_pages[i]->index);
                if (page == NULL)
                        continue;
                if (page->mapping != NULL) {
                        CERROR("page %lu (%d/%d) in page cache during write!\n",
                               page->index, i, iobuf->dr_npages);
                        wait_on_page_writeback(page);
                        ll_truncate_complete_page(page);
                }

                unlock_page(page);
                page_cache_release(page);
        }

        return 0;
}
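
/* Flush inode data and evict the page cache page that covers i_size,
 * if any; see the coherency comment above. */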
int filter_clear_truncated_page(struct inode *inode)
{
        struct page *page;
        int rc;

        /* Truncate on page boundary, so nothing to flush? */
        if (!(i_size_read(inode) & ~CFS_PAGE_MASK))
                return 0;

        rc = filter_sync_inode_data(inode, 1);
        if (rc != 0)
                RETURN(rc);

        /* be careful to call this after fsync_inode_data_buffers has waited
         * for IO to complete before we evict it from the cache */
        page = find_lock_page(inode->i_mapping,
                              i_size_read(inode) >> CFS_PAGE_SHIFT);
        if (page) {
                if (page->mapping != NULL) {
                        wait_on_page_writeback(page);
                        ll_truncate_complete_page(page);
                }
                unlock_page(page);
                page_cache_release(page);
        }

        return 0;
}
/* Must be called with i_mutex taken for writes; this will drop it */
int filter_direct_io(int rw, struct dentry *dchild, struct filter_iobuf *iobuf,
                     struct obd_export *exp, struct iattr *attr,
                     struct obd_trans_info *oti, void **wait_handle)
{
        struct obd_device *obd = exp->exp_obd;
        struct inode *inode = dchild->d_inode;
        int blocks_per_page = CFS_PAGE_SIZE >> inode->i_blkbits;
        int rc, rc2, create;
        struct semaphore *sem;
        ENTRY;

        LASSERTF(iobuf->dr_npages <= iobuf->dr_max_pages, "%d,%d\n",
                 iobuf->dr_npages, iobuf->dr_max_pages);
        LASSERT(iobuf->dr_npages <= OBDFILTER_CREATED_SCRATCHPAD_ENTRIES);

        if (rw == OBD_BRW_READ) {
                if (iobuf->dr_npages == 0)
                        RETURN(0);
                create = 0;
                sem = NULL;
        } else {
                LASSERTF(rw == OBD_BRW_WRITE, "%x\n", rw);
                LASSERT(iobuf->dr_npages > 0);
                create = 1;
                sem = &obd->u.filter.fo_alloc_lock;

                lquota_enforce(filter_quota_interface_ref, obd, iobuf->dr_ignore_quota);
        }

        rc = fsfilt_map_inode_pages(obd, inode, iobuf->dr_pages,
                                    iobuf->dr_npages, iobuf->dr_blocks,
                                    obdfilter_created_scratchpad, create, sem);
        if (rw == OBD_BRW_WRITE) {
                if (rc == 0) {
                        filter_tally(exp, iobuf->dr_pages,
                                     iobuf->dr_npages, iobuf->dr_blocks,
                                     blocks_per_page, 1);
                        if (attr->ia_size > i_size_read(inode))
                                attr->ia_valid |= ATTR_SIZE;
                        rc = fsfilt_setattr(obd, dchild,
                                            oti->oti_handle, attr, 0);
                }

                UNLOCK_INODE_MUTEX(inode);

                rc2 = filter_finish_transno(exp, oti, 0, 0);
                if (rc2 != 0) {
                        CERROR("can't close transaction: %d\n", rc2);
                        if (rc == 0)
                                rc = rc2;
                }

                rc2 = fsfilt_commit_async(obd, inode, oti->oti_handle,
                                          wait_handle);
                if (rc == 0)
                        rc = rc2;
                if (rc != 0)
                        RETURN(rc);
        } else if (rc == 0) {
                filter_tally(exp, iobuf->dr_pages, iobuf->dr_npages,
                             iobuf->dr_blocks, blocks_per_page, 0);
        }

        rc = filter_clear_page_cache(inode, iobuf);
        if (rc != 0)
                RETURN(rc);

        RETURN(filter_do_bio(exp, inode, iobuf, rw));
}
/* See if there are unallocated parts in given file region */
static int filter_range_is_mapped(struct inode *inode, obd_size offset, int len)
{
        sector_t (*fs_bmap)(struct address_space *, sector_t) =
                inode->i_mapping->a_ops->bmap;
        int j;

        /* We can't know if we are overwriting or not */
        if (fs_bmap == NULL)
                return 0;

        offset >>= inode->i_blkbits;
        len >>= inode->i_blkbits;

        for (j = 0; j <= len; j++)
                if (fs_bmap(inode->i_mapping, offset + j) == 0)
                        return 0;

        return 1;
}
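
/* Write path of the commitrw handler: collect the per-niobuf pages into
 * an iobuf, start a transaction, apply attribute updates, then hand the
 * pages to filter_direct_io() and wait for the commit. */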
int filter_commitrw_write(struct obd_export *exp, struct obdo *oa,
                          int objcount, struct obd_ioobj *obj, int niocount,
                          struct niobuf_local *res, struct obd_trans_info *oti,
                          int rc)
{
        struct niobuf_local *lnb;
        struct filter_iobuf *iobuf = NULL;
        struct lvfs_run_ctxt saved;
        struct fsfilt_objinfo fso;
        struct iattr iattr = { 0 };
        struct inode *inode = NULL;
        unsigned long now = jiffies;
        int i, err, cleanup_phase = 0;
        struct obd_device *obd = exp->exp_obd;
        void *wait_handle;
        int total_size = 0, rc2;
        unsigned int qcids[MAXQUOTAS] = {0, 0};
        ENTRY;

        LASSERT(oti != NULL);
        LASSERT(objcount == 1);
        LASSERT(current->journal_info == NULL);

        if (rc != 0)
                GOTO(cleanup, rc);
        /* Unfortunately, if quota master is too busy to handle the
         * pre-dqacq in time and quota hash on ost is used up, we
         * have to wait for the completion of in flight dqacq/dqrel,
         * then try again */
        if ((rc2 = lquota_chkquota(filter_quota_interface_ref, obd, oa->o_uid,
                                   oa->o_gid, niocount)) == QUOTA_RET_ACQUOTA) {
                OBD_FAIL_TIMEOUT(OBD_FAIL_OST_HOLD_WRITE_RPC, 90);
                lquota_acquire(filter_quota_interface_ref, obd, oa->o_uid,
                               oa->o_gid);
        }

        iobuf = filter_iobuf_get(&obd->u.filter, oti);
        if (IS_ERR(iobuf))
                GOTO(cleanup, rc = PTR_ERR(iobuf));
        cleanup_phase = 1;
        fso.fso_dentry = res->dentry;
        fso.fso_bufcnt = obj->ioo_bufcnt;
        inode = res->dentry->d_inode;

        iobuf->dr_ignore_quota = 0;
        for (i = 0, lnb = res; i < obj->ioo_bufcnt; i++, lnb++) {
                loff_t this_size;

                /* If overwriting an existing block, we don't need a grant */
                if (!(lnb->flags & OBD_BRW_GRANTED) && lnb->rc == -ENOSPC &&
                    filter_range_is_mapped(inode, lnb->offset, lnb->len))
                        lnb->rc = 0;

                if (lnb->rc) { /* ENOSPC, network RPC error, etc. */
                        CDEBUG(D_INODE, "Skipping [%d] == %d\n", i, lnb->rc);
                        continue;
                }

                err = filter_iobuf_add_page(obd, iobuf, inode, lnb->page);
                LASSERT(err == 0);

                total_size += lnb->len;

                /* we expect these pages to be in offset order, but we'll
                 * be forgiving */
                this_size = lnb->offset + lnb->len;
                if (this_size > iattr.ia_size)
                        iattr.ia_size = this_size;

                /* if one page is a write-back page from client cache, or it's
                 * written by root, then mark the whole io request as ignore
                 * quota request */
                if (lnb->flags & (OBD_BRW_FROM_GRANT | OBD_BRW_NOQUOTA))
                        iobuf->dr_ignore_quota = 1;
        }
        push_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
        cleanup_phase = 2;

        LOCK_INODE_MUTEX(inode);
        fsfilt_check_slow(obd, now, obd_timeout, "i_mutex");
        oti->oti_handle = fsfilt_brw_start(obd, objcount, &fso, niocount, res,
                                           oti);
        if (IS_ERR(oti->oti_handle)) {
                UNLOCK_INODE_MUTEX(inode);
                rc = PTR_ERR(oti->oti_handle);
                CDEBUG(rc == -ENOSPC ? D_INODE : D_ERROR,
                       "error starting transaction: rc = %d\n", rc);
                oti->oti_handle = NULL;
                GOTO(cleanup, rc);
        }
        /* have to call fsfilt_commit() from this point on */

        fsfilt_check_slow(obd, now, obd_timeout, "brw_start");
        i = OBD_MD_FLATIME | OBD_MD_FLMTIME | OBD_MD_FLCTIME;

        /* If the inode still has SUID+SGID bits set (see filter_precreate())
         * then we will accept the UID+GID if sent by the client for
         * initializing the ownership of this inode. We only allow this to
         * happen once (so clear these bits) and later only allow setattr. */
        if (inode->i_mode & S_ISUID)
                i |= OBD_MD_FLUID;
        if (inode->i_mode & S_ISGID)
                i |= OBD_MD_FLGID;

        iattr_from_obdo(&iattr, oa, i);
        if (iattr.ia_valid & (ATTR_UID | ATTR_GID)) {
                unsigned int save;

                CDEBUG(D_INODE, "update UID/GID to %lu/%lu\n",
                       (unsigned long)oa->o_uid, (unsigned long)oa->o_gid);

                cap_raise(current->cap_effective, CAP_SYS_RESOURCE);

                iattr.ia_valid |= ATTR_MODE;
                iattr.ia_mode = inode->i_mode;
                if (iattr.ia_valid & ATTR_UID)
                        iattr.ia_mode &= ~S_ISUID;
                if (iattr.ia_valid & ATTR_GID)
                        iattr.ia_mode &= ~S_ISGID;
                rc = filter_update_fidea(exp, inode, oti->oti_handle, oa);

                /* To avoid problems with quotas, UID and GID must be set
                 * in the inode before filter_direct_io() - see bug 10357. */
                save = iattr.ia_valid;
                iattr.ia_valid &= (ATTR_UID | ATTR_GID);
                rc = fsfilt_setattr(obd, res->dentry, oti->oti_handle, &iattr, 0);
                CDEBUG(D_QUOTA, "set uid(%u)/gid(%u) to ino(%lu). rc(%d)\n",
                       iattr.ia_uid, iattr.ia_gid, inode->i_ino, rc);
                iattr.ia_valid = save & ~(ATTR_UID | ATTR_GID);
        }
        /* filter_direct_io drops i_mutex */
        rc = filter_direct_io(OBD_BRW_WRITE, res->dentry, iobuf, exp, &iattr,
                              oti, &wait_handle);
        if (rc == 0)
                obdo_from_inode(oa, inode,
                                FILTER_VALID_FLAGS |OBD_MD_FLUID |OBD_MD_FLGID);
        else
                obdo_from_inode(oa, inode, OBD_MD_FLUID | OBD_MD_FLGID);

        lquota_getflag(filter_quota_interface_ref, obd, oa);

        fsfilt_check_slow(obd, now, obd_timeout, "direct_io");

        err = fsfilt_commit_wait(obd, inode, wait_handle);
        if (err) {
                CERROR("Failure to commit OST transaction (%d)?\n", err);
                rc = err;
        }

        if (obd->obd_replayable && !rc)
                LASSERTF(oti->oti_transno <= obd->obd_last_committed,
                         "oti_transno "LPU64" last_committed "LPU64"\n",
                         oti->oti_transno, obd->obd_last_committed);

        fsfilt_check_slow(obd, now, obd_timeout, "commitrw commit");
cleanup:
        filter_grant_commit(exp, niocount, res);

        switch (cleanup_phase) {
        case 2:
                pop_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
                LASSERT(current->journal_info == NULL);
        case 1:
                filter_iobuf_put(&obd->u.filter, iobuf, oti);
        case 0:
                /*
                 * lnb->page automatically returns back into per-thread page
                 * pool.
                 */
                break;
        }
        /* trigger quota pre-acquire */
        qcids[USRQUOTA] = oa->o_uid;
        qcids[GRPQUOTA] = oa->o_gid;
        err = lquota_adjust(filter_quota_interface_ref, obd, qcids, NULL, rc,
                            FSFILT_OP_CREATE);
        CDEBUG(err ? D_ERROR : D_QUOTA,
               "filter adjust qunit! (rc:%d)\n", err);