/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2012, Whamcloud, Inc.
 *
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lustre/obdfilter/filter_io_26.c
 *
 * Author: Peter Braam <braam@clusterfs.com>
 * Author: Andreas Dilger <adilger@clusterfs.com>
 * Author: Phil Schwan <phil@clusterfs.com>
 */

#ifndef AUTOCONF_INCLUDED
#include <linux/config.h>
#endif
#include <linux/module.h>
#include <linux/pagemap.h> /* XXX kill me soon */
#include <linux/version.h>
#include <linux/buffer_head.h>

#define DEBUG_SUBSYSTEM S_FILTER

#include <obd_class.h>
#include <lustre_fsfilt.h>
#include <lustre_quota.h>
#include "filter_internal.h"

/* 512byte block min */
#define MAX_BLOCKS_PER_PAGE (CFS_PAGE_SIZE / 512)
struct filter_iobuf {
        cfs_atomic_t       dr_numreqs;  /* number of reqs being processed */
        cfs_waitq_t        dr_wait;
        int                dr_max_pages;
        int                dr_npages;
        int                dr_error;
        struct page      **dr_pages;
        unsigned long     *dr_blocks;
        unsigned int       dr_ignore_quota:1;
        struct filter_obd *dr_filter;
};
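
/*
 * Typical iobuf lifecycle as driven by the OST I/O threads (an
 * illustrative sketch only - the real callers live elsewhere, e.g.
 * filter_commitrw_write() below):
 *
 *      iobuf = filter_iobuf_get(&obd->u.filter, oti);
 *      for each local niobuf:
 *              filter_iobuf_add_page(obd, iobuf, inode, lnb->page);
 *      rc = filter_direct_io(OBD_BRW_WRITE, dchild, iobuf, exp,
 *                            &iattr, oti, &wait_handle);
 *      filter_iobuf_put(&obd->u.filter, iobuf, oti);
 */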

static void record_start_io(struct filter_iobuf *iobuf, int rw, int size,
                            struct obd_export *exp)
{
        struct filter_obd *filter = iobuf->dr_filter;

        cfs_atomic_inc(&iobuf->dr_numreqs);

        if (rw == OBD_BRW_READ) {
                cfs_atomic_inc(&filter->fo_r_in_flight);
                lprocfs_oh_tally(&filter->fo_filter_stats.hist[BRW_R_RPC_HIST],
                                 cfs_atomic_read(&filter->fo_r_in_flight));
                lprocfs_oh_tally_log2(&filter->
                                       fo_filter_stats.hist[BRW_R_DISK_IOSIZE],
                                      size);
                if (exp->exp_nid_stats && exp->exp_nid_stats->nid_brw_stats) {
                        lprocfs_oh_tally(&exp->exp_nid_stats->nid_brw_stats->
                                          hist[BRW_R_RPC_HIST],
                                         cfs_atomic_read(&filter->
                                                         fo_r_in_flight));
                        lprocfs_oh_tally_log2(&exp->exp_nid_stats->
                                        nid_brw_stats->hist[BRW_R_DISK_IOSIZE],
                                              size);
                }
        } else {
                cfs_atomic_inc(&filter->fo_w_in_flight);
                lprocfs_oh_tally(&filter->fo_filter_stats.hist[BRW_W_RPC_HIST],
                                 cfs_atomic_read(&filter->fo_w_in_flight));
                lprocfs_oh_tally_log2(&filter->
                                       fo_filter_stats.hist[BRW_W_DISK_IOSIZE],
                                      size);
                if (exp->exp_nid_stats && exp->exp_nid_stats->nid_brw_stats) {
                        lprocfs_oh_tally(&exp->exp_nid_stats->nid_brw_stats->
                                          hist[BRW_W_RPC_HIST],
                                         cfs_atomic_read(&filter->
                                                         fo_w_in_flight));
                        lprocfs_oh_tally_log2(&exp->exp_nid_stats->
                                        nid_brw_stats->hist[BRW_W_DISK_IOSIZE],
                                              size);
                }
        }
}
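
/*
 * The tallies above feed the brw_stats histograms: lprocfs_oh_tally()
 * bumps the bucket for the current in-flight count directly, while
 * lprocfs_oh_tally_log2() buckets its argument by powers of two - e.g.
 * a 64KB disk I/O should land in bucket 16 (2^16) of the
 * BRW_*_DISK_IOSIZE histogram.  (Illustrative; see lprocfs for the
 * exact bucketing.)
 */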

static void record_finish_io(struct filter_iobuf *iobuf, int rw, int rc)
{
        struct filter_obd *filter = iobuf->dr_filter;

        /* CAVEAT EMPTOR: possibly in IRQ context
         * DO NOT record procfs stats here!!! */

        if (rw == OBD_BRW_READ)
                cfs_atomic_dec(&filter->fo_r_in_flight);
        else
                cfs_atomic_dec(&filter->fo_w_in_flight);

        if (cfs_atomic_dec_and_test(&iobuf->dr_numreqs))
                cfs_waitq_signal(&iobuf->dr_wait);
}

#ifdef HAVE_BIO_ENDIO_2ARG
#define DIO_RETURN(a)   return
static void dio_complete_routine(struct bio *bio, int error)
#else
#define DIO_RETURN(a)   return(a)
static int dio_complete_routine(struct bio *bio, unsigned int done, int error)
#endif
{
        struct filter_iobuf *iobuf = bio->bi_private;
        struct bio_vec *bvl;
        int i;

        /* CAVEAT EMPTOR: possibly in IRQ context
         * DO NOT record procfs stats here!!! */

#ifndef HAVE_BIO_ENDIO_2ARG
        /* The "bi_size" check was needed for kernels < 2.6.24 in order to
         * handle the case where a SCSI request error caused this callback
         * to be called before all of the biovecs had been processed.
         * Without this check the server thread will hang.  In newer kernels
         * the bio_end_io routine is never called for partial completions,
         * so this check is no longer needed. */
        if (bio->bi_size)                       /* Not complete */
                DIO_RETURN(1);
#endif

        if (unlikely(iobuf == NULL)) {
                CERROR("***** bio->bi_private is NULL!  This should never "
                       "happen.  Normally, I would crash here, but instead I "
                       "will dump the bio contents to the console.  Please "
                       "report this to <http://bugs.whamcloud.com/>, along "
                       "with any interesting messages leading up to this point "
                       "(like SCSI errors, perhaps).  Because bi_private is "
                       "NULL, I can't wake up the thread that initiated this "
                       "IO - you will probably have to reboot this node.\n");
                CERROR("bi_next: %p, bi_flags: %lx, bi_rw: %lu, bi_vcnt: %d, "
                       "bi_idx: %d, bi_size: %d, bi_end_io: %p, bi_cnt: %d, "
                       "bi_private: %p\n", bio->bi_next, bio->bi_flags,
                       bio->bi_rw, bio->bi_vcnt, bio->bi_idx, bio->bi_size,
                       bio->bi_end_io, cfs_atomic_read(&bio->bi_cnt),
                       bio->bi_private);
                DIO_RETURN(0);
        }

        /* the check is outside of the cycle for performance reason -bzzz */
        if (!cfs_test_bit(BIO_RW, &bio->bi_rw)) {
                bio_for_each_segment(bvl, bio, i) {
                        if (likely(error == 0))
                                SetPageUptodate(bvl->bv_page);
                        LASSERT(PageLocked(bvl->bv_page));
                        ClearPageConstant(bvl->bv_page);
                }
                record_finish_io(iobuf, OBD_BRW_READ, error);
        } else {
                if (mapping_cap_page_constant_write(iobuf->dr_pages[0]->
                                                    mapping)) {
                        bio_for_each_segment(bvl, bio, i) {
                                ClearPageConstant(bvl->bv_page);
                        }
                }
                record_finish_io(iobuf, OBD_BRW_WRITE, error);
        }

        /* any real error is good enough -bzzz */
        if (error != 0 && iobuf->dr_error == 0)
                iobuf->dr_error = error;

        /* Completed bios used to be chained off iobuf->dr_bios and freed in
         * filter_clear_dreq().  It was then possible to exhaust the biovec-256
         * mempool when serious on-disk fragmentation was encountered,
         * deadlocking the OST.  The bios are now released as soon as complete
         * so the pool cannot be exhausted while IOs are competing. bug 10076 */
        bio_put(bio);
        DIO_RETURN(0);
}
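
/*
 * Note: HAVE_BIO_ENDIO_2ARG is presumably set by the build-time kernel
 * feature probes: on kernels where bio_end_io_t takes just (bio, error)
 * the void variant above is compiled, while older kernels get the
 * three-argument variant that is told how many bytes completed and
 * returns a status.
 */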

static int can_be_merged(struct bio *bio, sector_t sector)
{
        unsigned int size = bio->bi_size >> 9;

        return bio->bi_sector + size == sector ? 1 : 0;
}
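
/*
 * For example, a bio starting at sector 8 that already carries 4KB
 * (8 x 512-byte sectors) ends at sector 16, so a fragment mapped to
 * sector 16 can be merged into it, while a fragment at sector 24
 * would force a new bio.
 */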

struct filter_iobuf *filter_alloc_iobuf(struct filter_obd *filter,
                                        int rw, int num_pages)
{
        struct filter_iobuf *iobuf;
        ENTRY;

        LASSERTF(rw == OBD_BRW_WRITE || rw == OBD_BRW_READ, "%x\n", rw);

        OBD_ALLOC(iobuf, sizeof(*iobuf));
        if (iobuf == NULL)
                goto failed_0;

        OBD_ALLOC(iobuf->dr_pages, num_pages * sizeof(*iobuf->dr_pages));
        if (iobuf->dr_pages == NULL)
                goto failed_1;

        OBD_ALLOC(iobuf->dr_blocks,
                  MAX_BLOCKS_PER_PAGE * num_pages * sizeof(*iobuf->dr_blocks));
        if (iobuf->dr_blocks == NULL)
                goto failed_2;

        iobuf->dr_filter = filter;
        cfs_waitq_init(&iobuf->dr_wait);
        cfs_atomic_set(&iobuf->dr_numreqs, 0);
        iobuf->dr_max_pages = num_pages;
        iobuf->dr_npages = 0;
        iobuf->dr_error = 0;

        RETURN(iobuf);

 failed_2:
        OBD_FREE(iobuf->dr_pages,
                 num_pages * sizeof(*iobuf->dr_pages));
 failed_1:
        OBD_FREE(iobuf, sizeof(*iobuf));
 failed_0:
        RETURN(ERR_PTR(-ENOMEM));
}

static void filter_clear_iobuf(struct filter_iobuf *iobuf)
{
        iobuf->dr_npages = 0;
        iobuf->dr_error = 0;
        cfs_atomic_set(&iobuf->dr_numreqs, 0);
}

void filter_free_iobuf(struct filter_iobuf *iobuf)
{
        int num_pages = iobuf->dr_max_pages;

        filter_clear_iobuf(iobuf);

        OBD_FREE(iobuf->dr_blocks,
                 MAX_BLOCKS_PER_PAGE * num_pages * sizeof(*iobuf->dr_blocks));
        OBD_FREE(iobuf->dr_pages,
                 num_pages * sizeof(*iobuf->dr_pages));
        OBD_FREE_PTR(iobuf);
}

void filter_iobuf_put(struct filter_obd *filter, struct filter_iobuf *iobuf,
                      struct obd_trans_info *oti)
{
        int thread_id = (oti && oti->oti_thread) ?
                        oti->oti_thread->t_id : -1;

        if (unlikely(thread_id < 0)) {
                filter_free_iobuf(iobuf);
                return;
        }

        LASSERTF(filter->fo_iobuf_pool[thread_id] == iobuf,
                 "iobuf mismatch for thread %d: pool %p iobuf %p\n",
                 thread_id, filter->fo_iobuf_pool[thread_id], iobuf);
        filter_clear_iobuf(iobuf);
}
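
/*
 * iobufs are pooled per OST thread: filter_iobuf_get() hands out the
 * thread's fo_iobuf_pool[] slot, and filter_iobuf_put() above merely
 * clears that slot for reuse, falling back to a real free only when
 * there is no thread context to return the buffer to.
 */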

int filter_iobuf_add_page(struct obd_device *obd, struct filter_iobuf *iobuf,
                          struct inode *inode, struct page *page)
{
        LASSERT(iobuf->dr_npages < iobuf->dr_max_pages);
        iobuf->dr_pages[iobuf->dr_npages++] = page;

        return 0;
}

int filter_do_bio(struct obd_export *exp, struct inode *inode,
                  struct filter_iobuf *iobuf, int rw)
{
        struct obd_device *obd = exp->exp_obd;
        int blocks_per_page = CFS_PAGE_SIZE >> inode->i_blkbits;
        struct page **pages = iobuf->dr_pages;
        int npages = iobuf->dr_npages;
        unsigned long *blocks = iobuf->dr_blocks;
        int total_blocks = npages * blocks_per_page;
        int sector_bits = inode->i_sb->s_blocksize_bits - 9;
        unsigned int blocksize = inode->i_sb->s_blocksize;
        struct bio *bio = NULL;
        int frags = 0;
        unsigned long start_time = jiffies;
        struct page *page;
        unsigned int page_offset;
        sector_t sector;
        int nblocks;
        int block_idx;
        int page_idx;
        int i;
        int rc = 0;
        ENTRY;

        LASSERT(iobuf->dr_npages == npages);
        LASSERT(total_blocks <= OBDFILTER_CREATED_SCRATCHPAD_ENTRIES);

        for (page_idx = 0, block_idx = 0;
             page_idx < npages;
             page_idx++, block_idx += blocks_per_page) {

                page = pages[page_idx];
                LASSERT(block_idx + blocks_per_page <= total_blocks);

                for (i = 0, page_offset = 0;
                     i < blocks_per_page;
                     i += nblocks, page_offset += blocksize * nblocks) {

                        nblocks = 1;

                        if (blocks[block_idx + i] == 0) { /* hole */
                                LASSERT(rw == OBD_BRW_READ);
                                memset(kmap(page) + page_offset, 0, blocksize);
                                kunmap(page);
                                continue;
                        }

                        sector = (sector_t)blocks[block_idx + i] << sector_bits;

                        /* Additional contiguous file blocks? */
                        while (i + nblocks < blocks_per_page &&
                               (sector + (nblocks << sector_bits)) ==
                               ((sector_t)blocks[block_idx + i + nblocks] <<
                                sector_bits))
                                nblocks++;

                        /* Only mark the page constant if it is mapped to
                         * contiguous underlying disk blocks.  That ensures
                         * the corresponding device cache (e.g. raid5) will
                         * be overwritten by this page as a whole. */
                        if ((rw == OBD_BRW_WRITE) &&
                            (nblocks == blocks_per_page) &&
                            mapping_cap_page_constant_write(inode->i_mapping))
                                SetPageConstant(page);

                        if (bio != NULL &&
                            can_be_merged(bio, sector) &&
                            bio_add_page(bio, page,
                                         blocksize * nblocks, page_offset) != 0)
                                continue;       /* added this frag OK */

                        if (bio != NULL) {
                                struct request_queue *q =
                                        bdev_get_queue(bio->bi_bdev);

                                /* Dang! I have to fragment this I/O */
                                CDEBUG(D_INODE, "bio++ sz %d vcnt %d(%d) "
                                       "sectors %d(%d) psg %d(%d) hsg %d(%d) "
                                       "sector %llu next %llu\n",
                                       bio->bi_size,
                                       bio->bi_vcnt, bio->bi_max_vecs,
                                       bio->bi_size >> 9, queue_max_sectors(q),
                                       bio_phys_segments(q, bio),
                                       queue_max_phys_segments(q),
                                       bio_hw_segments(q, bio),
                                       queue_max_hw_segments(q),
                                       (unsigned long long)bio->bi_sector,
                                       (unsigned long long)sector);

                                record_start_io(iobuf, rw, bio->bi_size, exp);
                                rc = fsfilt_send_bio(rw, obd, inode, bio);
                                if (rc < 0) {
                                        CERROR("Can't send bio: %d\n", rc);
                                        record_finish_io(iobuf, rw, rc);
                                        goto out;
                                }
                                frags++;
                        }

                        /* allocate new bio, limited by max BIO size, b=9945 */
                        bio = bio_alloc(GFP_NOIO, min(BIO_MAX_PAGES,
                                                      (npages - page_idx) *
                                                      blocks_per_page));
                        if (bio == NULL) {
                                CERROR("Can't allocate bio %u*%u = %u pages\n",
                                       (npages - page_idx), blocks_per_page,
                                       (npages - page_idx) * blocks_per_page);
                                rc = -ENOMEM;
                                goto out;
                        }

                        bio->bi_bdev = inode->i_sb->s_bdev;
                        bio->bi_sector = sector;
                        bio->bi_end_io = dio_complete_routine;
                        bio->bi_private = iobuf;

                        rc = bio_add_page(bio, page,
                                          blocksize * nblocks, page_offset);
                        LASSERT(rc != 0);
                }
        }

        if (bio != NULL) {
                record_start_io(iobuf, rw, bio->bi_size, exp);
                rc = fsfilt_send_bio(rw, obd, inode, bio);
                if (rc >= 0) {
                        frags++;
                        rc = 0;
                } else {
                        CERROR("Can't send bio: %d\n", rc);
                        record_finish_io(iobuf, rw, rc);
                }
        }

out:
        cfs_wait_event(iobuf->dr_wait,
                       cfs_atomic_read(&iobuf->dr_numreqs) == 0);

        if (rw == OBD_BRW_READ) {
                lprocfs_oh_tally(&obd->u.filter.fo_filter_stats.
                                  hist[BRW_R_DIO_FRAGS],
                                 frags);
                lprocfs_oh_tally_log2(&obd->u.filter.
                                       fo_filter_stats.hist[BRW_R_IO_TIME],
                                      jiffies - start_time);
                if (exp->exp_nid_stats && exp->exp_nid_stats->nid_brw_stats) {
                        lprocfs_oh_tally(&exp->exp_nid_stats->nid_brw_stats->
                                          hist[BRW_R_DIO_FRAGS],
                                         frags);
                        lprocfs_oh_tally_log2(&exp->exp_nid_stats->
                                        nid_brw_stats->hist[BRW_R_IO_TIME],
                                              jiffies - start_time);
                }
        } else {
                lprocfs_oh_tally(&obd->u.filter.fo_filter_stats.
                                  hist[BRW_W_DIO_FRAGS], frags);
                lprocfs_oh_tally_log2(&obd->u.filter.fo_filter_stats.
                                       hist[BRW_W_IO_TIME],
                                      jiffies - start_time);
                if (exp->exp_nid_stats && exp->exp_nid_stats->nid_brw_stats) {
                        lprocfs_oh_tally(&exp->exp_nid_stats->nid_brw_stats->
                                          hist[BRW_W_DIO_FRAGS],
                                         frags);
                        lprocfs_oh_tally_log2(&exp->exp_nid_stats->
                                        nid_brw_stats->hist[BRW_W_IO_TIME],
                                              jiffies - start_time);
                }
        }

        if (rc == 0)
                rc = iobuf->dr_error;
        RETURN(rc);
}
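
/*
 * Fragment accounting example (illustrative): a 1MB RPC whose blocks
 * were mapped to three discontiguous extents is submitted above as
 * three bios, so 3 is tallied into the BRW_*_DIO_FRAGS histogram and
 * the elapsed jiffies into BRW_*_IO_TIME.
 */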

/* Must be called with i_mutex taken for writes; this will drop it */
int filter_direct_io(int rw, struct dentry *dchild, struct filter_iobuf *iobuf,
                     struct obd_export *exp, struct iattr *attr,
                     struct obd_trans_info *oti, void **wait_handle)
{
        struct obd_device *obd = exp->exp_obd;
        struct inode *inode = dchild->d_inode;
        int blocks_per_page = CFS_PAGE_SIZE >> inode->i_blkbits;
        int rc, rc2, create;
        cfs_semaphore_t *sem;
        ENTRY;

        LASSERTF(iobuf->dr_npages <= iobuf->dr_max_pages, "%d,%d\n",
                 iobuf->dr_npages, iobuf->dr_max_pages);
        LASSERT(iobuf->dr_npages <= OBDFILTER_CREATED_SCRATCHPAD_ENTRIES);

        if (rw == OBD_BRW_READ) {
                if (iobuf->dr_npages == 0)
                        RETURN(0);
                create = 0;
                sem = NULL;
        } else {
                LASSERTF(rw == OBD_BRW_WRITE, "%x\n", rw);
                LASSERT(iobuf->dr_npages > 0);
                create = 1;
                sem = &obd->u.filter.fo_alloc_lock;

                lquota_enforce(filter_quota_interface_ref, obd,
                               iobuf->dr_ignore_quota);
        }

        if (rw == OBD_BRW_WRITE &&
            OBD_FAIL_CHECK(OBD_FAIL_OST_MAPBLK_ENOSPC)) {
                rc = -ENOSPC;
        } else {
                rc = fsfilt_map_inode_pages(obd, inode, iobuf->dr_pages,
                                    iobuf->dr_npages, iobuf->dr_blocks,
                                    obdfilter_created_scratchpad, create, sem);
        }

        if (rw == OBD_BRW_WRITE) {
                if (rc == 0) {
                        filter_tally(exp, iobuf->dr_pages,
                                     iobuf->dr_npages, iobuf->dr_blocks,
                                     blocks_per_page, 1);
                        if (attr->ia_size > i_size_read(inode))
                                attr->ia_valid |= ATTR_SIZE;
                        rc = fsfilt_setattr(obd, dchild,
                                            oti->oti_handle, attr, 0);
                }

                UNLOCK_INODE_MUTEX(inode);

                /* Force commit to make the just-deleted blocks
                 * reusable. LU-456 */
                if (rc == -ENOSPC) {
                        fsfilt_commit(obd, inode, oti->oti_handle, 1);
                        RETURN(rc);
                }

                rc2 = filter_finish_transno(exp, inode, oti, 0, 0);
                if (rc2 != 0) {
                        CERROR("can't close transaction: %d\n", rc2);
                        if (rc == 0)
                                rc = rc2;
                }

                if (wait_handle)
                        rc2 = fsfilt_commit_async(obd, inode, oti->oti_handle,
                                                  wait_handle);
                else
                        rc2 = fsfilt_commit(obd, inode, oti->oti_handle, 0);
                if (rc == 0)
                        rc = rc2;
                if (rc != 0)
                        RETURN(rc);
        } else if (rc == 0) {
                filter_tally(exp, iobuf->dr_pages, iobuf->dr_npages,
                             iobuf->dr_blocks, blocks_per_page, 0);
        }

        RETURN(filter_do_bio(exp, inode, iobuf, rw));
}
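
/*
 * Rough ordering sketch for the write path above: the blocks are
 * allocated and mapped under fo_alloc_lock, i_size is updated while
 * i_mutex is still held, the journal handle is closed (synchronously
 * or via fsfilt_commit_async()), and only then does filter_do_bio()
 * issue the actual data bios.
 */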

/* See if there are unallocated parts in the given file region */
static int filter_range_is_mapped(struct inode *inode, obd_size offset, int len)
{
        sector_t (*fs_bmap)(struct address_space *, sector_t) =
                inode->i_mapping->a_ops->bmap;
        int j;

        /* We can't know if we are overwriting or not */
        if (fs_bmap == NULL)
                return 0;
        offset >>= inode->i_blkbits;
        len >>= inode->i_blkbits;
        for (j = 0; j <= len; j++)
                if (fs_bmap(inode->i_mapping, offset + j) == 0)
                        return 0;

        return 1;
}
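
/*
 * ->bmap returns the logical-to-physical block mapping, with 0 meaning
 * "unmapped"; e.g. on a 4KB-block filesystem, a 16KB region at offset 0
 * is only considered mapped here if file blocks 0-4 all bmap to a
 * non-zero physical block.
 */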

/*
 * obdfilter's interesting use cases in its interaction with the VM:
 *
 * - vm writeout -- it shouldn't see our pages, as we don't mark them
 *   dirty; the vm can, however, find a partial page left dirty by
 *   truncate, in which case the usual writeout path is used unless our
 *   write rewrites that page - then we drop PG_dirty with PG_lock held.
 */
int filter_commitrw_write(struct obd_export *exp, struct obdo *oa,
                          int objcount, struct obd_ioobj *obj,
                          struct niobuf_remote *nb, int niocount,
                          struct niobuf_local *res, struct obd_trans_info *oti,
                          int rc)
{
        struct niobuf_local *lnb;
        struct filter_iobuf *iobuf = NULL;
        struct lvfs_run_ctxt saved;
        struct fsfilt_objinfo fso;
        struct iattr iattr = { 0 };
        struct inode *inode = res->dentry->d_inode;
        unsigned long now = jiffies;
        int i, err, cleanup_phase = 0;
        struct obd_device *obd = exp->exp_obd;
        struct filter_obd *fo = &obd->u.filter;
        void *wait_handle = NULL;
        int total_size = 0;
        unsigned int qcids[MAXQUOTAS] = { oa->o_uid, oa->o_gid };
        int rec_pending[MAXQUOTAS] = { 0, 0 }, quota_pages = 0;
        int sync_journal_commit = obd->u.filter.fo_syncjournal;
        int retries = 0;
        ENTRY;

        LASSERT(oti != NULL);
        LASSERT(objcount == 1);
        LASSERT(current->journal_info == NULL);

        if (rc != 0)
                GOTO(cleanup, rc);

        iobuf = filter_iobuf_get(&obd->u.filter, oti);
        if (IS_ERR(iobuf))
                GOTO(cleanup, rc = PTR_ERR(iobuf));
        cleanup_phase = 1;

        fso.fso_dentry = res->dentry;
        fso.fso_bufcnt = obj->ioo_bufcnt;

        iobuf->dr_ignore_quota = 0;
        for (i = 0, lnb = res; i < niocount; i++, lnb++) {
                loff_t this_size;
                __u32 flags = lnb->flags;

                if (filter_range_is_mapped(inode, lnb->offset, lnb->len)) {
                        /* If overwriting an existing block,
                         * we don't need a grant */
                        if (!(flags & OBD_BRW_GRANTED) && lnb->rc == -ENOSPC)
                                lnb->rc = 0;
                } else {
                        quota_pages++;
                }

                if (lnb->rc) { /* ENOSPC, network RPC error, etc. */
                        CDEBUG(D_INODE, "Skipping [%d] == %d\n", i, lnb->rc);
                        continue;
                }

                LASSERT(PageLocked(lnb->page));
                LASSERT(!PageWriteback(lnb->page));

                /* since write & truncate are serialized by the i_alloc_sem,
                 * even a partial truncate should not leave dirty pages in
                 * the page cache */
                LASSERT(!PageDirty(lnb->page));

                SetPageUptodate(lnb->page);

                err = filter_iobuf_add_page(obd, iobuf, inode, lnb->page);
                LASSERT(err == 0);

                total_size += lnb->len;

                /* we expect these pages to be in offset order, but we'll
                 * be forgiving */
                this_size = lnb->offset + lnb->len;
                if (this_size > iattr.ia_size)
                        iattr.ia_size = this_size;

                /* if one page is a write-back page from the client cache
                 * and not from direct_io, or it is written by root, then
                 * mark the whole I/O request as an ignore-quota request;
                 * a remote client cannot break through quota. */
                if (exp_connect_rmtclient(exp))
                        flags &= ~OBD_BRW_NOQUOTA;
                if ((flags & OBD_BRW_NOQUOTA) ||
                    (flags & (OBD_BRW_FROM_GRANT | OBD_BRW_SYNC)) ==
                    OBD_BRW_FROM_GRANT)
                        iobuf->dr_ignore_quota = 1;

                if (!(lnb->flags & OBD_BRW_ASYNC))
                        sync_journal_commit = 1;
        }

        /* we try to get enough quota to write here, and let ldiskfs
         * decide if it is out of quota or not b=14783 */
        rc = lquota_chkquota(filter_quota_interface_ref, obd, exp, qcids,
                             rec_pending, quota_pages, oti, LQUOTA_FLAGS_BLK,
                             (void *)inode, obj->ioo_bufcnt);
        if (rc == -ENOTCONN)
                GOTO(cleanup, rc);
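
        /*
         * Summary of the quota-bypass decision made in the loop above
         * (illustrative):
         *
         *      OBD_BRW_NOQUOTA set                        -> ignore quota
         *      OBD_BRW_FROM_GRANT set, OBD_BRW_SYNC clear -> ignore quota
         *      anything else                              -> enforce quota
         *
         * Remote clients get OBD_BRW_NOQUOTA masked off first, so they
         * cannot bypass enforcement themselves.
         */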

        push_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
        cleanup_phase = 2;

        fsfilt_check_slow(obd, now, "quota init");

retry:
        LOCK_INODE_MUTEX(inode);
        fsfilt_check_slow(obd, now, "i_mutex");
        oti->oti_handle = fsfilt_brw_start(obd, objcount, &fso, niocount, res,
                                           oti);
        if (IS_ERR(oti->oti_handle)) {
                UNLOCK_INODE_MUTEX(inode);
                rc = PTR_ERR(oti->oti_handle);
                CDEBUG(rc == -ENOSPC ? D_INODE : D_ERROR,
                       "error starting transaction: rc = %d\n", rc);
                oti->oti_handle = NULL;
                GOTO(cleanup, rc);
        }
        /* have to call fsfilt_commit() from this point on */

        fsfilt_check_slow(obd, now, "brw_start");

        /* Locking order: i_mutex -> journal_lock -> dqptr_sem. LU-952 */
        ll_vfs_dq_init(inode);

        i = OBD_MD_FLATIME | OBD_MD_FLMTIME | OBD_MD_FLCTIME;

        /* If the inode still has SUID+SGID bits set (see filter_precreate())
         * then we will accept the UID+GID if sent by the client for
         * initializing the ownership of this inode.  We only allow this to
         * happen once (so clear these bits) and later only allow setattr. */
        if (inode->i_mode & S_ISUID)
                i |= OBD_MD_FLUID;
        if (inode->i_mode & S_ISGID)
                i |= OBD_MD_FLGID;

        iattr_from_obdo(&iattr, oa, i);
        if (iattr.ia_valid & (ATTR_UID | ATTR_GID)) {
                unsigned int save;

                CDEBUG(D_INODE, "update UID/GID to %lu/%lu\n",
                       (unsigned long)oa->o_uid, (unsigned long)oa->o_gid);

                cfs_cap_raise(CFS_CAP_SYS_RESOURCE);

                iattr.ia_valid |= ATTR_MODE;
                iattr.ia_mode = inode->i_mode;
                if (iattr.ia_valid & ATTR_UID)
                        iattr.ia_mode &= ~S_ISUID;
                if (iattr.ia_valid & ATTR_GID)
                        iattr.ia_mode &= ~S_ISGID;

                rc = filter_update_fidea(exp, inode, oti->oti_handle, oa);

                /* To avoid problems with quotas, UID and GID must be set
                 * in the inode before filter_direct_io() - see bug 10357. */
                save = iattr.ia_valid;
                iattr.ia_valid &= (ATTR_UID | ATTR_GID);
                rc = fsfilt_setattr(obd, res->dentry, oti->oti_handle,
                                    &iattr, 0);
                CDEBUG(D_QUOTA, "set uid(%u)/gid(%u) to ino(%lu). rc(%d)\n",
                       iattr.ia_uid, iattr.ia_gid, inode->i_ino, rc);
                iattr.ia_valid = save & ~(ATTR_UID | ATTR_GID);
        }

        /* filter_direct_io drops i_mutex */
        rc = filter_direct_io(OBD_BRW_WRITE, res->dentry, iobuf, exp, &iattr,
                              oti, sync_journal_commit ? &wait_handle : NULL);
        if (rc == -ENOSPC && retries++ < 3) {
                CDEBUG(D_INODE, "retry after force commit, retries:%d\n",
                       retries);
                oti->oti_handle = NULL;
                fsfilt_check_slow(obd, now, "direct_io");
                goto retry;
        }

        obdo_from_inode(oa, inode, NULL,
                        (rc == 0 ? FILTER_VALID_FLAGS : 0) |
                        OBD_MD_FLUID | OBD_MD_FLGID);

        lquota_getflag(filter_quota_interface_ref, obd, oa);

        fsfilt_check_slow(obd, now, "direct_io");

        if (wait_handle)
                err = fsfilt_commit_wait(obd, inode, wait_handle);
        else
                err = 0;

        if (err) {
                CERROR("Failure to commit OST transaction (%d)?\n", err);
                if (rc == 0)
                        rc = err;
        }

        /* In rare cases fsfilt_commit_wait() will wake up and return after
         * the transaction has finished its work and updated j_commit_sequence
         * but the commit callbacks have not been run yet.  Wait here until
         * that is finished so that clients requesting sync IO don't see the
         * reply transno < last_committed. LU-753 */
        if (unlikely(obd->obd_replayable && !rc && wait_handle &&
                     oti->oti_transno > obd->obd_last_committed)) {
                struct l_wait_info lwi =
                        LWI_TIMEOUT_INTERVAL(cfs_time_seconds(5),
                                             (cfs_duration_t)((HZ + 4)/5),
                                             NULL, NULL);

                rc = l_wait_event(obd->obd_next_transno_waitq,
                                  oti->oti_transno <= obd->obd_last_committed,
                                  &lwi);

                /* commit callback isn't done after waiting for 5 secs ? */
                if (unlikely(oti->oti_transno > obd->obd_last_committed))
                        CERROR("transno:"LPU64" > last_committed:"LPU64"\n",
                               oti->oti_transno, obd->obd_last_committed);
        }

        fsfilt_check_slow(obd, now, "commitrw commit");

cleanup:
        lquota_pending_commit(filter_quota_interface_ref, obd, qcids,
                              rec_pending, 1);

        filter_grant_commit(exp, niocount, res);

        switch (cleanup_phase) {
        case 2:
                pop_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
                LASSERT(current->journal_info == NULL);
        case 1:
                filter_iobuf_put(&obd->u.filter, iobuf, oti);
        case 0:
                /*
                 * lnb->page automatically returns back into per-thread page
                 * pool (bug 5137)
                 */
                break;
        }

        /* trigger quota pre-acquire */
        err = lquota_adjust(filter_quota_interface_ref, obd, qcids, NULL, rc,
                            FSFILT_OP_CREATE);
        CDEBUG(err ? D_ERROR : D_QUOTA, "filter adjust qunit! "
               "(rc:%d, uid:%u, gid:%u)\n",
               err, qcids[USRQUOTA], qcids[GRPQUOTA]);
        if (qcids[USRQUOTA] != oa->o_uid || qcids[GRPQUOTA] != oa->o_gid) {
                qcids[USRQUOTA] = oa->o_uid;
                qcids[GRPQUOTA] = oa->o_gid;
                err = lquota_adjust(filter_quota_interface_ref, obd, qcids,
                                    NULL, rc, FSFILT_OP_CREATE);
                CDEBUG(err ? D_ERROR : D_QUOTA, "filter adjust qunit! "
                       "(rc:%d, uid:%u, gid:%u)\n",
                       err, qcids[USRQUOTA], qcids[GRPQUOTA]);
        }

        for (i = 0, lnb = res; i < niocount; i++, lnb++) {
                if (lnb->page == NULL)
                        continue;

                if (rc)
                        /* If the write has failed, the page cache may
                         * not be consistent with what is on disk, so
                         * force pages to be reread next time it is
                         * accessed */
                        ClearPageUptodate(lnb->page);

                LASSERT(PageLocked(lnb->page));
                unlock_page(lnb->page);

                page_cache_release(lnb->page);
        }

        if (inode) {
                if (fo->fo_writethrough_cache == 0 ||
                    i_size_read(inode) > fo->fo_readcache_max_filesize)
                        filter_release_cache(obd, obj, nb, inode);
                up_read(&inode->i_alloc_sem);