1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
6 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 only,
10 * as published by the Free Software Foundation.
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License version 2 for more details (a copy is included
16 * in the LICENSE file that accompanied this code).
18 * You should have received a copy of the GNU General Public License
19 * version 2 along with this program; If not, see
20 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
22 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
23 * CA 95054 USA or visit www.sun.com if you need additional information or
29 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
30 * Use is subject to license terms.
33 * This file is part of Lustre, http://www.lustre.org/
34 * Lustre is a trademark of Sun Microsystems, Inc.
36 * lustre/obdfilter/filter_io.c
38 * Author: Peter Braam <braam@clusterfs.com>
39 * Author: Andreas Dilger <adilger@clusterfs.com>
40 * Author: Phil Schwan <phil@clusterfs.com>
43 #define DEBUG_SUBSYSTEM S_FILTER
45 #ifndef AUTOCONF_INCLUDED
46 #include <linux/config.h>
48 #include <linux/module.h>
49 #include <linux/pagemap.h> // XXX kill me soon
50 #include <linux/version.h>
52 #include <obd_class.h>
53 #include <lustre_fsfilt.h>
54 #include "filter_internal.h"
/* Shared scratch buffer for object pre-creation bookkeeping.
 * NOTE(review): its users are not visible in this chunk -- presumably
 * filter.c's precreate path; confirm ownership/lifetime there. */
56 int *obdfilter_created_scratchpad;
58 /* Grab the dirty and seen grant announcements from the incoming obdo.
59 * We will later calculate the clients new grant and return it.
60 * Caller must hold osfs lock */
/* Absorb the client's grant/dirty report carried in the incoming obdo
 * into this export's accounting (fed_*) and the filter-wide totals
 * (fo_tot_*).  The client tells us how much it currently holds dirty
 * (o_dirty), how much grant it lost/consumed without writing (o_dropped)
 * and, with OBD_FL_SHRINK_GRANT, how much grant it is voluntarily
 * returning (o_grant).  Caller must hold obd_osfs_lock (asserted below). */
61 void filter_grant_incoming(struct obd_export *exp, struct obdo *oa)
63 struct filter_export_data *fed;
64 struct obd_device *obd = exp->exp_obd;
67 LASSERT_SPIN_LOCKED(&obd->obd_osfs_lock);
/* Both FLBLOCKS and FLGRANT must be set for the report to be usable;
 * otherwise strip FLGRANT so later code ignores the stale grant info. */
69 if ((oa->o_valid & (OBD_MD_FLBLOCKS|OBD_MD_FLGRANT)) !=
70 (OBD_MD_FLBLOCKS|OBD_MD_FLGRANT)) {
71 oa->o_valid &= ~OBD_MD_FLGRANT;
76 fed = &exp->exp_filter_data;
78 /* Add some margin, since there is a small race if other RPCs arrive
79 * out-or-order and have already consumed some grant. We want to
80 * leave this here in case there is a large error in accounting. */
82 "%s: cli %s/%p reports grant: "LPU64" dropped: %u, local: %lu\n",
83 obd->obd_name, exp->exp_client_uuid.uuid, exp, oa->o_grant,
84 oa->o_dropped, fed->fed_grant);
86 /* Update our accounting now so that statfs takes it into account.
87 * Note that fed_dirty is only approximate and can become incorrect
88 * if RPCs arrive out-of-order. No important calculations depend
89 * on fed_dirty however, but we must check sanity to not assert. */
/* Clamp the client-reported dirty count into a sane range:
 * negative (cast to signed) or wildly above its grant are both
 * treated as accounting noise rather than trusted values. */
90 if ((long long)oa->o_dirty < 0)
92 else if (oa->o_dirty > fed->fed_grant + 4 * FILTER_GRANT_CHUNK)
93 oa->o_dirty = fed->fed_grant + 4 * FILTER_GRANT_CHUNK;
94 obd->u.filter.fo_tot_dirty += oa->o_dirty - fed->fed_dirty;
95 if (fed->fed_grant < oa->o_dropped) {
96 CDEBUG(D_CACHE,"%s: cli %s/%p reports %u dropped > grant %lu\n",
97 obd->obd_name, exp->exp_client_uuid.uuid, exp,
98 oa->o_dropped, fed->fed_grant);
101 if (obd->u.filter.fo_tot_granted < oa->o_dropped) {
102 CERROR("%s: cli %s/%p reports %u dropped > tot_grant "LPU64"\n",
103 obd->obd_name, exp->exp_client_uuid.uuid, exp,
104 oa->o_dropped, obd->u.filter.fo_tot_granted);
/* Retire the dropped grant from both the global total and this export. */
107 obd->u.filter.fo_tot_granted -= oa->o_dropped;
108 fed->fed_grant -= oa->o_dropped;
109 fed->fed_dirty = oa->o_dirty;
/* Client is offering grant back (OBD_FL_SHRINK_GRANT); only accept the
 * shrink when free space is actually tight, otherwise keep it granted. */
111 if (oa->o_valid & OBD_MD_FLFLAGS && oa->o_flags & OBD_FL_SHRINK_GRANT) {
112 obd_size left_space = filter_grant_space_left(exp);
113 struct filter_obd *filter = &exp->exp_obd->u.filter;
115 /*Only if left_space < fo_tot_clients * 32M,
116 *then the grant space could be shrinked */
117 if (left_space < filter->fo_tot_granted_clients *
118 FILTER_GRANT_SHRINK_LIMIT) {
119 fed->fed_grant -= oa->o_grant;
120 filter->fo_tot_granted -= oa->o_grant;
121 CDEBUG(D_CACHE, "%s: cli %s/%p shrink "LPU64
122 "fed_grant %ld total "LPU64"\n",
123 obd->obd_name, exp->exp_client_uuid.uuid,
124 exp, oa->o_grant, fed->fed_grant,
125 filter->fo_tot_granted);
/* Sanity: none of the per-export counters may go negative; if they do,
 * log and bail out (lock is dropped on this error path). */
130 if (fed->fed_dirty < 0 || fed->fed_grant < 0 || fed->fed_pending < 0) {
131 CERROR("%s: cli %s/%p dirty %ld pend %ld grant %ld\n",
132 obd->obd_name, exp->exp_client_uuid.uuid, exp,
133 fed->fed_dirty, fed->fed_pending, fed->fed_grant);
134 spin_unlock(&obd->obd_osfs_lock);
140 /* Figure out how much space is available between what we've granted
141 * and what remains in the filesystem. Compensate for ext3 indirect
142 * block overhead when computing how much free space is left ungranted.
144 * Caller must hold obd_osfs_lock. */
/* Return how many bytes of filesystem space remain un-granted: free space
 * (minus an estimate of (d)indirect-block overhead and llog reservation)
 * compared against fo_tot_granted.  Refreshes cached statfs data when it
 * is older than one HZ tick.  Caller must hold obd_osfs_lock. */
145 obd_size filter_grant_space_left(struct obd_export *exp)
147 struct obd_device *obd = exp->exp_obd;
148 int blockbits = obd->u.obt.obt_sb->s_blocksize_bits;
149 obd_size tot_granted = obd->u.filter.fo_tot_granted, avail, left = 0;
150 int rc, statfs_done = 0;
152 LASSERT_SPIN_LOCKED(&obd->obd_osfs_lock);
/* Refresh the cached statfs if it is more than one jiffy-second old. */
154 if (cfs_time_before_64(obd->obd_osfs_age, cfs_time_current_64() - HZ)) {
156 rc = fsfilt_statfs(obd, obd->u.obt.obt_sb,
157 cfs_time_current_64() + HZ);
158 if (rc) /* N.B. statfs can't really fail */
163 avail = obd->obd_osfs.os_bavail;
/* Reserve ~1/8 of available blocks (avail >> (blockbits - 3)) for
 * (d)indirect block overhead, then subtract the llog reservation and
 * convert blocks to bytes. */
164 left = avail - (avail >> (blockbits - 3)); /* (d)indirect */
165 if (left > GRANT_FOR_LLOG(obd)) {
166 left = (left - GRANT_FOR_LLOG(obd)) << blockbits;
168 left = 0 /* << blockbits */;
/* If we look short on space but haven't refreshed statfs this call,
 * the cached numbers may simply be stale. */
171 if (!statfs_done && left < 32 * FILTER_GRANT_CHUNK + tot_granted) {
172 CDEBUG(D_CACHE, "fs has no space left and statfs too old\n");
176 if (left >= tot_granted) {
/* Granted more than we can see free: tolerable only if the excess is
 * covered by in-flight (pending) writes; otherwise it's an error. */
179 if (left < tot_granted - obd->u.filter.fo_tot_pending) {
180 CERROR("%s: cli %s/%p grant "LPU64" > available "
181 LPU64" and pending "LPU64"\n", obd->obd_name,
182 exp->exp_client_uuid.uuid, exp, tot_granted,
183 left, obd->u.filter.fo_tot_pending);
188 CDEBUG(D_CACHE, "%s: cli %s/%p free: "LPU64" avail: "LPU64" grant "LPU64
189 " left: "LPU64" pending: "LPU64"\n", obd->obd_name,
190 exp->exp_client_uuid.uuid, exp,
191 obd->obd_osfs.os_bfree << blockbits, avail << blockbits,
192 tot_granted, left, obd->u.filter.fo_tot_pending);
197 /* Calculate how much grant space to allocate to this client, based on how
198 * much space is currently free and how much of that is already granted.
200 * if @conservative != 0, we limit the maximum grant to FILTER_GRANT_CHUNK;
201 * otherwise we'll satisfy the requested amount as possible as we can, this
202 * usually due to client reconnect.
204 * Caller must hold obd_osfs_lock. */
/* Decide how much new grant to hand this client.
 * @current_grant: what the client believes it holds now.
 * @want:          how much the client asked for.
 * @fs_space_left: un-granted space from filter_grant_space_left().
 * @conservative:  non-zero caps the award at FILTER_GRANT_CHUNK; zero
 *                 (reconnect case) tries to satisfy @want fully.
 * Returns the number of bytes granted; updates fed_grant and
 * fo_tot_granted.  Caller must hold obd_osfs_lock. */
205 long filter_grant(struct obd_export *exp, obd_size current_grant,
206 obd_size want, obd_size fs_space_left, int conservative)
208 struct obd_device *obd = exp->exp_obd;
209 struct filter_export_data *fed = &exp->exp_filter_data;
210 int blockbits = obd->u.obt.obt_sb->s_blocksize_bits;
213 LASSERT_SPIN_LOCKED(&obd->obd_osfs_lock);
215 /* Grant some fraction of the client's requested grant space so that
216 * they are not always waiting for write credits (not all of it to
217 * avoid overgranting in face of multiple RPCs in flight). This
218 * essentially will be able to control the OSC_MAX_RIF for a client.
220 * If we do have a large disparity between what the client thinks it
221 * has and what we think it has, don't grant very much and let the
222 * client consume its grant first. Either it just has lots of RPCs
223 * in flight, or it was evicted and its grants will soon be used up. */
224 if (want > 0x7fffffff) {
225 CERROR("%s: client %s/%p requesting > 2GB grant "LPU64"\n",
226 obd->obd_name, exp->exp_client_uuid.uuid, exp, want);
227 } else if (current_grant < want &&
228 current_grant < fed->fed_grant + FILTER_GRANT_CHUNK) {
/* Round the request up to a whole block, but never grant more than
 * 1/8 of remaining space; then align down to the block size. */
229 grant = min(want + (1 << blockbits) - 1, fs_space_left / 8);
230 grant &= ~((1ULL << blockbits) - 1);
233 if (grant > FILTER_GRANT_CHUNK && conservative)
234 grant = FILTER_GRANT_CHUNK;
236 obd->u.filter.fo_tot_granted += grant;
237 fed->fed_grant += grant;
/* Overflow/accounting sanity: a negative fed_grant is fatal here
 * (the lock is dropped on this error path before bailing). */
238 if (fed->fed_grant < 0) {
239 CERROR("%s: cli %s/%p grant %ld want "LPU64
241 obd->obd_name, exp->exp_client_uuid.uuid,
242 exp, fed->fed_grant, want,current_grant);
243 spin_unlock(&obd->obd_osfs_lock);
250 "%s: cli %s/%p wants: "LPU64" current grant "LPU64
251 " granting: "LPU64"\n", obd->obd_name, exp->exp_client_uuid.uuid,
252 exp, want, current_grant, grant);
254 "%s: cli %s/%p tot cached:"LPU64" granted:"LPU64
255 " num_exports: %d\n", obd->obd_name, exp->exp_client_uuid.uuid,
256 exp, obd->u.filter.fo_tot_dirty,
257 obd->u.filter.fo_tot_granted, obd->obd_num_exports);
263 * the routine is used to request pages from pagecache
265 * use GFP_NOFS for requests from a local client not allowing to enter FS
266 * as we might end up waiting on a page he sent in the request we're serving.
267 * use __GFP_HIGHMEM so that the pages can use all of the available memory
269 * use more aggressive GFP_HIGHUSER flags from non-local clients to be able to
270 * generate more memory pressure.
272 * See Bug 19529 and Bug 19917 for details.
/* Look up (or allocate) the pagecache page backing @offset of @inode.
 * Local requests use GFP_NOFS|__GFP_HIGHMEM to avoid re-entering the
 * filesystem while serving a page the client itself sent (see the
 * bug 19529/19917 discussion above); remote requests use the more
 * aggressive flags on the (truncated-away) else branch.
 * Returns the locked page, or NULL on allocation failure (counted in
 * the LPROC_FILTER_NO_PAGE stat). */
274 static struct page * filter_get_page(struct obd_device *obd,
281 page = find_or_create_page(inode->i_mapping, offset >> CFS_PAGE_SHIFT,
282 (localreq ? (GFP_NOFS | __GFP_HIGHMEM)
284 if (unlikely(page == NULL))
285 lprocfs_counter_add(obd->obd_stats, LPROC_FILTER_NO_PAGE, 1);
291 * the routine initializes array of local_niobuf from remote_niobuf
/* Expand the remote niobuf array @nb (arbitrary offset/len extents) into
 * per-page local niobufs in @res, splitting each remote buffer at page
 * boundaries.  On input *nrpages is the capacity of @res; on output it is
 * the number of entries filled.  Only a single object is supported
 * (objcount == 1), which the OST brw paths guarantee. */
293 static int filter_map_remote_to_local(int objcount, struct obd_ioobj *obj,
294 struct niobuf_remote *nb,
295 int *nrpages, struct niobuf_local *res)
297 struct niobuf_remote *rnb;
298 struct niobuf_local *lnb;
302 /* we don't support multiobject RPC yet
303 * ost_brw_read() and ost_brw_write() check this */
304 LASSERT(objcount == 1);
308 for (i = 0, rnb = nb, lnb = res; i < obj->ioo_bufcnt; i++, rnb++) {
309 obd_off offset = rnb->offset;
310 unsigned int len = rnb->len;
/* poff = start offset within the page, plen = bytes of this remote
 * buffer that fit in the current page. */
313 int poff = offset & (CFS_PAGE_SIZE - 1);
314 int plen = CFS_PAGE_SIZE - poff;
316 if (*nrpages >= max) {
317 CERROR("small array of local bufs: %d\n", max);
323 lnb->offset = offset;
325 lnb->flags = rnb->flags;
328 lnb->lnb_grant_used = 0;
330 LASSERTF(plen <= len, "plen %u, len %u\n", plen, len);
341 * Invalidating the pages to get them out of cache doesn't work because
342 * LNET pins the pages. Instead (on newer kernels) the pages are truncated
343 * from the cache, while older kernels (RHEL4 and SLES9) just leave them in
344 * the cache. b=18718/
/* Drop the just-read pages of @inode from the pagecache so large streaming
 * reads don't evict more useful data.  Plain invalidation fails because
 * LNET pins the pages; newer kernels truncate the exact byte range
 * (HAVE_TRUNCATE_RANGE), older kernels (RHEL4/SLES9) fall back to
 * invalidate_mapping_pages and may leave pages behind. b=18718 */
346 void filter_release_cache(struct obd_device *obd, struct obd_ioobj *obj,
347 struct niobuf_remote *rnb, struct inode *inode)
351 LASSERT(inode != NULL);
352 for (i = 0; i < obj->ioo_bufcnt; i++, rnb++) {
353 #ifdef HAVE_TRUNCATE_RANGE
354 /* remove pages in which range is fit */
355 truncate_inode_pages_range(inode->i_mapping,
356 rnb->offset & CFS_PAGE_MASK,
357 (rnb->offset + rnb->len - 1) |
360 /* use invalidate for old kernels */
361 invalidate_mapping_pages(inode->i_mapping,
362 rnb->offset >> CFS_PAGE_SHIFT,
363 (rnb->offset + rnb->len) >>
/* Prepare a bulk READ: resolve the object dentry, map the remote niobufs
 * to per-page local niobufs, pull each page from the pagecache (issuing
 * direct I/O for the ones that are not already uptodate), and leave the
 * pages unlocked but referenced for the bulk transfer.  Grant accounting
 * from the incoming obdo is absorbed under obd_osfs_lock first.
 * Cleanup on error unwinds pages/iobuf/ctxt in reverse order. */
369 static int filter_preprw_read(int cmd, struct obd_export *exp, struct obdo *oa,
370 int objcount, struct obd_ioobj *obj,
371 struct niobuf_remote *nb,
372 int *pages, struct niobuf_local *res,
373 struct obd_trans_info *oti)
375 struct obd_device *obd = exp->exp_obd;
376 struct timeval start, end;
377 struct lvfs_run_ctxt saved;
378 struct niobuf_local *lnb;
379 struct dentry *dentry = NULL;
380 struct inode *inode = NULL;
382 int rc = 0, i, tot_bytes = 0;
383 unsigned long now = jiffies;
388 /* We are currently not supporting multi-obj BRW_READ RPCS at all.
389 * When we do this function's dentry cleanup will need to be fixed.
390 * These values are verified in ost_brw_write() from the wire. */
391 LASSERTF(objcount == 1, "%d\n", objcount);
392 LASSERTF(obj->ioo_bufcnt > 0, "%d\n", obj->ioo_bufcnt);
/* Absorb the client's grant report; reads don't allocate new grant
 * unless the client is shrinking (OBD_FL_SHRINK_GRANT). */
394 if (oa->o_valid & OBD_MD_FLGRANT) {
395 spin_lock(&obd->obd_osfs_lock);
396 filter_grant_incoming(exp, oa);
398 if (!(oa->o_valid & OBD_MD_FLFLAGS) ||
399 !(oa->o_flags & OBD_FL_SHRINK_GRANT))
401 spin_unlock(&obd->obd_osfs_lock);
404 iobuf = filter_iobuf_get(&obd->u.filter, oti);
406 RETURN(PTR_ERR(iobuf));
408 push_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
409 dentry = filter_oa2dentry(obd, oa);
410 if (IS_ERR(dentry)) {
411 rc = PTR_ERR(dentry);
416 inode = dentry->d_inode;
417 /* While we are reading i_size only once, it might change after that
418 * while we are still reading, but this is perfectly fine race that
419 * we do not need to care about (bug 20142). */
420 isize = i_size_read(inode);
422 obdo_to_inode(inode, oa, OBD_MD_FLATIME);
424 rc = filter_map_remote_to_local(objcount, obj, nb, pages, res);
428 fsfilt_check_slow(obd, now, "preprw_read setup");
430 /* find pages for all segments, fill array with them */
431 do_gettimeofday(&start);
432 for (i = 0, lnb = res; i < *pages; i++, lnb++) {
434 lnb->dentry = dentry;
436 if (isize <= lnb->offset)
437 /* If there's no more data, abort early. lnb->rc == 0,
438 * so it's easy to detect later. */
441 lnb->page = filter_get_page(obd, inode, lnb->offset, 0);
442 if (lnb->page == NULL)
443 GOTO(cleanup, rc = -ENOMEM);
445 lprocfs_counter_add(obd->obd_stats, LPROC_FILTER_CACHE_ACCESS, 1);
/* Short read at EOF: clamp the per-buffer byte count to i_size. */
447 if (isize < lnb->offset + lnb->len - 1)
448 lnb->rc = isize - lnb->offset;
452 tot_bytes += lnb->rc;
/* Uptodate pages are cache hits and need no disk I/O; everything
 * else is queued in the iobuf for one batched direct read. */
454 if (PageUptodate(lnb->page)) {
455 lprocfs_counter_add(obd->obd_stats,
456 LPROC_FILTER_CACHE_HIT, 1);
460 lprocfs_counter_add(obd->obd_stats, LPROC_FILTER_CACHE_MISS, 1);
461 filter_iobuf_add_page(obd, iobuf, inode, lnb->page);
463 do_gettimeofday(&end);
464 timediff = cfs_timeval_sub(&end, &start, NULL);
465 lprocfs_counter_add(obd->obd_stats, LPROC_FILTER_GET_PAGE, timediff);
467 if (OBD_FAIL_CHECK(OBD_FAIL_OST_NOMEM))
468 GOTO(cleanup, rc = -ENOMEM);
470 fsfilt_check_slow(obd, now, "start_page_read");
472 rc = filter_direct_io(OBD_BRW_READ, dentry, iobuf,
473 exp, NULL, NULL, NULL);
477 lprocfs_counter_add(obd->obd_stats, LPROC_FILTER_READ_BYTES, tot_bytes);
478 if (exp->exp_nid_stats && exp->exp_nid_stats->nid_stats)
479 lprocfs_counter_add(exp->exp_nid_stats->nid_stats,
480 LPROC_FILTER_READ_BYTES, tot_bytes);
485 /* unlock pages to allow access from concurrent OST_READ */
486 for (i = 0, lnb = res; i < *pages; i++, lnb++) {
488 LASSERT(PageLocked(lnb->page));
489 unlock_page(lnb->page);
/* NOTE(review): this release appears to be on the error/cleanup
 * path (dropping the page reference); confirm against the full
 * cleanup-phase switch which is not visible in this listing. */
492 page_cache_release(lnb->page);
503 filter_iobuf_put(&obd->u.filter, iobuf, oti);
505 pop_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
507 CERROR("io error %d\n", rc);
512 /* When clients have dirtied as much space as they've been granted they
513 * fall through to sync writes. These sync writes haven't been expressed
514 * in grants and need to error with ENOSPC when there isn't room in the
515 * filesystem for them after grants are taken into account. However,
516 * writeback of the dirty data that was already granted space can write
519 * Caller must hold obd_osfs_lock. */
/* Walk every local niobuf of a write and decide, per buffer, whether it
 * is covered by existing grant (fed_grant), can be covered out of free
 * space (*left, "ungranted"), or must fail with -ENOSPC.  Buffers that
 * pass get OBD_BRW_GRANTED set and lnb_grant_used recorded; accounting
 * is moved from granted/dirty into pending.  Caller must hold
 * obd_osfs_lock.  Recoverable resends skip the check entirely. */
520 static int filter_grant_check(struct obd_export *exp, struct obdo *oa,
521 int objcount, struct fsfilt_objinfo *fso,
522 int niocount, struct niobuf_local *lnb,
523 obd_size *left, struct inode *inode)
525 struct filter_export_data *fed = &exp->exp_filter_data;
526 int blocksize = exp->exp_obd->u.obt.obt_sb->s_blocksize;
527 unsigned long used = 0, ungranted = 0, using;
528 int i, rc = -ENOSPC, obj, n = 0;
531 if ((oa->o_valid & OBD_MD_FLFLAGS) &&
532 (oa->o_flags & OBD_FL_RECOV_RESEND)) {
534 CDEBUG(D_CACHE, "Recoverable resend arrived, skipping "
538 LASSERT_SPIN_LOCKED(&exp->exp_obd->obd_osfs_lock);
540 for (obj = 0; obj < objcount; obj++) {
541 for (i = 0; i < fso[obj].fso_bufcnt; i++, n++) {
544 /* should match the code in osc_exit_cache */
/* Round the I/O out to full filesystem blocks, since the
 * client's grant was charged in whole blocks too. */
546 bytes += lnb[n].offset & (blocksize - 1);
547 tmp = (lnb[n].offset + lnb[n].len) & (blocksize - 1);
549 bytes += blocksize - tmp;
/* Case 1: the client says this buffer came out of its grant. */
551 if ((lnb[n].flags & OBD_BRW_FROM_GRANT) &&
552 (oa->o_valid & OBD_MD_FLGRANT)) {
554 /* this is a recoverable resent */
555 lnb[n].flags |= OBD_BRW_GRANTED;
558 } else if (fed->fed_grant < used + bytes) {
560 "%s: cli %s/%p claims %ld+%d "
561 "GRANT, real grant %lu idx %d\n",
562 exp->exp_obd->obd_name,
563 exp->exp_client_uuid.uuid, exp,
564 used, bytes, fed->fed_grant, n);
567 lnb[n].flags |= OBD_BRW_GRANTED;
568 lnb[n].lnb_grant_used = bytes;
569 CDEBUG(0, "idx %d used=%lu\n", n, used);
/* Case 2: not from grant -- cover it from free space if any. */
574 if (*left > ungranted + bytes) {
575 /* if enough space, pretend it was granted */
577 lnb[n].flags |= OBD_BRW_GRANTED;
578 lnb[n].lnb_grant_used = bytes;
579 CDEBUG(0, "idx %d ungranted=%lu\n",n,ungranted);
584 /* We can't check for already-mapped blocks here, as
585 * it requires dropping the osfs lock to do the bmap.
586 * Instead, we return ENOSPC and in that case we need
587 * to go through and verify if all of the blocks not
588 * marked BRW_GRANTED are already mapped and we can
589 * ignore this error. */
591 lnb[n].flags &= ~OBD_BRW_GRANTED;
592 CDEBUG(D_CACHE,"%s: cli %s/%p idx %d no space for %d\n",
593 exp->exp_obd->obd_name,
594 exp->exp_client_uuid.uuid, exp, n, bytes);
598 /* Now substract what client have used already. We don't subtract
599 * this from the tot_granted yet, so that other client's can't grab
600 * that space before we have actually allocated our blocks. That
601 * happens in filter_grant_commit() after the writes are done. */
603 fed->fed_grant -= used;
604 fed->fed_pending += used + ungranted;
605 exp->exp_obd->u.filter.fo_tot_granted += ungranted;
606 exp->exp_obd->u.filter.fo_tot_pending += used + ungranted;
609 "%s: cli %s/%p used: %lu ungranted: %lu grant: %lu dirty: %lu\n",
610 exp->exp_obd->obd_name, exp->exp_client_uuid.uuid, exp, used,
611 ungranted, fed->fed_grant, fed->fed_dirty);
613 /* Rough calc in case we don't refresh cached statfs data */
614 using = (used + ungranted + 1 ) >>
615 exp->exp_obd->u.obt.obt_sb->s_blocksize_bits;
616 if (exp->exp_obd->obd_osfs.os_bavail > using)
617 exp->exp_obd->obd_osfs.os_bavail -= using;
619 exp->exp_obd->obd_osfs.os_bavail = 0;
/* Dirty accounting follows grant: the client's dirty pages are now
 * being written, so move them out of the dirty totals (clamped to
 * avoid driving fed_dirty negative on out-of-order RPCs). */
621 if (fed->fed_dirty < used) {
622 CERROR("%s: cli %s/%p claims used %lu > fed_dirty %lu\n",
623 exp->exp_obd->obd_name, exp->exp_client_uuid.uuid, exp,
624 used, fed->fed_dirty);
625 used = fed->fed_dirty;
627 exp->exp_obd->u.filter.fo_tot_dirty -= used;
628 fed->fed_dirty -= used;
630 if (fed->fed_dirty < 0 || fed->fed_grant < 0 || fed->fed_pending < 0) {
631 CERROR("%s: cli %s/%p dirty %ld pend %ld grant %ld\n",
632 exp->exp_obd->obd_name, exp->exp_client_uuid.uuid, exp,
633 fed->fed_dirty, fed->fed_pending, fed->fed_grant);
634 spin_unlock(&exp->exp_obd->obd_osfs_lock);
640 /* If we ever start to support multi-object BRW RPCs, we will need to get locks
641 * on mulitple inodes. That isn't all, because there still exists the
642 * possibility of a truncate starting a new transaction while holding the ext3
643 * rwsem = write while some writes (which have started their transactions here)
644 * blocking on the ext3 rwsem = read => lock inversion.
646 * The handling gets very ugly when dealing with locked pages. It may be easier
647 * to just get rid of the locked page code (which has problems of its own) and
648 * either discover we do not need it anymore (i.e. it was a symptom of another
649 * bug) or ensure we get the page locks in an appropriate order. */
/* Prepare a bulk WRITE: resolve (or, during recovery, recreate) the
 * target object, map remote niobufs to pages, run the grant check under
 * obd_osfs_lock, lock the target pages, zero the unwritten tails of
 * partial pages, and pre-read any partial pages that lie inside the
 * current file size.  Pages remain LOCKED for the bulk transfer; the
 * matching unlock/commit happens in filter_commitrw_write().
 * Serialized against truncate via i_alloc_sem (read side). */
650 static int filter_preprw_write(int cmd, struct obd_export *exp, struct obdo *oa,
651 int objcount, struct obd_ioobj *obj,
652 struct niobuf_remote *nb, int *pages,
653 struct niobuf_local *res,
654 struct obd_trans_info *oti)
656 struct obd_device *obd = exp->exp_obd;
657 struct timeval start, end;
658 struct lvfs_run_ctxt saved;
659 struct niobuf_local *lnb = res;
660 struct fsfilt_objinfo fso;
661 struct filter_mod_data *fmd;
662 struct dentry *dentry = NULL;
665 unsigned long now = jiffies, timediff;
666 int rc = 0, i, tot_bytes = 0, cleanup_phase = 0, localreq = 0;
668 LASSERT(objcount == 1);
669 LASSERT(obj->ioo_bufcnt > 0);
/* A request whose peer NID equals our own NID originates from a client
 * on this same node; remember that for GFP flag selection later. */
671 if (exp->exp_connection &&
672 exp->exp_connection->c_peer.nid == exp->exp_connection->c_self)
675 push_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
676 iobuf = filter_iobuf_get(&obd->u.filter, oti);
678 GOTO(cleanup, rc = PTR_ERR(iobuf));
681 dentry = filter_fid2dentry(obd, NULL, obj->ioo_gr, obj->ioo_id);
683 GOTO(cleanup, rc = PTR_ERR(dentry));
/* Object missing: during recovery a write may legitimately arrive for
 * an object whose create was lost -- recreate it and retry the lookup. */
686 if (dentry->d_inode == NULL) {
687 if (exp->exp_obd->obd_recovering) {
688 struct obdo *noa = oa;
693 GOTO(recreate_out, rc = -ENOMEM);
694 noa->o_id = obj->ioo_id;
695 noa->o_valid = OBD_MD_FLID;
698 if (filter_recreate(exp->exp_obd, noa) == 0) {
700 dentry = filter_fid2dentry(exp->exp_obd, NULL,
708 if (IS_ERR(dentry) || dentry->d_inode == NULL) {
709 CERROR("%s: BRW to missing obj "LPU64"/"LPU64":rc %d\n",
710 exp->exp_obd->obd_name,
711 obj->ioo_id, obj->ioo_gr,
712 IS_ERR(dentry) ? (int)PTR_ERR(dentry) : -ENOENT);
715 GOTO(cleanup, rc = -ENOENT);
719 rc = filter_map_remote_to_local(objcount, obj, nb, pages, res);
723 fsfilt_check_slow(obd, now, "preprw_write setup");
725 /* Filter truncate first locks i_mutex then partially truncated
726 * page, filter write code first locks pages then take
727 * i_mutex. To avoid a deadlock in case of concurrent
728 * punch/write requests from one client, filter writes and
729 * filter truncates are serialized by i_alloc_sem, allowing
730 * multiple writes or single truncate. */
731 down_read(&dentry->d_inode->i_alloc_sem);
733 /* Don't update inode timestamps if this write is older than a
734 * setattr which modifies the timestamps. b=10150 */
735 /* XXX when we start having persistent reservations this needs to
736 * be changed to filter_fmd_get() to create the fmd if it doesn't
737 * already exist so we can store the reservation handle there. */
738 fmd = filter_fmd_find(exp, obj->ioo_id, obj->ioo_gr);
741 spin_lock(&obd->obd_osfs_lock);
743 filter_grant_incoming(exp, oa);
/* Drop mtime/ctime from the obdo if a newer setattr already set them. */
744 if (fmd && fmd->fmd_mactime_xid > oti->oti_xid)
745 oa->o_valid &= ~(OBD_MD_FLMTIME | OBD_MD_FLCTIME |
748 obdo_to_inode(dentry->d_inode, oa, OBD_MD_FLATIME |
749 OBD_MD_FLMTIME | OBD_MD_FLCTIME);
752 left = filter_grant_space_left(exp);
754 fso.fso_dentry = dentry;
755 fso.fso_bufcnt = *pages;
757 rc = filter_grant_check(exp, oa, objcount, &fso, *pages, res,
758 &left, dentry->d_inode);
760 /* do not zero out oa->o_valid as it is used in filter_commitrw_write()
761 * for setting UID/GID and fid EA in first write time. */
762 /* If OBD_FL_SHRINK_GRANT is set, the client just returned us some grant
763 * so no sense in allocating it some more. We either return the grant
764 * back to the client if we have plenty of space or we don't return
765 * anything if we are short. This was decided in filter_grant_incoming*/
766 if ((oa->o_valid & OBD_MD_FLGRANT) &&
767 (!(oa->o_valid & OBD_MD_FLFLAGS) ||
768 !(oa->o_flags & OBD_FL_SHRINK_GRANT)))
769 oa->o_grant = filter_grant(exp, oa->o_grant, oa->o_undirty,
772 spin_unlock(&obd->obd_osfs_lock);
773 filter_fmd_put(exp, fmd);
775 OBD_FAIL_TIMEOUT(OBD_FAIL_OST_BRW_PAUSE_BULK2, (obd_timeout + 1) / 4);
781 do_gettimeofday(&start);
782 for (i = 0, lnb = res; i < *pages; i++, lnb++) {
784 /* We still set up for ungranted pages so that granted pages
785 * can be written to disk as they were promised, and portals
786 * needs to keep the pages all aligned properly. */
787 lnb->dentry = dentry;
789 lnb->page = filter_get_page(obd, dentry->d_inode, lnb->offset,
791 if (lnb->page == NULL)
792 GOTO(cleanup, rc = -ENOMEM);
794 /* DLM locking protects us from write and truncate competing
795 * for same region, but truncate can leave dirty page in the
796 * cache. it's possible the writeout on a such a page is in
797 * progress when we access it. it's also possible that during
798 * this writeout we put new (partial) data, but then won't
799 * be able to proceed in filter_commitrw_write(). thus let's
800 * just wait for writeout completion, should be rare enough.
802 wait_on_page_writeback(lnb->page);
803 BUG_ON(PageWriteback(lnb->page));
805 /* If the filter writes a partial page, then has the file
806 * extended, the client will read in the whole page. the
807 * filter has to be careful to zero the rest of the partial
808 * page on disk. we do it by hand for partial extending
809 * writes, send_bio() is responsible for zeroing pages when
810 * asked to read unmapped blocks -- brw_kiovec() does this. */
811 if (lnb->len != CFS_PAGE_SIZE) {
/* Partial page inside current EOF: must pre-read existing
 * data for a read-modify-write; queue it in the iobuf. */
814 maxidx = ((i_size_read(dentry->d_inode) +
815 CFS_PAGE_SIZE - 1) >> CFS_PAGE_SHIFT) - 1;
816 if (maxidx >= lnb->page->index) {
817 LL_CDEBUG_PAGE(D_PAGE, lnb->page, "write %u @ "
818 LPU64" flg %x before EOF %llu\n",
819 lnb->len, lnb->offset,lnb->flags,
820 i_size_read(dentry->d_inode));
821 filter_iobuf_add_page(obd, iobuf,
/* Partial page beyond EOF: zero the untouched bytes by hand. */
826 char *p = kmap(lnb->page);
828 off = lnb->offset & ~CFS_PAGE_MASK;
831 off = (lnb->offset + lnb->len) & ~CFS_PAGE_MASK;
833 memset(p + off, 0, CFS_PAGE_SIZE - off);
838 tot_bytes += lnb->len;
840 do_gettimeofday(&end);
841 timediff = cfs_timeval_sub(&end, &start, NULL);
842 lprocfs_counter_add(obd->obd_stats, LPROC_FILTER_GET_PAGE, timediff);
844 if (OBD_FAIL_CHECK(OBD_FAIL_OST_NOMEM))
845 GOTO(cleanup, rc = -ENOMEM);
847 /* don't unlock pages to prevent any access */
848 rc = filter_direct_io(OBD_BRW_READ, dentry, iobuf, exp,
851 fsfilt_check_slow(obd, now, "start_page_write");
853 lprocfs_counter_add(obd->obd_stats, LPROC_FILTER_WRITE_BYTES,
855 if (exp->exp_nid_stats && exp->exp_nid_stats->nid_stats)
856 lprocfs_counter_add(exp->exp_nid_stats->nid_stats,
857 LPROC_FILTER_WRITE_BYTES, tot_bytes);
/* Error unwind: phases are entered in reverse order of acquisition --
 * pages, grant, i_alloc_sem, iobuf, ctxt. */
860 switch(cleanup_phase) {
863 for (i = 0, lnb = res; i < *pages; i++, lnb++) {
864 if (lnb->page != NULL) {
865 unlock_page(lnb->page);
866 page_cache_release(lnb->page);
/* Undo the pending-grant reservation made by filter_grant_check(). */
870 filter_grant_commit(exp, *pages, res);
874 up_read(&dentry->d_inode->i_alloc_sem);
876 filter_iobuf_put(&obd->u.filter, iobuf, oti);
878 pop_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
883 filter_iobuf_put(&obd->u.filter, iobuf, oti);
885 spin_lock(&obd->obd_osfs_lock);
887 filter_grant_incoming(exp, oa);
888 spin_unlock(&obd->obd_osfs_lock);
889 pop_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
/* Public preprw entry point: dispatch to the write or read preparation
 * path depending on @cmd (OBD_BRW_WRITE / OBD_BRW_READ). */
896 int filter_preprw(int cmd, struct obd_export *exp, struct obdo *oa,
897 int objcount, struct obd_ioobj *obj,
898 struct niobuf_remote *nb, int *pages,
899 struct niobuf_local *res, struct obd_trans_info *oti)
901 if (cmd == OBD_BRW_WRITE)
902 return filter_preprw_write(cmd, exp, oa, objcount, obj,
903 nb, pages, res, oti);
904 if (cmd == OBD_BRW_READ)
905 return filter_preprw_read(cmd, exp, oa, objcount, obj,
906 nb, pages, res, oti);
/* Finish a bulk READ: propagate the atime update into the DLM lvb so
 * glimpse requests see it (bug 5972), drop the page references taken in
 * filter_preprw_read(), and optionally evict the pages from the cache
 * (read cache disabled, or file larger than fo_readcache_max_filesize). */
912 static int filter_commitrw_read(struct obd_export *exp, struct obdo *oa,
913 int objcount, struct obd_ioobj *obj,
914 struct niobuf_remote *rnb,
915 int pages, struct niobuf_local *res,
916 struct obd_trans_info *oti, int rc)
917 struct filter_obd *fo = &exp->exp_obd->u.filter;
918 struct inode *inode = NULL;
919 struct ldlm_res_id res_id = { .name = { obj->ioo_id } };
920 struct ldlm_resource *resource = NULL;
921 struct ldlm_namespace *ns = exp->exp_obd->obd_namespace;
922 struct niobuf_local *lnb;
926 /* If oa != NULL then filter_preprw_read updated the inode atime
927 * and we should update the lvb so that other glimpses will also
928 * get the updated value. bug 5972 */
929 if (oa && ns && ns->ns_lvbo && ns->ns_lvbo->lvbo_update) {
930 resource = ldlm_resource_get(ns, NULL, res_id, LDLM_EXTENT, 0);
932 if (resource != NULL) {
933 ns->ns_lvbo->lvbo_update(resource, NULL, 0, 1);
934 ldlm_resource_putref(resource);
938 if (res->dentry != NULL)
939 inode = res->dentry->d_inode;
/* Release the page references held since preprw; pages may be NULL for
 * segments past EOF that were skipped. */
941 for (i = 0, lnb = res; i < pages; i++, lnb++) {
942 if (lnb->page != NULL) {
943 page_cache_release(lnb->page);
948 if (inode && (fo->fo_read_cache == 0 ||
949 i_size_read(inode) > fo->fo_readcache_max_filesize))
950 filter_release_cache(exp->exp_obd, obj, rnb, inode);
952 if (res->dentry != NULL)
/* Retire the "pending" grant reserved by filter_grant_check() once the
 * write has actually hit disk (or is being unwound on error): sum
 * lnb_grant_used over all niobufs and subtract it from fed_pending,
 * fo_tot_granted and fo_tot_pending under obd_osfs_lock.  Each LASSERTF
 * guards against the counters underflowing. */
957 void filter_grant_commit(struct obd_export *exp, int niocount,
958 struct niobuf_local *res)
960 struct filter_obd *filter = &exp->exp_obd->u.filter;
961 struct niobuf_local *lnb = res;
962 unsigned long pending = 0;
965 spin_lock(&exp->exp_obd->obd_osfs_lock);
966 for (i = 0, lnb = res; i < niocount; i++, lnb++)
967 pending += lnb->lnb_grant_used;
969 LASSERTF(exp->exp_filter_data.fed_pending >= pending,
970 "%s: cli %s/%p fed_pending: %lu grant_used: %lu\n",
971 exp->exp_obd->obd_name, exp->exp_client_uuid.uuid, exp,
972 exp->exp_filter_data.fed_pending, pending);
973 exp->exp_filter_data.fed_pending -= pending;
974 LASSERTF(filter->fo_tot_granted >= pending,
975 "%s: cli %s/%p tot_granted: "LPU64" grant_used: %lu\n",
976 exp->exp_obd->obd_name, exp->exp_client_uuid.uuid, exp,
977 exp->exp_obd->u.filter.fo_tot_granted, pending);
978 filter->fo_tot_granted -= pending;
979 LASSERTF(filter->fo_tot_pending >= pending,
980 "%s: cli %s/%p tot_pending: "LPU64" grant_used: %lu\n",
981 exp->exp_obd->obd_name, exp->exp_client_uuid.uuid, exp,
982 filter->fo_tot_pending, pending);
983 filter->fo_tot_pending -= pending;
985 spin_unlock(&exp->exp_obd->obd_osfs_lock);
/* Public commitrw entry point: dispatch to the write or read completion
 * path depending on @cmd, passing through the preprw result code @rc. */
988 int filter_commitrw(int cmd, struct obd_export *exp, struct obdo *oa,
989 int objcount, struct obd_ioobj *obj,
990 struct niobuf_remote *nb, int pages,
991 struct niobuf_local *res, struct obd_trans_info *oti,
994 if (cmd == OBD_BRW_WRITE)
995 return filter_commitrw_write(exp, oa, objcount, obj,
996 nb, pages, res, oti, rc);
997 if (cmd == OBD_BRW_READ)
998 return filter_commitrw_read(exp, oa, objcount, obj,
999 nb, pages, res, oti, rc);
/* Server-local bulk I/O helper: build remote/local niobuf arrays from the
 * caller-supplied brw_page array, run the standard preprw/commitrw pair
 * against a single object, and free the temporary arrays.  Used for I/O
 * originating on the OST itself rather than over the wire. */
1004 int filter_brw(int cmd, struct obd_export *exp, struct obd_info *oinfo,
1005 obd_count oa_bufs, struct brw_page *pga,
1006 struct obd_trans_info *oti)
1008 struct obd_ioobj ioo;
1009 struct niobuf_local *lnb;
1010 struct niobuf_remote *rnb;
1012 int ret = 0, npages;
1015 OBD_ALLOC(lnb, oa_bufs * sizeof(struct niobuf_local));
1016 OBD_ALLOC(rnb, oa_bufs * sizeof(struct niobuf_remote));
1018 if (lnb == NULL || rnb == NULL)
1019 GOTO(out, ret = -ENOMEM);
/* Translate each brw_page into one remote niobuf (offset/len/flags);
 * preprw fills in the matching local niobufs. */
1021 for (i = 0; i < oa_bufs; i++) {
1022 lnb[i].page = pga[i].pg;
1023 rnb[i].offset = pga[i].off;
1024 rnb[i].len = pga[i].count;
1025 lnb[i].flags = rnb[i].flags = pga[i].flag;
1028 obdo_to_ioobj(oinfo->oi_oa, &ioo);
1029 ioo.ioo_bufcnt = oa_bufs;
1032 ret = filter_preprw(cmd, exp, oinfo->oi_oa, 1, &ioo,
1033 rnb, &npages, lnb, oti);
/* Page-aligned caller buffers must map 1:1 to local niobufs. */
1036 LASSERTF(oa_bufs == npages, "%u != %u\n", oa_bufs, npages);
1038 ret = filter_commitrw(cmd, exp, oinfo->oi_oa, 1, &ioo, rnb,
1039 npages, lnb, oti, ret);
1043 OBD_FREE(lnb, oa_bufs * sizeof(struct niobuf_local));
1045 OBD_FREE(rnb, oa_bufs * sizeof(struct niobuf_remote));