1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
6 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 only,
10 * as published by the Free Software Foundation.
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License version 2 for more details (a copy is included
16 * in the LICENSE file that accompanied this code).
18 * You should have received a copy of the GNU General Public License
19 * version 2 along with this program; If not, see
20 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
22 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
23 * CA 95054 USA or visit www.sun.com if you need additional information or
29 * Copyright 2008 Sun Microsystems, Inc. All rights reserved
30 * Use is subject to license terms.
33 * This file is part of Lustre, http://www.lustre.org/
34 * Lustre is a trademark of Sun Microsystems, Inc.
36 * lustre/obdfilter/filter_io.c
38 * Author: Peter Braam <braam@clusterfs.com>
39 * Author: Andreas Dilger <adilger@clusterfs.com>
40 * Author: Phil Schwan <phil@clusterfs.com>
43 #define DEBUG_SUBSYSTEM S_FILTER
45 #ifndef AUTOCONF_INCLUDED
46 #include <linux/config.h>
48 #include <linux/module.h>
49 #include <linux/pagemap.h> // XXX kill me soon
50 #include <linux/version.h>
52 #include <obd_class.h>
54 #include <lustre_fsfilt.h>
55 #include "filter_internal.h"
/* NOTE(review): module-wide scratch buffer; defined here but allocated and
 * used elsewhere in obdfilter (not visible in this chunk) -- confirm owner. */
57 int *obdfilter_created_scratchpad;
59 /* Grab the dirty and seen grant announcements from the incoming obdo.
60 * We will later calculate the clients new grant and return it.
61 * Caller must hold osfs lock */
/* NOTE(review): interior source lines are missing from this excerpt (e.g. the
 * opening brace and the early-return body at the o_valid check) -- do not
 * infer complete control flow from what is shown here. */
62 static void filter_grant_incoming(struct obd_export *exp, struct obdo *oa)
64 struct filter_export_data *fed;
65 struct obd_device *obd = exp->exp_obd;
/* Caller contract: obd_osfs_lock must already be held (asserted below). */
68 LASSERT_SPIN_LOCKED(&obd->obd_osfs_lock);
/* If the client did not send both BLOCKS and GRANT info, there is nothing
 * to account; strip the grant flag (early return is in an elided line). */
70 if ((oa->o_valid & (OBD_MD_FLBLOCKS|OBD_MD_FLGRANT)) !=
71 (OBD_MD_FLBLOCKS|OBD_MD_FLGRANT)) {
72 oa->o_valid &= ~OBD_MD_FLGRANT;
77 fed = &exp->exp_filter_data;
79 /* Add some margin, since there is a small race if other RPCs arrive
80 * out-or-order and have already consumed some grant. We want to
81 * leave this here in case there is a large error in accounting. */
/* NOTE(review): the CDEBUG/CERROR call opening this format string is on an
 * elided line; only its arguments are visible here. */
83 "%s: cli %s/%p reports grant: "LPU64" dropped: %u, local: %lu\n",
84 obd->obd_name, exp->exp_client_uuid.uuid, exp, oa->o_grant,
85 oa->o_dropped, fed->fed_grant);
87 /* Update our accounting now so that statfs takes it into account.
88 * Note that fed_dirty is only approximate and can become incorrect
89 * if RPCs arrive out-of-order. No important calculations depend
90 * on fed_dirty however, but we must check sanity to not assert. */
/* Clamp the client-reported dirty count into a sane range: negative values
 * (line 92, elided) and values far above the client's grant are distrusted. */
91 if ((long long)oa->o_dirty < 0)
93 else if (oa->o_dirty > fed->fed_grant + 4 * FILTER_GRANT_CHUNK)
94 oa->o_dirty = fed->fed_grant + 4 * FILTER_GRANT_CHUNK;
/* Fold the delta between the new and previously-recorded per-export dirty
 * count into the filter-wide total. */
95 obd->u.filter.fo_tot_dirty += oa->o_dirty - fed->fed_dirty;
/* Sanity: a client cannot have dropped more grant than it was given. */
96 if (fed->fed_grant < oa->o_dropped) {
97 CDEBUG(D_CACHE,"%s: cli %s/%p reports %u dropped > grant %lu\n",
98 obd->obd_name, exp->exp_client_uuid.uuid, exp,
99 oa->o_dropped, fed->fed_grant);
102 if (obd->u.filter.fo_tot_granted < oa->o_dropped) {
103 CERROR("%s: cli %s/%p reports %u dropped > tot_grant "LPU64"\n",
104 obd->obd_name, exp->exp_client_uuid.uuid, exp,
105 oa->o_dropped, obd->u.filter.fo_tot_granted);
/* Retire the dropped grant from both the global and per-export counters. */
108 obd->u.filter.fo_tot_granted -= oa->o_dropped;
109 fed->fed_grant -= oa->o_dropped;
110 fed->fed_dirty = oa->o_dropped;
/* If any signed counter went negative the accounting is corrupt; report it
 * and unlock before whatever error path follows (elided lines). */
111 if (fed->fed_dirty < 0 || fed->fed_grant < 0 || fed->fed_pending < 0) {
112 CERROR("%s: cli %s/%p dirty %ld pend %ld grant %ld\n",
113 obd->obd_name, exp->exp_client_uuid.uuid, exp,
114 fed->fed_dirty, fed->fed_pending, fed->fed_grant);
115 spin_unlock(&obd->obd_osfs_lock);
121 /* Figure out how much space is available between what we've granted
122 * and what remains in the filesystem. Compensate for ext3 indirect
123 * block overhead when computing how much free space is left ungranted.
125 * Caller must hold obd_osfs_lock. */
/* NOTE(review): several interior lines are elided in this excerpt (opening
 * brace, the statfs-refresh/restart branch, and the normal return paths). */
126 obd_size filter_grant_space_left(struct obd_export *exp)
128 struct obd_device *obd = exp->exp_obd;
129 int blockbits = obd->u.obt.obt_sb->s_blocksize_bits;
130 obd_size tot_granted = obd->u.filter.fo_tot_granted, avail, left = 0;
131 int rc, statfs_done = 0;
133 LASSERT_SPIN_LOCKED(&obd->obd_osfs_lock);
/* Refresh cached statfs data if it is more than one jiffy-second old. */
135 if (cfs_time_before_64(obd->obd_osfs_age, cfs_time_current_64() - HZ)) {
137 rc = fsfilt_statfs(obd, obd->u.obt.obt_sb,
138 cfs_time_current_64() + HZ);
139 if (rc) /* N.B. statfs can't really fail */
/* Reserve ~1/8 of available blocks for (d)indirect-block overhead, then
 * convert the remainder (minus llog reservation) from blocks to bytes. */
144 avail = obd->obd_osfs.os_bavail;
145 left = avail - (avail >> (blockbits - 3)); /* (d)indirect */
146 if (left > GRANT_FOR_LLOG(obd)) {
147 left = (left - GRANT_FOR_LLOG(obd)) << blockbits;
149 left = 0 /* << blockbits */;
/* If space looks tight and we worked from stale statfs data, the elided
 * lines presumably retry with fresh data -- confirm against full source. */
152 if (!statfs_done && left < 32 * FILTER_GRANT_CHUNK + tot_granted) {
153 CDEBUG(D_CACHE, "fs has no space left and statfs too old\n");
157 if (left >= tot_granted) {
/* Granted more than physically available (beyond pending writes): this is
 * an accounting error worth shouting about. */
160 if (left < tot_granted - obd->u.filter.fo_tot_pending) {
161 CERROR("%s: cli %s/%p grant "LPU64" > available "
162 LPU64" and pending "LPU64"\n", obd->obd_name,
163 exp->exp_client_uuid.uuid, exp, tot_granted,
164 left, obd->u.filter.fo_tot_pending);
169 CDEBUG(D_CACHE, "%s: cli %s/%p free: "LPU64" avail: "LPU64" grant "LPU64
170 " left: "LPU64" pending: "LPU64"\n", obd->obd_name,
171 exp->exp_client_uuid.uuid, exp,
172 obd->obd_osfs.os_bfree << blockbits, avail << blockbits,
173 tot_granted, left, obd->u.filter.fo_tot_pending);
178 /* Calculate how much grant space to allocate to this client, based on how
179 * much space is currently free and how much of that is already granted.
181 * Caller must hold obd_osfs_lock. */
/* NOTE(review): interior lines are elided (opening brace, declaration and
 * zero-init of `grant`, and the return statement). */
182 long filter_grant(struct obd_export *exp, obd_size current_grant,
183 obd_size want, obd_size fs_space_left)
185 struct obd_device *obd = exp->exp_obd;
186 struct filter_export_data *fed = &exp->exp_filter_data;
187 int blockbits = obd->u.obt.obt_sb->s_blocksize_bits;
190 LASSERT_SPIN_LOCKED(&obd->obd_osfs_lock);
192 /* Grant some fraction of the client's requested grant space so that
193 * they are not always waiting for write credits (not all of it to
194 * avoid overgranting in face of multiple RPCs in flight). This
195 * essentially will be able to control the OSC_MAX_RIF for a client.
197 * If we do have a large disparity between what the client thinks it
198 * has and what we think it has, don't grant very much and let the
199 * client consume its grant first. Either it just has lots of RPCs
200 * in flight, or it was evicted and its grants will soon be used up. */
/* Refuse absurd requests (> 2GB) outright; otherwise only top up clients
 * whose view of their grant is not wildly ahead of ours. */
201 if (want > 0x7fffffff) {
202 CERROR("%s: client %s/%p requesting > 2GB grant "LPU64"\n",
203 obd->obd_name, exp->exp_client_uuid.uuid, exp, want);
204 } else if (current_grant < want &&
205 current_grant < fed->fed_grant + FILTER_GRANT_CHUNK) {
/* Grant the smaller of what was asked for and 1/8 of remaining space
 * (computed in blocks here; converted back on an elided line). */
206 grant = min((want >> blockbits),
207 (fs_space_left >> blockbits) / 8);
211 /* Allow >FILTER_GRANT_CHUNK size when clients
212 * reconnect due to a server reboot.
/* Outside recovery, cap a single grant at FILTER_GRANT_CHUNK. */
214 if ((grant > FILTER_GRANT_CHUNK) &&
215 (!obd->obd_recovering))
216 grant = FILTER_GRANT_CHUNK;
/* Commit the grant to both the filter-wide and per-export totals. */
218 obd->u.filter.fo_tot_granted += grant;
219 fed->fed_grant += grant;
/* Signed-overflow sanity check; the elided lines after the unlock
 * presumably LBUG/return -- confirm against full source. */
220 if (fed->fed_grant < 0) {
221 CERROR("%s: cli %s/%p grant %ld want "LPU64
223 obd->obd_name, exp->exp_client_uuid.uuid,
224 exp, fed->fed_grant, want,current_grant);
225 spin_unlock(&obd->obd_osfs_lock);
/* NOTE(review): the CDEBUG calls wrapping these two format strings are on
 * elided lines; only arguments are visible. */
232 "%s: cli %s/%p wants: "LPU64" current grant "LPU64
233 " granting: "LPU64"\n", obd->obd_name, exp->exp_client_uuid.uuid,
234 exp, want, current_grant, grant);
236 "%s: cli %s/%p tot cached:"LPU64" granted:"LPU64
237 " num_exports: %d\n", obd->obd_name, exp->exp_client_uuid.uuid,
238 exp, obd->u.filter.fo_tot_dirty,
239 obd->u.filter.fo_tot_granted, obd->obd_num_exports);
/* NOTE(review): the comment-opening line, the remaining parameters of the
 * signature (inode, offset), local declarations, and the return statement
 * are on elided lines. */
245 * the routine is used to request pages from pagecache
247 * use GFP_NOFS not allowing to enter FS as the client can run on this node
248 * and we might end waiting on a page he sent in the request we're serving.
250 * use NORETRY so that the allocator doesn't go crazy: chance to more lucky
251 * thread have enough memory to complete his request. for our request client
252 * will do resend hopefully -bzzz
254 static struct page * filter_get_page(struct obd_device *obd,
/* Look the page up in (or add it to) the inode's page cache; GFP_NOFS +
 * __GFP_NORETRY per the rationale above. */
260 page = find_or_create_page(inode->i_mapping, offset >> CFS_PAGE_SHIFT,
261 GFP_NOFS | __GFP_NORETRY);
/* Allocation failure is expected to be rare; count it for /proc stats. */
262 if (unlikely(page == NULL))
263 lprocfs_counter_add(obd->obd_stats, LPROC_FILTER_NO_PAGE, 1);
/* NOTE(review): comment opening, local declarations (i, max), loop interior
 * (per-page advance of lnb/offset/len) and the returns are elided. */
269 * the routine initializes array of local_niobuf from remote_niobuf
271 static int filter_map_remote_to_local(int objcount, struct obd_ioobj *obj,
272 struct niobuf_remote *nb,
273 int *nrpages, struct niobuf_local *res)
275 struct niobuf_remote *rnb;
276 struct niobuf_local *lnb;
280 /* we don't support multiobject RPC yet
281 * ost_brw_read() and ost_brw_write() check this */
282 LASSERT(objcount == 1);
/* For each remote niobuf, split its [offset, offset+len) byte range into
 * page-sized local niobufs (the per-page loop body is partially elided). */
286 for (i = 0, rnb = nb, lnb = res; i < obj->ioo_bufcnt; i++, rnb++) {
287 obd_off offset = rnb->offset;
288 unsigned int len = rnb->len;
/* poff/plen describe the first (possibly partial) page of this segment. */
291 int poff = offset & (CFS_PAGE_SIZE - 1);
292 int plen = CFS_PAGE_SIZE - poff;
/* Caller's result array must be large enough for every page. */
294 if (*nrpages >= max) {
295 CERROR("small array of local bufs: %d\n", max);
301 lnb->offset = offset;
303 lnb->flags = rnb->flags;
306 lnb->lnb_grant_used = 0;
308 LASSERTF(plen <= len, "plen %u, len %u\n", plen, len);
/* NOTE(review): comment opening, opening brace, declaration of i/start/end,
 * and the end-of-loop statement silencing the unused-result warning are
 * elided. */
319 * the function is used to free all pages used for request
320 * just to mimic cacheless OSS which don't occupy much memory
322 void filter_invalidate_cache(struct obd_device *obd, struct obd_ioobj *obj,
323 struct niobuf_remote *nb, struct inode *inode)
325 struct niobuf_remote *rnb;
328 LASSERT(inode != NULL);
/* Drop the page-cache pages backing each remote niobuf's byte range. */
330 for (i = 0, rnb = nb; i < obj->ioo_bufcnt; i++, rnb++) {
334 start = rnb->offset >> CFS_PAGE_SHIFT;
335 end = (rnb->offset + rnb->len) >> CFS_PAGE_SHIFT;
336 invalidate_mapping_pages(inode->i_mapping, start, end);
337 /* just to avoid warnings */
/* Prepare a bulk READ: map the remote niobufs to local pages, start direct
 * I/O for pages not already uptodate, and account the client's grant info.
 * NOTE(review): many interior lines are elided (opening brace, iobuf/timediff
 * declarations, GOTO targets, cleanup-phase labels and RETURNs); the cleanup
 * ordering cannot be fully reconstructed from this excerpt. */
343 static int filter_preprw_read(int cmd, struct obd_export *exp, struct obdo *oa,
344 int objcount, struct obd_ioobj *obj,
345 struct niobuf_remote *nb,
346 int *npages, struct niobuf_local *res,
347 struct obd_trans_info *oti,
348 struct lustre_capa *capa)
350 struct obd_device *obd = exp->exp_obd;
351 struct timeval start, end;
352 struct lvfs_run_ctxt saved;
353 struct niobuf_local *lnb;
354 struct dentry *dentry = NULL;
355 struct inode *inode = NULL;
357 int rc = 0, i, tot_bytes = 0;
358 unsigned long now = jiffies;
362 /* We are currently not supporting multi-obj BRW_READ RPCS at all.
363 * When we do this function's dentry cleanup will need to be fixed.
364 * These values are verified in ost_brw_write() from the wire. */
365 LASSERTF(objcount == 1, "%d\n", objcount);
366 LASSERTF(obj->ioo_bufcnt > 0, "%d\n", obj->ioo_bufcnt);
/* Capability check (error handling on elided lines). */
368 rc = filter_auth_capa(exp, NULL, obdo_mdsno(oa), capa,
/* Reads also carry grant announcements; process them under osfs lock. */
373 if (oa && oa->o_valid & OBD_MD_FLGRANT) {
374 spin_lock(&obd->obd_osfs_lock);
375 filter_grant_incoming(exp, oa);
378 spin_unlock(&obd->obd_osfs_lock);
381 iobuf = filter_iobuf_get(&obd->u.filter, oti);
383 RETURN(PTR_ERR(iobuf));
385 push_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
386 dentry = filter_oa2dentry(obd, oa);
387 if (IS_ERR(dentry)) {
388 rc = PTR_ERR(dentry);
393 inode = dentry->d_inode;
/* Apply client-supplied atime to the inode. */
395 obdo_to_inode(inode, oa, OBD_MD_FLATIME);
397 rc = filter_map_remote_to_local(objcount, obj, nb, npages, res);
401 fsfilt_check_slow(obd, now, "preprw_read setup");
403 /* find pages for all segments, fill array with them */
404 do_gettimeofday(&start);
405 for (i = 0, lnb = res; i < *npages; i++, lnb++) {
407 lnb->dentry = dentry;
409 if (i_size_read(inode) <= lnb->offset)
410 /* If there's no more data, abort early. lnb->rc == 0,
411 * so it's easy to detect later. */
414 lnb->page = filter_get_page(obd, inode, lnb->offset);
415 if (lnb->page == NULL)
416 GOTO(cleanup, rc = -ENOMEM);
418 lprocfs_counter_add(obd->obd_stats, LPROC_FILTER_CACHE_ACCESS, 1);
/* Short read at EOF: clamp this segment's byte count to the file size. */
420 if (i_size_read(inode) < lnb->offset + lnb->len - 1)
421 lnb->rc = i_size_read(inode) - lnb->offset;
425 tot_bytes += lnb->rc;
/* Uptodate pages are cache hits; others are queued for direct I/O. */
427 if (PageUptodate(lnb->page)) {
428 lprocfs_counter_add(obd->obd_stats,
429 LPROC_FILTER_CACHE_HIT, 1);
433 lprocfs_counter_add(obd->obd_stats, LPROC_FILTER_CACHE_MISS, 1);
434 filter_iobuf_add_page(obd, iobuf, inode, lnb->page);
436 do_gettimeofday(&end);
437 timediff = cfs_timeval_sub(&end, &start, NULL);
438 lprocfs_counter_add(obd->obd_stats, LPROC_FILTER_GET_PAGE, timediff);
/* Fault-injection hook for OOM testing. */
440 if (OBD_FAIL_CHECK(OBD_FAIL_OST_NOMEM))
441 GOTO(cleanup, rc = -ENOMEM);
443 fsfilt_check_slow(obd, now, "start_page_read");
/* Read the cache-miss pages from disk in one shot. */
445 rc = filter_direct_io(OBD_BRW_READ, dentry, iobuf,
446 exp, NULL, NULL, NULL);
450 lprocfs_counter_add(obd->obd_stats, LPROC_FILTER_READ_BYTES, tot_bytes);
452 if (exp->exp_nid_stats && exp->exp_nid_stats->nid_stats)
453 lprocfs_counter_add(exp->exp_nid_stats->nid_stats,
454 LPROC_FILTER_READ_BYTES, tot_bytes);
459 /* unlock pages to allow access from concurrent OST_READ */
460 for (i = 0, lnb = res; i < *npages; i++, lnb++) {
462 LASSERT(PageLocked(lnb->page));
463 unlock_page(lnb->page);
/* Error path (elided label): drop the page references taken above. */
466 page_cache_release(lnb->page);
477 filter_iobuf_put(&obd->u.filter, iobuf, oti);
479 pop_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
481 CERROR("io error %d\n", rc);
486 /* When clients have dirtied as much space as they've been granted they
487 * fall through to sync writes. These sync writes haven't been expressed
488 * in grants and need to error with ENOSPC when there isn't room in the
489 * filesystem for them after grants are taken into account. However,
490 * writeback of the dirty data that was already granted space can write
493 * Caller must hold obd_osfs_lock. */
/* NOTE(review): interior lines are elided (opening brace, per-buffer
 * `bytes`/`tmp` declarations, rc assignments in the granted branches, and
 * the final return). Verify branch outcomes against the full source. */
494 static int filter_grant_check(struct obd_export *exp, struct obdo *oa,
495 int objcount, struct fsfilt_objinfo *fso,
496 int niocount, struct niobuf_local *lnb,
497 obd_size *left, struct inode *inode)
499 struct filter_export_data *fed = &exp->exp_filter_data;
500 int blocksize = exp->exp_obd->u.obt.obt_sb->s_blocksize;
501 unsigned long used = 0, ungranted = 0, using;
502 int i, rc = -ENOSPC, obj, n = 0;
504 LASSERT_SPIN_LOCKED(&exp->exp_obd->obd_osfs_lock);
506 for (obj = 0; obj < objcount; obj++) {
507 for (i = 0; i < fso[obj].fso_bufcnt; i++, n++) {
510 /* should match the code in osc_exit_cache */
/* Round the I/O out to full blocks: leading and trailing partial
 * blocks still consume whole blocks on disk. */
512 bytes += lnb[n].offset & (blocksize - 1);
513 tmp = (lnb[n].offset + lnb[n].len) & (blocksize - 1);
515 bytes += blocksize - tmp;
/* Case 1: client says this page was covered by its grant. */
517 if ((lnb[n].flags & OBD_BRW_FROM_GRANT) &&
518 (oa->o_valid & OBD_MD_FLGRANT)) {
519 if (fed->fed_grant < used + bytes) {
521 "%s: cli %s/%p claims %ld+%d "
522 "GRANT, real grant %lu idx %d\n",
523 exp->exp_obd->obd_name,
524 exp->exp_client_uuid.uuid, exp,
525 used, bytes, fed->fed_grant, n);
528 lnb[n].flags |= OBD_BRW_GRANTED;
529 lnb[n].lnb_grant_used = bytes;
530 CDEBUG(0, "idx %d used=%lu\n", n, used);
/* Case 2: not covered by grant, but free space remains -- treat as
 * implicitly granted and charge it to `ungranted`. */
535 if (*left > ungranted + bytes) {
536 /* if enough space, pretend it was granted */
538 lnb[n].flags |= OBD_BRW_GRANTED;
539 lnb[n].lnb_grant_used = bytes;
540 CDEBUG(0, "idx %d ungranted=%lu\n",n,ungranted);
545 /* We can't check for already-mapped blocks here, as
546 * it requires dropping the osfs lock to do the bmap.
547 * Instead, we return ENOSPC and in that case we need
548 * to go through and verify if all of the blocks not
549 * marked BRW_GRANTED are already mapped and we can
550 * ignore this error. */
/* Case 3: no space; leave the buffer unmarked so the caller can
 * distinguish it (overall rc stays -ENOSPC unless set elsewhere). */
552 lnb[n].flags &= ~OBD_BRW_GRANTED;
553 CDEBUG(D_CACHE,"%s: cli %s/%p idx %d no space for %d\n",
554 exp->exp_obd->obd_name,
555 exp->exp_client_uuid.uuid, exp, n, bytes);
559 /* Now substract what client have used already. We don't subtract
560 * this from the tot_granted yet, so that other client's can't grab
561 * that space before we have actually allocated our blocks. That
562 * happens in filter_grant_commit() after the writes are done. */
564 fed->fed_grant -= used;
565 fed->fed_pending += used + ungranted;
566 exp->exp_obd->u.filter.fo_tot_granted += ungranted;
567 exp->exp_obd->u.filter.fo_tot_pending += used + ungranted;
570 "%s: cli %s/%p used: %lu ungranted: %lu grant: %lu dirty: %lu\n",
571 exp->exp_obd->obd_name, exp->exp_client_uuid.uuid, exp, used,
572 ungranted, fed->fed_grant, fed->fed_dirty);
574 /* Rough calc in case we don't refresh cached statfs data */
575 using = (used + ungranted + 1 ) >>
576 exp->exp_obd->u.obt.obt_sb->s_blocksize_bits;
577 if (exp->exp_obd->obd_osfs.os_bavail > using)
578 exp->exp_obd->obd_osfs.os_bavail -= using;
580 exp->exp_obd->obd_osfs.os_bavail = 0;
/* The written bytes are no longer "dirty"; cap at the recorded value to
 * tolerate out-of-order RPC accounting drift. */
582 if (fed->fed_dirty < used) {
583 CERROR("%s: cli %s/%p claims used %lu > fed_dirty %lu\n",
584 exp->exp_obd->obd_name, exp->exp_client_uuid.uuid, exp,
585 used, fed->fed_dirty);
586 used = fed->fed_dirty;
588 exp->exp_obd->u.filter.fo_tot_dirty -= used;
589 fed->fed_dirty -= used;
/* Final signed-counter sanity check, mirroring filter_grant_incoming(). */
591 if (fed->fed_dirty < 0 || fed->fed_grant < 0 || fed->fed_pending < 0) {
592 CERROR("%s: cli %s/%p dirty %ld pend %ld grant %ld\n",
593 exp->exp_obd->obd_name, exp->exp_client_uuid.uuid, exp,
594 fed->fed_dirty, fed->fed_pending, fed->fed_grant);
595 spin_unlock(&exp->exp_obd->obd_osfs_lock);
601 /* If we ever start to support multi-object BRW RPCs, we will need to get locks
602 * on mulitple inodes. That isn't all, because there still exists the
603 * possibility of a truncate starting a new transaction while holding the ext3
604 * rwsem = write while some writes (which have started their transactions here)
605 * blocking on the ext3 rwsem = read => lock inversion.
607 * The handling gets very ugly when dealing with locked pages. It may be easier
608 * to just get rid of the locked page code (which has problems of its own) and
609 * either discover we do not need it anymore (i.e. it was a symptom of another
610 * bug) or ensure we get the page locks in an appropriate order. */
/* Prepare a bulk WRITE: authenticate, locate the object, perform grant
 * accounting, pin/zero the target pages, and pre-read partial pages.
 * NOTE(review): this excerpt elides many lines (opening brace, iobuf/left
 * declarations, cleanup_phase increments, GOTO targets, kmap/kunmap pairing,
 * and the success RETURN). The switch-based cleanup at the bottom cannot be
 * fully reconstructed from what is visible. */
611 static int filter_preprw_write(int cmd, struct obd_export *exp, struct obdo *oa,
612 int objcount, struct obd_ioobj *obj,
613 struct niobuf_remote *nb, int *npages,
614 struct niobuf_local *res,
615 struct obd_trans_info *oti,
616 struct lustre_capa *capa)
618 struct obd_device *obd = exp->exp_obd;
619 struct timeval start, end;
620 struct lvfs_run_ctxt saved;
621 struct niobuf_local *lnb = res;
622 struct fsfilt_objinfo fso;
623 struct filter_mod_data *fmd;
624 struct dentry *dentry = NULL;
627 unsigned long now = jiffies, timediff;
628 int rc = 0, i, tot_bytes = 0, cleanup_phase = 0;
630 LASSERT(objcount == 1);
631 LASSERT(obj->ioo_bufcnt > 0);
633 rc = filter_auth_capa(exp, NULL, obdo_mdsno(oa), capa,
638 push_ctxt(&saved, &exp->exp_obd->obd_lvfs_ctxt, NULL);
639 iobuf = filter_iobuf_get(&exp->exp_obd->u.filter, oti);
641 GOTO(cleanup, rc = PTR_ERR(iobuf));
644 dentry = filter_fid2dentry(exp->exp_obd, NULL, obj->ioo_gr,
647 GOTO(cleanup, rc = PTR_ERR(dentry));
/* Writes to objects that were never created are a protocol error. */
650 if (dentry->d_inode == NULL) {
651 CERROR("%s: trying to BRW to non-existent file "LPU64"\n",
652 exp->exp_obd->obd_name, obj->ioo_id);
653 GOTO(cleanup, rc = -ENOENT);
/* setuid/setgid objects need their ownership fixed via the capability. */
656 if (oa->o_valid & (OBD_MD_FLUID | OBD_MD_FLGID) &&
657 dentry->d_inode->i_mode & (S_ISUID | S_ISGID)) {
658 rc = filter_capa_fixoa(exp, oa, obdo_mdsno(oa), capa);
663 rc = filter_map_remote_to_local(objcount, obj, nb, npages, res);
667 fsfilt_check_slow(exp->exp_obd, now, "preprw_write setup");
669 /* Don't update inode timestamps if this write is older than a
670 * setattr which modifies the timestamps. b=10150 */
671 /* XXX when we start having persistent reservations this needs to
672 * be changed to filter_fmd_get() to create the fmd if it doesn't
673 * already exist so we can store the reservation handle there. */
674 fmd = filter_fmd_find(exp, obj->ioo_id, obj->ioo_gr);
/* Grant accounting: process incoming announcements, check/charge grants
 * for every page, and compute the client's new grant -- all atomically
 * under obd_osfs_lock. */
677 spin_lock(&exp->exp_obd->obd_osfs_lock);
678 filter_grant_incoming(exp, oa);
679 if (fmd && fmd->fmd_mactime_xid > oti->oti_xid)
680 oa->o_valid &= ~(OBD_MD_FLMTIME | OBD_MD_FLCTIME |
683 obdo_to_inode(dentry->d_inode, oa, OBD_MD_FLATIME |
684 OBD_MD_FLMTIME | OBD_MD_FLCTIME);
687 left = filter_grant_space_left(exp);
689 fso.fso_dentry = dentry;
690 fso.fso_bufcnt = *npages;
692 rc = filter_grant_check(exp, oa, objcount, &fso, *npages, res,
693 &left, dentry->d_inode);
695 /* do not zero out oa->o_valid as it is used in filter_commitrw_write()
696 * for setting UID/GID and fid EA in first write time. */
697 if (oa->o_valid & OBD_MD_FLGRANT)
698 oa->o_grant = filter_grant(exp,oa->o_grant,oa->o_undirty,left);
700 spin_unlock(&exp->exp_obd->obd_osfs_lock);
701 filter_fmd_put(exp, fmd);
706 do_gettimeofday(&start);
707 for (i = 0, lnb = res; i < *npages; i++, lnb++) {
709 /* We still set up for ungranted pages so that granted pages
710 * can be written to disk as they were promised, and portals
711 * needs to keep the pages all aligned properly. */
712 lnb->dentry = dentry;
714 lnb->page = filter_get_page(obd, dentry->d_inode, lnb->offset);
715 if (lnb->page == NULL)
716 GOTO(cleanup, rc = -ENOMEM);
719 /* DLM locking protects us from write and truncate competing
720 * for same region, but truncate can leave dirty page in the
721 * cache. it's possible the writeout on a such a page is in
722 * progress when we access it. it's also possible that during
723 * this writeout we put new (partial) data, but then won't
724 * be able to proceed in filter_commitrw_write(). thus let's
725 * just wait for writeout completion, should be rare enough.
727 if (obd->u.filter.fo_writethrough_cache)
728 wait_on_page_writeback(lnb->page);
729 BUG_ON(PageWriteback(lnb->page));
731 /* If the filter writes a partial page, then has the file
732 * extended, the client will read in the whole page. the
733 * filter has to be careful to zero the rest of the partial
734 * page on disk. we do it by hand for partial extending
735 * writes, send_bio() is responsible for zeroing pages when
736 * asked to read unmapped blocks -- brw_kiovec() does this. */
737 if (lnb->len != CFS_PAGE_SIZE) {
/* Partial page within current EOF: must pre-read it so the
 * untouched portion keeps its on-disk contents. */
740 maxidx = ((i_size_read(dentry->d_inode) +
741 CFS_PAGE_SIZE - 1) >> CFS_PAGE_SHIFT) - 1;
742 if (maxidx >= lnb->page->index) {
743 LL_CDEBUG_PAGE(D_PAGE, lnb->page, "write %u @ "
744 LPU64" flg %x before EOF %llu\n",
745 lnb->len, lnb->offset,lnb->flags,
746 i_size_read(dentry->d_inode));
747 filter_iobuf_add_page(exp->exp_obd, iobuf,
/* Partial page beyond EOF: zero the head/tail by hand
 * (kunmap is on an elided line). */
752 char *p = kmap(lnb->page);
754 off = lnb->offset & ~CFS_PAGE_MASK;
757 off = (lnb->offset + lnb->len) & ~CFS_PAGE_MASK;
759 memset(p + off, 0, CFS_PAGE_SIZE - off);
764 tot_bytes += lnb->len;
766 do_gettimeofday(&end);
767 timediff = cfs_timeval_sub(&end, &start, NULL);
768 lprocfs_counter_add(obd->obd_stats, LPROC_FILTER_GET_PAGE, timediff);
770 if (OBD_FAIL_CHECK(OBD_FAIL_OST_NOMEM))
771 GOTO(cleanup, rc = -ENOMEM);
773 /* don't unlock pages to prevent any access */
774 rc = filter_direct_io(OBD_BRW_READ, dentry, iobuf, exp,
777 fsfilt_check_slow(exp->exp_obd, now, "start_page_write");
779 if (exp->exp_nid_stats && exp->exp_nid_stats->nid_stats)
780 lprocfs_counter_add(exp->exp_nid_stats->nid_stats,
781 LPROC_FILTER_WRITE_BYTES, tot_bytes);
/* Phased cleanup: later phases fall through to earlier ones (case labels
 * are on elided lines). */
784 switch(cleanup_phase) {
787 for (i = 0, lnb = res; i < *npages; i++, lnb++) {
788 if (lnb->page != NULL) {
789 unlock_page(lnb->page);
790 page_cache_release(lnb->page);
796 filter_iobuf_put(&exp->exp_obd->u.filter, iobuf, oti);
798 pop_ctxt(&saved, &exp->exp_obd->obd_lvfs_ctxt, NULL);
803 filter_iobuf_put(&exp->exp_obd->u.filter, iobuf, oti);
/* On this error path grant announcements must still be consumed so the
 * accounting does not drift. */
805 spin_lock(&exp->exp_obd->obd_osfs_lock);
807 filter_grant_incoming(exp, oa);
808 spin_unlock(&exp->exp_obd->obd_osfs_lock);
809 pop_ctxt(&saved, &exp->exp_obd->obd_lvfs_ctxt, NULL);
/* Dispatch a bulk-prepare request to the read or write implementation based
 * on `cmd`. NOTE(review): opening brace and the fall-through handling for an
 * unknown cmd are on elided lines. */
816 int filter_preprw(int cmd, struct obd_export *exp, struct obdo *oa,
817 int objcount, struct obd_ioobj *obj,
818 struct niobuf_remote *nb, int *npages,
819 struct niobuf_local *res, struct obd_trans_info *oti,
820 struct lustre_capa *capa)
822 if (cmd == OBD_BRW_WRITE)
823 return filter_preprw_write(cmd, exp, oa, objcount, obj,
824 nb, npages, res, oti, capa);
825 if (cmd == OBD_BRW_READ)
826 return filter_preprw_read(cmd, exp, oa, objcount, obj,
827 nb, npages, res, oti, capa);
/* Finish a bulk READ: propagate the atime update to the DLM resource LVB,
 * release page references, and optionally drop the pages from the cache.
 * NOTE(review): opening brace, the declaration of i, the dentry f_dput (after
 * line 876), and the RETURN are on elided lines. */
832 static int filter_commitrw_read(struct obd_export *exp, struct obdo *oa,
833 int objcount, struct obd_ioobj *obj,
834 struct niobuf_remote *rnb,
835 int npages, struct niobuf_local *res,
836 struct obd_trans_info *oti, int rc)
838 struct filter_obd *fo = &exp->exp_obd->u.filter;
839 struct inode *inode = NULL;
840 struct ldlm_res_id res_id;
841 struct ldlm_resource *resource = NULL;
842 struct ldlm_namespace *ns = exp->exp_obd->obd_namespace;
843 struct niobuf_local *lnb;
847 osc_build_res_name(obj->ioo_id, obj->ioo_gr, &res_id);
848 /* If oa != NULL then filter_preprw_read updated the inode atime
849 * and we should update the lvb so that other glimpses will also
850 * get the updated value. bug 5972 */
851 if (oa && ns && ns->ns_lvbo && ns->ns_lvbo->lvbo_update) {
852 resource = ldlm_resource_get(ns, NULL, &res_id, LDLM_EXTENT, 0);
854 if (resource != NULL) {
855 LDLM_RESOURCE_ADDREF(resource);
856 ns->ns_lvbo->lvbo_update(resource, NULL, 0, 1);
857 LDLM_RESOURCE_DELREF(resource);
858 ldlm_resource_putref(resource);
862 if (res->dentry != NULL)
863 inode = res->dentry->d_inode;
/* Drop the page references taken in filter_preprw_read(). */
865 for (i = 0, lnb = res; i < npages; i++, lnb++) {
866 if (lnb->page != NULL) {
867 page_cache_release(lnb->page);
/* Mimic a cacheless OSS: evict pages when read caching is disabled or the
 * file exceeds the configured read-cache size limit. */
872 if (inode && (fo->fo_read_cache == 0 ||
873 i_size_read(inode) > fo->fo_readcache_max_filesize))
874 filter_invalidate_cache(exp->exp_obd, obj, rnb, inode);
876 if (res->dentry != NULL)
/* After the writes have hit disk, retire the per-page grant charges recorded
 * by filter_grant_check() from the pending/granted totals.
 * NOTE(review): opening brace, declaration of i, and the closing lines are
 * elided. */
881 void filter_grant_commit(struct obd_export *exp, int niocount,
882 struct niobuf_local *res)
884 struct filter_obd *filter = &exp->exp_obd->u.filter;
885 struct niobuf_local *lnb = res;
886 unsigned long pending = 0;
889 spin_lock(&exp->exp_obd->obd_osfs_lock);
/* Sum the grant bytes charged to each local niobuf. */
890 for (i = 0, lnb = res; i < niocount; i++, lnb++)
891 pending += lnb->lnb_grant_used;
/* Each counter must cover what we are about to subtract, else accounting
 * is corrupt -- assert loudly. */
893 LASSERTF(exp->exp_filter_data.fed_pending >= pending,
894 "%s: cli %s/%p fed_pending: %lu grant_used: %lu\n",
895 exp->exp_obd->obd_name, exp->exp_client_uuid.uuid, exp,
896 exp->exp_filter_data.fed_pending, pending);
897 exp->exp_filter_data.fed_pending -= pending;
898 LASSERTF(filter->fo_tot_granted >= pending,
899 "%s: cli %s/%p tot_granted: "LPU64" grant_used: %lu\n",
900 exp->exp_obd->obd_name, exp->exp_client_uuid.uuid, exp,
901 exp->exp_obd->u.filter.fo_tot_granted, pending);
902 filter->fo_tot_granted -= pending;
903 LASSERTF(filter->fo_tot_pending >= pending,
904 "%s: cli %s/%p tot_pending: "LPU64" grant_used: %lu\n",
905 exp->exp_obd->obd_name, exp->exp_client_uuid.uuid, exp,
906 filter->fo_tot_pending, pending);
907 filter->fo_tot_pending -= pending;
909 spin_unlock(&exp->exp_obd->obd_osfs_lock);
/* Dispatch a bulk-commit request to the read or write implementation based
 * on `cmd`; mirrors filter_preprw(). NOTE(review): the final signature
 * parameter, opening brace, and unknown-cmd fall-through are elided. */
912 int filter_commitrw(int cmd, struct obd_export *exp, struct obdo *oa,
913 int objcount, struct obd_ioobj *obj,
914 struct niobuf_remote *nb, int npages,
915 struct niobuf_local *res, struct obd_trans_info *oti,
918 if (cmd == OBD_BRW_WRITE)
919 return filter_commitrw_write(exp, oa, objcount, obj,
920 nb, npages, res, oti, rc);
921 if (cmd == OBD_BRW_READ)
922 return filter_commitrw_read(exp, oa, objcount, obj,
923 nb, npages, res, oti, rc);
/* Server-local bulk I/O: build remote/local niobuf arrays from a brw_page
 * array, then run the full preprw/commitrw cycle against this filter.
 * NOTE(review): this function continues past the end of this excerpt (error
 * checks after preprw, the out: label's RETURN, and the closing brace are
 * not visible); several declarations (ioo setup lines, npages, ret) are
 * also on elided lines. */
928 int filter_brw(int cmd, struct obd_export *exp, struct obd_info *oinfo,
929 obd_count oa_bufs, struct brw_page *pga,
930 struct obd_trans_info *oti)
932 struct obd_ioobj ioo;
933 struct niobuf_local *lnb;
934 struct niobuf_remote *rnb;
939 OBD_ALLOC(lnb, oa_bufs * sizeof(struct niobuf_local));
940 OBD_ALLOC(rnb, oa_bufs * sizeof(struct niobuf_remote));
942 if (lnb == NULL || rnb == NULL)
943 GOTO(out, ret = -ENOMEM);
/* Seed the local niobufs with the caller's pages and the remote niobufs
 * with the corresponding byte ranges. */
945 for (i = 0; i < oa_bufs; i++) {
946 lnb[i].page = pga[i].pg;
947 rnb[i].offset = pga[i].off;
948 rnb[i].len = pga[i].count;
951 obdo_to_ioobj(oinfo->oi_oa, &ioo);
952 ioo.ioo_bufcnt = oa_bufs;
955 ret = filter_preprw(cmd, exp, oinfo->oi_oa, 1, &ioo,
956 rnb, &npages, lnb, oti, oinfo_capa(oinfo));
959 LASSERTF(oa_bufs == npages, "%u != %u\n", oa_bufs, npages);
961 ret = filter_commitrw(cmd, exp, oinfo->oi_oa, 1, &ioo, rnb,
962 npages, lnb, oti, ret);
966 OBD_FREE(lnb, oa_bufs * sizeof(struct niobuf_local));
968 OBD_FREE(rnb, oa_bufs * sizeof(struct niobuf_remote));