1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
4 * linux/fs/obdfilter/filter_io.c
6 * Copyright (c) 2001-2003 Cluster File Systems, Inc.
7 * Author: Peter Braam <braam@clusterfs.com>
8 * Author: Andreas Dilger <adilger@clusterfs.com>
9 * Author: Phil Schwan <phil@clusterfs.com>
11 * This file is part of the Lustre file system, http://www.lustre.org
12 * Lustre is a trademark of Cluster File Systems, Inc.
14 * You may have signed or agreed to another license before downloading
15 * this software. If so, you are bound by the terms and conditions
16 * of that agreement, and the following does not apply to you. See the
17 * LICENSE file included with this distribution for more information.
19 * If you did not agree to a different license, then this copy of Lustre
20 * is open source software; you can redistribute it and/or modify it
21 * under the terms of version 2 of the GNU General Public License as
22 * published by the Free Software Foundation.
24 * In either case, Lustre is distributed in the hope that it will be
25 * useful, but WITHOUT ANY WARRANTY; without even the implied warranty
26 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
27 * license text for more details.
30 #define DEBUG_SUBSYSTEM S_FILTER
32 #ifndef AUTOCONF_INCLUDED
33 #include <linux/config.h>
35 #include <linux/module.h>
36 #include <linux/pagemap.h> // XXX kill me soon
37 #include <linux/version.h>
39 #include <obd_class.h>
40 #include <lustre_fsfilt.h>
41 #include "filter_internal.h"
43 int *obdfilter_created_scratchpad;
/* Prepare a client-page (lnb->page) for direct I/O against @inode:
 * poison the page for debugging, zero the tail of a partial page so no
 * stale data leaks past lnb->len, and derive page->index from the byte
 * offset of the niobuf.
 * NOTE(review): interior lines of this function are elided in this
 * chunk (source numbering gaps); comments cover only visible code. */
45 static int filter_alloc_dio_page(struct obd_device *obd, struct inode *inode,
46 struct niobuf_local *lnb)
/* Caller (ost_brw layer) must have attached a page from the per-thread
 * pool before calling us. */
50 LASSERT(lnb->page != NULL);
54 POISON_PAGE(page, 0xf1);
/* Partial page: zero from lnb->len to the end so reads see zeros. */
55 if (lnb->len != CFS_PAGE_SIZE) {
56 memset(kmap(page) + lnb->len, 0, CFS_PAGE_SIZE - lnb->len);
/* Page index is in CFS_PAGE_SIZE units; lnb->offset is in bytes. */
60 page->index = lnb->offset >> CFS_PAGE_SHIFT;
/* Release per-page direct-I/O state for every local niobuf of every
 * object in the request: outer loop walks the @objcount objects, inner
 * loop walks each object's ioo_bufcnt buffers, advancing @res in step.
 * NOTE(review): the inner-loop body is elided in this chunk — TODO
 * confirm what is freed per page against the full source. */
65 static void filter_free_dio_pages(int objcount, struct obd_ioobj *obj,
66 int niocount, struct niobuf_local *res)
70 for (i = 0; i < objcount; i++, obj++) {
71 for (j = 0 ; j < obj->ioo_bufcnt ; j++, res++)
76 /* Grab the dirty and seen grant announcements from the incoming obdo.
77 * We will later calculate the client's new grant and return it.
78 * Caller must hold osfs lock */
79 static void filter_grant_incoming(struct obd_export *exp, struct obdo *oa)
81 struct filter_export_data *fed;
82 struct obd_device *obd = exp->exp_obd;
/* Static rate-limit state for the noisy-grant message below; shared
 * across all exports, so the limit is per-server, not per-client. */
83 static unsigned long last_msg;
84 static int last_count;
88 LASSERT_SPIN_LOCKED(&obd->obd_osfs_lock);
/* Both FLBLOCKS and FLGRANT must be announced together; otherwise
 * drop the grant flag and (in elided code) bail out early. */
90 if ((oa->o_valid & (OBD_MD_FLBLOCKS|OBD_MD_FLGRANT)) !=
91 (OBD_MD_FLBLOCKS|OBD_MD_FLGRANT)) {
92 oa->o_valid &= ~OBD_MD_FLGRANT;
97 fed = &exp->exp_filter_data;
99 /* Don't print this to the console the first time it happens, since
100 * it can happen legitimately on occasion, but only rarely. */
101 if (time_after(jiffies, last_msg + 60 * HZ)) {
/* Only escalate the log mask on power-of-two occurrence counts. */
105 if ((last_count & (-last_count)) == last_count)
106 mask = D_HA /* until bug 3273 is fixed D_WARNING */;
109 /* Add some margin, since there is a small race if other RPCs arrive
110 * out-of-order and have already consumed some grant. We want to
111 * leave this here in case there is a large error in accounting. */
112 CDEBUG(oa->o_grant > fed->fed_grant + FILTER_GRANT_CHUNK ? mask:D_CACHE,
113 "%s: cli %s/%p reports grant: "LPU64" dropped: %u, local: %lu\n",
114 obd->obd_name, exp->exp_client_uuid.uuid, exp, oa->o_grant,
115 oa->o_dropped, fed->fed_grant);
117 /* Update our accounting now so that statfs takes it into account.
118 * Note that fed_dirty is only approximate and can become incorrect
119 * if RPCs arrive out-of-order. No important calculations depend
120 * on fed_dirty however, but we must check sanity to not assert. */
/* Clamp the client-reported dirty count into a sane range before
 * folding it into the server-wide fo_tot_dirty total. */
121 if ((long long)oa->o_dirty < 0)
123 else if (oa->o_dirty > fed->fed_grant + 4 * FILTER_GRANT_CHUNK)
124 oa->o_dirty = fed->fed_grant + 4 * FILTER_GRANT_CHUNK;
125 obd->u.filter.fo_tot_dirty += oa->o_dirty - fed->fed_dirty;
/* A client cannot legitimately drop more grant than it held. */
126 if (fed->fed_grant < oa->o_dropped) {
127 CDEBUG(D_HA,"%s: cli %s/%p reports %u dropped > fedgrant %lu\n",
128 obd->obd_name, exp->exp_client_uuid.uuid, exp,
129 oa->o_dropped, fed->fed_grant);
132 if (obd->u.filter.fo_tot_granted < oa->o_dropped) {
133 CERROR("%s: cli %s/%p reports %u dropped > tot_grant "LPU64"\n",
134 obd->obd_name, exp->exp_client_uuid.uuid, exp,
135 oa->o_dropped, obd->u.filter.fo_tot_granted);
/* Retire the dropped grant from both global and per-export totals. */
138 obd->u.filter.fo_tot_granted -= oa->o_dropped;
139 fed->fed_grant -= oa->o_dropped;
140 fed->fed_dirty = oa->o_dirty;
/* Accounting went negative — log loudly and (in elided code) assert;
 * the lock is dropped here before the failure path. */
141 if (fed->fed_dirty < 0 || fed->fed_grant < 0 || fed->fed_pending < 0) {
142 CERROR("%s: cli %s/%p dirty %ld pend %ld grant %ld\n",
143 obd->obd_name, exp->exp_client_uuid.uuid, exp,
144 fed->fed_dirty, fed->fed_pending, fed->fed_grant);
145 spin_unlock(&obd->obd_osfs_lock);
151 /* Figure out how much space is available between what we've granted
152 * and what remains in the filesystem. Compensate for ext3 indirect
153 * block overhead when computing how much free space is left ungranted.
155 * Caller must hold obd_osfs_lock.
 * Returns the number of ungranted bytes (0 if everything is granted).
 * NOTE(review): interior lines are elided in this chunk. */
156 obd_size filter_grant_space_left(struct obd_export *exp)
158 struct obd_device *obd = exp->exp_obd;
159 int blockbits = obd->u.obt.obt_sb->s_blocksize_bits;
160 obd_size tot_granted = obd->u.filter.fo_tot_granted, avail, left = 0;
161 int rc, statfs_done = 0;
163 LASSERT_SPIN_LOCKED(&obd->obd_osfs_lock);
/* Refresh cached statfs data if it is more than one second old. */
165 if (cfs_time_before_64(obd->obd_osfs_age, cfs_time_current_64() - HZ)) {
167 rc = fsfilt_statfs(obd, obd->u.obt.obt_sb,
168 cfs_time_current_64() + HZ);
169 if (rc) /* N.B. statfs can't really fail */
/* Deduct ~1/8 of available blocks for (d)indirect-block overhead,
 * then reserve space for the llog before converting to bytes. */
174 avail = obd->obd_osfs.os_bavail;
175 left = avail - (avail >> (blockbits - 3)); /* (d)indirect */
176 if (left > GRANT_FOR_LLOG(obd)) {
177 left = (left - GRANT_FOR_LLOG(obd)) << blockbits;
179 left = 0 /* << blockbits */;
/* Stale statfs and nearly-exhausted space: (elided code presumably
 * forces a fresh statfs and retries — TODO confirm). */
182 if (!statfs_done && left < 32 * FILTER_GRANT_CHUNK + tot_granted) {
183 CDEBUG(D_CACHE, "fs has no space left and statfs too old\n");
187 if (left >= tot_granted) {
/* Granted more than the filesystem can cover even counting pending
 * writes — accounting error worth shouting about. */
190 if (left < tot_granted - obd->u.filter.fo_tot_pending) {
191 CERROR("%s: cli %s/%p grant "LPU64" > available "
192 LPU64" and pending "LPU64"\n", obd->obd_name,
193 exp->exp_client_uuid.uuid, exp, tot_granted,
194 left, obd->u.filter.fo_tot_pending);
199 CDEBUG(D_CACHE, "%s: cli %s/%p free: "LPU64" avail: "LPU64" grant "LPU64
200 " left: "LPU64" pending: "LPU64"\n", obd->obd_name,
201 exp->exp_client_uuid.uuid, exp,
202 obd->obd_osfs.os_bfree << blockbits, avail << blockbits,
203 tot_granted, left, obd->u.filter.fo_tot_pending);
208 /* Calculate how much grant space to allocate to this client, based on how
209 * much space is currently free and how much of that is already granted.
211 * Caller must hold obd_osfs_lock.
 * @current_grant: what the client believes it holds; @want: what it asks
 * for; @fs_space_left: ungranted bytes from filter_grant_space_left().
 * Returns the client's new total grant (bytes).
 * NOTE(review): interior lines are elided in this chunk. */
212 long filter_grant(struct obd_export *exp, obd_size current_grant,
213 obd_size want, obd_size fs_space_left)
215 struct obd_device *obd = exp->exp_obd;
216 struct filter_export_data *fed = &exp->exp_filter_data;
217 int blockbits = obd->u.obt.obt_sb->s_blocksize_bits;
220 LASSERT_SPIN_LOCKED(&obd->obd_osfs_lock);
222 /* Grant some fraction of the client's requested grant space so that
223 * they are not always waiting for write credits (not all of it to
224 * avoid overgranting in face of multiple RPCs in flight). This
225 * essentially will be able to control the OSC_MAX_RIF for a client.
227 * If we do have a large disparity between what the client thinks it
228 * has and what we think it has, don't grant very much and let the
229 * client consume its grant first. Either it just has lots of RPCs
230 * in flight, or it was evicted and its grants will soon be used up. */
231 if (want > 0x7fffffff) {
232 CERROR("%s: client %s/%p requesting > 2GB grant "LPU64"\n",
233 obd->obd_name, exp->exp_client_uuid.uuid, exp, want);
234 } else if (current_grant < want &&
235 current_grant < fed->fed_grant + FILTER_GRANT_CHUNK) {
/* Grant the smaller of what was asked for and 1/8 of the free
 * space (both computed in blocks here). */
236 grant = min((want >> blockbits),
237 (fs_space_left >> blockbits) / 8);
241 /* Allow >FILTER_GRANT_CHUNK size when clients
242 * reconnect due to a server reboot.
244 if ((grant > FILTER_GRANT_CHUNK) &&
245 (!obd->obd_recovering))
246 grant = FILTER_GRANT_CHUNK;
248 obd->u.filter.fo_tot_granted += grant;
249 fed->fed_grant += grant;
/* Overflow sanity check; the lock is dropped before the (elided)
 * failure path. */
250 if (fed->fed_grant < 0) {
251 CERROR("%s: cli %s/%p grant %ld want "LPU64
253 obd->obd_name, exp->exp_client_uuid.uuid,
254 exp, fed->fed_grant, want,current_grant);
255 spin_unlock(&obd->obd_osfs_lock);
262 "%s: cli %s/%p wants: "LPU64" current grant "LPU64
263 " granting: "LPU64"\n", obd->obd_name, exp->exp_client_uuid.uuid,
264 exp, want, current_grant, grant);
266 "%s: cli %s/%p tot cached:"LPU64" granted:"LPU64
267 " num_exports: %d\n", obd->obd_name, exp->exp_client_uuid.uuid,
268 exp, obd->u.filter.fo_tot_dirty,
269 obd->u.filter.fo_tot_granted, obd->obd_num_exports);
/* Prepare a bulk read: authenticate the capability, absorb any grant
 * announcement from @oa, map the object to a dentry, and fill @res with
 * per-page local niobufs, submitting the pages for direct I/O. Pages
 * entirely beyond EOF are skipped (lnb->rc stays 0 as the marker).
 * NOTE(review): interior lines (error labels, loop increments, cleanup)
 * are elided in this chunk; comments cover only visible code. */
274 static int filter_preprw_read(int cmd, struct obd_export *exp, struct obdo *oa,
275 int objcount, struct obd_ioobj *obj,
276 int niocount, struct niobuf_remote *nb,
277 struct niobuf_local *res,
278 struct obd_trans_info *oti,
279 struct lustre_capa *capa)
281 struct obd_device *obd = exp->exp_obd;
282 struct lvfs_run_ctxt saved;
283 struct niobuf_remote *rnb;
284 struct niobuf_local *lnb;
285 struct dentry *dentry = NULL;
288 int rc = 0, i, tot_bytes = 0;
289 unsigned long now = jiffies;
292 /* We are currently not supporting multi-obj BRW_READ RPCS at all.
293 * When we do this function's dentry cleanup will need to be fixed.
294 * These values are verified in ost_brw_write() from the wire. */
295 LASSERTF(objcount == 1, "%d\n", objcount);
296 LASSERTF(obj->ioo_bufcnt > 0, "%d\n", obj->ioo_bufcnt);
298 rc = filter_auth_capa(exp, NULL, obdo_mdsno(oa), capa,
/* Reads can still carry a grant announcement; account it under the
 * osfs lock before any I/O setup. */
303 if (oa && oa->o_valid & OBD_MD_FLGRANT) {
304 spin_lock(&obd->obd_osfs_lock);
305 filter_grant_incoming(exp, oa);
308 spin_unlock(&obd->obd_osfs_lock);
311 iobuf = filter_iobuf_get(&obd->u.filter, oti);
313 RETURN(PTR_ERR(iobuf));
315 push_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
316 dentry = filter_oa2dentry(obd, oa);
317 if (IS_ERR(dentry)) {
318 rc = PTR_ERR(dentry);
323 inode = dentry->d_inode;
325 obdo_to_inode(inode, oa, OBD_MD_FLATIME);
326 fsfilt_check_slow(obd, now, obd_timeout, "preprw_read setup");
/* Translate each remote niobuf into a local one and queue its page. */
328 for (i = 0, lnb = res, rnb = nb; i < obj->ioo_bufcnt;
330 lnb->dentry = dentry;
331 lnb->offset = rnb->offset;
333 lnb->flags = rnb->flags;
336 * ost_brw_write()->ost_nio_pages_get() already initialized
337 * lnb->page to point to the page from the per-thread page
338 * pool (bug 5137), initialize page.
340 LASSERT(lnb->page != NULL);
342 if (i_size_read(inode) <= rnb->offset)
343 /* If there's no more data, abort early. lnb->rc == 0,
344 * so it's easy to detect later. */
347 filter_alloc_dio_page(obd, inode, lnb);
/* Short read at EOF: report only the bytes that exist. */
349 if (i_size_read(inode) < lnb->offset + lnb->len - 1)
350 lnb->rc = i_size_read(inode) - lnb->offset;
354 tot_bytes += lnb->rc;
356 filter_iobuf_add_page(obd, iobuf, inode, lnb->page);
359 fsfilt_check_slow(obd, now, obd_timeout, "start_page_read");
361 rc = filter_direct_io(OBD_BRW_READ, dentry, iobuf,
362 exp, NULL, NULL, NULL);
366 lprocfs_counter_add(obd->obd_stats, LPROC_FILTER_READ_BYTES, tot_bytes);
368 lprocfs_counter_add(exp->exp_ops_stats, LPROC_FILTER_READ_BYTES,
/* Error/cleanup path (labels elided): free pages, return the iobuf to
 * the pool, pop the lvfs context. */
375 filter_free_dio_pages(objcount, obj, niocount, res);
381 filter_iobuf_put(&obd->u.filter, iobuf, oti);
383 pop_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
385 CERROR("io error %d\n", rc);
390 /* When clients have dirtied as much space as they've been granted they
391 * fall through to sync writes. These sync writes haven't been expressed
392 * in grants and need to error with ENOSPC when there isn't room in the
393 * filesystem for them after grants are taken into account. However,
394 * writeback of the dirty data that was already granted space can write
397 * Caller must hold obd_osfs_lock.
 * For each remote niobuf: consume the client's existing grant when the
 * page was written under grant, otherwise provisionally grant it from
 * @left; pages that fit neither way lose OBD_BRW_GRANTED and the call
 * ends up returning -ENOSPC (rc initialized below).
 * NOTE(review): interior lines are elided in this chunk. */
398 static int filter_grant_check(struct obd_export *exp, struct obdo *oa,
399 int objcount, struct fsfilt_objinfo *fso,
400 int niocount, struct niobuf_remote *rnb,
401 struct niobuf_local *lnb, obd_size *left,
404 struct filter_export_data *fed = &exp->exp_filter_data;
405 int blocksize = exp->exp_obd->u.obt.obt_sb->s_blocksize;
406 unsigned long used = 0, ungranted = 0, using;
407 int i, rc = -ENOSPC, obj, n = 0, mask = D_CACHE;
409 LASSERT_SPIN_LOCKED(&exp->exp_obd->obd_osfs_lock);
411 for (obj = 0; obj < objcount; obj++) {
412 for (i = 0; i < fso[obj].fso_bufcnt; i++, n++) {
415 /* should match the code in osc_exit_cache */
/* Round the I/O extent out to full blocks: account the head and
 * tail fragments of partially-covered blocks. */
417 bytes += rnb[n].offset & (blocksize - 1);
418 tmp = (rnb[n].offset + rnb[n].len) & (blocksize - 1);
420 bytes += blocksize - tmp;
/* Case 1: the client says this page was covered by grant. */
422 if ((rnb[n].flags & OBD_BRW_FROM_GRANT) &&
423 (oa->o_valid & OBD_MD_FLGRANT)) {
424 if (fed->fed_grant < used + bytes) {
426 "%s: cli %s/%p claims %ld+%d "
427 "GRANT, real grant %lu idx %d\n",
428 exp->exp_obd->obd_name,
429 exp->exp_client_uuid.uuid, exp,
430 used, bytes, fed->fed_grant, n);
434 rnb[n].flags |= OBD_BRW_GRANTED;
435 lnb[n].lnb_grant_used = bytes;
436 CDEBUG(0, "idx %d used=%lu\n", n, used);
/* Case 2: no grant claimed, but the filesystem has room. */
441 if (*left > ungranted + bytes) {
442 /* if enough space, pretend it was granted */
444 rnb[n].flags |= OBD_BRW_GRANTED;
445 lnb[n].lnb_grant_used = bytes;
446 CDEBUG(0, "idx %d ungranted=%lu\n",n,ungranted);
451 /* We can't check for already-mapped blocks here, as
452 * it requires dropping the osfs lock to do the bmap.
453 * Instead, we return ENOSPC and in that case we need
454 * to go through and verify if all of the blocks not
455 * marked BRW_GRANTED are already mapped and we can
456 * ignore this error. */
458 rnb[n].flags &= ~OBD_BRW_GRANTED;
459 CDEBUG(D_CACHE,"%s: cli %s/%p idx %d no space for %d\n",
460 exp->exp_obd->obd_name,
461 exp->exp_client_uuid.uuid, exp, n, bytes);
465 /* Now subtract what client have used already. We don't subtract
466 * this from the tot_granted yet, so that other client's can't grab
467 * that space before we have actually allocated our blocks. That
468 * happens in filter_grant_commit() after the writes are done. */
470 fed->fed_grant -= used;
471 fed->fed_pending += used + ungranted;
472 exp->exp_obd->u.filter.fo_tot_granted += ungranted;
473 exp->exp_obd->u.filter.fo_tot_pending += used + ungranted;
476 "%s: cli %s/%p used: %lu ungranted: %lu grant: %lu dirty: %lu\n",
477 exp->exp_obd->obd_name, exp->exp_client_uuid.uuid, exp, used,
478 ungranted, fed->fed_grant, fed->fed_dirty);
480 /* Rough calc in case we don't refresh cached statfs data */
481 using = (used + ungranted + 1 ) >>
482 exp->exp_obd->u.obt.obt_sb->s_blocksize_bits;
483 if (exp->exp_obd->obd_osfs.os_bavail > using)
484 exp->exp_obd->obd_osfs.os_bavail -= using;
486 exp->exp_obd->obd_osfs.os_bavail = 0;
/* fed_dirty can lag reality with out-of-order RPCs; clamp rather
 * than let the global total go negative. */
488 if (fed->fed_dirty < used) {
489 CERROR("%s: cli %s/%p claims used %lu > fed_dirty %lu\n",
490 exp->exp_obd->obd_name, exp->exp_client_uuid.uuid, exp,
491 used, fed->fed_dirty);
492 used = fed->fed_dirty;
494 exp->exp_obd->u.filter.fo_tot_dirty -= used;
495 fed->fed_dirty -= used;
/* Final sanity check; lock is dropped before the (elided) failure
 * path. */
497 if (fed->fed_dirty < 0 || fed->fed_grant < 0 || fed->fed_pending < 0) {
498 CERROR("%s: cli %s/%p dirty %ld pend %ld grant %ld\n",
499 exp->exp_obd->obd_name, exp->exp_client_uuid.uuid, exp,
500 fed->fed_dirty, fed->fed_pending, fed->fed_grant);
501 spin_unlock(&exp->exp_obd->obd_osfs_lock);
507 /* If we ever start to support multi-object BRW RPCs, we will need to get locks
508 * on multiple inodes. That isn't all, because there still exists the
509 * possibility of a truncate starting a new transaction while holding the ext3
510 * rwsem = write while some writes (which have started their transactions here)
511 * blocking on the ext3 rwsem = read => lock inversion.
513 * The handling gets very ugly when dealing with locked pages. It may be easier
514 * to just get rid of the locked page code (which has problems of its own) and
515 * either discover we do not need it anymore (i.e. it was a symptom of another
516 * bug) or ensure we get the page locks in an appropriate order.
 *
 * Prepare a bulk write: authenticate, resolve the target object, absorb
 * the client's grant announcement, run the grant check over every page,
 * then set up local niobufs and pre-read partial pages so the eventual
 * write does not clobber data outside the written extent.
 * NOTE(review): interior lines (error labels, loop increments, some
 * cleanup-phase cases) are elided in this chunk. */
517 static int filter_preprw_write(int cmd, struct obd_export *exp, struct obdo *oa,
518 int objcount, struct obd_ioobj *obj,
519 int niocount, struct niobuf_remote *nb,
520 struct niobuf_local *res,
521 struct obd_trans_info *oti,
522 struct lustre_capa *capa)
524 struct lvfs_run_ctxt saved;
525 struct niobuf_remote *rnb;
526 struct niobuf_local *lnb = res;
527 struct fsfilt_objinfo fso;
528 struct filter_mod_data *fmd;
529 struct dentry *dentry = NULL;
532 unsigned long now = jiffies;
533 int rc = 0, i, tot_bytes = 0, cleanup_phase = 0;
535 LASSERT(objcount == 1);
536 LASSERT(obj->ioo_bufcnt > 0);
538 rc = filter_auth_capa(exp, NULL, obdo_mdsno(oa), capa,
543 push_ctxt(&saved, &exp->exp_obd->obd_lvfs_ctxt, NULL);
544 iobuf = filter_iobuf_get(&exp->exp_obd->u.filter, oti);
546 GOTO(cleanup, rc = PTR_ERR(iobuf));
549 dentry = filter_fid2dentry(exp->exp_obd, NULL, obj->ioo_gr,
552 GOTO(cleanup, rc = PTR_ERR(dentry));
555 if (dentry->d_inode == NULL) {
556 CERROR("%s: trying to BRW to non-existent file "LPU64"\n",
557 exp->exp_obd->obd_name, obj->ioo_id);
558 GOTO(cleanup, rc = -ENOENT);
561 fso.fso_dentry = dentry;
562 fso.fso_bufcnt = obj->ioo_bufcnt;
564 fsfilt_check_slow(exp->exp_obd, now, obd_timeout, "preprw_write setup");
566 /* Don't update inode timestamps if this write is older than a
567 * setattr which modifies the timestamps. b=10150 */
568 /* XXX when we start having persistent reservations this needs to
569 * be changed to filter_fmd_get() to create the fmd if it doesn't
570 * already exist so we can store the reservation handle there. */
571 fmd = filter_fmd_find(exp, obj->ioo_id, obj->ioo_gr);
/* Grant bookkeeping and timestamp update, all under the osfs lock. */
574 spin_lock(&exp->exp_obd->obd_osfs_lock);
575 filter_grant_incoming(exp, oa);
576 if (fmd && fmd->fmd_mactime_xid > oti->oti_xid)
577 oa->o_valid &= ~(OBD_MD_FLMTIME | OBD_MD_FLCTIME |
580 obdo_to_inode(dentry->d_inode, oa, OBD_MD_FLATIME |
581 OBD_MD_FLMTIME | OBD_MD_FLCTIME);
584 left = filter_grant_space_left(exp);
586 rc = filter_grant_check(exp, oa, objcount, &fso, niocount, nb, res,
587 &left, dentry->d_inode);
589 /* do not zero out oa->o_valid as it is used in filter_commitrw_write()
590 * for setting UID/GID and fid EA in first write time. */
591 if (oa->o_valid & OBD_MD_FLGRANT)
592 oa->o_grant = filter_grant(exp,oa->o_grant,oa->o_undirty,left);
594 spin_unlock(&exp->exp_obd->obd_osfs_lock);
595 filter_fmd_put(exp, fmd);
/* Build the local niobufs; every page is set up, granted or not. */
600 for (i = 0, rnb = nb, lnb = res; i < obj->ioo_bufcnt;
602 /* We still set up for ungranted pages so that granted pages
603 * can be written to disk as they were promised, and portals
604 * needs to keep the pages all aligned properly. */
605 lnb->dentry = dentry;
606 lnb->offset = rnb->offset;
608 lnb->flags = rnb->flags;
611 * ost_brw_write()->ost_nio_pages_get() already initialized
612 * lnb->page to point to the page from the per-thread page
613 * pool (bug 5137), initialize page.
615 LASSERT(lnb->page != NULL);
/* Zero the tail of a partial page in the pool page itself. */
616 if (lnb->len != CFS_PAGE_SIZE) {
617 memset(kmap(lnb->page) + lnb->len,
618 0, CFS_PAGE_SIZE - lnb->len);
621 lnb->page->index = lnb->offset >> CFS_PAGE_SHIFT;
625 /* If the filter writes a partial page, then has the file
626 * extended, the client will read in the whole page. the
627 * filter has to be careful to zero the rest of the partial
628 * page on disk. we do it by hand for partial extending
629 * writes, send_bio() is responsible for zeroing pages when
630 * asked to read unmapped blocks -- brw_kiovec() does this. */
631 if (lnb->len != CFS_PAGE_SIZE) {
/* maxidx = last page index currently inside i_size. */
634 maxidx = ((i_size_read(dentry->d_inode) +
635 CFS_PAGE_SIZE - 1) >> CFS_PAGE_SHIFT) - 1;
636 if (maxidx >= lnb->page->index) {
637 LL_CDEBUG_PAGE(D_PAGE, lnb->page, "write %u @ "
638 LPU64" flg %x before EOF %llu\n",
639 lnb->len, lnb->offset,lnb->flags,
640 i_size_read(dentry->d_inode));
/* Page overlaps existing data: queue it so the read-
 * before-write below fills in the untouched bytes. */
641 filter_iobuf_add_page(exp->exp_obd, iobuf,
/* Page is entirely beyond EOF: zero the head and tail of
 * the partial page by hand instead of reading. */
646 char *p = kmap(lnb->page);
648 off = lnb->offset & ~CFS_PAGE_MASK;
651 off = (lnb->offset + lnb->len) & ~CFS_PAGE_MASK;
653 memset(p + off, 0, CFS_PAGE_SIZE - off);
658 tot_bytes += lnb->len;
/* Read-before-write of the queued partial pages (note OBD_BRW_READ
 * here is intentional: we are pre-filling, not writing yet). */
661 rc = filter_direct_io(OBD_BRW_READ, dentry, iobuf, exp,
664 fsfilt_check_slow(exp->exp_obd, now, obd_timeout, "start_page_write");
666 lprocfs_counter_add(exp->exp_ops_stats, LPROC_FILTER_WRITE_BYTES,
/* Staged cleanup: which resources are released depends on how far
 * setup got (cleanup_phase); some case bodies are elided here. */
670 switch(cleanup_phase) {
673 filter_iobuf_put(&exp->exp_obd->u.filter, iobuf, oti);
675 pop_ctxt(&saved, &exp->exp_obd->obd_lvfs_ctxt, NULL);
680 filter_iobuf_put(&exp->exp_obd->u.filter, iobuf, oti);
/* On the early-failure path the grant announcement still must be
 * absorbed so client/server accounting stays in sync. */
682 spin_lock(&exp->exp_obd->obd_osfs_lock);
684 filter_grant_incoming(exp, oa);
685 spin_unlock(&exp->exp_obd->obd_osfs_lock);
686 pop_ctxt(&saved, &exp->exp_obd->obd_lvfs_ctxt, NULL);
/* Entry point for bulk-I/O preparation: dispatch to the read or write
 * implementation based on @cmd. (The fall-through return for an
 * unexpected cmd is elided in this chunk.) */
693 int filter_preprw(int cmd, struct obd_export *exp, struct obdo *oa,
694 int objcount, struct obd_ioobj *obj, int niocount,
695 struct niobuf_remote *nb, struct niobuf_local *res,
696 struct obd_trans_info *oti, struct lustre_capa *capa)
698 if (cmd == OBD_BRW_WRITE)
699 return filter_preprw_write(cmd, exp, oa, objcount, obj,
700 niocount, nb, res, oti, capa);
701 if (cmd == OBD_BRW_READ)
702 return filter_preprw_read(cmd, exp, oa, objcount, obj,
703 niocount, nb, res, oti, capa);
/* Drop our reference on a page used for a read, and evict it from the
 * page cache when it belongs to a file larger than the configured
 * read-cache size limit (fo_readcache_max_filesize).
 * NOTE(review): the condition computing 'drop' is partly elided in this
 * chunk; only the size-limit clause is visible. */
708 void filter_release_read_page(struct filter_obd *filter, struct inode *inode,
714 (i_size_read(inode) > filter->fo_readcache_max_filesize))
717 /* drop from cache like truncate_list_pages() */
/* TryLockPage: only evict if we can take the page lock without
 * blocking; otherwise just release our reference below. */
718 if (drop && !TryLockPage(page)) {
720 ll_truncate_complete_page(page);
723 page_cache_release(page);
/* Finish a bulk read: push the atime updated in filter_preprw_read into
 * the extent lock's LVB (so glimpses see it, bug 5972), then free the
 * per-page DIO state and release the dentry reference.
 * NOTE(review): interior lines are elided in this chunk. */
726 static int filter_commitrw_read(struct obd_export *exp, struct obdo *oa,
727 int objcount, struct obd_ioobj *obj,
728 int niocount, struct niobuf_local *res,
729 struct obd_trans_info *oti, int rc)
731 struct inode *inode = NULL;
732 struct ldlm_res_id res_id = { .name = { obj->ioo_id, 0,
734 struct ldlm_resource *resource = NULL;
735 struct ldlm_namespace *ns = exp->exp_obd->obd_namespace;
738 /* If oa != NULL then filter_preprw_read updated the inode atime
739 * and we should update the lvb so that other glimpses will also
740 * get the updated value. bug 5972 */
741 if (oa && ns && ns->ns_lvbo && ns->ns_lvbo->lvbo_update) {
742 resource = ldlm_resource_get(ns, NULL, &res_id, LDLM_EXTENT, 0);
744 if (resource != NULL) {
745 ns->ns_lvbo->lvbo_update(resource, NULL, 0, 1);
746 ldlm_resource_putref(resource);
/* res[0].dentry was set for every niobuf in preprw_read; use it to
 * reach the inode and later (elided) drop the dentry reference. */
750 if (res->dentry != NULL)
751 inode = res->dentry->d_inode;
753 filter_free_dio_pages(objcount, obj, niocount, res);
755 if (res->dentry != NULL)
/* Evict any cached page at new_page->index from @inode's mapping so a
 * freshly written DIO page does not coexist with a stale cached copy.
 * The #if 0 section shows the (disabled) second half that would insert
 * new_page into the page cache afterwards.
 * NOTE(review): interior lines are elided in this chunk. */
760 void flip_into_page_cache(struct inode *inode, struct page *new_page)
762 struct page *old_page;
766 /* the dlm is protecting us from read/write concurrency, so we
767 * expect this find_lock_page to return quickly. even if we
768 * race with another writer it won't be doing much work with
769 * the page locked. we do this 'cause t_c_p expects a
770 * locked page, and it wants to grab the pagecache lock
772 old_page = find_lock_page(inode->i_mapping, new_page->index);
774 ll_truncate_complete_page(old_page);
775 unlock_page(old_page);
776 page_cache_release(old_page);
779 #if 0 /* this should be a /proc tunable someday */
780 /* racing o_directs (no locking ioctl) could race adding
781 * their pages, so we repeat the page invalidation unless
782 * we successfully added our new page */
783 rc = add_to_page_cache_unique(new_page, inode->i_mapping,
785 page_hash(inode->i_mapping,
788 /* add_to_page_cache clears uptodate|dirty and locks
790 SetPageUptodate(new_page);
791 unlock_page(new_page);
/* After the writes of a bulk RPC have hit disk, retire the grant that
 * was consumed: sum lnb_grant_used over all niobufs and subtract it
 * from the export's pending count and the server-wide granted/pending
 * totals, all under the osfs lock. The LASSERTFs guard against the
 * totals underflowing. */
799 void filter_grant_commit(struct obd_export *exp, int niocount,
800 struct niobuf_local *res)
802 struct filter_obd *filter = &exp->exp_obd->u.filter;
803 struct niobuf_local *lnb = res;
804 unsigned long pending = 0;
807 spin_lock(&exp->exp_obd->obd_osfs_lock);
808 for (i = 0, lnb = res; i < niocount; i++, lnb++)
809 pending += lnb->lnb_grant_used;
811 LASSERTF(exp->exp_filter_data.fed_pending >= pending,
812 "%s: cli %s/%p fed_pending: %lu grant_used: %lu\n",
813 exp->exp_obd->obd_name, exp->exp_client_uuid.uuid, exp,
814 exp->exp_filter_data.fed_pending, pending);
815 exp->exp_filter_data.fed_pending -= pending;
816 LASSERTF(filter->fo_tot_granted >= pending,
817 "%s: cli %s/%p tot_granted: "LPU64" grant_used: %lu\n",
818 exp->exp_obd->obd_name, exp->exp_client_uuid.uuid, exp,
819 exp->exp_obd->u.filter.fo_tot_granted, pending);
820 filter->fo_tot_granted -= pending;
821 LASSERTF(filter->fo_tot_pending >= pending,
822 "%s: cli %s/%p tot_pending: "LPU64" grant_used: %lu\n",
823 exp->exp_obd->obd_name, exp->exp_client_uuid.uuid, exp,
824 filter->fo_tot_pending, pending);
825 filter->fo_tot_pending -= pending;
827 spin_unlock(&exp->exp_obd->obd_osfs_lock);
/* Entry point for bulk-I/O commit: dispatch to the write or read commit
 * implementation based on @cmd. (Trailing argument lists and the
 * unexpected-cmd return are elided in this chunk.) */
830 int filter_commitrw(int cmd, struct obd_export *exp, struct obdo *oa,
831 int objcount, struct obd_ioobj *obj, int niocount,
832 struct niobuf_local *res, struct obd_trans_info *oti,
835 if (cmd == OBD_BRW_WRITE)
836 return filter_commitrw_write(exp, oa, objcount, obj, niocount,
838 if (cmd == OBD_BRW_READ)
839 return filter_commitrw_read(exp, oa, objcount, obj, niocount,
/* Convenience wrapper that performs a complete single-object bulk I/O
 * from an array of brw_page descriptors: allocate local/remote niobuf
 * arrays, translate pga[] into them, then run filter_preprw() followed
 * by filter_commitrw(). Both arrays are freed on exit via OBD_FREE.
 * NOTE(review): this function runs past the end of the visible chunk
 * (the final return is not shown) and interior lines are elided. */
845 int filter_brw(int cmd, struct obd_export *exp, struct obd_info *oinfo,
846 obd_count oa_bufs, struct brw_page *pga,
847 struct obd_trans_info *oti)
849 struct obd_ioobj ioo;
850 struct niobuf_local *lnb;
851 struct niobuf_remote *rnb;
856 OBD_ALLOC(lnb, oa_bufs * sizeof(struct niobuf_local));
857 OBD_ALLOC(rnb, oa_bufs * sizeof(struct niobuf_remote));
/* Either allocation failing sends us to the shared cleanup path;
 * OBD_FREE of a NULL pointer is handled there (elided). */
859 if (lnb == NULL || rnb == NULL)
860 GOTO(out, ret = -ENOMEM);
/* Split each brw_page into its local (page) and remote (offset/len)
 * halves. */
862 for (i = 0; i < oa_bufs; i++) {
863 lnb[i].page = pga[i].pg;
864 rnb[i].offset = pga[i].off;
865 rnb[i].len = pga[i].count;
868 obdo_to_ioobj(oinfo->oi_oa, &ioo);
869 ioo.ioo_bufcnt = oa_bufs;
871 ret = filter_preprw(cmd, exp, oinfo->oi_oa, 1, &ioo,
872 oa_bufs, rnb, lnb, oti, oinfo_capa(oinfo));
876 ret = filter_commitrw(cmd, exp, oinfo->oi_oa, 1, &ioo,
877 oa_bufs, lnb, oti, ret);
881 OBD_FREE(lnb, oa_bufs * sizeof(struct niobuf_local));
883 OBD_FREE(rnb, oa_bufs * sizeof(struct niobuf_remote));