1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
4 * linux/fs/obdfilter/filter_io.c
6 * Copyright (c) 2001-2003 Cluster File Systems, Inc.
7 * Author: Peter Braam <braam@clusterfs.com>
8 * Author: Andreas Dilger <adilger@clusterfs.com>
9 * Author: Phil Schwan <phil@clusterfs.com>
11 * This file is part of Lustre, http://www.lustre.org.
13 * Lustre is free software; you can redistribute it and/or
14 * modify it under the terms of version 2 of the GNU General Public
15 * License as published by the Free Software Foundation.
17 * Lustre is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
22 * You should have received a copy of the GNU General Public License
23 * along with Lustre; if not, write to the Free Software
24 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
27 #define DEBUG_SUBSYSTEM S_FILTER
29 #include <linux/config.h>
30 #include <linux/module.h>
31 #include <linux/pagemap.h> // XXX kill me soon
32 #include <linux/version.h>
34 #include <linux/obd_class.h>
35 #include <linux/lustre_fsfilt.h>
36 #include "filter_internal.h"
/*
 * Kick off an asynchronous read of one page into the inode's page cache.
 * The locked cache page covering lnb->offset is grabbed; if it is not
 * already uptodate, ->readpage() is submitted.  lnb->rc records the
 * outcome: -ENOMEM on allocation failure, or a clipped byte count when
 * the request extends past i_size (short read).
 * NOTE(review): this excerpt elides lines (gaps in the embedded
 * numbering), so declarations, braces and some statements are not shown.
 */
38 static int filter_start_page_read(struct inode *inode, struct niobuf_local *lnb)
40 struct address_space *mapping = inode->i_mapping;
42 unsigned long index = lnb->offset >> PAGE_SHIFT;
45 page = grab_cache_page(mapping, index); /* locked page */
47 return lnb->rc = -ENOMEM;
49 LASSERT(page->mapping == mapping);
/* Request reaches past EOF: clip the reply size to the bytes that exist. */
53 if (inode->i_size < lnb->offset + lnb->len - 1)
54 lnb->rc = inode->i_size - lnb->offset;
58 if (PageUptodate(page)) {
/* Not cached yet: ask the backing filesystem to read the page in.
 * Completion is checked later in filter_finish_page_read(). */
63 rc = mapping->a_ops->readpage(NULL, page);
65 CERROR("page index %lu, rc = %d\n", index, rc);
67 page_cache_release(page);
/*
 * Wait for a page read started by filter_start_page_read() to complete
 * and validate the result.  A NULL lnb->page means the read was skipped
 * (past EOF) and there is nothing to wait for.  On I/O failure the page
 * reference is dropped and lnb->rc is set to -EIO.
 * NOTE(review): excerpt — some source lines are elided here.
 */
74 static int filter_finish_page_read(struct niobuf_local *lnb)
76 if (lnb->page == NULL)
/* Fast path: page already valid, no need to sleep. */
79 if (PageUptodate(lnb->page))
82 wait_on_page(lnb->page);
83 if (!PageUptodate(lnb->page)) {
84 CERROR("page index %lu/offset "LPX64" not uptodate\n",
85 lnb->page->index, lnb->offset);
86 GOTO(err_page, lnb->rc = -EIO);
88 if (PageError(lnb->page)) {
89 CERROR("page index %lu/offset "LPX64" has error\n",
90 lnb->page->index, lnb->offset);
91 GOTO(err_page, lnb->rc = -EIO);
/* Error path: release our page-cache reference before returning. */
97 page_cache_release(lnb->page);
102 /* Grab the dirty and seen grant announcements from the incoming obdo.
103 * We will later calculate the clients new grant and return it.
104 * Caller must hold osfs lock */
105 static void filter_grant_incoming(struct obd_export *exp, struct obdo *oa)
107 struct filter_export_data *fed;
108 struct obd_device *obd = exp->exp_obd;
111 LASSERT_SPIN_LOCKED(&obd->obd_osfs_lock);
/* Client did not send both dirty+grant info: clear the grant flag so the
 * caller knows no grant accounting arrived, and do nothing else. */
113 if ((oa->o_valid & (OBD_MD_FLBLOCKS|OBD_MD_FLGRANT)) !=
114 (OBD_MD_FLBLOCKS|OBD_MD_FLGRANT)) {
115 oa->o_valid &= ~OBD_MD_FLGRANT;
120 fed = &exp->exp_filter_data;
122 /* Add some margin, since there is a small race if other RPCs arrive
123 * out-of-order and have already consumed some grant. We want to
124 * leave this here in case there is a large error in accounting. */
125 CDEBUG(oa->o_grant > fed->fed_grant + FILTER_GRANT_CHUNK ?
127 "%s: cli %s/%p reports grant: "LPU64" dropped: %u, local: %lu\n",
128 obd->obd_name, exp->exp_client_uuid.uuid, exp, oa->o_grant,
129 oa->o_dropped, fed->fed_grant);
131 /* Update our accounting now so that statfs takes it into account.
132 * Note that fed_dirty is only approximate and can become incorrect
133 * if RPCs arrive out-of-order. No important calculations depend
134 * on fed_dirty however. */
135 obd->u.filter.fo_tot_dirty += oa->o_dirty - fed->fed_dirty;
/* Sanity-check that the client's claimed dropped grant does not exceed
 * what we think it holds, per-export and globally. */
136 if (fed->fed_grant < oa->o_dropped) {
137 CERROR("%s: cli %s/%p reports %u dropped > fed_grant %lu\n",
138 obd->obd_name, exp->exp_client_uuid.uuid, exp,
139 oa->o_dropped, fed->fed_grant);
142 if (obd->u.filter.fo_tot_granted < oa->o_dropped) {
143 CERROR("%s: cli %s/%p reports %u dropped > tot_grant "LPU64"\n",
144 obd->obd_name, exp->exp_client_uuid.uuid, exp,
145 oa->o_dropped, obd->u.filter.fo_tot_granted);
/* Retire the dropped grant and record the client's current dirty count. */
148 obd->u.filter.fo_tot_granted -= oa->o_dropped;
149 fed->fed_grant -= oa->o_dropped;
150 fed->fed_dirty = oa->o_dirty;
154 #define GRANT_FOR_LLOG(obd) 16
156 /* Figure out how much space is available between what we've granted
157 * and what remains in the filesystem. Compensate for ext3 indirect
158 * block overhead when computing how much free space is left ungranted.
160 * Caller must hold obd_osfs_lock. */
161 obd_size filter_grant_space_left(struct obd_export *exp)
163 struct obd_device *obd = exp->exp_obd;
164 int blockbits = obd->u.filter.fo_sb->s_blocksize_bits;
165 obd_size tot_granted = obd->u.filter.fo_tot_granted, avail, left = 0;
166 int rc, statfs_done = 0;
168 LASSERT_SPIN_LOCKED(&obd->obd_osfs_lock);
/* Refresh cached statfs data if it is more than one second old. */
170 if (time_before(obd->obd_osfs_age, jiffies - HZ)) {
172 rc = fsfilt_statfs(obd, obd->u.filter.fo_sb, jiffies + 1);
173 if (rc) /* N.B. statfs can't really fail */
178 avail = obd->obd_osfs.os_bavail;
/* Reserve ~1/8 of free blocks for (d)indirect block overhead, and keep
 * a few blocks back for llog use, then convert blocks -> bytes. */
179 left = avail - (avail >> (blockbits - 3)); /* (d)indirect */
180 if (left > GRANT_FOR_LLOG(obd)) {
181 left = (left - GRANT_FOR_LLOG(obd)) << blockbits;
183 left = 0 /* << blockbits */;
/* Looks nearly full but our statfs may be stale — retry with fresh data.
 * NOTE(review): the retry/goto itself is elided from this excerpt. */
186 if (!statfs_done && left < 32 * FILTER_GRANT_CHUNK + tot_granted) {
187 CDEBUG(D_CACHE, "fs has no space left and statfs too old\n");
191 if (left >= tot_granted) {
/* Granted more than is available: complain (rate-limited to one dump
 * per 20s via the static 'next' timestamp), dropping the osfs lock
 * around the expensive debug-log dump. */
194 static unsigned long next;
195 if (left < tot_granted - obd->u.filter.fo_tot_pending &&
196 time_after(jiffies, next)) {
197 spin_unlock(&obd->obd_osfs_lock);
198 CERROR("%s: cli %s/%p grant "LPU64" > available "
199 LPU64" and pending "LPU64"\n", obd->obd_name,
200 exp->exp_client_uuid.uuid, exp, tot_granted,
201 left, obd->u.filter.fo_tot_pending);
203 portals_debug_dumplog();
204 next = jiffies + 20 * HZ;
205 spin_lock(&obd->obd_osfs_lock);
210 CDEBUG(D_CACHE, "%s: cli %s/%p free: "LPU64" avail: "LPU64" grant "LPU64
211 " left: "LPU64" pending: "LPU64"\n", obd->obd_name,
212 exp->exp_client_uuid.uuid, exp,
213 obd->obd_osfs.os_bfree << blockbits, avail << blockbits,
214 tot_granted, left, obd->u.filter.fo_tot_pending);
219 /* Calculate how much grant space to allocate to this client, based on how
220 * much space is currently free and how much of that is already granted.
222 * Caller must hold obd_osfs_lock. */
223 long filter_grant(struct obd_export *exp, obd_size current_grant,
224 obd_size want, obd_size fs_space_left)
226 struct obd_device *obd = exp->exp_obd;
227 struct filter_export_data *fed = &exp->exp_filter_data;
228 int blockbits = obd->u.filter.fo_sb->s_blocksize_bits;
231 LASSERT_SPIN_LOCKED(&obd->obd_osfs_lock);
233 /* Grant some fraction of the client's requested grant space so that
234 * they are not always waiting for write credits (not all of it to
235 * avoid overgranting in face of multiple RPCs in flight). This
236 * essentially will be able to control the OSC_MAX_RIF for a client.
238 * If we do have a large disparity between what the client thinks it
239 * has and what we think it has, don't grant very much and let the
240 * client consume its grant first. Either it just has lots of RPCs
241 * in flight, or it was evicted and its grants will soon be used up. */
242 if (current_grant < want) {
243 if (current_grant > fed->fed_grant + FILTER_GRANT_CHUNK)
/* Grant the lesser of half of what was asked for and 1/8 of the
 * remaining filesystem space (both computed in blocks). */
245 grant = min((want >> blockbits) / 2,
246 (fs_space_left >> blockbits) / 8);
/* Cap each grant at one chunk per RPC. */
250 if (grant > FILTER_GRANT_CHUNK)
251 grant = FILTER_GRANT_CHUNK;
/* Record the new grant in both the global and per-export totals. */
253 obd->u.filter.fo_tot_granted += grant;
254 fed->fed_grant += grant;
258 CDEBUG(D_CACHE,"%s: cli %s/%p wants: "LPU64" granting: "LPU64"\n",
259 obd->obd_name, exp->exp_client_uuid.uuid, exp, want, grant);
261 "%s: cli %s/%p tot cached:"LPU64" granted:"LPU64
262 " num_exports: %d\n", obd->obd_name, exp->exp_client_uuid.uuid,
263 exp, obd->u.filter.fo_tot_dirty,
264 obd->u.filter.fo_tot_granted, obd->obd_num_exports);
/*
 * Prepare a bulk read: resolve the object to a dentry, start page-cache
 * reads for every remote niobuf, then wait for and validate each page.
 * Grant accounting is refreshed from the incoming obdo but reads never
 * add new grant.  On error the cleanup_phase switch releases whatever
 * was acquired.
 * NOTE(review): excerpt — many lines (braces, gotos, cleanup labels)
 * are elided; see gaps in the embedded numbering.
 */
269 static int filter_preprw_read(int cmd, struct obd_export *exp, struct obdo *oa,
270 int objcount, struct obd_ioobj *obj,
271 int niocount, struct niobuf_remote *nb,
272 struct niobuf_local *res,
273 struct obd_trans_info *oti)
275 struct obd_device *obd = exp->exp_obd;
276 struct obd_run_ctxt saved;
278 struct niobuf_remote *rnb;
279 struct niobuf_local *lnb = NULL;
280 struct fsfilt_objinfo *fso;
281 struct dentry *dentry;
283 int rc = 0, i, j, tot_bytes = 0, cleanup_phase = 0;
284 unsigned long now = jiffies;
287 /* We are currently not supporting multi-obj BRW_READ RPCs at all.
288 * When we do this function's dentry cleanup will need to be fixed */
289 LASSERT(objcount == 1);
290 LASSERT(obj->ioo_bufcnt > 0);
/* Fold the client's dirty/grant report into our accounting under the
 * osfs lock and compute its new grant. */
292 if (oa && oa->o_valid & OBD_MD_FLGRANT) {
293 spin_lock(&obd->obd_osfs_lock);
294 filter_grant_incoming(exp, oa);
297 /* Reads do not increase grants */
298 oa->o_grant = filter_grant(exp, oa->o_grant, oa->o_undirty,
299 filter_grant_space_left(exp));
303 spin_unlock(&obd->obd_osfs_lock);
306 OBD_ALLOC(fso, objcount * sizeof(*fso));
310 memset(res, 0, niocount * sizeof(*res));
/* Switch into the filter's filesystem context for dentry lookups. */
312 push_ctxt(&saved, &exp->exp_obd->obd_ctxt, NULL);
313 for (i = 0, o = obj; i < objcount; i++, o++) {
314 LASSERT(o->ioo_bufcnt);
316 dentry = filter_oa2dentry(obd, oa);
318 GOTO(cleanup, rc = PTR_ERR(dentry));
320 if (dentry->d_inode == NULL) {
321 CERROR("trying to BRW to non-existent file "LPU64"\n",
324 GOTO(cleanup, rc = -ENOENT);
327 fso[i].fso_dentry = dentry;
328 fso[i].fso_bufcnt = o->ioo_bufcnt;
/* Warn if setup took suspiciously long (>15s). */
331 if (time_after(jiffies, now + 15 * HZ))
332 CERROR("slow preprw_read setup %lus\n", (jiffies - now) / HZ);
334 CDEBUG(D_INFO, "preprw_read setup: %lu jiffies\n",
/* Phase 1: start an async page read for each niobuf. */
337 for (i = 0, o = obj, rnb = nb, lnb = res; i < objcount; i++, o++) {
338 dentry = fso[i].fso_dentry;
339 inode = dentry->d_inode;
341 for (j = 0; j < o->ioo_bufcnt; j++, rnb++, lnb++) {
342 lnb->dentry = dentry;
343 lnb->offset = rnb->offset;
345 lnb->flags = rnb->flags;
347 if (inode->i_size <= rnb->offset) {
348 /* If there's no more data, abort early.
349 * lnb->page == NULL and lnb->rc == 0, so it's
350 * easy to detect later. */
353 rc = filter_start_page_read(inode, lnb);
357 CDEBUG(rc == -ENOSPC ? D_INODE : D_ERROR,
358 "page err %u@"LPU64" %u/%u %p: rc %d\n",
359 lnb->len, lnb->offset, j, o->ioo_bufcnt,
365 tot_bytes += lnb->rc;
366 if (lnb->rc < lnb->len) {
367 /* short read, be sure to wait on it */
374 if (time_after(jiffies, now + 15 * HZ))
375 CERROR("slow start_page_read %lus\n", (jiffies - now) / HZ);
377 CDEBUG(D_INFO, "start_page_read: %lu jiffies\n",
380 lprocfs_counter_add(obd->obd_stats, LPROC_FILTER_READ_BYTES, tot_bytes);
/* Phase 2: walk the started niobufs backwards, waiting on each page. */
381 while (lnb-- > res) {
382 rc = filter_finish_page_read(lnb);
384 CERROR("error page %u@"LPU64" %u %p: rc %d\n", lnb->len,
385 lnb->offset, (int)(lnb - res), lnb->dentry, rc);
391 if (time_after(jiffies, now + 15 * HZ))
392 CERROR("slow finish_page_read %lus\n", (jiffies - now) / HZ);
394 CDEBUG(D_INFO, "finish_page_read: %lu jiffies\n",
397 filter_tally_read(&exp->exp_obd->u.filter, res, niocount);
/* Error cleanup: phases release pages, dentry, fso and pop the context. */
402 switch (cleanup_phase) {
404 for (lnb = res; lnb < (res + niocount); lnb++) {
406 page_cache_release(lnb->page);
408 if (res->dentry != NULL)
411 CERROR("NULL dentry in cleanup -- tell CFS\n");
413 OBD_FREE(fso, objcount * sizeof(*fso));
414 pop_ctxt(&saved, &exp->exp_obd->obd_ctxt, NULL);
419 /* When clients have dirtied as much space as they've been granted they
420 * fall through to sync writes. These sync writes haven't been expressed
421 * in grants and need to error with ENOSPC when there isn't room in the
422 * filesystem for them after grants are taken into account. However,
423 * writeback of the dirty data that was already granted space can write
426 * Caller must hold obd_osfs_lock. */
427 static int filter_grant_check(struct obd_export *exp, int objcount,
428 struct fsfilt_objinfo *fso, int niocount,
429 struct niobuf_remote *rnb,
430 struct niobuf_local *lnb, obd_size *left,
433 struct filter_export_data *fed = &exp->exp_filter_data;
434 int blocksize = exp->exp_obd->u.filter.fo_sb->s_blocksize;
435 unsigned long used = 0, ungranted = 0, using;
436 int i, rc = -ENOSPC, obj, n = 0, mask = D_CACHE;
438 LASSERT_SPIN_LOCKED(&exp->exp_obd->obd_osfs_lock);
440 for (obj = 0; obj < objcount; obj++) {
441 for (i = 0; i < fso[obj].fso_bufcnt; i++, n++) {
444 /* FIXME: this is calculated with PAGE_SIZE on client */
/* Round the I/O out to full filesystem blocks, since that is
 * what will actually be consumed on disk. */
446 bytes += rnb[n].offset & (blocksize - 1);
447 tmp = (rnb[n].offset + rnb[n].len) & (blocksize - 1);
449 bytes += blocksize - tmp;
/* Case 1: client says this write is covered by existing grant. */
451 if (rnb[n].flags & OBD_BRW_FROM_GRANT) {
452 if (fed->fed_grant < used + bytes) {
454 "%s: cli %s/%p claims %ld+%d "
455 "GRANT, real grant %lu idx %d\n",
456 exp->exp_obd->obd_name,
457 exp->exp_client_uuid.uuid, exp,
458 used, bytes, fed->fed_grant, n);
462 rnb[n].flags |= OBD_BRW_GRANTED;
463 lnb[n].lnb_grant_used = bytes;
464 CDEBUG(0, "idx %d used=%lu\n", n, used);
/* Case 2: ungranted (sync) write — allow it only while there is
 * still ungranted space left in the filesystem. */
469 if (*left > ungranted) {
470 /* if enough space, pretend it was granted */
472 rnb[n].flags |= OBD_BRW_GRANTED;
473 CDEBUG(0, "idx %d ungranted=%lu\n",n,ungranted);
478 /* We can't check for already-mapped blocks here, as
479 * it requires dropping the osfs lock to do the bmap.
480 * Instead, we return ENOSPC and in that case we need
481 * to go through and verify if all of the blocks not
482 * marked BRW_GRANTED are already mapped and we can
483 * ignore this error. */
485 rnb[n].flags &= OBD_BRW_GRANTED;
486 CDEBUG(D_CACHE,"%s: cli %s/%p idx %d no space for %d\n",
487 exp->exp_obd->obd_name,
488 exp->exp_client_uuid.uuid, exp, n, bytes);
492 /* Now subtract what the client has used already. We don't subtract
493 * this from the tot_granted yet, so that other clients can't grab
494 * that space before we have actually allocated our blocks. That
495 * happens in filter_grant_commit() after the writes are done. */
497 fed->fed_grant -= used;
498 fed->fed_pending += used;
499 exp->exp_obd->u.filter.fo_tot_pending += used;
502 "%s: cli %s/%p used: %lu ungranted: %lu grant: %lu dirty: %lu\n",
503 exp->exp_obd->obd_name, exp->exp_client_uuid.uuid, exp, used,
504 ungranted, fed->fed_grant, fed->fed_dirty);
506 /* Rough calc in case we don't refresh cached statfs data */
507 using = (used + ungranted + 1 ) >>
508 exp->exp_obd->u.filter.fo_sb->s_blocksize_bits;
509 if (exp->exp_obd->obd_osfs.os_bavail > using)
510 exp->exp_obd->obd_osfs.os_bavail -= using;
512 exp->exp_obd->obd_osfs.os_bavail = 0;
/* The bytes just written are no longer "dirty" on the client; clamp in
 * case out-of-order RPCs made fed_dirty temporarily inaccurate. */
514 if (fed->fed_dirty < used) {
515 CERROR("%s: cli %s/%p claims used %lu > fed_dirty %lu\n",
516 exp->exp_obd->obd_name, exp->exp_client_uuid.uuid, exp,
517 used, fed->fed_dirty);
518 used = fed->fed_dirty;
520 exp->exp_obd->u.filter.fo_tot_dirty -= used;
521 fed->fed_dirty -= used;
/*
 * Allocate a temporary highmem page to receive one niobuf of write data.
 * Partial pages are zero-filled past lnb->len so no stale memory is ever
 * written to disk.  Returns -ENOMEM in lnb->rc on allocation failure.
 * NOTE(review): excerpt — some lines (kunmap, page assignment, return)
 * are elided from view.
 */
526 static int filter_start_page_write(struct inode *inode,
527 struct niobuf_local *lnb)
529 struct page *page = alloc_pages(GFP_HIGHUSER, 0);
531 CERROR("no memory for a temp page\n");
532 RETURN(lnb->rc = -ENOMEM);
534 POISON_PAGE(page, 0xf1);
535 if (lnb->len != PAGE_SIZE) {
/* Zero the tail of a partial page so garbage never hits disk. */
536 memset(kmap(page) + lnb->len, 0, PAGE_SIZE - lnb->len);
539 page->index = lnb->offset >> PAGE_SHIFT;
545 /* If we ever start to support multi-object BRW RPCs, we will need to get locks
546 * on multiple inodes. That isn't all, because there still exists the
547 * possibility of a truncate starting a new transaction while holding the ext3
548 * rwsem = write while some writes (which have started their transactions here)
549 * blocking on the ext3 rwsem = read => lock inversion.
551 * The handling gets very ugly when dealing with locked pages. It may be easier
552 * to just get rid of the locked page code (which has problems of its own) and
553 * either discover we do not need it anymore (i.e. it was a symptom of another
554 * bug) or ensure we get the page locks in an appropriate order. */
555 static int filter_preprw_write(int cmd, struct obd_export *exp, struct obdo *oa,
556 int objcount, struct obd_ioobj *obj,
557 int niocount, struct niobuf_remote *nb,
558 struct niobuf_local *res,
559 struct obd_trans_info *oti)
561 struct obd_run_ctxt saved;
562 struct niobuf_remote *rnb;
563 struct niobuf_local *lnb;
564 struct fsfilt_objinfo fso;
565 struct dentry *dentry;
567 unsigned long now = jiffies;
568 int rc = 0, i, tot_bytes = 0, cleanup_phase = 1;
570 LASSERT(objcount == 1);
571 LASSERT(obj->ioo_bufcnt > 0);
573 memset(res, 0, niocount * sizeof(*res));
/* Resolve the target object inside the filter's filesystem context. */
575 push_ctxt(&saved, &exp->exp_obd->obd_ctxt, NULL);
576 dentry = filter_fid2dentry(exp->exp_obd, NULL, obj->ioo_gr,
579 GOTO(cleanup, rc = PTR_ERR(dentry));
581 if (dentry->d_inode == NULL) {
582 CERROR("trying to BRW to non-existent file "LPU64"\n",
585 GOTO(cleanup, rc = -ENOENT);
588 fso.fso_dentry = dentry;
589 fso.fso_bufcnt = obj->ioo_bufcnt;
591 if (time_after(jiffies, now + 15 * HZ))
592 CERROR("slow preprw_write setup %lus\n", (jiffies - now) / HZ);
594 CDEBUG(D_INFO, "preprw_write setup: %lu jiffies\n",
/* Under the osfs lock: absorb the client's grant report, verify every
 * niobuf against available grant/space, and compute the new grant. */
597 spin_lock(&exp->exp_obd->obd_osfs_lock);
599 filter_grant_incoming(exp, oa);
602 left = filter_grant_space_left(exp);
604 rc = filter_grant_check(exp, objcount, &fso, niocount, nb, res,
605 &left, dentry->d_inode);
606 if (oa && oa->o_valid & OBD_MD_FLGRANT)
607 oa->o_grant = filter_grant(exp,oa->o_grant,oa->o_undirty,left);
609 spin_unlock(&exp->exp_obd->obd_osfs_lock);
616 for (i = 0, rnb = nb, lnb = res; i < obj->ioo_bufcnt;
618 /* We still set up for ungranted pages so that granted pages
619 * can be written to disk as they were promised, and portals
620 * needs to keep the pages all aligned properly. */
621 lnb->dentry = dentry;
622 lnb->offset = rnb->offset;
624 lnb->flags = rnb->flags;
626 rc = filter_start_page_write(dentry->d_inode, lnb);
628 CDEBUG(D_ERROR, "page err %u@"LPU64" %u/%u %p: rc %d\n",
629 lnb->len, lnb->offset,
630 i, obj->ioo_bufcnt, dentry, rc);
/* On failure, free the temp pages allocated so far.
 * NOTE(review): the surrounding loop/goto is elided here. */
632 __free_pages(lnb->page, 0);
637 tot_bytes += lnb->len;
640 if (time_after(jiffies, now + 15 * HZ))
641 CERROR("slow start_page_write %lus\n", (jiffies - now) / HZ);
643 CDEBUG(D_INFO, "start_page_write: %lu jiffies\n",
646 lprocfs_counter_add(exp->exp_obd->obd_stats, LPROC_FILTER_WRITE_BYTES,
/* Error cleanup: re-run grant accounting and pop the fs context. */
650 switch(cleanup_phase) {
652 spin_lock(&exp->exp_obd->obd_osfs_lock);
654 filter_grant_incoming(exp, oa);
655 spin_unlock(&exp->exp_obd->obd_osfs_lock);
658 pop_ctxt(&saved, &exp->exp_obd->obd_ctxt, NULL);
/*
 * Dispatch a bulk-I/O prepare request to the read or write implementation
 * based on the command.  NOTE(review): the fallthrough for an unknown cmd
 * is elided from this excerpt.
 */
662 int filter_preprw(int cmd, struct obd_export *exp, struct obdo *oa,
663 int objcount, struct obd_ioobj *obj, int niocount,
664 struct niobuf_remote *nb, struct niobuf_local *res,
665 struct obd_trans_info *oti)
667 if (cmd == OBD_BRW_WRITE)
668 return filter_preprw_write(cmd, exp, oa, objcount, obj,
669 niocount, nb, res, oti);
671 if (cmd == OBD_BRW_READ)
672 return filter_preprw_read(cmd, exp, oa, objcount, obj,
673 niocount, nb, res, oti);
/*
 * Finish a bulk read: release the page references taken in preprw, and —
 * for files larger than fo_readcache_max_filesize — evict the pages from
 * the read cache so big streaming reads do not flush useful cached data.
 * NOTE(review): excerpt — braces, dput and RETURN lines are elided.
 */
679 static int filter_commitrw_read(struct obd_export *exp, struct obdo *oa,
680 int objcount, struct obd_ioobj *obj,
681 int niocount, struct niobuf_local *res,
682 struct obd_trans_info *oti, int rc)
685 struct niobuf_local *lnb;
/* Decide up front whether pages of this file should be dropped from
 * the cache (file exceeds the configured read-cache size limit). */
689 if (res->dentry != NULL)
690 drop = (res->dentry->d_inode->i_size >
691 exp->exp_obd->u.filter.fo_readcache_max_filesize);
693 for (i = 0, o = obj, lnb = res; i < objcount; i++, o++) {
694 for (j = 0 ; j < o->ioo_bufcnt ; j++, lnb++) {
695 if (lnb->page == NULL)
697 /* drop from cache like truncate_list_pages() */
698 if (drop && !TryLockPage(lnb->page)) {
699 if (lnb->page->mapping)
700 ll_truncate_complete_page(lnb->page);
701 unlock_page(lnb->page);
703 page_cache_release(lnb->page);
707 if (res->dentry != NULL)
/*
 * Replace any stale cached page at new_page's index: find and truncate
 * the old page-cache page so a caller can install new_page in its place.
 * The DLM lock held by the caller serializes readers/writers here.
 * NOTE(review): excerpt — loop structure and tail of the function are
 * elided (see gaps in embedded numbering).
 */
712 void flip_into_page_cache(struct inode *inode, struct page *new_page)
714 struct page *old_page;
718 /* the dlm is protecting us from read/write concurrency, so we
719 * expect this find_lock_page to return quickly. even if we
720 * race with another writer it won't be doing much work with
721 * the page locked. we do this 'cause t_c_p expects a
722 * locked page, and it wants to grab the pagecache lock
724 old_page = find_lock_page(inode->i_mapping, new_page->index);
726 ll_truncate_complete_page(old_page);
727 unlock_page(old_page);
728 page_cache_release(old_page);
731 #if 0 /* this should be a /proc tunable someday */
732 /* racing o_directs (no locking ioctl) could race adding
733 * their pages, so we repeat the page invalidation unless
734 * we successfully added our new page */
735 rc = add_to_page_cache_unique(new_page, inode->i_mapping,
737 page_hash(inode->i_mapping,
740 /* add_to_page_cache clears uptodate|dirty and locks
742 SetPageUptodate(new_page);
743 unlock_page(new_page);
/*
 * Retire the grant consumed by a completed write: sum lnb_grant_used
 * over all niobufs and subtract it from the per-export pending count and
 * the filter-wide granted/pending totals, all under the osfs lock.
 * The LASSERTFs verify the accounting never goes negative.
 */
751 void filter_grant_commit(struct obd_export *exp, int niocount,
752 struct niobuf_local *res)
754 struct filter_obd *filter = &exp->exp_obd->u.filter;
755 struct niobuf_local *lnb = res;
756 unsigned long pending = 0;
759 spin_lock(&exp->exp_obd->obd_osfs_lock);
760 for (i = 0, lnb = res; i < niocount; i++, lnb++)
761 pending += lnb->lnb_grant_used;
763 LASSERTF(exp->exp_filter_data.fed_pending >= pending,
764 "%s: cli %s/%p fed_pending: %lu grant_used: %lu\n",
765 exp->exp_obd->obd_name, exp->exp_client_uuid.uuid, exp,
766 exp->exp_filter_data.fed_pending, pending);
767 exp->exp_filter_data.fed_pending -= pending;
768 LASSERTF(filter->fo_tot_granted >= pending,
769 "%s: cli %s/%p tot_granted: "LPU64" grant_used: %lu\n",
770 exp->exp_obd->obd_name, exp->exp_client_uuid.uuid, exp,
771 exp->exp_obd->u.filter.fo_tot_granted, pending);
772 filter->fo_tot_granted -= pending;
773 LASSERTF(filter->fo_tot_pending >= pending,
774 "%s: cli %s/%p tot_pending: "LPU64" grant_used: %lu\n",
775 exp->exp_obd->obd_name, exp->exp_client_uuid.uuid, exp,
776 filter->fo_tot_pending, pending);
777 filter->fo_tot_pending -= pending;
779 spin_unlock(&exp->exp_obd->obd_osfs_lock);
/*
 * Dispatch a bulk-I/O commit to the read or write implementation based
 * on the command.  NOTE(review): the unknown-cmd fallthrough and some
 * argument lines are elided from this excerpt.
 */
782 int filter_commitrw(int cmd, struct obd_export *exp, struct obdo *oa,
783 int objcount, struct obd_ioobj *obj, int niocount,
784 struct niobuf_local *res, struct obd_trans_info *oti,int rc)
786 if (cmd == OBD_BRW_WRITE)
787 return filter_commitrw_write(exp, oa, objcount, obj, niocount,
789 if (cmd == OBD_BRW_READ)
790 return filter_commitrw_read(exp, oa, objcount, obj, niocount,
/*
 * Local (server-side) bulk read/write: build remote niobufs from the
 * caller's brw_page array, run the normal preprw/commitrw pipeline, and
 * copy data between the caller's pages and the filter's pages.
 * NOTE(review): excerpt — kunmap calls, the 'out' label and the final
 * RETURN run past / between the visible lines.
 */
796 int filter_brw(int cmd, struct obd_export *exp, struct obdo *oa,
797 struct lov_stripe_md *lsm, obd_count oa_bufs,
798 struct brw_page *pga, struct obd_trans_info *oti)
800 struct obd_ioobj ioo;
801 struct niobuf_local *lnb;
802 struct niobuf_remote *rnb;
807 OBD_ALLOC(lnb, oa_bufs * sizeof(struct niobuf_local));
808 OBD_ALLOC(rnb, oa_bufs * sizeof(struct niobuf_remote));
810 if (lnb == NULL || rnb == NULL)
811 GOTO(out, ret = -ENOMEM);
/* Translate each brw_page into a remote niobuf for preprw. */
813 for (i = 0; i < oa_bufs; i++) {
814 rnb[i].offset = pga[i].off;
815 rnb[i].len = pga[i].count;
818 obdo_to_ioobj(oa, &ioo);
819 ioo.ioo_bufcnt = oa_bufs;
821 ret = filter_preprw(cmd, exp, oa, 1, &ioo, oa_bufs, rnb, lnb, oti);
/* Move the data: into the filter pages for writes, out for reads. */
825 for (i = 0; i < oa_bufs; i++) {
826 void *virt = kmap(pga[i].pg);
827 obd_off off = pga[i].off & ~PAGE_MASK;
828 void *addr = kmap(lnb[i].page);
830 /* 2 kmaps == vanishingly small deadlock opportunity */
832 if (cmd & OBD_BRW_WRITE)
833 memcpy(addr + off, virt + off, pga[i].count);
835 memcpy(virt + off, addr + off, pga[i].count);
841 ret = filter_commitrw(cmd, exp, oa, 1, &ioo, oa_bufs, lnb, oti, ret);
845 OBD_FREE(lnb, oa_bufs * sizeof(struct niobuf_local));
847 OBD_FREE(rnb, oa_bufs * sizeof(struct niobuf_remote));