1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
4 * linux/fs/obdfilter/filter_io.c
6 * Copyright (c) 2001-2003 Cluster File Systems, Inc.
7 * Author: Peter Braam <braam@clusterfs.com>
8 * Author: Andreas Dilger <adilger@clusterfs.com>
9 * Author: Phil Schwan <phil@clusterfs.com>
11 * This file is part of Lustre, http://www.lustre.org.
13 * Lustre is free software; you can redistribute it and/or
14 * modify it under the terms of version 2 of the GNU General Public
15 * License as published by the Free Software Foundation.
17 * Lustre is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
22 * You should have received a copy of the GNU General Public License
23 * along with Lustre; if not, write to the Free Software
24 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
27 #define DEBUG_SUBSYSTEM S_FILTER
29 #include <linux/config.h>
30 #include <linux/module.h>
31 #include <linux/pagemap.h> // XXX kill me soon
32 #include <linux/version.h>
34 #include <linux/obd_class.h>
35 #include <linux/lustre_fsfilt.h>
36 #include "filter_internal.h"
/* Start reading one niobuf's worth of data into the pagecache.
 * Grabs (locked) the pagecache page covering lnb->offset and, unless it is
 * already uptodate, asks the filesystem to start readpage I/O on it.
 * Completion is waited for later in filter_finish_page_read().
 * Returns 0 on success or a negative errno (also stored in lnb->rc). */
38 static int filter_start_page_read(struct inode *inode, struct niobuf_local *lnb)
40 struct address_space *mapping = inode->i_mapping;
/* pagecache index of the page containing this niobuf's file offset */
42 unsigned long index = lnb->offset >> PAGE_SHIFT;
45 page = grab_cache_page(mapping, index); /* locked page */
/* grab_cache_page() returns NULL only on allocation failure */
47 return lnb->rc = -ENOMEM;
49 LASSERT(page->mapping == mapping);
/* short read at EOF: clamp rc to the bytes actually available.
 * NOTE(review): the "- 1" makes this compare against the last byte rather
 * than one-past-the-end; looks off by one -- confirm intended semantics */
53 if (inode->i_size < lnb->offset + lnb->len - 1)
54 lnb->rc = inode->i_size - lnb->offset;
/* page already cached and valid -- no I/O needed */
58 if (PageUptodate(page)) {
/* kick off the (possibly async) read of this page */
63 rc = mapping->a_ops->readpage(NULL, page);
65 CERROR("page index %lu, rc = %d\n", index, rc);
67 page_cache_release(page);
/* Wait for a page read started by filter_start_page_read() to complete.
 * Verifies the page came back uptodate and without an I/O error; on failure
 * sets lnb->rc = -EIO and releases the page via the err_page path. */
74 static int filter_finish_page_read(struct niobuf_local *lnb)
/* no page means the read was skipped (request was past EOF) */
76 if (lnb->page == NULL)
/* already uptodate: nothing to wait for */
79 if (PageUptodate(lnb->page))
/* block until the in-flight readpage I/O finishes */
82 wait_on_page(lnb->page);
83 if (!PageUptodate(lnb->page)) {
84 CERROR("page index %lu/offset "LPX64" not uptodate\n",
85 lnb->page->index, lnb->offset);
86 GOTO(err_page, lnb->rc = -EIO);
88 if (PageError(lnb->page)) {
89 CERROR("page index %lu/offset "LPX64" has error\n",
90 lnb->page->index, lnb->offset);
91 GOTO(err_page, lnb->rc = -EIO);
97 page_cache_release(lnb->page);
102 /* Grab the dirty and seen grant announcements from the incoming obdo.
103 * We will later calculate the client's new grant and return it.
104 * Caller must hold osfs lock */
105 static void filter_grant_incoming(struct obd_export *exp, struct obdo *oa)
107 struct filter_export_data *fed;
108 struct obd_device *obd = exp->exp_obd;
111 LASSERT_SPIN_LOCKED(&obd->obd_osfs_lock);
/* both FLBLOCKS and FLGRANT must be set for the grant fields to be
 * trustworthy; otherwise strip FLGRANT so no grant processing happens */
113 if ((oa->o_valid & (OBD_MD_FLBLOCKS|OBD_MD_FLGRANT)) !=
114 (OBD_MD_FLBLOCKS|OBD_MD_FLGRANT)) {
115 oa->o_valid &= ~OBD_MD_FLGRANT;
120 fed = &exp->exp_filter_data;
122 /* Add some margin, since there is a small race if other RPCs arrive
123 * out-of-order and have already consumed some grant. We want to
124 * leave this here in case there is a large error in accounting. */
125 CDEBUG(oa->o_grant > fed->fed_grant + FILTER_GRANT_CHUNK ?
127 "%s: cli %s/%p reports grant: "LPU64" dropped: %u, local: %lu\n",
128 obd->obd_name, exp->exp_client_uuid.uuid, exp, oa->o_grant,
129 oa->o_dropped, fed->fed_grant);
131 /* Update our accounting now so that statfs takes it into account.
132 * Note that fed_dirty is only approximate and can become incorrect
133 * if RPCs arrive out-of-order. No important calculations depend
134 * on fed_dirty however. */
135 obd->u.filter.fo_tot_dirty += oa->o_dirty - fed->fed_dirty;
/* sanity: the client cannot have dropped more grant than it holds */
136 if (fed->fed_grant < oa->o_dropped) {
137 CERROR("%s: cli %s/%p reports %u dropped > fed_grant %lu\n",
138 obd->obd_name, exp->exp_client_uuid.uuid, exp,
139 oa->o_dropped, fed->fed_grant);
142 if (obd->u.filter.fo_tot_granted < oa->o_dropped) {
143 CERROR("%s: cli %s/%p reports %u dropped > tot_grant "LPU64"\n",
144 obd->obd_name, exp->exp_client_uuid.uuid, exp,
145 oa->o_dropped, obd->u.filter.fo_tot_granted);
/* retire the grant the client says it has dropped */
148 obd->u.filter.fo_tot_granted -= oa->o_dropped;
149 fed->fed_grant -= oa->o_dropped;
150 fed->fed_dirty = oa->o_dirty;
154 #define GRANT_FOR_LLOG 16
156 /* Figure out how much space is available between what we've granted
157 * and what remains in the filesystem. Compensate for ext3 indirect
158 * block overhead when computing how much free space is left ungranted.
160 * Caller must hold obd_osfs_lock.
 *
 * Returns the number of bytes (not blocks) still available to grant. */
161 obd_size filter_grant_space_left(struct obd_export *exp)
163 struct obd_device *obd = exp->exp_obd;
164 int blockbits = obd->u.filter.fo_sb->s_blocksize_bits;
165 obd_size tot_granted = obd->u.filter.fo_tot_granted, avail, left = 0;
166 int rc, statfs_done = 0;
168 LASSERT_SPIN_LOCKED(&obd->obd_osfs_lock);
/* refresh the cached statfs data when it is more than one second old */
170 if (time_before(obd->obd_osfs_age, jiffies - HZ)) {
172 rc = fsfilt_statfs(obd, obd->u.filter.fo_sb, jiffies + 1);
173 if (rc) /* N.B. statfs can't really fail */
178 avail = obd->obd_osfs.os_bavail;
/* hold back ~1/8 of free blocks to cover indirect-block overhead */
179 left = avail - (avail >> (blockbits - 3)); /* (d)indirect */
/* also reserve GRANT_FOR_LLOG blocks, then convert blocks to bytes */
180 if (left > GRANT_FOR_LLOG) {
181 left = (left - GRANT_FOR_LLOG) << blockbits;
183 left = 0 /* << blockbits */;
/* stale statfs and nearly no headroom: don't trust the cached numbers */
186 if (!statfs_done && left < 32 * FILTER_GRANT_CHUNK + tot_granted) {
187 CDEBUG(D_CACHE, "fs has no space left and statfs too old\n");
191 if (left >= tot_granted) {
/* we appear overgranted: complain, rate-limited to every 20s */
194 static unsigned long next;
195 if (left < tot_granted - obd->u.filter.fo_tot_pending &&
196 time_after(jiffies, next)) {
/* drop the spinlock around the error print and debug-log dump */
197 spin_unlock(&obd->obd_osfs_lock);
198 CERROR("%s: cli %s/%p grant "LPU64" > available "
199 LPU64" and pending "LPU64"\n", obd->obd_name,
200 exp->exp_client_uuid.uuid, exp, tot_granted,
201 left, obd->u.filter.fo_tot_pending);
203 portals_debug_dumplog();
204 next = jiffies + 20 * HZ;
205 spin_lock(&obd->obd_osfs_lock);
210 CDEBUG(D_CACHE, "%s: cli %s/%p free: "LPU64" avail: "LPU64" grant "LPU64
211 " left: "LPU64" pending: "LPU64"\n", obd->obd_name,
212 exp->exp_client_uuid.uuid, exp,
213 obd->obd_osfs.os_bfree << blockbits, avail << blockbits,
214 tot_granted, left, obd->u.filter.fo_tot_pending);
219 /* Calculate how much grant space to allocate to this client, based on how
220 * much space is currently free and how much of that is already granted.
222 * Caller must hold obd_osfs_lock.
 *
 * current_grant/want come from the client's obdo; fs_space_left comes from
 * filter_grant_space_left().  The new grant is added to both the export's
 * fed_grant and the filter-wide fo_tot_granted totals. */
223 long filter_grant(struct obd_export *exp, obd_size current_grant,
224 obd_size want, obd_size fs_space_left)
226 struct obd_device *obd = exp->exp_obd;
227 struct filter_export_data *fed = &exp->exp_filter_data;
228 int blockbits = obd->u.filter.fo_sb->s_blocksize_bits;
231 LASSERT_SPIN_LOCKED(&obd->obd_osfs_lock);
233 /* Grant some fraction of the client's requested grant space so that
234 * they are not always waiting for write credits (not all of it to
235 * avoid overgranting in face of multiple RPCs in flight). This
236 * essentially will be able to control the OSC_MAX_RIF for a client.
238 * If we do have a large disparity and multiple RPCs in flight we
239 * might grant "too much" but that's OK because it means we are
240 * dirtying a lot on the client and will likely use it up quickly. */
241 if (current_grant < want) {
/* grant the smaller of half the request and 1/8 of the space left
 * (both computed in filesystem blocks) */
242 grant = min((want >> blockbits) / 2,
243 (fs_space_left >> blockbits) / 8);
/* never hand out more than one grant chunk per call */
247 if (grant > FILTER_GRANT_CHUNK)
248 grant = FILTER_GRANT_CHUNK;
/* account the new grant both globally and per-export */
250 obd->u.filter.fo_tot_granted += grant;
251 fed->fed_grant += grant;
255 CDEBUG(D_CACHE,"%s: cli %s/%p wants: "LPU64" granting: "LPU64"\n",
256 obd->obd_name, exp->exp_client_uuid.uuid, exp, want, grant);
258 "%s: cli %s/%p tot cached:"LPU64" granted:"LPU64
259 " num_exports: %d\n", obd->obd_name, exp->exp_client_uuid.uuid,
260 exp, obd->u.filter.fo_tot_dirty,
261 obd->u.filter.fo_tot_granted, obd->obd_num_exports);
/* Prepare a bulk read: process incoming grant info, look up the object's
 * dentry, and start pagecache reads for every remote niobuf, then wait for
 * them all to complete.  On success `res` holds one populated niobuf_local
 * per remote niobuf (lnb->page == NULL marks a buffer entirely past EOF).
 * Cleanup on error is staged via cleanup_phase / the cleanup label. */
266 static int filter_preprw_read(int cmd, struct obd_export *exp, struct obdo *oa,
267 int objcount, struct obd_ioobj *obj,
268 int niocount, struct niobuf_remote *nb,
269 struct niobuf_local *res,
270 struct obd_trans_info *oti)
272 struct obd_device *obd = exp->exp_obd;
273 struct obd_run_ctxt saved;
275 struct niobuf_remote *rnb;
276 struct niobuf_local *lnb = NULL;
277 struct fsfilt_objinfo *fso;
278 struct dentry *dentry;
280 int rc = 0, i, j, tot_bytes = 0, cleanup_phase = 0;
281 unsigned long now = jiffies;
284 /* We are currently not supporting multi-obj BRW_READ RPCS at all.
285 * When we do this function's dentry cleanup will need to be fixed */
286 LASSERT(objcount == 1);
287 LASSERT(obj->ioo_bufcnt > 0);
/* absorb the client's grant/dirty report and compute its new grant */
289 if (oa && oa->o_valid & OBD_MD_FLGRANT) {
290 spin_lock(&obd->obd_osfs_lock);
291 filter_grant_incoming(exp, oa);
294 /* Reads do not increase grants */
295 oa->o_grant = filter_grant(exp, oa->o_grant, oa->o_undirty,
296 filter_grant_space_left(exp));
300 spin_unlock(&obd->obd_osfs_lock);
303 OBD_ALLOC(fso, objcount * sizeof(*fso));
307 memset(res, 0, niocount * sizeof(*res));
/* run the filesystem ops in the filter's own run context */
309 push_ctxt(&saved, &exp->exp_obd->obd_ctxt, NULL);
310 for (i = 0, o = obj; i < objcount; i++, o++) {
311 LASSERT(o->ioo_bufcnt);
313 dentry = filter_oa2dentry(obd, oa);
315 GOTO(cleanup, rc = PTR_ERR(dentry));
317 if (dentry->d_inode == NULL) {
318 CERROR("trying to BRW to non-existent file "LPU64"\n",
321 GOTO(cleanup, rc = -ENOENT);
324 fso[i].fso_dentry = dentry;
325 fso[i].fso_bufcnt = o->ioo_bufcnt;
/* warn if the setup phase took suspiciously long (>15s) */
328 if (time_after(jiffies, now + 15 * HZ))
329 CERROR("slow preprw_read setup %lus\n", (jiffies - now) / HZ);
331 CDEBUG(D_INFO, "preprw_read setup: %lu jiffies\n",
/* start a pagecache read for every remote niobuf of every object */
334 for (i = 0, o = obj, rnb = nb, lnb = res; i < objcount; i++, o++) {
335 dentry = fso[i].fso_dentry;
336 inode = dentry->d_inode;
338 for (j = 0; j < o->ioo_bufcnt; j++, rnb++, lnb++) {
339 lnb->dentry = dentry;
340 lnb->offset = rnb->offset;
342 lnb->flags = rnb->flags;
344 if (inode->i_size <= rnb->offset) {
345 /* If there's no more data, abort early.
346 * lnb->page == NULL and lnb->rc == 0, so it's
347 * easy to detect later. */
350 rc = filter_start_page_read(inode, lnb);
354 CDEBUG(rc == -ENOSPC ? D_INODE : D_ERROR,
355 "page err %u@"LPU64" %u/%u %p: rc %d\n",
356 lnb->len, lnb->offset, j, o->ioo_bufcnt,
362 tot_bytes += lnb->rc;
363 if (lnb->rc < lnb->len) {
364 /* short read, be sure to wait on it */
371 if (time_after(jiffies, now + 15 * HZ))
372 CERROR("slow start_page_read %lus\n", (jiffies - now) / HZ);
374 CDEBUG(D_INFO, "start_page_read: %lu jiffies\n",
377 lprocfs_counter_add(obd->obd_stats, LPROC_FILTER_READ_BYTES, tot_bytes);
/* walk back over the niobufs we started and wait for each read */
378 while (lnb-- > res) {
379 rc = filter_finish_page_read(lnb);
381 CERROR("error page %u@"LPU64" %u %p: rc %d\n", lnb->len,
382 lnb->offset, (int)(lnb - res), lnb->dentry, rc);
388 if (time_after(jiffies, now + 15 * HZ))
389 CERROR("slow finish_page_read %lus\n", (jiffies - now) / HZ);
391 CDEBUG(D_INFO, "finish_page_read: %lu jiffies\n",
394 filter_tally_read(&exp->exp_obd->u.filter, res, niocount);
/* staged cleanup: release pages, then the dentry, then fso/context */
399 switch (cleanup_phase) {
401 for (lnb = res; lnb < (res + niocount); lnb++) {
403 page_cache_release(lnb->page);
405 if (res->dentry != NULL)
408 CERROR("NULL dentry in cleanup -- tell CFS\n");
410 OBD_FREE(fso, objcount * sizeof(*fso));
411 pop_ctxt(&saved, &exp->exp_obd->obd_ctxt, NULL);
416 /* When clients have dirtied as much space as they've been granted they
417 * fall through to sync writes. These sync writes haven't been expressed
418 * in grants and need to error with ENOSPC when there isn't room in the
419 * filesystem for them after grants are taken into account. However,
420 * writeback of the dirty data that was already granted space can write
423 * Caller must hold obd_osfs_lock.
 *
 * For each remote niobuf this rounds the request out to full filesystem
 * blocks, charges it either against the client's existing grant
 * (OBD_BRW_FROM_GRANT) or against the remaining free space, and marks the
 * accepted buffers OBD_BRW_GRANTED.  Returns -ENOSPC unless at least the
 * granted portion can proceed (exact success criteria are on elided lines). */
424 static int filter_grant_check(struct obd_export *exp, int objcount,
425 struct fsfilt_objinfo *fso, int niocount,
426 struct niobuf_remote *rnb,
427 struct niobuf_local *lnb, obd_size *left,
430 struct filter_export_data *fed = &exp->exp_filter_data;
431 int blocksize = exp->exp_obd->u.filter.fo_sb->s_blocksize;
432 unsigned long used = 0, ungranted = 0, using;
433 int i, rc = -ENOSPC, obj, n = 0, mask = D_CACHE;
435 LASSERT_SPIN_LOCKED(&exp->exp_obd->obd_osfs_lock);
437 for (obj = 0; obj < objcount; obj++) {
438 for (i = 0; i < fso[obj].fso_bufcnt; i++, n++) {
441 /* FIXME: this is calculated with PAGE_SIZE on client */
/* round the byte count out to whole blocks at both ends */
443 bytes += rnb[n].offset & (blocksize - 1);
444 tmp = (rnb[n].offset + rnb[n].len) & (blocksize - 1);
446 bytes += blocksize - tmp;
/* client claims this write was covered by its grant */
448 if (rnb[n].flags & OBD_BRW_FROM_GRANT) {
449 if (fed->fed_grant < used + bytes) {
451 "%s: cli %s/%p claims %ld+%d "
452 "GRANT, real grant %lu idx %d\n",
453 exp->exp_obd->obd_name,
454 exp->exp_client_uuid.uuid, exp,
455 used, bytes, fed->fed_grant, n);
459 rnb[n].flags |= OBD_BRW_GRANTED;
460 lnb[n].lnb_grant_used = bytes;
461 CDEBUG(0, "idx %d used=%lu\n", n, used);
/* ungranted (sync) write: accept it only if free space remains */
466 if (*left > ungranted) {
467 /* if enough space, pretend it was granted */
469 rnb[n].flags |= OBD_BRW_GRANTED;
470 CDEBUG(0, "idx %d ungranted=%lu\n",n,ungranted);
475 /* We can't check for already-mapped blocks here, as
476 * it requires dropping the osfs lock to do the bmap.
477 * Instead, we return ENOSPC and in that case we need
478 * to go through and verify if all of the blocks not
479 * marked BRW_GRANTED are already mapped and we can
480 * ignore this error. */
/* NOTE(review): this looks like it should CLEAR the flag, i.e.
 * "&= ~OBD_BRW_GRANTED"; as written it masks off every other
 * flag bit instead -- confirm against upstream Lustre. */
482 rnb[n].flags &= OBD_BRW_GRANTED;
483 CDEBUG(D_CACHE,"%s: cli %s/%p idx %d no space for %d\n",
484 exp->exp_obd->obd_name,
485 exp->exp_client_uuid.uuid, exp, n, bytes);
489 /* Now subtract what the client has used already. We don't subtract
490 * this from the tot_granted yet, so that other clients can't grab
491 * that space before we have actually allocated our blocks. That
492 * happens in filter_grant_commit() after the writes are done. */
494 fed->fed_grant -= used;
495 fed->fed_pending += used;
496 exp->exp_obd->u.filter.fo_tot_pending += used;
499 "%s: cli %s/%p used: %lu ungranted: %lu grant: %lu dirty: %lu\n",
500 exp->exp_obd->obd_name, exp->exp_client_uuid.uuid, exp, used,
501 ungranted, fed->fed_grant, fed->fed_dirty);
503 /* Rough calc in case we don't refresh cached statfs data */
504 using = (used + ungranted + 1 ) >>
505 exp->exp_obd->u.filter.fo_sb->s_blocksize_bits;
506 if (exp->exp_obd->obd_osfs.os_bavail > using)
507 exp->exp_obd->obd_osfs.os_bavail -= using;
509 exp->exp_obd->obd_osfs.os_bavail = 0;
/* fed_dirty can drift if RPCs arrive out of order; clamp before use */
511 if (fed->fed_dirty < used) {
512 CERROR("%s: cli %s/%p claims used %lu > fed_dirty %lu\n",
513 exp->exp_obd->obd_name, exp->exp_client_uuid.uuid, exp,
514 used, fed->fed_dirty);
515 used = fed->fed_dirty;
517 exp->exp_obd->u.filter.fo_tot_dirty -= used;
518 fed->fed_dirty -= used;
/* Allocate a private (non-pagecache) page to receive one niobuf of write
 * data.  The tail of a partial page is zero-filled so no stale kernel
 * memory ends up on disk.  Returns 0 or sets and returns lnb->rc. */
523 static int filter_start_page_write(struct inode *inode,
524 struct niobuf_local *lnb)
/* order-0 highmem allocation; writes do not go through the pagecache */
526 struct page *page = alloc_pages(GFP_HIGHUSER, 0);
528 CERROR("no memory for a temp page\n");
529 RETURN(lnb->rc = -ENOMEM);
531 POISON_PAGE(page, 0xf1);
/* zero-fill the part of the page past lnb->len */
532 if (lnb->len != PAGE_SIZE) {
533 memset(kmap(page) + lnb->len, 0, PAGE_SIZE - lnb->len);
/* stamp the page with its target file index for the write-out path */
536 page->index = lnb->offset >> PAGE_SHIFT;
542 /* If we ever start to support multi-object BRW RPCs, we will need to get locks
543 * on multiple inodes. That isn't all, because there still exists the
544 * possibility of a truncate starting a new transaction while holding the ext3
545 * rwsem = write while some writes (which have started their transactions here)
546 * blocking on the ext3 rwsem = read => lock inversion.
548 * The handling gets very ugly when dealing with locked pages. It may be easier
549 * to just get rid of the locked page code (which has problems of its own) and
550 * either discover we do not need it anymore (i.e. it was a symptom of another
551 * bug) or ensure we get the page locks in an appropriate order. */
/* Prepare a bulk write: look up the target object, run grant accounting
 * and the per-niobuf grant check, then allocate a private temp page for
 * every remote niobuf so portals can land the incoming data.  Cleanup on
 * error is staged via cleanup_phase / the cleanup label. */
552 static int filter_preprw_write(int cmd, struct obd_export *exp, struct obdo *oa,
553 int objcount, struct obd_ioobj *obj,
554 int niocount, struct niobuf_remote *nb,
555 struct niobuf_local *res,
556 struct obd_trans_info *oti)
558 struct obd_run_ctxt saved;
559 struct niobuf_remote *rnb;
560 struct niobuf_local *lnb;
561 struct fsfilt_objinfo fso;
562 struct dentry *dentry;
564 unsigned long now = jiffies;
565 int rc = 0, i, tot_bytes = 0, cleanup_phase = 1;
/* multi-object write RPCs are not supported (see comment above) */
567 LASSERT(objcount == 1);
568 LASSERT(obj->ioo_bufcnt > 0);
570 memset(res, 0, niocount * sizeof(*res));
/* run the filesystem ops in the filter's own run context */
572 push_ctxt(&saved, &exp->exp_obd->obd_ctxt, NULL);
573 dentry = filter_fid2dentry(exp->exp_obd, NULL, obj->ioo_gr,
576 GOTO(cleanup, rc = PTR_ERR(dentry));
578 if (dentry->d_inode == NULL) {
579 CERROR("trying to BRW to non-existent file "LPU64"\n",
582 GOTO(cleanup, rc = -ENOENT);
585 fso.fso_dentry = dentry;
586 fso.fso_bufcnt = obj->ioo_bufcnt;
/* warn if the setup phase took suspiciously long (>15s) */
588 if (time_after(jiffies, now + 15 * HZ))
589 CERROR("slow preprw_write setup %lus\n", (jiffies - now) / HZ);
591 CDEBUG(D_INFO, "preprw_write setup: %lu jiffies\n",
/* grant processing: absorb the client's report, check each niobuf
 * against grant/free space, and compute the client's new grant */
594 spin_lock(&exp->exp_obd->obd_osfs_lock);
596 filter_grant_incoming(exp, oa);
599 left = filter_grant_space_left(exp);
601 rc = filter_grant_check(exp, objcount, &fso, niocount, nb, res,
602 &left, dentry->d_inode);
603 if (oa && oa->o_valid & OBD_MD_FLGRANT)
604 oa->o_grant = filter_grant(exp,oa->o_grant,oa->o_undirty,left);
606 spin_unlock(&exp->exp_obd->obd_osfs_lock);
613 for (i = 0, rnb = nb, lnb = res; i < obj->ioo_bufcnt;
615 /* We still set up for ungranted pages so that granted pages
616 * can be written to disk as they were promised, and portals
617 * needs to keep the pages all aligned properly. */
618 lnb->dentry = dentry;
619 lnb->offset = rnb->offset;
621 lnb->flags = rnb->flags;
623 rc = filter_start_page_write(dentry->d_inode, lnb);
625 CDEBUG(D_ERROR, "page err %u@"LPU64" %u/%u %p: rc %d\n",
626 lnb->len, lnb->offset,
627 i, obj->ioo_bufcnt, dentry, rc);
/* free the temp pages allocated so far on failure */
629 __free_pages(lnb->page, 0);
634 tot_bytes += lnb->len;
637 if (time_after(jiffies, now + 15 * HZ))
638 CERROR("slow start_page_write %lus\n", (jiffies - now) / HZ);
640 CDEBUG(D_INFO, "start_page_write: %lu jiffies\n",
643 lprocfs_counter_add(exp->exp_obd->obd_stats, LPROC_FILTER_WRITE_BYTES,
647 switch(cleanup_phase) {
/* undo the grant bookkeeping taken for this failed write */
649 spin_lock(&exp->exp_obd->obd_osfs_lock);
651 filter_grant_incoming(exp, oa);
652 spin_unlock(&exp->exp_obd->obd_osfs_lock);
655 pop_ctxt(&saved, &exp->exp_obd->obd_ctxt, NULL);
/* Dispatch bulk-I/O preparation to the read or write implementation
 * depending on cmd (OBD_BRW_READ / OBD_BRW_WRITE). */
659 int filter_preprw(int cmd, struct obd_export *exp, struct obdo *oa,
660 int objcount, struct obd_ioobj *obj, int niocount,
661 struct niobuf_remote *nb, struct niobuf_local *res,
662 struct obd_trans_info *oti)
664 if (cmd == OBD_BRW_WRITE)
665 return filter_preprw_write(cmd, exp, oa, objcount, obj,
666 niocount, nb, res, oti);
668 if (cmd == OBD_BRW_READ)
669 return filter_preprw_read(cmd, exp, oa, objcount, obj,
670 niocount, nb, res, oti);
/* Finish a bulk read: release every local page, dropping pages of large
 * files (above fo_readcache_max_filesize) from the pagecache entirely so
 * huge reads don't evict the rest of the cache. */
676 static int filter_commitrw_read(struct obd_export *exp, struct obdo *oa,
677 int objcount, struct obd_ioobj *obj,
678 int niocount, struct niobuf_local *res,
679 struct obd_trans_info *oti, int rc)
682 struct niobuf_local *lnb;
/* decide once per RPC whether this file's pages should be dropped */
686 if (res->dentry != NULL)
687 drop = (res->dentry->d_inode->i_size >
688 exp->exp_obd->u.filter.fo_readcache_max_filesize);
690 for (i = 0, o = obj, lnb = res; i < objcount; i++, o++) {
691 for (j = 0 ; j < o->ioo_bufcnt ; j++, lnb++) {
/* NULL page means the niobuf was entirely past EOF */
692 if (lnb->page == NULL)
694 /* drop from cache like truncate_list_pages() */
695 if (drop && !TryLockPage(lnb->page)) {
696 if (lnb->page->mapping)
697 ll_truncate_complete_page(lnb->page);
698 unlock_page(lnb->page);
700 page_cache_release(lnb->page);
704 if (res->dentry != NULL)
/* Replace any existing pagecache page at new_page's index with new_page.
 * Evicts the old page (truncate-style) so the freshly written data is what
 * later reads find; actually inserting new_page is currently disabled. */
709 void flip_into_page_cache(struct inode *inode, struct page *new_page)
711 struct page *old_page;
715 /* the dlm is protecting us from read/write concurrency, so we
716 * expect this find_lock_page to return quickly. even if we
717 * race with another writer it won't be doing much work with
718 * the page locked. we do this 'cause t_c_p expects a
719 * locked page, and it wants to grab the pagecache lock
721 old_page = find_lock_page(inode->i_mapping, new_page->index);
723 ll_truncate_complete_page(old_page);
724 unlock_page(old_page);
725 page_cache_release(old_page);
728 #if 0 /* this should be a /proc tunable someday */
729 /* racing o_directs (no locking ioctl) could race adding
730 * their pages, so we repeat the page invalidation unless
731 * we successfully added our new page */
732 rc = add_to_page_cache_unique(new_page, inode->i_mapping,
734 page_hash(inode->i_mapping,
737 /* add_to_page_cache clears uptodate|dirty and locks
739 SetPageUptodate(new_page);
740 unlock_page(new_page);
/* Retire the "pending" grant for a completed batch of writes.
 * Sums lnb_grant_used over all niobufs and, under the osfs lock, removes
 * that amount from the export's fed_pending and the filter-wide
 * fo_tot_granted / fo_tot_pending counters (the space is now really
 * allocated on disk, so it no longer counts as granted-but-unused). */
748 void filter_grant_commit(struct obd_export *exp, int niocount,
749 struct niobuf_local *res)
751 struct filter_obd *filter = &exp->exp_obd->u.filter;
752 struct niobuf_local *lnb = res;
753 unsigned long pending = 0;
756 spin_lock(&exp->exp_obd->obd_osfs_lock);
757 for (i = 0, lnb = res; i < niocount; i++, lnb++)
758 pending += lnb->lnb_grant_used;
/* each counter must cover the amount being retired, else accounting
 * has gone wrong -- assert loudly with full context */
760 LASSERTF(exp->exp_filter_data.fed_pending >= pending,
761 "%s: cli %s/%p fed_pending: %lu grant_used: %lu\n",
762 exp->exp_obd->obd_name, exp->exp_client_uuid.uuid, exp,
763 exp->exp_filter_data.fed_pending, pending);
764 exp->exp_filter_data.fed_pending -= pending;
765 LASSERTF(filter->fo_tot_granted >= pending,
766 "%s: cli %s/%p tot_granted: "LPU64" grant_used: %lu\n",
767 exp->exp_obd->obd_name, exp->exp_client_uuid.uuid, exp,
768 exp->exp_obd->u.filter.fo_tot_granted, pending);
769 filter->fo_tot_granted -= pending;
770 LASSERTF(filter->fo_tot_pending >= pending,
771 "%s: cli %s/%p tot_pending: "LPU64" grant_used: %lu\n",
772 exp->exp_obd->obd_name, exp->exp_client_uuid.uuid, exp,
773 filter->fo_tot_pending, pending);
774 filter->fo_tot_pending -= pending;
776 spin_unlock(&exp->exp_obd->obd_osfs_lock);
/* Dispatch bulk-I/O commit to the read or write implementation depending
 * on cmd; rc carries the transfer result from the bulk phase. */
779 int filter_commitrw(int cmd, struct obd_export *exp, struct obdo *oa,
780 int objcount, struct obd_ioobj *obj, int niocount,
781 struct niobuf_local *res, struct obd_trans_info *oti,int rc)
783 if (cmd == OBD_BRW_WRITE)
784 return filter_commitrw_write(exp, oa, objcount, obj, niocount,
786 if (cmd == OBD_BRW_READ)
787 return filter_commitrw_read(exp, oa, objcount, obj, niocount,
/* Perform a local bulk read or write entirely inside the filter:
 * build remote niobufs from the caller's brw_page array, run the normal
 * preprw/commitrw pipeline, and memcpy the data between the caller's
 * pages and the filter's local pages (no network transfer involved). */
793 int filter_brw(int cmd, struct obd_export *exp, struct obdo *oa,
794 struct lov_stripe_md *lsm, obd_count oa_bufs,
795 struct brw_page *pga, struct obd_trans_info *oti)
797 struct obd_ioobj ioo;
798 struct niobuf_local *lnb;
799 struct niobuf_remote *rnb;
804 OBD_ALLOC(lnb, oa_bufs * sizeof(struct niobuf_local));
805 OBD_ALLOC(rnb, oa_bufs * sizeof(struct niobuf_remote));
807 if (lnb == NULL || rnb == NULL)
808 GOTO(out, ret = -ENOMEM);
/* translate each brw_page into a remote niobuf for preprw */
810 for (i = 0; i < oa_bufs; i++) {
811 rnb[i].offset = pga[i].off;
812 rnb[i].len = pga[i].count;
815 obdo_to_ioobj(oa, &ioo);
816 ioo.ioo_bufcnt = oa_bufs;
818 ret = filter_preprw(cmd, exp, oa, 1, &ioo, oa_bufs, rnb, lnb, oti);
/* copy between the caller's pages and the filter's local pages,
 * direction depending on read vs write */
822 for (i = 0; i < oa_bufs; i++) {
823 void *virt = kmap(pga[i].pg);
824 obd_off off = pga[i].off & ~PAGE_MASK;
825 void *addr = kmap(lnb[i].page);
827 /* 2 kmaps == vanishingly small deadlock opportunity */
829 if (cmd & OBD_BRW_WRITE)
830 memcpy(addr + off, virt + off, pga[i].count);
832 memcpy(virt + off, addr + off, pga[i].count);
838 ret = filter_commitrw(cmd, exp, oa, 1, &ioo, oa_bufs, lnb, oti, ret);
842 OBD_FREE(lnb, oa_bufs * sizeof(struct niobuf_local));
844 OBD_FREE(rnb, oa_bufs * sizeof(struct niobuf_remote));