1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
4 * linux/fs/obdfilter/filter_io.c
6 * Copyright (c) 2001-2003 Cluster File Systems, Inc.
7 * Author: Peter Braam <braam@clusterfs.com>
8 * Author: Andreas Dilger <adilger@clusterfs.com>
9 * Author: Phil Schwan <phil@clusterfs.com>
11 * This file is part of Lustre, http://www.lustre.org.
13 * Lustre is free software; you can redistribute it and/or
14 * modify it under the terms of version 2 of the GNU General Public
15 * License as published by the Free Software Foundation.
17 * Lustre is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
22 * You should have received a copy of the GNU General Public License
23 * along with Lustre; if not, write to the Free Software
24 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
27 #define DEBUG_SUBSYSTEM S_FILTER
29 #include <linux/config.h>
30 #include <linux/module.h>
31 #include <linux/pagemap.h> // XXX kill me soon
32 #include <linux/version.h>
34 #include <linux/obd_class.h>
35 #include <linux/lustre_fsfilt.h>
36 #include "filter_internal.h"
/* First half of a bulk read for a single page.
 *
 * Grabs (and locks) the page-cache page covering lnb->offset and, unless it
 * is already uptodate, kicks off ->readpage() against it.  When the request
 * extends past i_size the short-read byte count is recorded in lnb->rc.
 * Waiting for the read to complete is deferred to filter_finish_page_read().
 * Returns lnb->rc (negative errno on failure).
 */
38 static int filter_start_page_read(struct inode *inode, struct niobuf_local *lnb)
40 struct address_space *mapping = inode->i_mapping;
/* page-cache index of the page holding this I/O chunk */
42 unsigned long index = lnb->offset >> PAGE_SHIFT;
45 page = grab_cache_page(mapping, index); /* locked page */
47 return lnb->rc = PTR_ERR(page);
49 LASSERT(page->mapping == mapping);
/* request runs past EOF: clip the returned byte count at i_size */
53 if (inode->i_size < lnb->offset + lnb->len - 1)
54 lnb->rc = inode->i_size - lnb->offset;
/* page already cached and valid -- no disk I/O needed */
58 if (PageUptodate(page)) {
/* start the read; completion is waited on in filter_finish_page_read() */
63 rc = mapping->a_ops->readpage(NULL, page);
65 CERROR("page index %lu, rc = %d\n", index, rc);
/* error path: drop the reference taken by grab_cache_page() */
67 page_cache_release(page);
/* Second half of a bulk read: wait for the ->readpage() started in
 * filter_start_page_read() and validate the result.
 *
 * lnb->page == NULL means the chunk was entirely beyond EOF and no read
 * was started (detected by the caller; see filter_preprw_read()).  If the
 * page fails to become uptodate, or has PageError set, the page reference
 * is dropped and lnb->rc is set to -EIO.
 */
74 static int filter_finish_page_read(struct niobuf_local *lnb)
76 if (lnb->page == NULL)
/* fast path: read already completed before we got here */
79 if (PageUptodate(lnb->page))
82 wait_on_page(lnb->page);
83 if (!PageUptodate(lnb->page)) {
84 CERROR("page index %lu/offset "LPX64" not uptodate\n",
85 lnb->page->index, lnb->offset);
86 GOTO(err_page, lnb->rc = -EIO);
88 if (PageError(lnb->page)) {
89 CERROR("page index %lu/offset "LPX64" has error\n",
90 lnb->page->index, lnb->offset);
91 GOTO(err_page, lnb->rc = -EIO);
/* failure: release the page reference held since start_page_read */
97 page_cache_release(lnb->page);
/* Grab (and lock) a page-cache page and prepare it for a whole-page write
 * by calling ->prepare_write(NULL, page, 0, PAGE_SIZE).
 *
 * Used by filter_write_locked_page() when copying a temporary page into the
 * real page cache.  On ->prepare_write() failure or PageError the page is
 * unlocked/released via the err_unlock path and an ERR_PTR-style error is
 * propagated.
 */
102 static struct page *lustre_get_page_write(struct inode *inode,
105 struct address_space *mapping = inode->i_mapping;
109 page = grab_cache_page(mapping, index); /* locked page */
112 /* Note: Called with "O" and "PAGE_SIZE" this is essentially
113 * a no-op for most filesystems, because we write the whole
114 * page. For partial-page I/O this will read in the page.
116 rc = mapping->a_ops->prepare_write(NULL, page, 0, PAGE_SIZE);
118 CERROR("page index %lu, rc = %d\n", index, rc);
121 GOTO(err_unlock, rc);
123 /* XXX not sure if we need this if we are overwriting page */
124 if (PageError(page)) {
125 CERROR("error on page index %lu, rc = %d\n", index, rc);
127 GOTO(err_unlock, rc = -EIO);
134 page_cache_release(page);
138 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
/* 2.4 compatibility shim: provide the 2.5 wait_on_page_locked() name on
 * top of the older waitfor_one_page() interface. */
139 int wait_on_page_locked(struct page *page)
141 waitfor_one_page(page);
145 /* We should only change the file mtime (and not the ctime, like
146 * update_inode_times() in generic_file_write()) when we only change data. */
/* Update i_mtime (and i_ctime when ctime_too is set) to the current time,
 * skipping the dirty-inode write-out entirely when the timestamps would not
 * change.  mark_inode_dirty_sync() only marks the inode for sync of the
 * inode itself, not its data. */
147 static inline void inode_update_time(struct inode *inode, int ctime_too)
149 time_t now = CURRENT_TIME;
/* nothing to do if all requested timestamps already equal 'now' */
150 if (inode->i_mtime == now && (!ctime_too || inode->i_ctime == now))
152 inode->i_mtime = now;
154 inode->i_ctime = now;
155 mark_inode_dirty_sync(inode);
/* Commit a page region previously set up with ->prepare_write().
 *
 * Calls ->commit_write() for [from, to) within lnb->page, waits for the
 * page on O_SYNC inodes, marks the page referenced for page-cache aging,
 * and drops the page reference.  Returns the ->commit_write() result.
 */
159 static int lustre_commit_write(struct niobuf_local *lnb)
161 struct page *page = lnb->page;
/* byte range within the page covered by this niobuf */
162 unsigned from = lnb->offset & ~PAGE_MASK;
163 unsigned to = from + lnb->len;
164 struct inode *inode = page->mapping->host;
167 LASSERT(to <= PAGE_SIZE);
168 err = page->mapping->a_ops->commit_write(NULL, page, from, to);
169 #warning 2.4 folks: wait_on_page_locked does NOT return its error here.
/* synchronous inode: wait for the write, but note the #warning above --
 * any I/O error from the wait is not propagated on 2.4 */
170 if (!err && IS_SYNC(inode))
171 wait_on_page_locked(page);
172 //SetPageUptodate(page); // the client commit_write will do this
174 SetPageReferenced(page);
176 page_cache_release(page);
/* Get a page ready for a bulk write.
 *
 * Tries a non-blocking grab of the page-cache page first.  If the page is
 * currently locked by someone else, a temporary page is allocated instead
 * and flagged N_LOCAL_TEMP_PAGE; its contents are copied into the real
 * page later by filter_write_locked_page().  Otherwise the cache page is
 * prepared for the [from, to) range with ->prepare_write().
 */
180 int filter_get_page_write(struct inode *inode, struct niobuf_local *lnb,
183 unsigned long index = lnb->offset >> PAGE_SHIFT;
184 struct address_space *mapping = inode->i_mapping;
188 //ASSERT_PAGE_INDEX(index, GOTO(err, rc = -EINVAL));
/* non-blocking attempt first, to detect contention on the page lock */
190 page = grab_cache_page_nowait(mapping, index); /* locked page */
192 page = grab_cache_page(mapping, index); /* locked page */
195 /* This page is currently locked, so get a temporary page instead. */
197 CDEBUG(D_INFO, "ino %lu page %ld locked\n", inode->i_ino,index);
198 page = alloc_pages(GFP_KERNEL, 0); /* locked page */
200 CERROR("no memory for a temp page\n");
201 GOTO(err, rc = -ENOMEM);
/* mark so commitrw knows this data must still be copied into place */
205 lnb->flags |= N_LOCAL_TEMP_PAGE;
206 } else if (!IS_ERR(page)) {
207 unsigned from = lnb->offset & ~PAGE_MASK, to = from + lnb->len;
210 rc = mapping->a_ops->prepare_write(NULL, page, from, to);
213 CERROR("page index %lu, rc = %d\n", index, rc);
214 GOTO(err_unlock, rc);
216 /* XXX not sure if we need this if we are overwriting page */
217 if (PageError(page)) {
218 CERROR("error on page index %lu, rc = %d\n", index, rc);
220 GOTO(err_unlock, rc = -EIO);
229 page_cache_release(page);
/* Prepare-phase of a bulk read (OBD_BRW_READ).
 *
 * Resolves the object dentry, starts a page read for every remote niobuf
 * via filter_start_page_read(), then waits for all of them with
 * filter_finish_page_read().  Chunks wholly beyond EOF are skipped early
 * (lnb->page stays NULL, lnb->rc stays 0) so the commit phase can detect
 * them.  Cleanup on error is phase-based (see the switch at the bottom).
 * Timing checks against 'now' log a CERROR whenever a stage takes > 15s.
 */
234 static int filter_preprw_read(int cmd, struct obd_export *exp, struct obdo *oa,
235 int objcount, struct obd_ioobj *obj,
236 int niocount, struct niobuf_remote *nb,
237 struct niobuf_local *res,
238 struct obd_trans_info *oti)
240 struct obd_run_ctxt saved;
242 struct niobuf_remote *rnb;
243 struct niobuf_local *lnb;
244 struct fsfilt_objinfo *fso;
245 struct dentry *dentry;
247 int rc = 0, i, j, tot_bytes = 0, cleanup_phase = 0;
248 unsigned long now = jiffies;
251 /* We are currently not supporting multi-obj BRW_READ RPCS at all.
252 * When we do this function's dentry cleanup will need to be fixed */
253 LASSERT(objcount == 1);
255 OBD_ALLOC(fso, objcount * sizeof(*fso));
259 memset(res, 0, niocount * sizeof(*res));
/* run in the filter's filesystem context for all fs operations below */
261 push_ctxt(&saved, &exp->exp_obd->u.filter.fo_ctxt, NULL);
262 for (i = 0, o = obj; i < objcount; i++, o++) {
263 struct filter_dentry_data *fdd;
264 LASSERT(o->ioo_bufcnt);
266 dentry = filter_oa2dentry(exp->exp_obd, oa);
268 GOTO(cleanup, rc = PTR_ERR(dentry));
270 if (dentry->d_inode == NULL) {
271 CERROR("trying to BRW to non-existent file "LPU64"\n",
274 GOTO(cleanup, rc = -ENOENT);
277 fso[i].fso_dentry = dentry;
278 fso[i].fso_bufcnt = o->ioo_bufcnt;
280 fdd = dentry->d_fsdata;
/* diagnostic only: reads to objects nobody has open are unusual */
281 if (fdd == NULL || !atomic_read(&fdd->fdd_open_count))
282 CDEBUG(D_PAGE, "I/O to unopened object "LPU64"\n",
286 if (time_after(jiffies, now + 15 * HZ))
287 CERROR("slow prep setup %lus\n", (jiffies - now) / HZ);
/* start a (possibly async) page read for every niobuf of every object */
289 for (i = 0, o = obj, rnb = nb, lnb = res; i < objcount; i++, o++) {
290 dentry = fso[i].fso_dentry;
291 inode = dentry->d_inode;
293 for (j = 0; j < o->ioo_bufcnt; j++, rnb++, lnb++) {
294 lnb->dentry = dentry;
295 lnb->offset = rnb->offset;
297 lnb->flags = rnb->flags;
298 lnb->start = jiffies;
300 if (inode->i_size <= rnb->offset) {
301 /* If there's no more data, abort early.
302 * lnb->page == NULL and lnb->rc == 0, so it's
303 * easy to detect later. */
306 rc = filter_start_page_read(inode, lnb);
310 CDEBUG(rc == -ENOSPC ? D_INODE : D_ERROR,
311 "page err %u@"LPU64" %u/%u %p: rc %d\n",
312 lnb->len, lnb->offset, j, o->ioo_bufcnt,
318 tot_bytes += lnb->rc;
319 if (lnb->rc < lnb->len) {
320 /* short read, be sure to wait on it */
327 if (time_after(jiffies, now + 15 * HZ))
328 CERROR("slow prep get page %lus\n", (jiffies - now) / HZ);
330 lprocfs_counter_add(exp->exp_obd->obd_stats, LPROC_FILTER_READ_BYTES,
/* second pass, in reverse: wait for every started read to complete */
332 while (lnb-- > res) {
333 rc = filter_finish_page_read(lnb);
335 CERROR("error page %u@"LPU64" %u %p: rc %d\n", lnb->len,
336 lnb->offset, (int)(lnb - res), lnb->dentry, rc);
342 if (time_after(jiffies, now + 15 * HZ))
343 CERROR("slow prep finish page %lus\n", (jiffies - now) / HZ);
/* phase-based cleanup: falls through from later phases to earlier ones */
348 switch (cleanup_phase) {
350 for (lnb = res; lnb < (res + niocount); lnb++) {
352 page_cache_release(lnb->page);
354 if (res->dentry != NULL)
357 CERROR("NULL dentry in cleanup -- tell CFS\n");
360 OBD_FREE(fso, objcount * sizeof(*fso));
361 pop_ctxt(&saved, &exp->exp_obd->u.filter.fo_ctxt, NULL);
366 /* We need to balance prepare_write() calls with commit_write() calls.
367 * If the page has been prepared, but we have no data for it, we don't
368 * want to overwrite valid data on disk, but we still need to zero out
369 * data for space which was newly allocated. Like part of what happens
370 * in __block_prepare_write() for newly allocated blocks.
372 * XXX currently __block_prepare_write() creates buffers for all the
373 * pages, and the filesystems mark these buffers as BH_New if they
374 * were newly allocated from disk. We use the BH_New flag similarly. */
/* Commit one page of a bulk write.  On the 2.4 error path (err != 0) any
 * BH_New buffers in the page are zeroed first so freshly allocated blocks
 * never reach disk with stale contents; then lustre_commit_write() runs
 * unconditionally so prepare/commit stay balanced. */
375 static int filter_commit_write(struct niobuf_local *lnb, int err)
377 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
379 unsigned block_start, block_end;
380 struct buffer_head *bh, *head = lnb->page->buffers;
381 unsigned blocksize = head->b_size;
383 /* debugging: just seeing if this ever happens */
384 CDEBUG(err == -ENOSPC ? D_INODE : D_ERROR,
385 "called for ino %lu:%lu on err %d\n",
386 lnb->page->mapping->host->i_ino, lnb->page->index, err);
388 /* Currently one buffer per page, but in the future... */
/* walk the circular buffer_head list hanging off the page */
389 for (bh = head, block_start = 0; bh != head || !block_start;
390 block_start = block_end, bh = bh->b_this_page) {
391 block_end = block_start + blocksize;
392 if (buffer_new(bh)) {
/* newly allocated block with no data supplied: zero it */
393 memset(kmap(lnb->page) + block_start, 0,
400 return lustre_commit_write(lnb);
403 /* If we ever start to support multi-object BRW RPCs, we will need to get locks
404 * on mulitple inodes. That isn't all, because there still exists the
405 * possibility of a truncate starting a new transaction while holding the ext3
406 * rwsem = write while some writes (which have started their transactions here)
407 * blocking on the ext3 rwsem = read => lock inversion.
409 * The handling gets very ugly when dealing with locked pages. It may be easier
410 * to just get rid of the locked page code (which has problems of its own) and
411 * either discover we do not need it anymore (i.e. it was a symptom of another
412 * bug) or ensure we get the page locks in an appropriate order. */
/* Prepare-phase of a bulk write (OBD_BRW_WRITE).
 *
 * Resolves the object dentry, takes i_sem, starts a journal transaction
 * (saved in oti->oti_handle for the commit phase), and prepares a page
 * for every remote niobuf via filter_get_page_write().  On the error
 * path every already-prepared page is committed (to balance
 * prepare_write), the transno is finished and the transaction committed.
 */
413 static int filter_preprw_write(int cmd, struct obd_export *exp, struct obdo *oa,
414 int objcount, struct obd_ioobj *obj,
415 int niocount, struct niobuf_remote *nb,
416 struct niobuf_local *res,
417 struct obd_trans_info *oti)
419 struct obd_run_ctxt saved;
421 struct niobuf_remote *rnb;
422 struct niobuf_local *lnb;
423 struct fsfilt_objinfo *fso;
424 struct dentry *dentry;
425 int pglocked = 0, rc = 0, i, j, tot_bytes = 0;
426 unsigned long now = jiffies;
428 LASSERT(objcount == 1);
430 OBD_ALLOC(fso, objcount * sizeof(*fso));
434 memset(res, 0, niocount * sizeof(*res));
436 push_ctxt(&saved, &exp->exp_obd->u.filter.fo_ctxt, NULL);
437 for (i = 0, o = obj; i < objcount; i++, o++) {
438 struct filter_dentry_data *fdd;
439 LASSERT(o->ioo_bufcnt);
441 dentry = filter_oa2dentry(exp->exp_obd, oa);
443 GOTO(out_objinfo, rc = PTR_ERR(dentry));
445 if (dentry->d_inode == NULL) {
446 CERROR("trying to BRW to non-existent file "LPU64"\n",
449 GOTO(out_objinfo, rc = -ENOENT);
452 fso[i].fso_dentry = dentry;
453 fso[i].fso_bufcnt = o->ioo_bufcnt;
/* hold i_sem across the whole prepare; released per-page or on cleanup */
455 down(&dentry->d_inode->i_sem);
456 fdd = dentry->d_fsdata;
457 if (fdd == NULL || !atomic_read(&fdd->fdd_open_count))
458 CDEBUG(D_PAGE, "I/O to unopened object "LPU64"\n",
462 if (time_after(jiffies, now + 15 * HZ))
463 CERROR("slow prep setup %lus\n", (jiffies - now) / HZ);
465 LASSERT(oti != NULL);
/* start the journal transaction; the commit phase ends it */
466 oti->oti_handle = fsfilt_brw_start(exp->exp_obd, objcount, fso,
468 if (IS_ERR(oti->oti_handle)) {
469 rc = PTR_ERR(oti->oti_handle);
470 CDEBUG(rc == -ENOSPC ? D_INODE : D_ERROR,
471 "error starting transaction: rc = %d\n", rc);
472 oti->oti_handle = NULL;
473 GOTO(out_objinfo, rc);
/* prepare a page for every niobuf of every object */
476 for (i = 0, o = obj, rnb = nb, lnb = res; i < objcount; i++, o++) {
477 dentry = fso[i].fso_dentry;
478 for (j = 0; j < o->ioo_bufcnt; j++, rnb++, lnb++) {
480 lnb->dentry = dentry;
482 lnb->dentry = dget(dentry);
484 lnb->offset = rnb->offset;
486 lnb->flags = rnb->flags;
487 lnb->start = jiffies;
489 rc = filter_get_page_write(dentry->d_inode, lnb,
492 up(&dentry->d_inode->i_sem);
495 CDEBUG(rc == -ENOSPC ? D_INODE : D_ERROR,
496 "page err %u@"LPU64" %u/%u %p: rc %d\n",
497 lnb->len, lnb->offset, j, o->ioo_bufcnt,
502 tot_bytes += lnb->len;
506 if (time_after(jiffies, now + 15 * HZ))
507 CERROR("slow prep get page %lus\n", (jiffies - now) / HZ);
509 lprocfs_counter_add(exp->exp_obd->obd_stats, LPROC_FILTER_WRITE_BYTES,
514 OBD_FREE(fso, objcount * sizeof(*fso));
515 /* we saved the journal handle into oti->oti_handle instead */
516 current->journal_info = NULL;
517 pop_ctxt(&saved, &exp->exp_obd->u.filter.fo_ctxt, NULL);
/* error path: commit every prepared page in reverse to balance
 * prepare_write() calls, then finish the transno and commit */
521 while (lnb-- > res) {
522 filter_commit_write(lnb, rc);
523 up(&lnb->dentry->d_inode->i_sem);
526 filter_finish_transno(exp, oti, rc);
527 fsfilt_commit(exp->exp_obd,
528 filter_parent(exp->exp_obd,S_IFREG,obj->ioo_id)->d_inode,
530 goto out; /* dropped the dentry refs already (one per page) */
533 for (i = 0; i < objcount && fso[i].fso_dentry; i++) {
534 up(&fso[i].fso_dentry->d_inode->i_sem);
535 f_dput(fso[i].fso_dentry);
/* Dispatch the prepare-phase of a bulk RPC to the read or write
 * implementation based on cmd (OBD_BRW_READ / OBD_BRW_WRITE). */
540 int filter_preprw(int cmd, struct obd_export *exp, struct obdo *oa,
541 int objcount, struct obd_ioobj *obj, int niocount,
542 struct niobuf_remote *nb, struct niobuf_local *res,
543 struct obd_trans_info *oti)
545 if (cmd == OBD_BRW_WRITE)
546 return filter_preprw_write(cmd, exp, oa, objcount, obj,
547 niocount, nb, res, oti);
549 if (cmd == OBD_BRW_READ)
550 return filter_preprw_read(cmd, exp, oa, objcount, obj,
551 niocount, nb, res, oti);
558 /* It is highly unlikely that we would ever get an error here. The page we want
559 * to get was previously locked, so it had to have already allocated the space,
560 * and we were just writing over the same data, so there would be no hole in the
563 * XXX: possibility of a race with truncate could exist, need to check that.
564 * There are no guarantees w.r.t. write order even on a local filesystem,
565 * although the normal response would be to return the number of bytes
566 * successfully written and leave the rest to the app. */
/* Flush a temporary N_LOCAL_TEMP_PAGE (see filter_get_page_write()) into
 * the real page cache: grab+prepare the cache page, memcpy the whole
 * PAGE_SIZE from the temp page, then commit.  The temp page reference is
 * dropped after the copy. */
567 static int filter_write_locked_page(struct niobuf_local *lnb)
570 void *lpage_addr, *lnb_addr;
574 lpage = lustre_get_page_write(lnb->dentry->d_inode, lnb->page->index);
577 CERROR("error getting locked page index %ld: rc = %d\n",
578 lnb->page->index, rc);
/* still commit to balance the prepare_write done earlier */
580 lustre_commit_write(lnb);
584 /* 2 kmaps == vanishingly small deadlock opportunity */
585 lpage_addr = kmap(lpage);
586 lnb_addr = kmap(lnb->page);
588 memcpy(lpage_addr, lnb_addr, PAGE_SIZE);
/* done with the temporary page; drop its reference */
593 page_cache_release(lnb->page);
596 rc = lustre_commit_write(lnb);
598 CERROR("error committing locked page %ld: rc = %d\n",
599 lnb->page->index, rc);
/* Commit-phase of a bulk read: release the page reference taken for each
 * niobuf during preprw (NULL pages were beyond-EOF chunks and are skipped)
 * and drop the object dentry reference. */
603 static int filter_commitrw_read(struct obd_export *exp, int objcount,
604 struct obd_ioobj *obj, int niocount,
605 struct niobuf_local *res,
606 struct obd_trans_info *oti)
609 struct niobuf_local *lnb;
613 for (i = 0, o = obj, lnb = res; i < objcount; i++, o++) {
614 for (j = 0 ; j < o->ioo_bufcnt ; j++, lnb++) {
615 if (lnb->page != NULL)
616 page_cache_release(lnb->page);
619 if (res->dentry != NULL)
/* Commit-phase shared by reads and writes (despite the name, reads with
 * all-beyond-EOF pages also land here via filter_commitrw()).
 *
 * For writes: restores the journal handle saved in oti->oti_handle,
 * commits every prepared page, flushes N_LOCAL_TEMP_PAGE pages in a
 * second pass via filter_write_locked_page(), updates inode times, and
 * finishes with filter_finish_transno()/fsfilt_commit().  For reads it
 * refreshes atime from the obdo.  Per-page and whole-call timing is
 * logged when a stage takes > 15s.
 */
625 filter_commitrw_write(int cmd, struct obd_export *exp, struct obdo *oa,
626 int objcount, struct obd_ioobj *obj, int niocount,
627 struct niobuf_local *res, struct obd_trans_info *oti)
629 struct obd_run_ctxt saved;
631 struct niobuf_local *lnb;
632 struct obd_device *obd = exp->exp_obd;
633 int found_locked = 0, rc = 0, i;
/* remember whether a transaction was already current when we entered */
634 int nested_trans = current->journal_info != NULL;
635 unsigned long now = jiffies; /* DEBUGGING OST TIMEOUTS */
638 push_ctxt(&saved, &obd->u.filter.fo_ctxt, NULL);
640 if (cmd & OBD_BRW_WRITE) {
/* re-install the journal handle saved by filter_preprw_write() */
642 LASSERT(current->journal_info == NULL ||
643 current->journal_info == oti->oti_handle);
644 current->journal_info = oti->oti_handle;
647 for (i = 0, o = obj, lnb = res; i < objcount; i++, o++) {
651 /* If all of the page reads were beyond EOF, let's pretend
652 * this read didn't really happen at all. */
653 if (lnb->dentry == NULL) {
654 oa->o_valid = OBD_MD_FLID|(oa->o_valid&OBD_MD_FLCKSUM);
658 inode = igrab(lnb->dentry->d_inode);
660 if (cmd & OBD_BRW_WRITE) {
661 /* FIXME: MULTI OBJECT BRW */
662 if (oa && oa->o_valid & (OBD_MD_FLMTIME|OBD_MD_FLCTIME))
663 obdo_refresh_inode(inode, oa, OBD_MD_FLATIME |
667 inode_update_time(lnb->dentry->d_inode, 1);
668 } else if (oa && oa->o_valid & OBD_MD_FLATIME) {
669 /* Note that we don't necessarily write this to disk */
670 obdo_refresh_inode(inode, oa, OBD_MD_FLATIME);
/* first pass: commit regular pages; temp pages are deferred */
673 for (j = 0 ; j < o->ioo_bufcnt ; j++, lnb++) {
674 if (lnb->page == NULL) {
678 if (lnb->flags & N_LOCAL_TEMP_PAGE) {
683 if (time_after(jiffies, lnb->start + 15 * HZ))
684 CERROR("slow commitrw %lusi (%lus)\n",
685 (jiffies - lnb->start) / HZ,
686 (jiffies - now) / HZ);
688 if (cmd & OBD_BRW_WRITE) {
689 int err = filter_commit_write(lnb, 0);
694 page_cache_release(lnb->page);
698 if (time_after(jiffies, lnb->start + 15 * HZ))
699 CERROR("slow commit_write %lus (%lus)\n",
700 (jiffies - lnb->start) / HZ,
701 (jiffies - now) / HZ);
704 /* FIXME: MULTI OBJECT BRW */
706 oa->o_valid = OBD_MD_FLID|(oa->o_valid&OBD_MD_FLCKSUM);
707 obdo_from_inode(oa, inode, FILTER_VALID_FLAGS);
710 if (cmd & OBD_BRW_WRITE)
/* second pass: flush any deferred N_LOCAL_TEMP_PAGE pages */
716 for (i = 0, o = obj, lnb = res; found_locked > 0 && i < objcount;
720 for (j = 0 ; j < o->ioo_bufcnt ; j++, lnb++) {
722 if (!(lnb->flags & N_LOCAL_TEMP_PAGE))
725 if (time_after(jiffies, lnb->start + 15 * HZ))
726 CERROR("slow commitrw locked %lus (%lus)\n",
727 (jiffies - lnb->start) / HZ,
728 (jiffies - now) / HZ);
730 err = filter_write_locked_page(lnb);
736 if (time_after(jiffies, lnb->start + 15 * HZ))
737 CERROR("slow commit_write locked %lus (%lus)\n",
738 (jiffies - lnb->start) / HZ,
739 (jiffies - now) / HZ);
743 if (cmd & OBD_BRW_WRITE) {
744 /* We just want any dentry for the commit, for now */
745 struct dentry *dparent = filter_parent(obd, S_IFREG, 0);
748 rc = filter_finish_transno(exp, oti, rc);
749 err = fsfilt_commit(obd, dparent->d_inode, oti->oti_handle,
754 LASSERT(oti->oti_transno <= obd->obd_last_committed);
755 if (time_after(jiffies, now + 15 * HZ))
756 CERROR("slow commitrw commit %lus\n", (jiffies-now)/HZ);
759 LASSERT(nested_trans || current->journal_info == NULL);
760 pop_ctxt(&saved, &obd->u.filter.fo_ctxt, NULL);
764 /* XXX needs to trickle its oa down */
/* Dispatch the commit-phase of a bulk RPC to the read or write
 * implementation based on cmd (OBD_BRW_READ / OBD_BRW_WRITE). */
765 int filter_commitrw(int cmd, struct obd_export *exp, struct obdo *oa,
766 int objcount, struct obd_ioobj *obj, int niocount,
767 struct niobuf_local *res, struct obd_trans_info *oti)
769 if (cmd == OBD_BRW_WRITE)
770 return filter_commitrw_write(cmd, exp, oa, objcount, obj,
772 if (cmd == OBD_BRW_READ)
773 return filter_commitrw_read(exp, objcount, obj, niocount,
/* Server-local bulk I/O entry point: run a full preprw/commitrw cycle
 * against caller-supplied pages (struct brw_page) without a network bulk
 * transfer.  Translates the brw_page array into remote niobufs, prepares
 * the I/O, memcpy()s data between the caller's pages and the filter's
 * pages (direction depends on cmd), then commits.  Both niobuf arrays
 * are freed on exit and the export reference is dropped.
 */
779 int filter_brw(int cmd, struct lustre_handle *conn, struct obdo *oa,
780 struct lov_stripe_md *lsm, obd_count oa_bufs,
781 struct brw_page *pga, struct obd_trans_info *oti)
783 struct obd_export *exp;
784 struct obd_ioobj ioo;
785 struct niobuf_local *lnb;
786 struct niobuf_remote *rnb;
791 exp = class_conn2export(conn);
793 CDEBUG(D_IOCTL, "invalid client cookie "LPX64"\n",conn->cookie);
797 OBD_ALLOC(lnb, oa_bufs * sizeof(struct niobuf_local));
798 OBD_ALLOC(rnb, oa_bufs * sizeof(struct niobuf_remote));
800 if (lnb == NULL || rnb == NULL)
801 GOTO(out, ret = -ENOMEM);
/* build the remote-niobuf view of the caller's pages */
803 for (i = 0; i < oa_bufs; i++) {
804 rnb[i].offset = pga[i].off;
805 rnb[i].len = pga[i].count;
808 ioo.ioo_id = oa->o_id;
810 ioo.ioo_type = oa->o_mode & S_IFMT;
811 ioo.ioo_bufcnt = oa_bufs;
813 ret = filter_preprw(cmd, exp, oa, 1, &ioo, oa_bufs, rnb, lnb, oti);
/* copy data between caller pages and the prepared filter pages */
817 for (i = 0; i < oa_bufs; i++) {
818 void *virt = kmap(pga[i].pg);
819 obd_off off = pga[i].off & ~PAGE_MASK;
820 void *addr = kmap(lnb[i].page);
822 /* 2 kmaps == vanishingly small deadlock opportunity */
824 if (cmd & OBD_BRW_WRITE)
825 memcpy(addr + off, virt + off, pga[i].count);
827 memcpy(virt + off, addr + off, pga[i].count);
833 ret = filter_commitrw(cmd, exp, oa, 1, &ioo, oa_bufs, lnb, oti);
837 OBD_FREE(lnb, oa_bufs * sizeof(struct niobuf_local));
839 OBD_FREE(rnb, oa_bufs * sizeof(struct niobuf_remote));
840 class_export_put(exp);