/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 *  linux/fs/obdfilter/filter_io.c
 *
 *  Copyright (c) 2001-2003 Cluster File Systems, Inc.
 *   Author: Peter Braam <braam@clusterfs.com>
 *   Author: Andreas Dilger <adilger@clusterfs.com>
 *   Author: Phil Schwan <phil@clusterfs.com>
 *
 *   This file is part of Lustre, http://www.lustre.org.
 *
 *   Lustre is free software; you can redistribute it and/or
 *   modify it under the terms of version 2 of the GNU General Public
 *   License as published by the Free Software Foundation.
 *
 *   Lustre is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with Lustre; if not, write to the Free Software
 *   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#define DEBUG_SUBSYSTEM S_FILTER

#include <linux/config.h>
#include <linux/module.h>
#include <linux/pagemap.h> // XXX kill me soon
#include <linux/version.h>

#include <linux/obd_class.h>
#include <linux/lustre_fsfilt.h>
#include "filter_internal.h"

static int filter_start_page_read(struct inode *inode, struct niobuf_local *lnb)
{
        struct address_space *mapping = inode->i_mapping;
        struct page *page;
        unsigned long index = lnb->offset >> PAGE_SHIFT;
        int rc;

        page = grab_cache_page(mapping, index); /* locked page */
        if (page == NULL)
                return lnb->rc = -ENOMEM;

        LASSERT(page->mapping == mapping);

        lnb->page = page;

        if (inode->i_size < lnb->offset + lnb->len - 1)
                lnb->rc = inode->i_size - lnb->offset;
        else
                lnb->rc = lnb->len;

        if (PageUptodate(page)) {
                unlock_page(page);
                return 0;
        }

        rc = mapping->a_ops->readpage(NULL, page);
        if (rc < 0) {
                CERROR("page index %lu, rc = %d\n", index, rc);
                lnb->page = NULL;
                page_cache_release(page);
                return lnb->rc = rc;
        }

        return 0;
}

static int filter_finish_page_read(struct niobuf_local *lnb)
{
        if (lnb->page == NULL)
                return 0;

        if (PageUptodate(lnb->page))
                return 0;

        wait_on_page(lnb->page);
        if (!PageUptodate(lnb->page)) {
                CERROR("page index %lu/offset "LPX64" not uptodate\n",
                       lnb->page->index, lnb->offset);
                GOTO(err_page, lnb->rc = -EIO);
        }
        if (PageError(lnb->page)) {
                CERROR("page index %lu/offset "LPX64" has error\n",
                       lnb->page->index, lnb->offset);
                GOTO(err_page, lnb->rc = -EIO);
        }

        return 0;

err_page:
        page_cache_release(lnb->page);
        lnb->page = NULL;
        return lnb->rc;
}
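
/* Typical pairing of the two helpers above, as used by filter_preprw_read()
 * below: start reads on every page first so the I/O proceeds in parallel,
 * then wait for each one.  A rough sketch only (error handling elided;
 * "lnbs" and "count" are hypothetical locals, not names used in this file):
 *
 *      for (i = 0; i < count; i++)
 *              filter_start_page_read(inode, &lnbs[i]);
 *      for (i = 0; i < count; i++)
 *              filter_finish_page_read(&lnbs[i]);
 *
 * filter_start_page_read() returns with lnb->page referenced (and readpage
 * queued if the page was not already uptodate); filter_finish_page_read()
 * waits for the read and drops the page again on error. */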

/* Grab the dirty and seen grant announcements from the incoming obdo.
 * We will later calculate the client's new grant and return it.
 * Caller must hold the osfs lock. */
static void filter_grant_incoming(struct obd_export *exp, struct obdo *oa)
{
        struct filter_export_data *fed;
        struct obd_device *obd = exp->exp_obd;
        static unsigned long last_msg;
        static int last_count;
        int mask = D_CACHE;
        ENTRY;

        LASSERT_SPIN_LOCKED(&obd->obd_osfs_lock);

        if ((oa->o_valid & (OBD_MD_FLBLOCKS|OBD_MD_FLGRANT)) !=
                                        (OBD_MD_FLBLOCKS|OBD_MD_FLGRANT)) {
                oa->o_valid &= ~OBD_MD_FLGRANT;
                EXIT;
                return;
        }

        fed = &exp->exp_filter_data;

        /* Don't print this to the console the first time it happens, since
         * it can happen legitimately on occasion, but only rarely. */
        if (time_after(jiffies, last_msg + 60 * HZ)) {
                last_count = 0;
                last_msg = jiffies;
        }
        /* only escalate to D_WARNING when last_count is 0 or a power of two */
        if ((last_count & (-last_count)) == last_count)
                mask = D_WARNING;
        last_count++;

        /* Add some margin, since there is a small race if other RPCs arrive
         * out-of-order and have already consumed some grant.  We want to
         * leave this here in case there is a large error in accounting. */
        CDEBUG(oa->o_grant > fed->fed_grant + FILTER_GRANT_CHUNK ? mask:D_CACHE,
               "%s: cli %s/%p reports grant: "LPU64" dropped: %u, local: %lu\n",
               obd->obd_name, exp->exp_client_uuid.uuid, exp, oa->o_grant,
               oa->o_dropped, fed->fed_grant);

        /* Update our accounting now so that statfs takes it into account.
         * Note that fed_dirty is only approximate and can become incorrect
         * if RPCs arrive out-of-order.  No important calculations depend
         * on fed_dirty however. */
        obd->u.filter.fo_tot_dirty += oa->o_dirty - fed->fed_dirty;
        if (fed->fed_grant < oa->o_dropped) {
                CERROR("%s: cli %s/%p reports %u dropped > fed_grant %lu\n",
                       obd->obd_name, exp->exp_client_uuid.uuid, exp,
                       oa->o_dropped, fed->fed_grant);
                oa->o_dropped = 0;
        }
        if (obd->u.filter.fo_tot_granted < oa->o_dropped) {
                CERROR("%s: cli %s/%p reports %u dropped > tot_grant "LPU64"\n",
                       obd->obd_name, exp->exp_client_uuid.uuid, exp,
                       oa->o_dropped, obd->u.filter.fo_tot_granted);
                oa->o_dropped = 0;
        }
        obd->u.filter.fo_tot_granted -= oa->o_dropped;
        fed->fed_grant -= oa->o_dropped;
        fed->fed_dirty = oa->o_dirty;
        EXIT;
}
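
/* Worked example of the accounting above (illustrative numbers only):
 * suppose fed_grant = 1 MB, fed_dirty = 256 KB, fo_tot_dirty = 4 MB, and the
 * client reports o_dirty = 512 KB and o_dropped = 128 KB.  Then:
 *
 *      fo_tot_dirty   += 512K - 256K   (now 4.25 MB)
 *      fo_tot_granted -= 128K
 *      fed_grant      -= 128K          (now 896 KB)
 *      fed_dirty       = 512K
 *
 * i.e. the server's view of this client's dirty cache and outstanding grant
 * is resynchronized from the client's announcement. */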

#define GRANT_FOR_LLOG(obd) 16

/* Figure out how much space is available between what we've granted
 * and what remains in the filesystem.  Compensate for ext3 indirect
 * block overhead when computing how much free space is left ungranted.
 *
 * Caller must hold obd_osfs_lock. */
obd_size filter_grant_space_left(struct obd_export *exp)
{
        struct obd_device *obd = exp->exp_obd;
        int blockbits = obd->u.filter.fo_sb->s_blocksize_bits;
        obd_size tot_granted = obd->u.filter.fo_tot_granted, avail, left = 0;
        int rc, statfs_done = 0;

        LASSERT_SPIN_LOCKED(&obd->obd_osfs_lock);

        if (time_before(obd->obd_osfs_age, jiffies - HZ)) {
restat:
                rc = fsfilt_statfs(obd, obd->u.filter.fo_sb, jiffies + 1);
                if (rc) /* N.B. statfs can't really fail */
                        RETURN(0);
                statfs_done = 1;
        }

        avail = obd->obd_osfs.os_bavail;
        left = avail - (avail >> (blockbits - 3)); /* (d)indirect */
        if (left > GRANT_FOR_LLOG(obd)) {
                left = (left - GRANT_FOR_LLOG(obd)) << blockbits;
        } else {
                left = 0 /* << blockbits */;
        }

        if (!statfs_done && left < 32 * FILTER_GRANT_CHUNK + tot_granted) {
                CDEBUG(D_CACHE, "fs has no space left and statfs too old\n");
                goto restat;
        }

        if (left >= tot_granted) {
                left -= tot_granted;
        } else {
                static unsigned long next;
                if (left < tot_granted - obd->u.filter.fo_tot_pending &&
                    time_after(jiffies, next)) {
                        spin_unlock(&obd->obd_osfs_lock);
                        CERROR("%s: cli %s/%p grant "LPU64" > available "
                               LPU64" and pending "LPU64"\n", obd->obd_name,
                               exp->exp_client_uuid.uuid, exp, tot_granted,
                               left, obd->u.filter.fo_tot_pending);
                        if (next == 0)
                                portals_debug_dumplog();
                        next = jiffies + 20 * HZ;
                        spin_lock(&obd->obd_osfs_lock);
                }
                left = 0;
        }

        CDEBUG(D_CACHE, "%s: cli %s/%p free: "LPU64" avail: "LPU64" grant "LPU64
               " left: "LPU64" pending: "LPU64"\n", obd->obd_name,
               exp->exp_client_uuid.uuid, exp,
               obd->obd_osfs.os_bfree << blockbits, avail << blockbits,
               tot_granted, left, obd->u.filter.fo_tot_pending);

        return left;
}
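
/* Worked example for the calculation above, assuming 4096-byte blocks
 * (blockbits = 12) and purely illustrative numbers: with os_bavail =
 * 1,000,000 blocks, the (d)indirect reserve is avail >> 9 = 1,953 blocks,
 * leaving 998,047 blocks; after the 16-block llog reserve that is
 * 998,031 blocks = 998,031 << 12 bytes, roughly 3.8 GB.  If fo_tot_granted
 * were 1 GB, about 2.8 GB would remain available for new grants. */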

/* Calculate how much grant space to allocate to this client, based on how
 * much space is currently free and how much of that is already granted.
 *
 * Caller must hold obd_osfs_lock. */
long filter_grant(struct obd_export *exp, obd_size current_grant,
                  obd_size want, obd_size fs_space_left)
{
        struct obd_device *obd = exp->exp_obd;
        struct filter_export_data *fed = &exp->exp_filter_data;
        int blockbits = obd->u.filter.fo_sb->s_blocksize_bits;
        __u64 grant = 0;

        LASSERT_SPIN_LOCKED(&obd->obd_osfs_lock);

        /* Grant some fraction of the client's requested grant space so that
         * it is not always waiting for write credits (but not all of it, to
         * avoid overgranting in the face of multiple RPCs in flight).  This
         * essentially controls the OSC_MAX_RIF for a client.
         *
         * If we do have a large disparity between what the client thinks it
         * has and what we think it has, don't grant very much and let the
         * client consume its grant first.  Either it just has lots of RPCs
         * in flight, or it was evicted and its grants will soon be used up. */
        if (current_grant < want &&
            current_grant < fed->fed_grant + FILTER_GRANT_CHUNK) {
                grant = min((want >> blockbits) / 2,
                            (fs_space_left >> blockbits) / 8);
                grant <<= blockbits;

                if (grant) {
                        if (grant > FILTER_GRANT_CHUNK)
                                grant = FILTER_GRANT_CHUNK;

                        obd->u.filter.fo_tot_granted += grant;
                        fed->fed_grant += grant;
                }
        }

        CDEBUG(D_CACHE,"%s: cli %s/%p wants: "LPU64" granting: "LPU64"\n",
               obd->obd_name, exp->exp_client_uuid.uuid, exp, want, grant);
        CDEBUG(D_CACHE,
               "%s: cli %s/%p tot cached:"LPU64" granted:"LPU64
               " num_exports: %d\n", obd->obd_name, exp->exp_client_uuid.uuid,
               exp, obd->u.filter.fo_tot_dirty,
               obd->u.filter.fo_tot_granted, obd->obd_num_exports);

        return grant;
}
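
/* Worked example for filter_grant() (illustrative numbers only, 4096-byte
 * blocks): a client with current_grant = 64 KB asks for want = 2 MB while
 * fs_space_left = 40 MB.  min(want/2, fs_space_left/8) = min(1 MB, 5 MB) =
 * 1 MB, capped at FILTER_GRANT_CHUNK if that is smaller, and fed_grant and
 * fo_tot_granted grow by the amount actually granted.  A client whose
 * current_grant already reaches fed_grant + FILTER_GRANT_CHUNK (or covers
 * its want) gets nothing until it consumes what it has. */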

static int filter_preprw_read(int cmd, struct obd_export *exp, struct obdo *oa,
                              int objcount, struct obd_ioobj *obj,
                              int niocount, struct niobuf_remote *nb,
                              struct niobuf_local *res,
                              struct obd_trans_info *oti)
{
        struct obd_device *obd = exp->exp_obd;
        struct obd_run_ctxt saved;
        struct obd_ioobj *o;
        struct niobuf_remote *rnb;
        struct niobuf_local *lnb = NULL;
        struct fsfilt_objinfo *fso;
        struct dentry *dentry;
        struct inode *inode;
        int rc = 0, i, j, tot_bytes = 0, cleanup_phase = 0;
        unsigned long now = jiffies;
        ENTRY;

        /* We are currently not supporting multi-obj BRW_READ RPCs at all.
         * When we do, this function's dentry cleanup will need to be fixed. */
        LASSERT(objcount == 1);
        LASSERT(obj->ioo_bufcnt > 0);

        if (oa && oa->o_valid & OBD_MD_FLGRANT) {
                spin_lock(&obd->obd_osfs_lock);
                filter_grant_incoming(exp, oa);

#if 0
                /* Reads do not increase grants */
                oa->o_grant = filter_grant(exp, oa->o_grant, oa->o_undirty,
                                           filter_grant_space_left(exp));
#else
                oa->o_grant = 0;
#endif
                spin_unlock(&obd->obd_osfs_lock);
        }

        OBD_ALLOC(fso, objcount * sizeof(*fso));
        if (fso == NULL)
                RETURN(-ENOMEM);

        memset(res, 0, niocount * sizeof(*res));

        push_ctxt(&saved, &exp->exp_obd->obd_ctxt, NULL);
        for (i = 0, o = obj; i < objcount; i++, o++) {
                LASSERT(o->ioo_bufcnt);

                dentry = filter_oa2dentry(obd, oa);
                if (IS_ERR(dentry))
                        GOTO(cleanup, rc = PTR_ERR(dentry));

                if (dentry->d_inode == NULL) {
                        CERROR("trying to BRW to non-existent file "LPU64"\n",
                               o->ioo_id);
                        f_dput(dentry);
                        GOTO(cleanup, rc = -ENOENT);
                }

                fso[i].fso_dentry = dentry;
                fso[i].fso_bufcnt = o->ioo_bufcnt;
        }

        if (time_after(jiffies, now + 15 * HZ))
                CERROR("slow preprw_read setup %lus\n", (jiffies - now) / HZ);
        else
                CDEBUG(D_INFO, "preprw_read setup: %lu jiffies\n",
                       (jiffies - now));

        for (i = 0, o = obj, rnb = nb, lnb = res; i < objcount; i++, o++) {
                dentry = fso[i].fso_dentry;
                inode = dentry->d_inode;

                for (j = 0; j < o->ioo_bufcnt; j++, rnb++, lnb++) {
                        lnb->dentry = dentry;
                        lnb->offset = rnb->offset;
                        lnb->len    = rnb->len;
                        lnb->flags  = rnb->flags;

                        if (inode->i_size <= rnb->offset) {
                                /* If there's no more data, abort early.
                                 * lnb->page == NULL and lnb->rc == 0, so it's
                                 * easy to detect later. */
                                break;
                        } else {
                                rc = filter_start_page_read(inode, lnb);
                        }

                        if (rc) {
                                CDEBUG(rc == -ENOSPC ? D_INODE : D_ERROR,
                                       "page err %u@"LPU64" %u/%u %p: rc %d\n",
                                       lnb->len, lnb->offset, j, o->ioo_bufcnt,
                                       dentry, rc);
                                cleanup_phase = 1;
                                GOTO(cleanup, rc);
                        }

                        tot_bytes += lnb->rc;
                        if (lnb->rc < lnb->len) {
                                /* short read, be sure to wait on it */
                                lnb++;
                                break;
                        }
                }
        }

        if (time_after(jiffies, now + 15 * HZ))
                CERROR("slow start_page_read %lus\n", (jiffies - now) / HZ);
        else
                CDEBUG(D_INFO, "start_page_read: %lu jiffies\n",
                       (jiffies - now));

        lprocfs_counter_add(obd->obd_stats, LPROC_FILTER_READ_BYTES, tot_bytes);
        while (lnb-- > res) {
                rc = filter_finish_page_read(lnb);
                if (rc) {
                        CERROR("error page %u@"LPU64" %u %p: rc %d\n", lnb->len,
                               lnb->offset, (int)(lnb - res), lnb->dentry, rc);
                        cleanup_phase = 1;
                        GOTO(cleanup, rc);
                }
        }

        if (time_after(jiffies, now + 15 * HZ))
                CERROR("slow finish_page_read %lus\n", (jiffies - now) / HZ);
        else
                CDEBUG(D_INFO, "finish_page_read: %lu jiffies\n",
                       (jiffies - now));

        filter_tally_read(&exp->exp_obd->u.filter, res, niocount);

        EXIT;

 cleanup:
        switch (cleanup_phase) {
        case 1:
                for (lnb = res; lnb < (res + niocount); lnb++) {
                        if (lnb->page)
                                page_cache_release(lnb->page);
                }
                if (res->dentry != NULL)
                        f_dput(res->dentry);
                else
                        CERROR("NULL dentry in cleanup -- tell CFS\n");
        case 0:
                OBD_FREE(fso, objcount * sizeof(*fso));
                pop_ctxt(&saved, &exp->exp_obd->obd_ctxt, NULL);
        }
        return rc;
}

/* When clients have dirtied as much space as they've been granted they
 * fall through to sync writes.  These sync writes haven't been expressed
 * in grants and need to error with ENOSPC when there isn't room in the
 * filesystem for them after grants are taken into account.  However,
 * writeback of the dirty data that was already granted space can write
 * right on through.
 *
 * Caller must hold obd_osfs_lock. */
static int filter_grant_check(struct obd_export *exp, int objcount,
                              struct fsfilt_objinfo *fso, int niocount,
                              struct niobuf_remote *rnb,
                              struct niobuf_local *lnb, obd_size *left,
                              struct inode *inode)
{
        struct filter_export_data *fed = &exp->exp_filter_data;
        int blocksize = exp->exp_obd->u.filter.fo_sb->s_blocksize;
        unsigned long used = 0, ungranted = 0, using;
        int i, rc = -ENOSPC, obj, n = 0, mask = D_CACHE;

        LASSERT_SPIN_LOCKED(&exp->exp_obd->obd_osfs_lock);

        for (obj = 0; obj < objcount; obj++) {
                for (i = 0; i < fso[obj].fso_bufcnt; i++, n++) {
                        int tmp, bytes;

                        /* FIXME: this is calculated with PAGE_SIZE on client */
                        bytes = rnb[n].len;
                        bytes += rnb[n].offset & (blocksize - 1);
                        tmp = (rnb[n].offset + rnb[n].len) & (blocksize - 1);
                        if (tmp)
                                bytes += blocksize - tmp;

                        if (rnb[n].flags & OBD_BRW_FROM_GRANT) {
                                if (fed->fed_grant < used + bytes) {
                                        CDEBUG(D_CACHE,
                                               "%s: cli %s/%p claims %ld+%d "
                                               "GRANT, real grant %lu idx %d\n",
                                               exp->exp_obd->obd_name,
                                               exp->exp_client_uuid.uuid, exp,
                                               used, bytes, fed->fed_grant, n);
                                        mask = D_ERROR;
                                } else {
                                        used += bytes;
                                        rnb[n].flags |= OBD_BRW_GRANTED;
                                        lnb[n].lnb_grant_used = bytes;
                                        CDEBUG(0, "idx %d used=%lu\n", n, used);
                                        rc = 0;
                                        continue;
                                }
                        }
                        if (*left > ungranted) {
                                /* if enough space, pretend it was granted */
                                ungranted += bytes;
                                rnb[n].flags |= OBD_BRW_GRANTED;
                                CDEBUG(0, "idx %d ungranted=%lu\n",n,ungranted);
                                rc = 0;
                                continue;
                        }

                        /* We can't check for already-mapped blocks here, as
                         * it requires dropping the osfs lock to do the bmap.
                         * Instead, we return ENOSPC and in that case we need
                         * to go through and verify if all of the blocks not
                         * marked BRW_GRANTED are already mapped and we can
                         * ignore this error. */
                        lnb[n].rc = -ENOSPC;
                        rnb[n].flags &= OBD_BRW_GRANTED;
                        CDEBUG(D_CACHE,"%s: cli %s/%p idx %d no space for %d\n",
                               exp->exp_obd->obd_name,
                               exp->exp_client_uuid.uuid, exp, n, bytes);
                }
        }

        /* Now subtract what the client has used already.  We don't subtract
         * this from tot_granted yet, so that other clients can't grab that
         * space before we have actually allocated our blocks.  That happens
         * in filter_grant_commit() after the writes are done. */
        *left -= ungranted;
        fed->fed_grant -= used;
        fed->fed_pending += used;
        exp->exp_obd->u.filter.fo_tot_pending += used;

        CDEBUG(mask,
               "%s: cli %s/%p used: %lu ungranted: %lu grant: %lu dirty: %lu\n",
               exp->exp_obd->obd_name, exp->exp_client_uuid.uuid, exp, used,
               ungranted, fed->fed_grant, fed->fed_dirty);

        /* Rough calc in case we don't refresh cached statfs data */
        using = (used + ungranted + 1) >>
                exp->exp_obd->u.filter.fo_sb->s_blocksize_bits;
        if (exp->exp_obd->obd_osfs.os_bavail > using)
                exp->exp_obd->obd_osfs.os_bavail -= using;
        else
                exp->exp_obd->obd_osfs.os_bavail = 0;

        if (fed->fed_dirty < used) {
                CERROR("%s: cli %s/%p claims used %lu > fed_dirty %lu\n",
                       exp->exp_obd->obd_name, exp->exp_client_uuid.uuid, exp,
                       used, fed->fed_dirty);
                used = fed->fed_dirty;
        }
        exp->exp_obd->u.filter.fo_tot_dirty -= used;
        fed->fed_dirty -= used;

        return rc;
}
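
/* Example of the per-buffer rounding in filter_grant_check() (illustrative,
 * blocksize = 4096): an rnb with offset = 6144 and len = 2048 starts 2048
 * bytes into a block and ends on a block boundary, so
 *
 *      bytes = 2048 + (6144 & 4095) = 4096
 *
 * i.e. the full filesystem block is charged against the client's grant (or
 * against ungranted space), even though only half of it is being written. */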

static int filter_start_page_write(struct inode *inode,
                                   struct niobuf_local *lnb)
{
        struct page *page = alloc_pages(GFP_HIGHUSER, 0);
        if (page == NULL) {
                CERROR("no memory for a temp page\n");
                RETURN(lnb->rc = -ENOMEM);
        }
        POISON_PAGE(page, 0xf1);
        if (lnb->len != PAGE_SIZE) {
                memset(kmap(page) + lnb->len, 0, PAGE_SIZE - lnb->len);
                kunmap(page);
        }
        page->index = lnb->offset >> PAGE_SHIFT;
        lnb->page = page;

        return 0;
}

/* If we ever start to support multi-object BRW RPCs, we will need to get locks
 * on multiple inodes.  That isn't all, because there still exists the
 * possibility of a truncate starting a new transaction while holding the ext3
 * rwsem for write, while some writes (which have started their transactions
 * here) block on the ext3 rwsem for read => lock inversion.
 *
 * The handling gets very ugly when dealing with locked pages.  It may be
 * easier to just get rid of the locked page code (which has problems of its
 * own) and either discover we do not need it anymore (i.e. it was a symptom
 * of another bug) or ensure we get the page locks in an appropriate order. */
static int filter_preprw_write(int cmd, struct obd_export *exp, struct obdo *oa,
                               int objcount, struct obd_ioobj *obj,
                               int niocount, struct niobuf_remote *nb,
                               struct niobuf_local *res,
                               struct obd_trans_info *oti)
{
        struct obd_run_ctxt saved;
        struct niobuf_remote *rnb;
        struct niobuf_local *lnb;
        struct fsfilt_objinfo fso;
        struct dentry *dentry;
        obd_size left;
        unsigned long now = jiffies;
        int rc = 0, i, tot_bytes = 0, cleanup_phase = 1;
        ENTRY;
        LASSERT(objcount == 1);
        LASSERT(obj->ioo_bufcnt > 0);

        memset(res, 0, niocount * sizeof(*res));

        push_ctxt(&saved, &exp->exp_obd->obd_ctxt, NULL);
        dentry = filter_fid2dentry(exp->exp_obd, NULL, obj->ioo_gr,
                                   obj->ioo_id);
        if (IS_ERR(dentry))
                GOTO(cleanup, rc = PTR_ERR(dentry));

        if (dentry->d_inode == NULL) {
                CERROR("trying to BRW to non-existent file "LPU64"\n",
                       obj->ioo_id);
                f_dput(dentry);
                GOTO(cleanup, rc = -ENOENT);
        }

        fso.fso_dentry = dentry;
        fso.fso_bufcnt = obj->ioo_bufcnt;

        if (time_after(jiffies, now + 15 * HZ))
                CERROR("slow preprw_write setup %lus\n", (jiffies - now) / HZ);
        else
                CDEBUG(D_INFO, "preprw_write setup: %lu jiffies\n",
                       (jiffies - now));

        spin_lock(&exp->exp_obd->obd_osfs_lock);
        if (oa)
                filter_grant_incoming(exp, oa);
        cleanup_phase = 0;

        left = filter_grant_space_left(exp);

        rc = filter_grant_check(exp, objcount, &fso, niocount, nb, res,
                                &left, dentry->d_inode);
        if (oa && oa->o_valid & OBD_MD_FLGRANT)
                oa->o_grant = filter_grant(exp,oa->o_grant,oa->o_undirty,left);

        spin_unlock(&exp->exp_obd->obd_osfs_lock);

        if (rc) {
                f_dput(dentry);
                GOTO(cleanup, rc);
        }

        for (i = 0, rnb = nb, lnb = res; i < obj->ioo_bufcnt;
             i++, lnb++, rnb++) {
                /* We still set up for ungranted pages so that granted pages
                 * can be written to disk as they were promised, and portals
                 * needs to keep the pages all aligned properly. */
                lnb->dentry = dentry;
                lnb->offset = rnb->offset;
                lnb->len    = rnb->len;
                lnb->flags  = rnb->flags;

                rc = filter_start_page_write(dentry->d_inode, lnb);
                if (rc) {
                        CERROR("page err %u@"LPU64" %u/%u %p: rc %d\n",
                               lnb->len, lnb->offset,
                               i, obj->ioo_bufcnt, dentry, rc);
                        while (lnb-- > res)
                                __free_pages(lnb->page, 0);
                        f_dput(dentry);
                        GOTO(cleanup, rc);
                }
                if (lnb->rc == 0)
                        tot_bytes += lnb->len;
        }

        if (time_after(jiffies, now + 15 * HZ))
                CERROR("slow start_page_write %lus\n", (jiffies - now) / HZ);
        else
                CDEBUG(D_INFO, "start_page_write: %lu jiffies\n",
                       (jiffies - now));

        lprocfs_counter_add(exp->exp_obd->obd_stats, LPROC_FILTER_WRITE_BYTES,
                            tot_bytes);
        EXIT;
cleanup:
        switch(cleanup_phase) {
        case 1:
                spin_lock(&exp->exp_obd->obd_osfs_lock);
                if (oa)
                        filter_grant_incoming(exp, oa);
                spin_unlock(&exp->exp_obd->obd_osfs_lock);
        default: ;
        }
        pop_ctxt(&saved, &exp->exp_obd->obd_ctxt, NULL);
        return rc;
}

int filter_preprw(int cmd, struct obd_export *exp, struct obdo *oa,
                  int objcount, struct obd_ioobj *obj, int niocount,
                  struct niobuf_remote *nb, struct niobuf_local *res,
                  struct obd_trans_info *oti)
{
        if (cmd == OBD_BRW_WRITE)
                return filter_preprw_write(cmd, exp, oa, objcount, obj,
                                           niocount, nb, res, oti);

        if (cmd == OBD_BRW_READ)
                return filter_preprw_read(cmd, exp, oa, objcount, obj,
                                          niocount, nb, res, oti);

        LBUG();
        return -EPROTO;
}

static int filter_commitrw_read(struct obd_export *exp, struct obdo *oa,
                                int objcount, struct obd_ioobj *obj,
                                int niocount, struct niobuf_local *res,
                                struct obd_trans_info *oti, int rc)
{
        struct obd_ioobj *o;
        struct niobuf_local *lnb;
        int i, j, drop = 0;
        ENTRY;

        if (res->dentry != NULL)
                drop = (res->dentry->d_inode->i_size >
                        exp->exp_obd->u.filter.fo_readcache_max_filesize);

        for (i = 0, o = obj, lnb = res; i < objcount; i++, o++) {
                for (j = 0 ; j < o->ioo_bufcnt ; j++, lnb++) {
                        if (lnb->page == NULL)
                                continue;
                        /* drop from cache like truncate_list_pages() */
                        if (drop && !TryLockPage(lnb->page)) {
                                if (lnb->page->mapping)
                                        ll_truncate_complete_page(lnb->page);
                                unlock_page(lnb->page);
                        }
                        page_cache_release(lnb->page);
                }
        }

        if (res->dentry != NULL)
                f_dput(res->dentry);
        RETURN(rc);
}

void flip_into_page_cache(struct inode *inode, struct page *new_page)
{
        struct page *old_page;
        int rc;

        do {
                /* the dlm is protecting us from read/write concurrency, so we
                 * expect this find_lock_page to return quickly.  even if we
                 * race with another writer it won't be doing much work with
                 * the page locked.  we do this 'cause t_c_p expects a
                 * locked page, and it wants to grab the pagecache lock
                 * as well. */
                old_page = find_lock_page(inode->i_mapping, new_page->index);
                if (old_page) {
                        ll_truncate_complete_page(old_page);
                        unlock_page(old_page);
                        page_cache_release(old_page);
                }

#if 0 /* this should be a /proc tunable someday */
                /* racing o_directs (no locking ioctl) could race adding
                 * their pages, so we repeat the page invalidation unless
                 * we successfully added our new page */
                rc = add_to_page_cache_unique(new_page, inode->i_mapping,
                                              new_page->index,
                                              page_hash(inode->i_mapping,
                                                        new_page->index));
                if (rc == 0) {
                        /* add_to_page_cache clears uptodate|dirty and locks
                         * the page */
                        SetPageUptodate(new_page);
                        unlock_page(new_page);
                }
#else
                rc = 0;
#endif
        } while (rc != 0);
}

void filter_grant_commit(struct obd_export *exp, int niocount,
                         struct niobuf_local *res)
{
        struct filter_obd *filter = &exp->exp_obd->u.filter;
        struct niobuf_local *lnb = res;
        unsigned long pending = 0;
        int i;

        spin_lock(&exp->exp_obd->obd_osfs_lock);
        for (i = 0, lnb = res; i < niocount; i++, lnb++)
                pending += lnb->lnb_grant_used;

        LASSERTF(exp->exp_filter_data.fed_pending >= pending,
                 "%s: cli %s/%p fed_pending: %lu grant_used: %lu\n",
                 exp->exp_obd->obd_name, exp->exp_client_uuid.uuid, exp,
                 exp->exp_filter_data.fed_pending, pending);
        exp->exp_filter_data.fed_pending -= pending;
        LASSERTF(filter->fo_tot_granted >= pending,
                 "%s: cli %s/%p tot_granted: "LPU64" grant_used: %lu\n",
                 exp->exp_obd->obd_name, exp->exp_client_uuid.uuid, exp,
                 exp->exp_obd->u.filter.fo_tot_granted, pending);
        filter->fo_tot_granted -= pending;
        LASSERTF(filter->fo_tot_pending >= pending,
                 "%s: cli %s/%p tot_pending: "LPU64" grant_used: %lu\n",
                 exp->exp_obd->obd_name, exp->exp_client_uuid.uuid, exp,
                 filter->fo_tot_pending, pending);
        filter->fo_tot_pending -= pending;

        spin_unlock(&exp->exp_obd->obd_osfs_lock);
}
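
/* Lifecycle of the "pending" accounting (illustrative): for a write that
 * consumed 64 KB of grant, filter_grant_check() moved the 64 KB from
 * fed_grant to fed_pending/fo_tot_pending at preprw time; once the blocks
 * are actually allocated and written, filter_grant_commit() above drops the
 * same 64 KB from fed_pending, fo_tot_granted and fo_tot_pending, so the
 * space finally shows up as used in statfs rather than as granted. */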

int filter_commitrw(int cmd, struct obd_export *exp, struct obdo *oa,
                    int objcount, struct obd_ioobj *obj, int niocount,
                    struct niobuf_local *res, struct obd_trans_info *oti,int rc)
{
        if (cmd == OBD_BRW_WRITE)
                return filter_commitrw_write(exp, oa, objcount, obj, niocount,
                                             res, oti, rc);
        if (cmd == OBD_BRW_READ)
                return filter_commitrw_read(exp, oa, objcount, obj, niocount,
                                            res, oti, rc);
        LBUG();
        return -EPROTO;
}

int filter_brw(int cmd, struct obd_export *exp, struct obdo *oa,
               struct lov_stripe_md *lsm, obd_count oa_bufs,
               struct brw_page *pga, struct obd_trans_info *oti)
{
        struct obd_ioobj ioo;
        struct niobuf_local *lnb;
        struct niobuf_remote *rnb;
        obd_count i;
        int ret = 0;
        ENTRY;

        OBD_ALLOC(lnb, oa_bufs * sizeof(struct niobuf_local));
        OBD_ALLOC(rnb, oa_bufs * sizeof(struct niobuf_remote));

        if (lnb == NULL || rnb == NULL)
                GOTO(out, ret = -ENOMEM);

        for (i = 0; i < oa_bufs; i++) {
                rnb[i].offset = pga[i].off;
                rnb[i].len = pga[i].count;
        }

        obdo_to_ioobj(oa, &ioo);
        ioo.ioo_bufcnt = oa_bufs;

        ret = filter_preprw(cmd, exp, oa, 1, &ioo, oa_bufs, rnb, lnb, oti);
        if (ret != 0)
                GOTO(out, ret);

        for (i = 0; i < oa_bufs; i++) {
                void *virt = kmap(pga[i].pg);
                obd_off off = pga[i].off & ~PAGE_MASK;
                void *addr = kmap(lnb[i].page);

                /* 2 kmaps == vanishingly small deadlock opportunity */

                if (cmd & OBD_BRW_WRITE)
                        memcpy(addr + off, virt + off, pga[i].count);
                else
                        memcpy(virt + off, addr + off, pga[i].count);

                kunmap(lnb[i].page);
                kunmap(pga[i].pg);
        }

        ret = filter_commitrw(cmd, exp, oa, 1, &ioo, oa_bufs, lnb, oti, ret);

out:
        if (lnb)
                OBD_FREE(lnb, oa_bufs * sizeof(struct niobuf_local));
        if (rnb)
                OBD_FREE(rnb, oa_bufs * sizeof(struct niobuf_remote));
        RETURN(ret);
}