/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 *  linux/fs/obdfilter/filter_io.c
 *
 *  Copyright (c) 2001-2003 Cluster File Systems, Inc.
 *   Author: Peter Braam <braam@clusterfs.com>
 *   Author: Andreas Dilger <adilger@clusterfs.com>
 *   Author: Phil Schwan <phil@clusterfs.com>
 *
 *   This file is part of Lustre, http://www.lustre.org.
 *
 *   Lustre is free software; you can redistribute it and/or
 *   modify it under the terms of version 2 of the GNU General Public
 *   License as published by the Free Software Foundation.
 *
 *   Lustre is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with Lustre; if not, write to the Free Software
 *   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#define DEBUG_SUBSYSTEM S_FILTER

#include <linux/config.h>
#include <linux/module.h>
#include <linux/pagemap.h> // XXX kill me soon
#include <linux/version.h>

#include <linux/obd_class.h>
#include <linux/lustre_fsfilt.h>
#include "filter_internal.h"

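/*
 * filter_start_page_read() locks a page-cache page and kicks off
 * ->readpage() without waiting for the I/O; filter_finish_page_read()
 * is called afterwards to wait for the page and check that it came
 * uptodate.  Splitting the two lets filter_preprw_read() start disk
 * reads for every buffer in the RPC before blocking on any of them.
 */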
static int filter_start_page_read(struct inode *inode, struct niobuf_local *lnb)
{
        struct address_space *mapping = inode->i_mapping;
        struct page *page;
        unsigned long index = lnb->offset >> PAGE_SHIFT;
        int rc;

        page = grab_cache_page(mapping, index); /* locked page */
        if (page == NULL)
                return lnb->rc = -ENOMEM;

        LASSERT(page->mapping == mapping);

        lnb->page = page;

        if (inode->i_size < lnb->offset + lnb->len - 1)
                lnb->rc = inode->i_size - lnb->offset;
        else
                lnb->rc = lnb->len;

        if (PageUptodate(page)) {
                unlock_page(page);
                return 0;
        }

        rc = mapping->a_ops->readpage(NULL, page);
        if (rc < 0) {
                CERROR("page index %lu, rc = %d\n", index, rc);
                lnb->page = NULL;
                page_cache_release(page);
                return lnb->rc = rc;
        }

        return 0;
}

static int filter_finish_page_read(struct niobuf_local *lnb)
{
        if (lnb->page == NULL)
                return 0;

        if (PageUptodate(lnb->page))
                return 0;

        wait_on_page(lnb->page);
        if (!PageUptodate(lnb->page)) {
                CERROR("page index %lu/offset "LPX64" not uptodate\n",
                       lnb->page->index, lnb->offset);
                GOTO(err_page, lnb->rc = -EIO);
        }
        if (PageError(lnb->page)) {
                CERROR("page index %lu/offset "LPX64" has error\n",
                       lnb->page->index, lnb->offset);
                GOTO(err_page, lnb->rc = -EIO);
        }

        return 0;

err_page:
        page_cache_release(lnb->page);
        lnb->page = NULL;
        return lnb->rc;
}

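/*
 * Grant bookkeeping on the incoming obdo: the client reports how much it
 * currently has dirty (o_dirty), how much grant it believes it holds
 * (o_grant) and how much grant it has dropped (o_dropped).  The server
 * resynchronizes its per-export view (fed_dirty, fed_grant) and the
 * filter-wide totals (fo_tot_dirty, fo_tot_granted) from those fields.
 */
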
/* Grab the dirty and seen grant announcements from the incoming obdo.
 * We will later calculate the client's new grant and return it.
 * Caller must hold the osfs lock. */
static void filter_grant_incoming(struct obd_export *exp, struct obdo *oa)
{
        struct filter_export_data *fed;
        struct obd_device *obd = exp->exp_obd;
        ENTRY;

        LASSERT_SPIN_LOCKED(&obd->obd_osfs_lock);

        if ((oa->o_valid & (OBD_MD_FLBLOCKS|OBD_MD_FLGRANT)) !=
                                        (OBD_MD_FLBLOCKS|OBD_MD_FLGRANT)) {
                oa->o_valid &= ~OBD_MD_FLGRANT;
                EXIT;
                return;
        }

        fed = &exp->exp_filter_data;

        /* Add some margin, since there is a small race if other RPCs arrive
         * out-of-order and have already consumed some grant.  We want to
         * leave this here in case there is a large error in accounting. */
        CDEBUG(oa->o_grant > fed->fed_grant + FILTER_GRANT_CHUNK ?
               D_WARNING : D_CACHE,
               "%s: cli %s/%p reports grant: "LPU64" dropped: %u, local: %lu\n",
               obd->obd_name, exp->exp_client_uuid.uuid, exp, oa->o_grant,
               oa->o_dropped, fed->fed_grant);

        /* Update our accounting now so that statfs takes it into account.
         * Note that fed_dirty is only approximate and can become incorrect
         * if RPCs arrive out-of-order.  No important calculations depend
         * on fed_dirty, however. */
        obd->u.filter.fo_tot_dirty += oa->o_dirty - fed->fed_dirty;
        if (fed->fed_grant < oa->o_dropped) {
                CERROR("%s: cli %s/%p reports %u dropped > fed_grant %lu\n",
                       obd->obd_name, exp->exp_client_uuid.uuid, exp,
                       oa->o_dropped, fed->fed_grant);
                oa->o_dropped = 0;
        }
        if (obd->u.filter.fo_tot_granted < oa->o_dropped) {
                CERROR("%s: cli %s/%p reports %u dropped > tot_grant "LPU64"\n",
                       obd->obd_name, exp->exp_client_uuid.uuid, exp,
                       oa->o_dropped, obd->u.filter.fo_tot_granted);
                oa->o_dropped = 0;
        }
        obd->u.filter.fo_tot_granted -= oa->o_dropped;
        fed->fed_grant -= oa->o_dropped;
        fed->fed_dirty = oa->o_dirty;
        EXIT;
}

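/*
 * Worked example for filter_grant_space_left() below, assuming a 4096-byte
 * blocksize (blockbits = 12): with os_bavail = 1000000 blocks, the code
 * reserves avail >> (12 - 3) = avail / 512 = 1953 blocks for (d)indirect
 * block overhead and another GRANT_FOR_LLOG = 16 blocks, leaving
 * (1000000 - 1953 - 16) << 12 bytes.  Whatever has already been granted
 * (fo_tot_granted) is then subtracted from that figure.
 */
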
#define GRANT_FOR_LLOG(obd) 16

/* Figure out how much space is available between what we've granted
 * and what remains in the filesystem.  Compensate for ext3 indirect
 * block overhead when computing how much free space is left ungranted.
 *
 * Caller must hold obd_osfs_lock. */
obd_size filter_grant_space_left(struct obd_export *exp)
{
        struct obd_device *obd = exp->exp_obd;
        int blockbits = obd->u.filter.fo_sb->s_blocksize_bits;
        obd_size tot_granted = obd->u.filter.fo_tot_granted, avail, left = 0;
        int rc, statfs_done = 0;

        LASSERT_SPIN_LOCKED(&obd->obd_osfs_lock);

        if (time_before(obd->obd_osfs_age, jiffies - HZ)) {
restat:
                rc = fsfilt_statfs(obd, obd->u.filter.fo_sb, jiffies + 1);
                if (rc) /* N.B. statfs can't really fail */
                        RETURN(0);
                statfs_done = 1;
        }

        avail = obd->obd_osfs.os_bavail;
        left = avail - (avail >> (blockbits - 3)); /* (d)indirect */
        if (left > GRANT_FOR_LLOG(obd)) {
                left = (left - GRANT_FOR_LLOG(obd)) << blockbits;
        } else {
                left = 0 /* << blockbits */;
        }

        if (!statfs_done && left < 32 * FILTER_GRANT_CHUNK + tot_granted) {
                CDEBUG(D_CACHE, "fs has no space left and statfs too old\n");
                goto restat;
        }

        if (left >= tot_granted) {
                left -= tot_granted;
        } else {
                static unsigned long next;
                if (left < tot_granted - obd->u.filter.fo_tot_pending &&
                    time_after(jiffies, next)) {
                        spin_unlock(&obd->obd_osfs_lock);
                        CERROR("%s: cli %s/%p grant "LPU64" > available "
                               LPU64" and pending "LPU64"\n", obd->obd_name,
                               exp->exp_client_uuid.uuid, exp, tot_granted,
                               left, obd->u.filter.fo_tot_pending);
                        if (next == 0)
                                portals_debug_dumplog();
                        next = jiffies + 20 * HZ;
                        spin_lock(&obd->obd_osfs_lock);
                }
                left = 0;
        }

        CDEBUG(D_CACHE, "%s: cli %s/%p free: "LPU64" avail: "LPU64" grant "LPU64
               " left: "LPU64" pending: "LPU64"\n", obd->obd_name,
               exp->exp_client_uuid.uuid, exp,
               obd->obd_osfs.os_bfree << blockbits, avail << blockbits,
               tot_granted, left, obd->u.filter.fo_tot_pending);

        return left;
}

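/*
 * Worked example for filter_grant() below, again assuming 4096-byte blocks:
 * if the client wants 1 MB more grant and the filesystem has 100 MB left
 * ungranted, the code grants min(1 MB / 2, 100 MB / 8) rounded down to whole
 * blocks, i.e. 512 KB, and then caps the result at FILTER_GRANT_CHUNK so a
 * single RPC can never walk away with an outsized share of the free space.
 */
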
/* Calculate how much grant space to allocate to this client, based on how
 * much space is currently free and how much of that is already granted.
 *
 * Caller must hold obd_osfs_lock. */
long filter_grant(struct obd_export *exp, obd_size current_grant,
                  obd_size want, obd_size fs_space_left)
{
        struct obd_device *obd = exp->exp_obd;
        struct filter_export_data *fed = &exp->exp_filter_data;
        int blockbits = obd->u.filter.fo_sb->s_blocksize_bits;
        __u64 grant = 0;

        LASSERT_SPIN_LOCKED(&obd->obd_osfs_lock);

        /* Grant some fraction of the client's requested grant space so that
         * it is not always waiting for write credits (but not all of it, to
         * avoid overgranting in the face of multiple RPCs in flight).  This
         * effectively controls the OSC_MAX_RIF for a client.
         *
         * If we do have a large disparity between what the client thinks it
         * has and what we think it has, don't grant very much and let the
         * client consume its grant first.  Either it just has lots of RPCs
         * in flight, or it was evicted and its grants will soon be used up. */
        if (current_grant < want) {
                if (current_grant > fed->fed_grant + FILTER_GRANT_CHUNK)
                        want = 65536;
                grant = min((want >> blockbits) / 2,
                            (fs_space_left >> blockbits) / 8);
                grant <<= blockbits;

                if (grant) {
                        if (grant > FILTER_GRANT_CHUNK)
                                grant = FILTER_GRANT_CHUNK;

                        obd->u.filter.fo_tot_granted += grant;
                        fed->fed_grant += grant;
                }
        }

        CDEBUG(D_CACHE,"%s: cli %s/%p wants: "LPU64" granting: "LPU64"\n",
               obd->obd_name, exp->exp_client_uuid.uuid, exp, want, grant);
        CDEBUG(D_CACHE,
               "%s: cli %s/%p tot cached:"LPU64" granted:"LPU64
               " num_exports: %d\n", obd->obd_name, exp->exp_client_uuid.uuid,
               exp, obd->u.filter.fo_tot_dirty,
               obd->u.filter.fo_tot_granted, obd->obd_num_exports);

        return grant;
}

static int filter_preprw_read(int cmd, struct obd_export *exp, struct obdo *oa,
                              int objcount, struct obd_ioobj *obj,
                              int niocount, struct niobuf_remote *nb,
                              struct niobuf_local *res,
                              struct obd_trans_info *oti)
{
        struct obd_device *obd = exp->exp_obd;
        struct obd_run_ctxt saved;
        struct obd_ioobj *o;
        struct niobuf_remote *rnb;
        struct niobuf_local *lnb = NULL;
        struct fsfilt_objinfo *fso;
        struct dentry *dentry;
        struct inode *inode;
        int rc = 0, i, j, tot_bytes = 0, cleanup_phase = 0;
        unsigned long now = jiffies;
        ENTRY;

        /* We do not currently support multi-object BRW_READ RPCs at all.
         * When we do, this function's dentry cleanup will need to be fixed. */
        LASSERT(objcount == 1);
        LASSERT(obj->ioo_bufcnt > 0);

        if (oa && oa->o_valid & OBD_MD_FLGRANT) {
                spin_lock(&obd->obd_osfs_lock);
                filter_grant_incoming(exp, oa);

#if 0
                /* Reads do not increase grants */
                oa->o_grant = filter_grant(exp, oa->o_grant, oa->o_undirty,
                                           filter_grant_space_left(exp));
#else
                oa->o_grant = 0;
#endif
                spin_unlock(&obd->obd_osfs_lock);
        }

        OBD_ALLOC(fso, objcount * sizeof(*fso));
        if (fso == NULL)
                RETURN(-ENOMEM);

        memset(res, 0, niocount * sizeof(*res));

        push_ctxt(&saved, &exp->exp_obd->obd_ctxt, NULL);
        for (i = 0, o = obj; i < objcount; i++, o++) {
                LASSERT(o->ioo_bufcnt);

                dentry = filter_oa2dentry(obd, oa);
                if (IS_ERR(dentry))
                        GOTO(cleanup, rc = PTR_ERR(dentry));

                if (dentry->d_inode == NULL) {
                        CERROR("trying to BRW to non-existent file "LPU64"\n",
                               o->ioo_id);
                        f_dput(dentry);
                        GOTO(cleanup, rc = -ENOENT);
                }

                fso[i].fso_dentry = dentry;
                fso[i].fso_bufcnt = o->ioo_bufcnt;
        }

        if (time_after(jiffies, now + 15 * HZ))
                CERROR("slow preprw_read setup %lus\n", (jiffies - now) / HZ);
        else
                CDEBUG(D_INFO, "preprw_read setup: %lu jiffies\n",
                       (jiffies - now));

        for (i = 0, o = obj, rnb = nb, lnb = res; i < objcount; i++, o++) {
                dentry = fso[i].fso_dentry;
                inode = dentry->d_inode;

                for (j = 0; j < o->ioo_bufcnt; j++, rnb++, lnb++) {
                        lnb->dentry = dentry;
                        lnb->offset = rnb->offset;
                        lnb->len    = rnb->len;
                        lnb->flags  = rnb->flags;

                        if (inode->i_size <= rnb->offset) {
                                /* If there's no more data, abort early.
                                 * lnb->page == NULL and lnb->rc == 0, so it's
                                 * easy to detect later. */
                                break;
                        } else {
                                rc = filter_start_page_read(inode, lnb);
                        }

                        if (rc) {
                                CDEBUG(rc == -ENOSPC ? D_INODE : D_ERROR,
                                       "page err %u@"LPU64" %u/%u %p: rc %d\n",
                                       lnb->len, lnb->offset, j, o->ioo_bufcnt,
                                       dentry, rc);
                                cleanup_phase = 1;
                                GOTO(cleanup, rc);
                        }

                        tot_bytes += lnb->rc;
                        if (lnb->rc < lnb->len) {
                                /* short read, be sure to wait on it */
                                lnb++;
                                break;
                        }
                }
        }

        if (time_after(jiffies, now + 15 * HZ))
                CERROR("slow start_page_read %lus\n", (jiffies - now) / HZ);
        else
                CDEBUG(D_INFO, "start_page_read: %lu jiffies\n",
                       (jiffies - now));

        lprocfs_counter_add(obd->obd_stats, LPROC_FILTER_READ_BYTES, tot_bytes);
        while (lnb-- > res) {
                rc = filter_finish_page_read(lnb);
                if (rc) {
                        CERROR("error page %u@"LPU64" %u %p: rc %d\n", lnb->len,
                               lnb->offset, (int)(lnb - res), lnb->dentry, rc);
                        cleanup_phase = 1;
                        GOTO(cleanup, rc);
                }
        }

        if (time_after(jiffies, now + 15 * HZ))
                CERROR("slow finish_page_read %lus\n", (jiffies - now) / HZ);
        else
                CDEBUG(D_INFO, "finish_page_read: %lu jiffies\n",
                       (jiffies - now));

        filter_tally_read(&exp->exp_obd->u.filter, res, niocount);

        EXIT;

 cleanup:
        switch (cleanup_phase) {
        case 1:
                for (lnb = res; lnb < (res + niocount); lnb++) {
                        if (lnb->page)
                                page_cache_release(lnb->page);
                }
                if (res->dentry != NULL)
                        f_dput(res->dentry);
                else
                        CERROR("NULL dentry in cleanup -- tell CFS\n");
        case 0:
                OBD_FREE(fso, objcount * sizeof(*fso));
                pop_ctxt(&saved, &exp->exp_obd->obd_ctxt, NULL);
        }
        return rc;
}

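/*
 * Block-rounding example for filter_grant_check() below, assuming a
 * 4096-byte blocksize: a buffer with offset 1000 and len 3000 starts 1000
 * bytes into a block and ends 96 bytes short of the next block boundary,
 * so it is charged bytes = 3000 + 1000 + 96 = 4096, i.e. one full block of
 * grant, since the filesystem allocates whole blocks regardless of how much
 * of each block the client actually wrote.
 */
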
/* When clients have dirtied as much space as they've been granted they
 * fall through to sync writes.  These sync writes haven't been expressed
 * in grants and need to error with ENOSPC when there isn't room in the
 * filesystem for them after grants are taken into account.  However,
 * writeback of the dirty data that was already granted space can write
 * right on through.
 *
 * Caller must hold obd_osfs_lock. */
static int filter_grant_check(struct obd_export *exp, int objcount,
                              struct fsfilt_objinfo *fso, int niocount,
                              struct niobuf_remote *rnb,
                              struct niobuf_local *lnb, obd_size *left,
                              struct inode *inode)
{
        struct filter_export_data *fed = &exp->exp_filter_data;
        int blocksize = exp->exp_obd->u.filter.fo_sb->s_blocksize;
        unsigned long used = 0, ungranted = 0, using;
        int i, rc = -ENOSPC, obj, n = 0, mask = D_CACHE;

        LASSERT_SPIN_LOCKED(&exp->exp_obd->obd_osfs_lock);

        for (obj = 0; obj < objcount; obj++) {
                for (i = 0; i < fso[obj].fso_bufcnt; i++, n++) {
                        int tmp, bytes;

                        /* FIXME: this is calculated with PAGE_SIZE on client */
                        bytes = rnb[n].len;
                        bytes += rnb[n].offset & (blocksize - 1);
                        tmp = (rnb[n].offset + rnb[n].len) & (blocksize - 1);
                        if (tmp)
                                bytes += blocksize - tmp;

                        if (rnb[n].flags & OBD_BRW_FROM_GRANT) {
                                if (fed->fed_grant < used + bytes) {
                                        CDEBUG(D_CACHE,
                                               "%s: cli %s/%p claims %ld+%d "
                                               "GRANT, real grant %lu idx %d\n",
                                               exp->exp_obd->obd_name,
                                               exp->exp_client_uuid.uuid, exp,
                                               used, bytes, fed->fed_grant, n);
                                        mask = D_ERROR;
                                } else {
                                        used += bytes;
                                        rnb[n].flags |= OBD_BRW_GRANTED;
                                        lnb[n].lnb_grant_used = bytes;
                                        CDEBUG(0, "idx %d used=%lu\n", n, used);
                                        rc = 0;
                                        continue;
                                }
                        }
                        if (*left > ungranted) {
                                /* if enough space, pretend it was granted */
                                ungranted += bytes;
                                rnb[n].flags |= OBD_BRW_GRANTED;
                                CDEBUG(0, "idx %d ungranted=%lu\n",n,ungranted);
                                rc = 0;
                                continue;
                        }

                        /* We can't check for already-mapped blocks here, as
                         * it requires dropping the osfs lock to do the bmap.
                         * Instead, we return ENOSPC and in that case we need
                         * to go through and verify if all of the blocks not
                         * marked BRW_GRANTED are already mapped and we can
                         * ignore this error. */
                        lnb[n].rc = -ENOSPC;
                        rnb[n].flags &= OBD_BRW_GRANTED;
                        CDEBUG(D_CACHE,"%s: cli %s/%p idx %d no space for %d\n",
                               exp->exp_obd->obd_name,
                               exp->exp_client_uuid.uuid, exp, n, bytes);
                }
        }

        /* Now subtract what the client has used already.  We don't subtract
         * this from tot_granted yet, so that other clients can't grab that
         * space before we have actually allocated our blocks.  That happens
         * in filter_grant_commit() after the writes are done. */
        *left -= ungranted;
        fed->fed_grant -= used;
        fed->fed_pending += used;
        exp->exp_obd->u.filter.fo_tot_pending += used;

        CDEBUG(mask,
               "%s: cli %s/%p used: %lu ungranted: %lu grant: %lu dirty: %lu\n",
               exp->exp_obd->obd_name, exp->exp_client_uuid.uuid, exp, used,
               ungranted, fed->fed_grant, fed->fed_dirty);

        /* Rough calc in case we don't refresh cached statfs data */
        using = (used + ungranted + 1) >>
                exp->exp_obd->u.filter.fo_sb->s_blocksize_bits;
        if (exp->exp_obd->obd_osfs.os_bavail > using)
                exp->exp_obd->obd_osfs.os_bavail -= using;
        else
                exp->exp_obd->obd_osfs.os_bavail = 0;

        if (fed->fed_dirty < used) {
                CERROR("%s: cli %s/%p claims used %lu > fed_dirty %lu\n",
                       exp->exp_obd->obd_name, exp->exp_client_uuid.uuid, exp,
                       used, fed->fed_dirty);
                used = fed->fed_dirty;
        }
        exp->exp_obd->u.filter.fo_tot_dirty -= used;
        fed->fed_dirty -= used;

        return rc;
}

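/*
 * filter_start_page_write() allocates a private, unmapped page for each
 * niobuf rather than using the page cache; the bulk transfer (or a local
 * caller such as filter_brw()) later fills it with the client's data.  If
 * the buffer is shorter than a full page, the remainder of the page is
 * zeroed here so uninitialized memory is never written out to disk.
 */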
static int filter_start_page_write(struct inode *inode,
                                   struct niobuf_local *lnb)
{
        struct page *page = alloc_pages(GFP_HIGHUSER, 0);
        if (page == NULL) {
                CERROR("no memory for a temp page\n");
                RETURN(lnb->rc = -ENOMEM);
        }
        POISON_PAGE(page, 0xf1);
        if (lnb->len != PAGE_SIZE) {
                memset(kmap(page) + lnb->len, 0, PAGE_SIZE - lnb->len);
                kunmap(page);
        }
        page->index = lnb->offset >> PAGE_SHIFT;
        lnb->page = page;

        return 0;
}

/* If we ever start to support multi-object BRW RPCs, we will need to get locks
 * on multiple inodes.  That isn't all, because there still exists the
 * possibility of a truncate starting a new transaction while holding the ext3
 * rwsem for write, while some writes (which have started their transactions
 * here) block on the ext3 rwsem for read => lock inversion.
 *
 * The handling gets very ugly when dealing with locked pages.  It may be easier
 * to just get rid of the locked page code (which has problems of its own) and
 * either discover we do not need it anymore (i.e. it was a symptom of another
 * bug) or ensure we get the page locks in an appropriate order. */
static int filter_preprw_write(int cmd, struct obd_export *exp, struct obdo *oa,
                               int objcount, struct obd_ioobj *obj,
                               int niocount, struct niobuf_remote *nb,
                               struct niobuf_local *res,
                               struct obd_trans_info *oti)
{
        struct obd_run_ctxt saved;
        struct niobuf_remote *rnb;
        struct niobuf_local *lnb;
        struct fsfilt_objinfo fso;
        struct dentry *dentry;
        obd_size left;
        unsigned long now = jiffies;
        int rc = 0, i, tot_bytes = 0, cleanup_phase = 1;
        ENTRY;
        LASSERT(objcount == 1);
        LASSERT(obj->ioo_bufcnt > 0);

        memset(res, 0, niocount * sizeof(*res));

        push_ctxt(&saved, &exp->exp_obd->obd_ctxt, NULL);
        dentry = filter_fid2dentry(exp->exp_obd, NULL, obj->ioo_gr,
                                   obj->ioo_id);
        if (IS_ERR(dentry))
                GOTO(cleanup, rc = PTR_ERR(dentry));

        if (dentry->d_inode == NULL) {
                CERROR("trying to BRW to non-existent file "LPU64"\n",
                       obj->ioo_id);
                f_dput(dentry);
                GOTO(cleanup, rc = -ENOENT);
        }

        fso.fso_dentry = dentry;
        fso.fso_bufcnt = obj->ioo_bufcnt;

        if (time_after(jiffies, now + 15 * HZ))
                CERROR("slow preprw_write setup %lus\n", (jiffies - now) / HZ);
        else
                CDEBUG(D_INFO, "preprw_write setup: %lu jiffies\n",
                       (jiffies - now));

        spin_lock(&exp->exp_obd->obd_osfs_lock);
        if (oa)
                filter_grant_incoming(exp, oa);
        cleanup_phase = 0;

        left = filter_grant_space_left(exp);

        rc = filter_grant_check(exp, objcount, &fso, niocount, nb, res,
                                &left, dentry->d_inode);
        if (oa && oa->o_valid & OBD_MD_FLGRANT)
                oa->o_grant = filter_grant(exp, oa->o_grant, oa->o_undirty,
                                           left);

        spin_unlock(&exp->exp_obd->obd_osfs_lock);

        if (rc) {
                f_dput(dentry);
                GOTO(cleanup, rc);
        }

        for (i = 0, rnb = nb, lnb = res; i < obj->ioo_bufcnt;
             i++, lnb++, rnb++) {
                /* We still set up for ungranted pages so that granted pages
                 * can be written to disk as they were promised, and portals
                 * needs to keep the pages all aligned properly. */
                lnb->dentry = dentry;
                lnb->offset = rnb->offset;
                lnb->len    = rnb->len;
                lnb->flags  = rnb->flags;

                rc = filter_start_page_write(dentry->d_inode, lnb);
                if (rc) {
                        CERROR("page err %u@"LPU64" %u/%u %p: rc %d\n",
                               lnb->len, lnb->offset,
                               i, obj->ioo_bufcnt, dentry, rc);
                        while (lnb-- > res)
                                __free_pages(lnb->page, 0);
                        f_dput(dentry);
                        GOTO(cleanup, rc);
                }
                if (lnb->rc == 0)
                        tot_bytes += lnb->len;
        }

        if (time_after(jiffies, now + 15 * HZ))
                CERROR("slow start_page_write %lus\n", (jiffies - now) / HZ);
        else
                CDEBUG(D_INFO, "start_page_write: %lu jiffies\n",
                       (jiffies - now));

        lprocfs_counter_add(exp->exp_obd->obd_stats, LPROC_FILTER_WRITE_BYTES,
                            tot_bytes);
        EXIT;
cleanup:
        switch (cleanup_phase) {
        case 1:
                spin_lock(&exp->exp_obd->obd_osfs_lock);
                if (oa)
                        filter_grant_incoming(exp, oa);
                spin_unlock(&exp->exp_obd->obd_osfs_lock);
        default: ;
        }
        pop_ctxt(&saved, &exp->exp_obd->obd_ctxt, NULL);
        return rc;
}

int filter_preprw(int cmd, struct obd_export *exp, struct obdo *oa,
                  int objcount, struct obd_ioobj *obj, int niocount,
                  struct niobuf_remote *nb, struct niobuf_local *res,
                  struct obd_trans_info *oti)
{
        if (cmd == OBD_BRW_WRITE)
                return filter_preprw_write(cmd, exp, oa, objcount, obj,
                                           niocount, nb, res, oti);

        if (cmd == OBD_BRW_READ)
                return filter_preprw_read(cmd, exp, oa, objcount, obj,
                                          niocount, nb, res, oti);

        LBUG();
        return -EPROTO;
}

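/*
 * filter_commitrw_read() releases the pages set up by filter_preprw_read().
 * If the file is larger than fo_readcache_max_filesize, the pages are also
 * dropped from the page cache (like truncate_list_pages() would do),
 * presumably so a single large streaming read does not flush the rest of
 * the server's read cache.
 */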
static int filter_commitrw_read(struct obd_export *exp, struct obdo *oa,
                                int objcount, struct obd_ioobj *obj,
                                int niocount, struct niobuf_local *res,
                                struct obd_trans_info *oti, int rc)
{
        struct obd_ioobj *o;
        struct niobuf_local *lnb;
        int i, j, drop = 0;
        ENTRY;

        if (res->dentry != NULL)
                drop = (res->dentry->d_inode->i_size >
                        exp->exp_obd->u.filter.fo_readcache_max_filesize);

        for (i = 0, o = obj, lnb = res; i < objcount; i++, o++) {
                for (j = 0; j < o->ioo_bufcnt; j++, lnb++) {
                        if (lnb->page == NULL)
                                continue;
                        /* drop from cache like truncate_list_pages() */
                        if (drop && !TryLockPage(lnb->page)) {
                                if (lnb->page->mapping)
                                        ll_truncate_complete_page(lnb->page);
                                unlock_page(lnb->page);
                        }
                        page_cache_release(lnb->page);
                }
        }

        if (res->dentry != NULL)
                f_dput(res->dentry);
        RETURN(rc);
}

void flip_into_page_cache(struct inode *inode, struct page *new_page)
{
        struct page *old_page;
        int rc;

        do {
                /* The DLM is protecting us from read/write concurrency, so we
                 * expect this find_lock_page() to return quickly.  Even if we
                 * race with another writer it won't be doing much work with
                 * the page locked.  We do this because
                 * ll_truncate_complete_page() expects a locked page, and it
                 * wants to grab the pagecache lock as well. */
                old_page = find_lock_page(inode->i_mapping, new_page->index);
                if (old_page) {
                        ll_truncate_complete_page(old_page);
                        unlock_page(old_page);
                        page_cache_release(old_page);
                }

#if 0 /* this should be a /proc tunable someday */
                /* Racing O_DIRECT writers (no locking ioctl) could race adding
                 * their pages, so we repeat the page invalidation unless we
                 * successfully added our new page. */
                rc = add_to_page_cache_unique(new_page, inode->i_mapping,
                                              new_page->index,
                                              page_hash(inode->i_mapping,
                                                        new_page->index));
                if (rc == 0) {
                        /* add_to_page_cache clears uptodate|dirty and locks
                         * the page */
                        SetPageUptodate(new_page);
                        unlock_page(new_page);
                }
#else
                rc = 0;
#endif
        } while (rc != 0);
}

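/*
 * filter_grant_commit() is the counterpart of filter_grant_check(): once
 * the blocks for a write have actually been allocated, the grant that was
 * parked in fed_pending/fo_tot_pending is dropped and fo_tot_granted is
 * reduced, so the space now shows up as used instead of being counted as
 * both used and granted.
 */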
void filter_grant_commit(struct obd_export *exp, int niocount,
                         struct niobuf_local *res)
{
        struct filter_obd *filter = &exp->exp_obd->u.filter;
        struct niobuf_local *lnb = res;
        unsigned long pending = 0;
        int i;

        spin_lock(&exp->exp_obd->obd_osfs_lock);
        for (i = 0, lnb = res; i < niocount; i++, lnb++)
                pending += lnb->lnb_grant_used;

        LASSERTF(exp->exp_filter_data.fed_pending >= pending,
                 "%s: cli %s/%p fed_pending: %lu grant_used: %lu\n",
                 exp->exp_obd->obd_name, exp->exp_client_uuid.uuid, exp,
                 exp->exp_filter_data.fed_pending, pending);
        exp->exp_filter_data.fed_pending -= pending;
        LASSERTF(filter->fo_tot_granted >= pending,
                 "%s: cli %s/%p tot_granted: "LPU64" grant_used: %lu\n",
                 exp->exp_obd->obd_name, exp->exp_client_uuid.uuid, exp,
                 exp->exp_obd->u.filter.fo_tot_granted, pending);
        filter->fo_tot_granted -= pending;
        LASSERTF(filter->fo_tot_pending >= pending,
                 "%s: cli %s/%p tot_pending: "LPU64" grant_used: %lu\n",
                 exp->exp_obd->obd_name, exp->exp_client_uuid.uuid, exp,
                 filter->fo_tot_pending, pending);
        filter->fo_tot_pending -= pending;

        spin_unlock(&exp->exp_obd->obd_osfs_lock);
}

int filter_commitrw(int cmd, struct obd_export *exp, struct obdo *oa,
                    int objcount, struct obd_ioobj *obj, int niocount,
                    struct niobuf_local *res, struct obd_trans_info *oti,
                    int rc)
{
        if (cmd == OBD_BRW_WRITE)
                return filter_commitrw_write(exp, oa, objcount, obj, niocount,
                                             res, oti, rc);
        if (cmd == OBD_BRW_READ)
                return filter_commitrw_read(exp, oa, objcount, obj, niocount,
                                            res, oti, rc);
        LBUG();
        return -EPROTO;
}

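/*
 * filter_brw() is a server-local consumer of the preprw/commitrw API and
 * doubles as a usage example: it builds remote niobufs from the caller's
 * brw_page array, calls filter_preprw() to set up local pages, memcpy()s
 * the data in the appropriate direction, and then calls filter_commitrw()
 * to finish (and, for writes, commit) the I/O.
 */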
int filter_brw(int cmd, struct obd_export *exp, struct obdo *oa,
               struct lov_stripe_md *lsm, obd_count oa_bufs,
               struct brw_page *pga, struct obd_trans_info *oti)
{
        struct obd_ioobj ioo;
        struct niobuf_local *lnb;
        struct niobuf_remote *rnb;
        obd_count i;
        int ret = 0;
        ENTRY;

        OBD_ALLOC(lnb, oa_bufs * sizeof(struct niobuf_local));
        OBD_ALLOC(rnb, oa_bufs * sizeof(struct niobuf_remote));

        if (lnb == NULL || rnb == NULL)
                GOTO(out, ret = -ENOMEM);

        for (i = 0; i < oa_bufs; i++) {
                rnb[i].offset = pga[i].off;
                rnb[i].len = pga[i].count;
        }

        obdo_to_ioobj(oa, &ioo);
        ioo.ioo_bufcnt = oa_bufs;

        ret = filter_preprw(cmd, exp, oa, 1, &ioo, oa_bufs, rnb, lnb, oti);
        if (ret != 0)
                GOTO(out, ret);

        for (i = 0; i < oa_bufs; i++) {
                void *virt = kmap(pga[i].pg);
                obd_off off = pga[i].off & ~PAGE_MASK;
                void *addr = kmap(lnb[i].page);

                /* 2 kmaps == vanishingly small deadlock opportunity */

                if (cmd & OBD_BRW_WRITE)
                        memcpy(addr + off, virt + off, pga[i].count);
                else
                        memcpy(virt + off, addr + off, pga[i].count);

                kunmap(lnb[i].page);
                kunmap(pga[i].pg);
        }

        ret = filter_commitrw(cmd, exp, oa, 1, &ioo, oa_bufs, lnb, oti, ret);

out:
        if (lnb)
                OBD_FREE(lnb, oa_bufs * sizeof(struct niobuf_local));
        if (rnb)
                OBD_FREE(rnb, oa_bufs * sizeof(struct niobuf_remote));
        RETURN(ret);
}