/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 *  linux/fs/obdfilter/filter_io.c
 *
 *  Copyright (c) 2001-2003 Cluster File Systems, Inc.
 *   Author: Peter Braam <braam@clusterfs.com>
 *   Author: Andreas Dilger <adilger@clusterfs.com>
 *   Author: Phil Schwan <phil@clusterfs.com>
 *
 *   This file is part of the Lustre file system, http://www.lustre.org
 *   Lustre is a trademark of Cluster File Systems, Inc.
 *
 *   You may have signed or agreed to another license before downloading
 *   this software.  If so, you are bound by the terms and conditions
 *   of that agreement, and the following does not apply to you.  See the
 *   LICENSE file included with this distribution for more information.
 *
 *   If you did not agree to a different license, then this copy of Lustre
 *   is open source software; you can redistribute it and/or modify it
 *   under the terms of version 2 of the GNU General Public License as
 *   published by the Free Software Foundation.
 *
 *   In either case, Lustre is distributed in the hope that it will be
 *   useful, but WITHOUT ANY WARRANTY; without even the implied warranty
 *   of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   license text for more details.
 */

#define DEBUG_SUBSYSTEM S_FILTER

#include <linux/config.h>
#include <linux/module.h>
#include <linux/pagemap.h> // XXX kill me soon
#include <linux/version.h>

#include <obd_class.h>
#include <lustre_fsfilt.h>
#include "filter_internal.h"

int *obdfilter_created_scratchpad;

static int filter_alloc_dio_page(struct obd_device *obd, struct inode *inode,
                                 struct niobuf_local *lnb)
{
        struct page *page;

        LASSERT(lnb->page != NULL);

        page = lnb->page;
#if 0
        POISON_PAGE(page, 0xf1);
        if (lnb->len != PAGE_SIZE) {
                memset(kmap(page) + lnb->len, 0, PAGE_SIZE - lnb->len);
                kunmap(page);
        }
#endif
        page->index = lnb->offset >> PAGE_SHIFT;

        RETURN(0);
}

static void filter_free_dio_pages(int objcount, struct obd_ioobj *obj,
                           int niocount, struct niobuf_local *res)
{
        int i, j;

        for (i = 0; i < objcount; i++, obj++) {
                for (j = 0 ; j < obj->ioo_bufcnt ; j++, res++)
                        res->page = NULL;
        }
}

/* Grab the dirty and seen grant announcements from the incoming obdo.
 * We will later calculate the client's new grant and return it.
 * Caller must hold the osfs lock. */
static void filter_grant_incoming(struct obd_export *exp, struct obdo *oa)
{
        struct filter_export_data *fed;
        struct obd_device *obd = exp->exp_obd;
        static unsigned long last_msg;
        static int last_count;
        int mask = D_CACHE;
        ENTRY;

        LASSERT_SPIN_LOCKED(&obd->obd_osfs_lock);

        if ((oa->o_valid & (OBD_MD_FLBLOCKS|OBD_MD_FLGRANT)) !=
                                        (OBD_MD_FLBLOCKS|OBD_MD_FLGRANT)) {
                oa->o_valid &= ~OBD_MD_FLGRANT;
                EXIT;
                return;
        }

        fed = &exp->exp_filter_data;

        /* Don't print this to the console the first time it happens, since
         * it can happen legitimately on occasion, but only rarely. */
        if (time_after(jiffies, last_msg + 60 * HZ)) {
                last_count = 0;
                last_msg = jiffies;
        }
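        /* Added note: last_count & -last_count isolates the lowest set bit, so
         * the test below is true only when last_count is zero or a power of
         * two.  That rate-limits the louder debug mask below to the 1st, 2nd,
         * 4th, 8th, ... occurrence within each 60 second window. */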
        if ((last_count & (-last_count)) == last_count)
                mask = D_HA /* until bug 3273 is fixed D_WARNING */;
        last_count++;

        /* Add some margin, since there is a small race if other RPCs arrive
         * out-of-order and have already consumed some grant.  We want to
         * leave this here in case there is a large error in accounting. */
        CDEBUG(oa->o_grant > fed->fed_grant + FILTER_GRANT_CHUNK ? mask:D_CACHE,
               "%s: cli %s/%p reports grant: "LPU64" dropped: %u, local: %lu\n",
               obd->obd_name, exp->exp_client_uuid.uuid, exp, oa->o_grant,
               oa->o_dropped, fed->fed_grant);

        /* Update our accounting now so that statfs takes it into account.
         * Note that fed_dirty is only approximate and can become incorrect
         * if RPCs arrive out-of-order.  No important calculations depend
         * on fed_dirty, but we must sanity check it so we do not assert. */
        if ((long long)oa->o_dirty < 0)
                oa->o_dirty = 0;
        else if (oa->o_dirty > fed->fed_grant + 4 * FILTER_GRANT_CHUNK)
                oa->o_dirty = fed->fed_grant + 4 * FILTER_GRANT_CHUNK;
        obd->u.filter.fo_tot_dirty += oa->o_dirty - fed->fed_dirty;
        if (fed->fed_grant < oa->o_dropped) {
                CDEBUG(D_HA,"%s: cli %s/%p reports %u dropped > fedgrant %lu\n",
                       obd->obd_name, exp->exp_client_uuid.uuid, exp,
                       oa->o_dropped, fed->fed_grant);
                oa->o_dropped = 0;
        }
        if (obd->u.filter.fo_tot_granted < oa->o_dropped) {
                CERROR("%s: cli %s/%p reports %u dropped > tot_grant "LPU64"\n",
                       obd->obd_name, exp->exp_client_uuid.uuid, exp,
                       oa->o_dropped, obd->u.filter.fo_tot_granted);
                oa->o_dropped = 0;
        }
        obd->u.filter.fo_tot_granted -= oa->o_dropped;
        fed->fed_grant -= oa->o_dropped;
        fed->fed_dirty = oa->o_dirty;
        if (fed->fed_dirty < 0 || fed->fed_grant < 0 || fed->fed_pending < 0) {
                CERROR("%s: cli %s/%p dirty %ld pend %ld grant %ld\n",
                       obd->obd_name, exp->exp_client_uuid.uuid, exp,
                       fed->fed_dirty, fed->fed_pending, fed->fed_grant);
                spin_unlock(&obd->obd_osfs_lock);
                LBUG();
        }
        EXIT;
}

/* Figure out how much space is available between what we've granted
 * and what remains in the filesystem.  Compensate for ext3 indirect
 * block overhead when computing how much free space is left ungranted.
 *
 * Caller must hold obd_osfs_lock. */
obd_size filter_grant_space_left(struct obd_export *exp)
{
        struct obd_device *obd = exp->exp_obd;
        int blockbits = obd->u.obt.obt_sb->s_blocksize_bits;
        obd_size tot_granted = obd->u.filter.fo_tot_granted, avail, left = 0;
        int rc, statfs_done = 0;

        LASSERT_SPIN_LOCKED(&obd->obd_osfs_lock);

        if (cfs_time_before_64(obd->obd_osfs_age, cfs_time_current_64() - HZ)) {
restat:
                rc = fsfilt_statfs(obd, obd->u.obt.obt_sb,
                                   cfs_time_current_64() + HZ);
                if (rc) /* N.B. statfs can't really fail */
                        RETURN(0);
                statfs_done = 1;
        }

        avail = obd->obd_osfs.os_bavail;
        left = avail - (avail >> (blockbits - 3)); /* (d)indirect */
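        /* For example, with 4kB blocks blockbits = 12, so the line above
         * reserves avail >> 9 blocks (roughly 1/512, or about 0.2%, of the
         * available space) as the (d)indirect overhead allowance before the
         * remainder is considered grantable. */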
        if (left > GRANT_FOR_LLOG(obd)) {
                left = (left - GRANT_FOR_LLOG(obd)) << blockbits;
        } else {
                left = 0 /* << blockbits */;
        }

        if (!statfs_done && left < 32 * FILTER_GRANT_CHUNK + tot_granted) {
                CDEBUG(D_CACHE, "fs has no space left and statfs too old\n");
                goto restat;
        }

        if (left >= tot_granted) {
                left -= tot_granted;
        } else {
                if (left < tot_granted - obd->u.filter.fo_tot_pending) {
                        CERROR("%s: cli %s/%p grant "LPU64" > available "
                               LPU64" and pending "LPU64"\n", obd->obd_name,
                               exp->exp_client_uuid.uuid, exp, tot_granted,
                               left, obd->u.filter.fo_tot_pending);
                }
                left = 0;
        }

        CDEBUG(D_CACHE, "%s: cli %s/%p free: "LPU64" avail: "LPU64" grant "LPU64
               " left: "LPU64" pending: "LPU64"\n", obd->obd_name,
               exp->exp_client_uuid.uuid, exp,
               obd->obd_osfs.os_bfree << blockbits, avail << blockbits,
               tot_granted, left, obd->u.filter.fo_tot_pending);

        return left;
}

/* Calculate how much grant space to allocate to this client, based on how
 * much space is currently free and how much of that is already granted.
 *
 * Caller must hold obd_osfs_lock. */
long filter_grant(struct obd_export *exp, obd_size current_grant,
                  obd_size want, obd_size fs_space_left)
{
        struct obd_device *obd = exp->exp_obd;
        struct filter_export_data *fed = &exp->exp_filter_data;
        int blockbits = obd->u.obt.obt_sb->s_blocksize_bits;
        __u64 grant = 0;

        LASSERT_SPIN_LOCKED(&obd->obd_osfs_lock);

        /* Grant some fraction of the client's requested grant space so that
         * they are not always waiting for write credits (not all of it to
         * avoid overgranting in the face of multiple RPCs in flight).  This
         * essentially controls the OSC_MAX_RIF for a client.
         *
         * If we do have a large disparity between what the client thinks it
         * has and what we think it has, don't grant very much and let the
         * client consume its grant first.  Either it just has lots of RPCs
         * in flight, or it was evicted and its grants will soon be used up. */
        if (want > 0x7fffffff) {
                CERROR("%s: client %s/%p requesting > 2GB grant "LPU64"\n",
                       obd->obd_name, exp->exp_client_uuid.uuid, exp, want);
        } else if (current_grant < want &&
                   current_grant < fed->fed_grant + FILTER_GRANT_CHUNK) {
                grant = min((want >> blockbits),
                            (fs_space_left >> blockbits) / 8);
                grant <<= blockbits;
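                /* Rough illustration of the sizing above: a client asking for
                 * want = 8MB of new grant when 1GB of space remains ungranted
                 * gets min(8MB, 1GB/8) = 8MB, rounded down to a whole number
                 * of blocks.  The cap to FILTER_GRANT_CHUNK below (skipped
                 * during recovery) then limits how much any single reply can
                 * hand out. */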

                if (grant) {
                        /* Allow >FILTER_GRANT_CHUNK size when clients
                         * reconnect due to a server reboot.
                         */
                        if ((grant > FILTER_GRANT_CHUNK) &&
                            (!obd->obd_recovering))
                                grant = FILTER_GRANT_CHUNK;

                        obd->u.filter.fo_tot_granted += grant;
                        fed->fed_grant += grant;
                        if (fed->fed_grant < 0) {
                                CERROR("%s: cli %s/%p grant %ld want "LPU64
                                       "current"LPU64"\n",
                                       obd->obd_name, exp->exp_client_uuid.uuid,
                                       exp, fed->fed_grant, want,current_grant);
                                spin_unlock(&obd->obd_osfs_lock);
                                LBUG();
                        }
                }
        }

        CDEBUG(D_CACHE,"%s: cli %s/%p wants: "LPU64" granting: "LPU64"\n",
               obd->obd_name, exp->exp_client_uuid.uuid, exp, want, grant);
        CDEBUG(D_CACHE,
               "%s: cli %s/%p tot cached:"LPU64" granted:"LPU64
               " num_exports: %d\n", obd->obd_name, exp->exp_client_uuid.uuid,
               exp, obd->u.filter.fo_tot_dirty,
               obd->u.filter.fo_tot_granted, obd->obd_num_exports);

        return grant;
}

static int filter_preprw_read(int cmd, struct obd_export *exp, struct obdo *oa,
                              int objcount, struct obd_ioobj *obj,
                              int niocount, struct niobuf_remote *nb,
                              struct niobuf_local *res,
                              struct obd_trans_info *oti,
                              struct lustre_capa *capa)
{
        struct obd_device *obd = exp->exp_obd;
        struct lvfs_run_ctxt saved;
        struct niobuf_remote *rnb;
        struct niobuf_local *lnb;
        struct dentry *dentry = NULL;
        struct inode *inode;
        void *iobuf = NULL;
        int rc = 0, i, tot_bytes = 0;
        unsigned long now = jiffies;
        ENTRY;

        /* We are currently not supporting multi-obj BRW_READ RPCs at all.
         * When we do, this function's dentry cleanup will need to be fixed.
         * These values are verified in ost_brw_write() from the wire. */
        LASSERTF(objcount == 1, "%d\n", objcount);
        LASSERTF(obj->ioo_bufcnt > 0, "%d\n", obj->ioo_bufcnt);

        rc = filter_verify_capa(exp, NULL, obdo_mdsno(oa), capa,
                                CAPA_OPC_OSS_READ);
        if (rc)
                RETURN(rc);

        if (oa && oa->o_valid & OBD_MD_FLGRANT) {
                spin_lock(&obd->obd_osfs_lock);
                filter_grant_incoming(exp, oa);

                oa->o_grant = 0;
                spin_unlock(&obd->obd_osfs_lock);
        }

        iobuf = filter_iobuf_get(&obd->u.filter, oti);
        if (IS_ERR(iobuf))
                RETURN(PTR_ERR(iobuf));

        push_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
        dentry = filter_oa2dentry(obd, oa);
        if (IS_ERR(dentry)) {
                rc = PTR_ERR(dentry);
                dentry = NULL;
                GOTO(cleanup, rc);
        }

        inode = dentry->d_inode;

        obdo_to_inode(inode, oa, OBD_MD_FLATIME);
        fsfilt_check_slow(obd, now, obd_timeout, "preprw_read setup");

        for (i = 0, lnb = res, rnb = nb; i < obj->ioo_bufcnt;
             i++, rnb++, lnb++) {
                lnb->dentry = dentry;
                lnb->offset = rnb->offset;
                lnb->len    = rnb->len;
                lnb->flags  = rnb->flags;

                /*
                 * ost_brw_write()->ost_nio_pages_get() already initialized
                 * lnb->page to point to the page from the per-thread page
                 * pool (bug 5137); we only need to initialize the page here.
                 */
                LASSERT(lnb->page != NULL);

                if (inode->i_size <= rnb->offset)
                        /* If there's no more data, abort early.  lnb->rc == 0,
                         * so it's easy to detect later. */
                        break;
                else
                        filter_alloc_dio_page(obd, inode, lnb);

                if (inode->i_size < lnb->offset + lnb->len - 1)
                        lnb->rc = inode->i_size - lnb->offset;
                else
                        lnb->rc = lnb->len;
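                /* In other words lnb->rc is the readable byte count for this
                 * niobuf, clamped at EOF.  For instance, with i_size = 10000,
                 * offset = 8192 and len = 4096, only 10000 - 8192 = 1808
                 * bytes of real data exist, so rc = 1808 and the short count
                 * tells the caller the rest of the page lies beyond EOF. */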

                tot_bytes += lnb->rc;

                filter_iobuf_add_page(obd, iobuf, inode, lnb->page);
        }

        fsfilt_check_slow(obd, now, obd_timeout, "start_page_read");

        rc = filter_direct_io(OBD_BRW_READ, dentry, iobuf,
                              exp, NULL, NULL, NULL);
        if (rc)
                GOTO(cleanup, rc);

        lprocfs_counter_add(obd->obd_stats, LPROC_FILTER_READ_BYTES, tot_bytes);

        filter_tally_read(&exp->exp_obd->u.filter, res, niocount);

        EXIT;

 cleanup:
        if (rc != 0) {
                filter_free_dio_pages(objcount, obj, niocount, res);

                if (dentry != NULL)
                        f_dput(dentry);
        }

        filter_iobuf_put(&obd->u.filter, iobuf, oti);

        pop_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
        if (rc)
                CERROR("io error %d\n", rc);

        return rc;
}

/* When clients have dirtied as much space as they've been granted, they
 * fall through to sync writes.  These sync writes haven't been expressed
 * in grants and need to error with ENOSPC when there isn't room in the
 * filesystem for them after grants are taken into account.  However,
 * writeback of the dirty data that was already granted space can write
 * right on through.
 *
 * Caller must hold obd_osfs_lock. */
static int filter_grant_check(struct obd_export *exp, int objcount,
                              struct fsfilt_objinfo *fso, int niocount,
                              struct niobuf_remote *rnb,
                              struct niobuf_local *lnb, obd_size *left,
                              struct inode *inode)
{
        struct filter_export_data *fed = &exp->exp_filter_data;
        int blocksize = exp->exp_obd->u.obt.obt_sb->s_blocksize;
        unsigned long used = 0, ungranted = 0, using;
        int i, rc = -ENOSPC, obj, n = 0, mask = D_CACHE;

        LASSERT_SPIN_LOCKED(&exp->exp_obd->obd_osfs_lock);

        for (obj = 0; obj < objcount; obj++) {
                for (i = 0; i < fso[obj].fso_bufcnt; i++, n++) {
                        int tmp, bytes;

                        /* should match the code in osc_exit_cache */
                        bytes = rnb[n].len;
                        bytes += rnb[n].offset & (blocksize - 1);
                        tmp = (rnb[n].offset + rnb[n].len) & (blocksize - 1);
                        if (tmp)
                                bytes += blocksize - tmp;
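                        /* i.e. bytes is the niobuf length rounded out to full
                         * filesystem blocks.  Example with a 4096-byte block
                         * size: offset = 1000, len = 5000 touches two blocks,
                         * so bytes = 5000 + 1000 + (4096 - 1904) = 8192. */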

                        if (rnb[n].flags & OBD_BRW_FROM_GRANT) {
                                if (fed->fed_grant < used + bytes) {
                                        CDEBUG(D_CACHE,
                                               "%s: cli %s/%p claims %ld+%d "
                                               "GRANT, real grant %lu idx %d\n",
                                               exp->exp_obd->obd_name,
                                               exp->exp_client_uuid.uuid, exp,
                                               used, bytes, fed->fed_grant, n);
                                        mask = D_RPCTRACE;
                                } else {
                                        used += bytes;
                                        rnb[n].flags |= OBD_BRW_GRANTED;
                                        lnb[n].lnb_grant_used = bytes;
                                        CDEBUG(0, "idx %d used=%lu\n", n, used);
                                        rc = 0;
                                        continue;
                                }
                        }
                        if (*left > ungranted) {
                                /* if enough space, pretend it was granted */
                                ungranted += bytes;
                                rnb[n].flags |= OBD_BRW_GRANTED;
                                CDEBUG(0, "idx %d ungranted=%lu\n",n,ungranted);
                                rc = 0;
                                continue;
                        }

                        /* We can't check for already-mapped blocks here, as
                         * it requires dropping the osfs lock to do the bmap.
                         * Instead, we return ENOSPC and in that case we need
                         * to go through and verify if all of the blocks not
                         * marked BRW_GRANTED are already mapped and we can
                         * ignore this error. */
                        lnb[n].rc = -ENOSPC;
                        rnb[n].flags &= ~OBD_BRW_GRANTED;
                        CDEBUG(D_CACHE,"%s: cli %s/%p idx %d no space for %d\n",
                               exp->exp_obd->obd_name,
                               exp->exp_client_uuid.uuid, exp, n, bytes);
                }
        }

        /* Now subtract what the client has used already.  We don't subtract
         * this from tot_granted yet, so that other clients can't grab that
         * space before we have actually allocated our blocks.  That
         * happens in filter_grant_commit() after the writes are done. */
        *left -= ungranted;
        fed->fed_grant -= used;
        fed->fed_pending += used;
        exp->exp_obd->u.filter.fo_tot_pending += used;

        CDEBUG(mask,
               "%s: cli %s/%p used: %lu ungranted: %lu grant: %lu dirty: %lu\n",
               exp->exp_obd->obd_name, exp->exp_client_uuid.uuid, exp, used,
               ungranted, fed->fed_grant, fed->fed_dirty);

        /* Rough calc in case we don't refresh cached statfs data */
        using = (used + ungranted + 1) >>
                exp->exp_obd->u.obt.obt_sb->s_blocksize_bits;
        if (exp->exp_obd->obd_osfs.os_bavail > using)
                exp->exp_obd->obd_osfs.os_bavail -= using;
        else
                exp->exp_obd->obd_osfs.os_bavail = 0;

        if (fed->fed_dirty < used) {
                CERROR("%s: cli %s/%p claims used %lu > fed_dirty %lu\n",
                       exp->exp_obd->obd_name, exp->exp_client_uuid.uuid, exp,
                       used, fed->fed_dirty);
                used = fed->fed_dirty;
        }
        exp->exp_obd->u.filter.fo_tot_dirty -= used;
        fed->fed_dirty -= used;

        if (fed->fed_dirty < 0 || fed->fed_grant < 0 || fed->fed_pending < 0) {
                CERROR("%s: cli %s/%p dirty %ld pend %ld grant %ld\n",
                       exp->exp_obd->obd_name, exp->exp_client_uuid.uuid, exp,
                       fed->fed_dirty, fed->fed_pending, fed->fed_grant);
                spin_unlock(&exp->exp_obd->obd_osfs_lock);
                LBUG();
        }
        return rc;
}

/* If we ever start to support multi-object BRW RPCs, we will need to get locks
 * on multiple inodes.  That isn't all, because there still exists the
 * possibility of a truncate starting a new transaction while holding the ext3
 * rwsem = write while some writes (which have started their transactions here)
 * block on the ext3 rwsem = read => lock inversion.
 *
 * The handling gets very ugly when dealing with locked pages.  It may be easier
 * to just get rid of the locked page code (which has problems of its own) and
 * either discover we do not need it anymore (i.e. it was a symptom of another
 * bug) or ensure we get the page locks in an appropriate order. */
static int filter_preprw_write(int cmd, struct obd_export *exp, struct obdo *oa,
                               int objcount, struct obd_ioobj *obj,
                               int niocount, struct niobuf_remote *nb,
                               struct niobuf_local *res,
                               struct obd_trans_info *oti,
                               struct lustre_capa *capa)
{
        struct lvfs_run_ctxt saved;
        struct niobuf_remote *rnb;
        struct niobuf_local *lnb = res;
        struct fsfilt_objinfo fso;
        struct filter_mod_data *fmd;
        struct dentry *dentry = NULL;
        void *iobuf;
        obd_size left;
        unsigned long now = jiffies;
        int rc = 0, i, tot_bytes = 0, cleanup_phase = 0;
        ENTRY;
        LASSERT(objcount == 1);
        LASSERT(obj->ioo_bufcnt > 0);

        rc = filter_verify_capa(exp, NULL, obdo_mdsno(oa), capa,
                                CAPA_OPC_OSS_WRITE);
        if (rc)
                RETURN(rc);

        push_ctxt(&saved, &exp->exp_obd->obd_lvfs_ctxt, NULL);
        iobuf = filter_iobuf_get(&exp->exp_obd->u.filter, oti);
        if (IS_ERR(iobuf))
                GOTO(cleanup, rc = PTR_ERR(iobuf));
        cleanup_phase = 1;

        dentry = filter_fid2dentry(exp->exp_obd, NULL, obj->ioo_gr,
                                   obj->ioo_id);
        if (IS_ERR(dentry))
                GOTO(cleanup, rc = PTR_ERR(dentry));
        cleanup_phase = 2;

        if (dentry->d_inode == NULL) {
                CERROR("%s: trying to BRW to non-existent file "LPU64"\n",
                       exp->exp_obd->obd_name, obj->ioo_id);
                GOTO(cleanup, rc = -ENOENT);
        }

        fso.fso_dentry = dentry;
        fso.fso_bufcnt = obj->ioo_bufcnt;

        fsfilt_check_slow(exp->exp_obd, now, obd_timeout, "preprw_write setup");

        /* Don't update inode timestamps if this write is older than a
         * setattr which modifies the timestamps. b=10150 */
        /* XXX when we start having persistent reservations, this needs to
         * be changed to filter_fmd_get() to create the fmd if it doesn't
         * already exist, so we can store the reservation handle there. */
        fmd = filter_fmd_find(exp, obj->ioo_id, obj->ioo_gr);

        spin_lock(&exp->exp_obd->obd_osfs_lock);
        if (oa) {
                filter_grant_incoming(exp, oa);
                if (fmd && fmd->fmd_mactime_xid > oti->oti_xid)
                        oa->o_valid &= ~(OBD_MD_FLMTIME | OBD_MD_FLCTIME |
                                         OBD_MD_FLATIME);
                else
                        obdo_to_inode(dentry->d_inode, oa, OBD_MD_FLATIME |
                                      OBD_MD_FLMTIME | OBD_MD_FLCTIME);
        }
        cleanup_phase = 3;

        left = filter_grant_space_left(exp);

        rc = filter_grant_check(exp, objcount, &fso, niocount, nb, res,
                                &left, dentry->d_inode);

        /* Do not zero out oa->o_valid, as it is used in filter_commitrw_write()
         * for setting UID/GID and fid EA at first write time. */
        if (oa && oa->o_valid & OBD_MD_FLGRANT) {
                oa->o_grant = filter_grant(exp,oa->o_grant,oa->o_undirty,left);
                oa->o_valid |= OBD_MD_FLGRANT;
        }

        spin_unlock(&exp->exp_obd->obd_osfs_lock);
        filter_fmd_put(exp, fmd);

        if (rc)
                GOTO(cleanup, rc);

        for (i = 0, rnb = nb, lnb = res; i < obj->ioo_bufcnt;
             i++, lnb++, rnb++) {
                /* We still set up for ungranted pages so that granted pages
                 * can be written to disk as they were promised, and portals
                 * needs to keep the pages all aligned properly. */
                lnb->dentry = dentry;
                lnb->offset = rnb->offset;
                lnb->len    = rnb->len;
                lnb->flags  = rnb->flags;

                /*
                 * ost_brw_write()->ost_nio_pages_get() already initialized
                 * lnb->page to point to the page from the per-thread page
                 * pool (bug 5137); we only need to initialize the page here.
                 */
                LASSERT(lnb->page != NULL);
                if (lnb->len != PAGE_SIZE) {
                        memset(kmap(lnb->page) + lnb->len,
                               0, PAGE_SIZE - lnb->len);
                        kunmap(lnb->page);
                }
                lnb->page->index = lnb->offset >> PAGE_SHIFT;

                cleanup_phase = 4;

                /* If the filter writes a partial page and the file is later
                 * extended, the client will read in the whole page, so the
                 * filter has to be careful to zero the rest of the partial
                 * page on disk.  We do it by hand for partial extending
                 * writes; send_bio() is responsible for zeroing pages when
                 * asked to read unmapped blocks -- brw_kiovec() does this. */
                if (lnb->len != PAGE_SIZE) {
                        __s64 maxidx;

                        maxidx = ((dentry->d_inode->i_size + PAGE_SIZE - 1) >>
                                 PAGE_SHIFT) - 1;
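                        /* maxidx is the index of the last page overlapping the
                         * current file size.  For example, with 4kB pages and
                         * i_size = 10000, maxidx = 2, so a partial write to
                         * page 0, 1 or 2 is read in below before being
                         * modified, while a partial write past page 2 lies
                         * entirely beyond EOF and is simply zero-filled by
                         * hand in the else branch. */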
                        if (maxidx >= lnb->page->index) {
                                LL_CDEBUG_PAGE(D_PAGE, lnb->page, "write %u @ "
                                               LPU64" flg %x before EOF %llu\n",
                                               lnb->len, lnb->offset,lnb->flags,
                                               dentry->d_inode->i_size);
                                filter_iobuf_add_page(exp->exp_obd, iobuf,
                                                      dentry->d_inode,
                                                      lnb->page);
                        } else {
                                long off;
                                char *p = kmap(lnb->page);

                                off = lnb->offset & ~PAGE_MASK;
                                if (off)
                                        memset(p, 0, off);
                                off = (lnb->offset + lnb->len) & ~PAGE_MASK;
                                if (off)
                                        memset(p + off, 0, PAGE_SIZE - off);
                                kunmap(lnb->page);
                        }
                }
                if (lnb->rc == 0)
                        tot_bytes += lnb->len;
        }

        rc = filter_direct_io(OBD_BRW_READ, dentry, iobuf, exp,
                              NULL, NULL, NULL);

        fsfilt_check_slow(exp->exp_obd, now, obd_timeout, "start_page_write");

        lprocfs_counter_add(exp->exp_obd->obd_stats, LPROC_FILTER_WRITE_BYTES,
                            tot_bytes);
        EXIT;
cleanup:
        switch(cleanup_phase) {
        case 4:
        case 3:
                filter_iobuf_put(&exp->exp_obd->u.filter, iobuf, oti);
        case 2:
                pop_ctxt(&saved, &exp->exp_obd->obd_lvfs_ctxt, NULL);
                if (rc)
                        f_dput(dentry);
                break;
        case 1:
                filter_iobuf_put(&exp->exp_obd->u.filter, iobuf, oti);
        case 0:
                spin_lock(&exp->exp_obd->obd_osfs_lock);
                if (oa)
                        filter_grant_incoming(exp, oa);
                spin_unlock(&exp->exp_obd->obd_osfs_lock);
                pop_ctxt(&saved, &exp->exp_obd->obd_lvfs_ctxt, NULL);
                break;
        default:;
        }
        return rc;
}

int filter_preprw(int cmd, struct obd_export *exp, struct obdo *oa,
                  int objcount, struct obd_ioobj *obj, int niocount,
                  struct niobuf_remote *nb, struct niobuf_local *res,
                  struct obd_trans_info *oti, struct lustre_capa *capa)
{
        if (cmd == OBD_BRW_WRITE)
                return filter_preprw_write(cmd, exp, oa, objcount, obj,
                                           niocount, nb, res, oti, capa);
        if (cmd == OBD_BRW_READ)
                return filter_preprw_read(cmd, exp, oa, objcount, obj,
                                          niocount, nb, res, oti, capa);
        LBUG();
        return -EPROTO;
}
void filter_release_read_page(struct filter_obd *filter, struct inode *inode,
                              struct page *page)
{
        int drop = 0;

        if (inode != NULL &&
            (inode->i_size > filter->fo_readcache_max_filesize))
                drop = 1;

        /* drop from cache like truncate_list_pages() */
        if (drop && !TryLockPage(page)) {
                if (page->mapping)
                        ll_truncate_complete_page(page);
                unlock_page(page);
        }
        page_cache_release(page);
}

static int filter_commitrw_read(struct obd_export *exp, struct obdo *oa,
                                int objcount, struct obd_ioobj *obj,
                                int niocount, struct niobuf_local *res,
                                struct obd_trans_info *oti, int rc)
{
        struct inode *inode = NULL;
        struct ldlm_res_id res_id = { .name = { obj->ioo_id, 0,
                                                obj->ioo_gr, 0} };
        struct ldlm_resource *resource = NULL;
        struct ldlm_namespace *ns = exp->exp_obd->obd_namespace;
        ENTRY;

        /* If oa != NULL then filter_preprw_read updated the inode atime
         * and we should update the lvb so that other glimpses will also
         * get the updated value. bug 5972 */
        if (oa && ns && ns->ns_lvbo && ns->ns_lvbo->lvbo_update) {
                resource = ldlm_resource_get(ns, NULL, &res_id, LDLM_EXTENT, 0);

                if (resource != NULL) {
                        ns->ns_lvbo->lvbo_update(resource, NULL, 0, 1);
                        ldlm_resource_putref(resource);
                }
        }

        if (res->dentry != NULL)
                inode = res->dentry->d_inode;

        filter_free_dio_pages(objcount, obj, niocount, res);

        if (res->dentry != NULL)
                f_dput(res->dentry);
        RETURN(rc);
}

void flip_into_page_cache(struct inode *inode, struct page *new_page)
{
        struct page *old_page;
        int rc;

        do {
                /* the dlm is protecting us from read/write concurrency, so we
                 * expect this find_lock_page to return quickly.  even if we
                 * race with another writer it won't be doing much work with
                 * the page locked.  we do this 'cause t_c_p expects a
                 * locked page, and it wants to grab the pagecache lock
                 * as well. */
                old_page = find_lock_page(inode->i_mapping, new_page->index);
                if (old_page) {
                        ll_truncate_complete_page(old_page);
                        unlock_page(old_page);
                        page_cache_release(old_page);
                }

#if 0 /* this should be a /proc tunable someday */
                /* racing o_directs (no locking ioctl) could race adding
                 * their pages, so we repeat the page invalidation unless
                 * we successfully added our new page */
                rc = add_to_page_cache_unique(new_page, inode->i_mapping,
                                              new_page->index,
                                              page_hash(inode->i_mapping,
                                                        new_page->index));
                if (rc == 0) {
                        /* add_to_page_cache clears uptodate|dirty and locks
                         * the page */
                        SetPageUptodate(new_page);
                        unlock_page(new_page);
                }
#else
                rc = 0;
#endif
        } while (rc != 0);
}

void filter_grant_commit(struct obd_export *exp, int niocount,
                         struct niobuf_local *res)
{
        struct filter_obd *filter = &exp->exp_obd->u.filter;
        struct niobuf_local *lnb = res;
        unsigned long pending = 0;
        int i;

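        /* Sum up the grant that filter_grant_check() marked as consumed for
         * these pages (lnb_grant_used) and retire it from the per-export and
         * filter-wide pending/granted counters, now that the blocks have
         * actually been allocated on disk. */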
        spin_lock(&exp->exp_obd->obd_osfs_lock);
        for (i = 0, lnb = res; i < niocount; i++, lnb++)
                pending += lnb->lnb_grant_used;

        LASSERTF(exp->exp_filter_data.fed_pending >= pending,
                 "%s: cli %s/%p fed_pending: %lu grant_used: %lu\n",
                 exp->exp_obd->obd_name, exp->exp_client_uuid.uuid, exp,
                 exp->exp_filter_data.fed_pending, pending);
        exp->exp_filter_data.fed_pending -= pending;
        LASSERTF(filter->fo_tot_granted >= pending,
                 "%s: cli %s/%p tot_granted: "LPU64" grant_used: %lu\n",
                 exp->exp_obd->obd_name, exp->exp_client_uuid.uuid, exp,
                 exp->exp_obd->u.filter.fo_tot_granted, pending);
        filter->fo_tot_granted -= pending;
        LASSERTF(filter->fo_tot_pending >= pending,
                 "%s: cli %s/%p tot_pending: "LPU64" grant_used: %lu\n",
                 exp->exp_obd->obd_name, exp->exp_client_uuid.uuid, exp,
                 filter->fo_tot_pending, pending);
        filter->fo_tot_pending -= pending;

        spin_unlock(&exp->exp_obd->obd_osfs_lock);
}

int filter_commitrw(int cmd, struct obd_export *exp, struct obdo *oa,
                    int objcount, struct obd_ioobj *obj, int niocount,
                    struct niobuf_local *res, struct obd_trans_info *oti,
                    int rc)
{
        if (cmd == OBD_BRW_WRITE)
                return filter_commitrw_write(exp, oa, objcount, obj, niocount,
                                             res, oti, rc);
        if (cmd == OBD_BRW_READ)
                return filter_commitrw_read(exp, oa, objcount, obj, niocount,
                                            res, oti, rc);
        LBUG();
        return -EPROTO;
}

int filter_brw(int cmd, struct obd_export *exp, struct obd_info *oinfo,
               obd_count oa_bufs, struct brw_page *pga,
               struct obd_trans_info *oti)
{
        struct obd_ioobj ioo;
        struct niobuf_local *lnb;
        struct niobuf_remote *rnb;
        obd_count i;
        int ret = 0;
        ENTRY;

        OBD_ALLOC(lnb, oa_bufs * sizeof(struct niobuf_local));
        OBD_ALLOC(rnb, oa_bufs * sizeof(struct niobuf_remote));

        if (lnb == NULL || rnb == NULL)
                GOTO(out, ret = -ENOMEM);

        for (i = 0; i < oa_bufs; i++) {
                lnb[i].page = pga[i].pg;
                rnb[i].offset = pga[i].off;
                rnb[i].len = pga[i].count;
        }

        obdo_to_ioobj(oinfo->oi_oa, &ioo);
        ioo.ioo_bufcnt = oa_bufs;

        ret = filter_preprw(cmd, exp, oinfo->oi_oa, 1, &ioo,
                            oa_bufs, rnb, lnb, oti, oinfo_capa(oinfo));
        if (ret != 0)
                GOTO(out, ret);

        ret = filter_commitrw(cmd, exp, oinfo->oi_oa, 1, &ioo,
                              oa_bufs, lnb, oti, ret);

out:
        if (lnb)
                OBD_FREE(lnb, oa_bufs * sizeof(struct niobuf_local));
        if (rnb)
                OBD_FREE(rnb, oa_bufs * sizeof(struct niobuf_remote));
        RETURN(ret);
}