4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
20 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
21 * CA 95054 USA or visit www.sun.com if you need additional information or
27 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
28 * Use is subject to license terms.
31 * This file is part of Lustre, http://www.lustre.org/
32 * Lustre is a trademark of Sun Microsystems, Inc.
34 * lustre/obdfilter/filter_io.c
36 * Author: Peter Braam <braam@clusterfs.com>
37 * Author: Andreas Dilger <adilger@clusterfs.com>
38 * Author: Phil Schwan <phil@clusterfs.com>
41 #define DEBUG_SUBSYSTEM S_FILTER
43 #include <linux/module.h>
44 #include <linux/pagemap.h> // XXX kill me soon
45 #include <linux/version.h>
47 #include <obd_class.h>
49 #include <lustre_fsfilt.h>
50 #include "filter_internal.h"
/* Scratch buffer used during object creation; presumably allocated/freed in
 * the obdfilter setup/cleanup paths (not visible in this excerpt) — TODO
 * confirm against filter.c. */
52 int *obdfilter_created_scratchpad;
54 /* Grab the dirty and seen grant announcements from the incoming obdo.
55 * We will later calculate the client's new grant and return it.
56 * Caller must hold osfs lock */
/* NOTE(review): this excerpt omits intermediate source lines (per the embedded
 * line numbers), e.g. the early-return body of the first if and the final
 * unlock/LBUG path; do not treat the visible text as the complete function. */
57 void filter_grant_incoming(struct obd_export *exp, struct obdo *oa)
59 struct filter_export_data *fed;
60 struct obd_device *obd = exp->exp_obd;
/* Grant accounting is protected by obd_osfs_lock; assert we hold it. */
63 LASSERT_SPIN_LOCKED(&obd->obd_osfs_lock);
/* If the client did not send both block and grant info, there is nothing
 * to account; strip the grant flag. */
65 if ((oa->o_valid & (OBD_MD_FLBLOCKS|OBD_MD_FLGRANT)) !=
66 (OBD_MD_FLBLOCKS|OBD_MD_FLGRANT)) {
67 oa->o_valid &= ~OBD_MD_FLGRANT;
72 fed = &exp->exp_filter_data;
74 /* Add some margin, since there is a small race if other RPCs arrive
75 * out-of-order and have already consumed some grant. We want to
76 * leave this here in case there is a large error in accounting. */
78 "%s: cli %s/%p reports grant: "LPU64" dropped: %u, local: %lu\n",
79 obd->obd_name, exp->exp_client_uuid.uuid, exp, oa->o_grant,
80 oa->o_dropped, fed->fed_grant);
82 /* Update our accounting now so that statfs takes it into account.
83 * Note that fed_dirty is only approximate and can become incorrect
84 * if RPCs arrive out-of-order. No important calculations depend
85 * on fed_dirty however, but we must check sanity to not assert. */
/* Clamp the client-reported dirty count: negative values are bogus and
 * anything far above its grant is capped (4x FILTER_GRANT_CHUNK slack). */
86 if ((long long)oa->o_dirty < 0)
88 else if (oa->o_dirty > fed->fed_grant + 4 * FILTER_GRANT_CHUNK)
89 oa->o_dirty = fed->fed_grant + 4 * FILTER_GRANT_CHUNK;
90 obd->u.filter.fo_tot_dirty += oa->o_dirty - fed->fed_dirty;
91 if (fed->fed_grant < oa->o_dropped) {
92 CDEBUG(D_CACHE,"%s: cli %s/%p reports %u dropped > grant %lu\n",
93 obd->obd_name, exp->exp_client_uuid.uuid, exp,
94 oa->o_dropped, fed->fed_grant);
97 if (obd->u.filter.fo_tot_granted < oa->o_dropped) {
98 CERROR("%s: cli %s/%p reports %u dropped > tot_grant "LPU64"\n",
99 obd->obd_name, exp->exp_client_uuid.uuid, exp,
100 oa->o_dropped, obd->u.filter.fo_tot_granted);
/* Retire the grant the client says it dropped (e.g. on eviction). */
103 obd->u.filter.fo_tot_granted -= oa->o_dropped;
104 fed->fed_grant -= oa->o_dropped;
105 fed->fed_dirty = oa->o_dirty;
/* Client volunteered to shrink its grant (OBD_FL_SHRINK_GRANT). */
107 if (oa->o_valid & OBD_MD_FLFLAGS && oa->o_flags & OBD_FL_SHRINK_GRANT) {
108 obd_size left_space = filter_grant_space_left(exp);
109 struct filter_obd *filter = &exp->exp_obd->u.filter;
111 /* Only if left_space < fo_tot_clients * 32M
112 * can the grant space be shrunk */
113 if (left_space < filter->fo_tot_granted_clients *
114 FILTER_GRANT_SHRINK_LIMIT) {
115 fed->fed_grant -= oa->o_grant;
116 filter->fo_tot_granted -= oa->o_grant;
117 CDEBUG(D_CACHE, "%s: cli %s/%p shrink "LPU64
118 "fed_grant %ld total "LPU64"\n",
119 obd->obd_name, exp->exp_client_uuid.uuid,
120 exp, oa->o_grant, fed->fed_grant,
121 filter->fo_tot_granted);
/* Sanity: per-export counters must never go negative; if they do, the
 * accounting is corrupt — log and (in omitted lines) bail out. */
126 if (fed->fed_dirty < 0 || fed->fed_grant < 0 || fed->fed_pending < 0) {
127 CERROR("%s: cli %s/%p dirty %ld pend %ld grant %ld\n",
128 obd->obd_name, exp->exp_client_uuid.uuid, exp,
129 fed->fed_dirty, fed->fed_pending, fed->fed_grant);
130 cfs_spin_unlock(&obd->obd_osfs_lock);
136 /* Figure out how much space is available between what we've granted
137 * and what remains in the filesystem. Compensate for ext3 indirect
138 * block overhead when computing how much free space is left ungranted.
140 * Caller must hold obd_osfs_lock. */
/* NOTE(review): excerpt omits lines (return paths, refresh-statfs retry);
 * the visible text is not the complete function. */
141 obd_size filter_grant_space_left(struct obd_export *exp)
143 struct obd_device *obd = exp->exp_obd;
144 int blockbits = obd->u.obt.obt_sb->s_blocksize_bits;
145 obd_size tot_granted = obd->u.filter.fo_tot_granted, avail, left = 0;
146 int rc, statfs_done = 0;
148 LASSERT_SPIN_LOCKED(&obd->obd_osfs_lock);
/* Refresh cached statfs data if it is older than OBD_STATFS_CACHE_SECONDS. */
150 if (cfs_time_before_64(obd->obd_osfs_age,
151 cfs_time_shift_64(-OBD_STATFS_CACHE_SECONDS))) {
153 rc = fsfilt_statfs(obd, obd->u.obt.obt_sb,
154 cfs_time_shift_64(OBD_STATFS_CACHE_SECONDS));
155 if (rc) /* N.B. statfs can't really fail */
160 avail = obd->obd_osfs.os_bavail;
/* Reserve ~1/8 of available blocks for (d)indirect block overhead. */
161 left = avail - (avail >> (blockbits - 3)); /* (d)indirect */
162 if (left > GRANT_FOR_LLOG(obd)) {
163 left = (left - GRANT_FOR_LLOG(obd)) << blockbits;
165 left = 0 /* << blockbits */;
/* If space looks tight and we used stale statfs data, the cache may be
 * lying — (omitted lines presumably force a refresh and retry). */
168 if (!statfs_done && left < 32 * FILTER_GRANT_CHUNK + tot_granted) {
169 CDEBUG(D_CACHE, "fs has no space left and statfs too old\n");
173 if (left >= tot_granted) {
/* left < tot_granted here: we over-granted relative to free space unless
 * the difference is covered by in-flight (pending) writes. */
176 if (left < tot_granted - obd->u.filter.fo_tot_pending) {
177 CERROR("%s: cli %s/%p grant "LPU64" > available "
178 LPU64" and pending "LPU64"\n", obd->obd_name,
179 exp->exp_client_uuid.uuid, exp, tot_granted,
180 left, obd->u.filter.fo_tot_pending);
185 CDEBUG(D_CACHE, "%s: cli %s/%p free: "LPU64" avail: "LPU64" grant "LPU64
186 " left: "LPU64" pending: "LPU64"\n", obd->obd_name,
187 exp->exp_client_uuid.uuid, exp,
188 obd->obd_osfs.os_bfree << blockbits, avail << blockbits,
189 tot_granted, left, obd->u.filter.fo_tot_pending);
194 /* Calculate how much grant space to allocate to this client, based on how
195 * much space is currently free and how much of that is already granted.
197 * if @conservative != 0, we limit the maximum grant to FILTER_GRANT_CHUNK;
198 * otherwise we'll satisfy the requested amount as best we can; this
199 * is usually due to client reconnect.
201 * Caller must hold obd_osfs_lock. */
/* NOTE(review): excerpt omits lines (declaration of `grant`, the return
 * statement, some CDEBUG prefixes); visible text is not the full function. */
202 long filter_grant(struct obd_export *exp, obd_size current_grant,
203 obd_size want, obd_size fs_space_left, int conservative)
205 struct obd_device *obd = exp->exp_obd;
206 struct filter_export_data *fed = &exp->exp_filter_data;
207 int blockbits = obd->u.obt.obt_sb->s_blocksize_bits;
210 LASSERT_SPIN_LOCKED(&obd->obd_osfs_lock);
212 /* Grant some fraction of the client's requested grant space so that
213 * they are not always waiting for write credits (not all of it to
214 * avoid overgranting in face of multiple RPCs in flight). This
215 * essentially will be able to control the OSC_MAX_RIF for a client.
217 * If we do have a large disparity between what the client thinks it
218 * has and what we think it has, don't grant very much and let the
219 * client consume its grant first. Either it just has lots of RPCs
220 * in flight, or it was evicted and its grants will soon be used up. */
221 if (want > 0x7fffffff) {
222 CERROR("%s: client %s/%p requesting > 2GB grant "LPU64"\n",
223 obd->obd_name, exp->exp_client_uuid.uuid, exp, want);
224 } else if (current_grant < want &&
225 current_grant < fed->fed_grant + FILTER_GRANT_CHUNK) {
/* Grant at most 1/8 of remaining free space, rounded down to a whole
 * filesystem block. */
226 grant = min(want + (1 << blockbits) - 1, fs_space_left / 8);
227 grant &= ~((1ULL << blockbits) - 1);
230 if (grant > FILTER_GRANT_CHUNK && conservative)
231 grant = FILTER_GRANT_CHUNK;
233 obd->u.filter.fo_tot_granted += grant;
234 fed->fed_grant += grant;
/* Overflow sanity check on the (signed) per-export grant counter. */
235 if (fed->fed_grant < 0) {
236 CERROR("%s: cli %s/%p grant %ld want "LPU64
238 obd->obd_name, exp->exp_client_uuid.uuid,
239 exp, fed->fed_grant, want,current_grant);
240 cfs_spin_unlock(&obd->obd_osfs_lock);
247 "%s: cli %s/%p wants: "LPU64" current grant "LPU64
248 " granting: "LPU64"\n", obd->obd_name, exp->exp_client_uuid.uuid,
249 exp, want, current_grant, grant);
251 "%s: cli %s/%p tot cached:"LPU64" granted:"LPU64
252 " num_exports: %d\n", obd->obd_name, exp->exp_client_uuid.uuid,
253 exp, obd->u.filter.fo_tot_dirty,
254 obd->u.filter.fo_tot_granted, obd->obd_num_exports);
260 * the routine is used to request pages from pagecache
262 * use GFP_NOFS for requests from a local client not allowing to enter FS
263 * as we might end up waiting on a page he sent in the request we're serving.
264 * use __GFP_HIGHMEM so that the pages can use all of the available memory
266 * use more aggressive GFP_HIGHUSER flags from non-local clients to be able to
267 * generate more memory pressure.
269 * See Bug 19529 and Bug 19917 for details.
/* Returns the page locked (find_or_create_page semantics), or NULL on
 * allocation failure, which is counted in LPROC_FILTER_NO_PAGE. */
271 static struct page *filter_get_page(struct obd_device *obd,
278 page = find_or_create_page(inode->i_mapping, offset >> CFS_PAGE_SHIFT,
279 (localreq ? (GFP_NOFS | __GFP_HIGHMEM)
281 if (unlikely(page == NULL))
282 lprocfs_counter_add(obd->obd_stats, LPROC_FILTER_NO_PAGE, 1);
288 * the routine initializes array of local_niobuf from remote_niobuf
/* Splits each remote niobuf into page-sized local niobufs. On exit *nrpages
 * holds the number of local entries filled; `res` must be large enough
 * (its capacity arrives via *nrpages as `max` — set in an omitted line). */
290 static int filter_map_remote_to_local(int objcount, struct obd_ioobj *obj,
291 struct niobuf_remote *nb,
292 int *nrpages, struct niobuf_local *res)
294 struct niobuf_remote *rnb;
295 struct niobuf_local *lnb;
299 /* we don't support multiobject RPC yet
300 * ost_brw_read() and ost_brw_write() check this */
301 LASSERT(objcount == 1);
305 for (i = 0, rnb = nb, lnb = res; i < obj->ioo_bufcnt; i++, rnb++) {
306 obd_off offset = rnb->offset;
307 unsigned int len = rnb->len;
/* poff/plen: offset within the page and the number of bytes of this
 * remote buffer that fall in the current page. */
310 int poff = offset & (CFS_PAGE_SIZE - 1);
311 int plen = CFS_PAGE_SIZE - poff;
313 if (*nrpages >= max) {
314 CERROR("small array of local bufs: %d\n", max);
320 lnb->lnb_file_offset = offset;
321 lnb->lnb_page_offset = poff;
323 lnb->flags = rnb->flags;
326 lnb->lnb_grant_used = 0;
328 LASSERTF(plen <= len, "plen %u, len %u\n", plen, len);
339 * the invalidate above doesn't work during read because lnet pins pages.
340 * The truncate is used here instead to drop pages from cache
/* Drops the pagecache pages covering each remote niobuf's byte range. */
342 void filter_release_cache(struct obd_device *obd, struct obd_ioobj *obj,
343 struct niobuf_remote *rnb, struct inode *inode)
347 LASSERT(inode != NULL);
348 for (i = 0; i < obj->ioo_bufcnt; i++, rnb++) {
349 /* remove pages that fit within the range */
350 truncate_inode_pages_range(inode->i_mapping,
351 rnb->offset & CFS_PAGE_MASK,
352 (rnb->offset + rnb->len - 1) |
/* Prepare a bulk read: look up the object, map remote niobufs to local
 * page-sized niobufs, grab (and lock) pagecache pages, and issue direct I/O
 * for the pages that are not already up to date. Pages are unlocked before
 * return so concurrent OST_READs can proceed. Also processes any incoming
 * grant info piggybacked on the obdo.
 * NOTE(review): excerpt omits lines (locals such as iobuf/isize/timediff,
 * GOTO targets, the final RETURN); visible text is not the full function. */
357 static int filter_preprw_read(int cmd, struct obd_export *exp, struct obdo *oa,
358 int objcount, struct obd_ioobj *obj,
359 struct niobuf_remote *nb,
360 int *npages, struct niobuf_local *res,
361 struct obd_trans_info *oti,
362 struct lustre_capa *capa)
364 struct obd_device *obd = exp->exp_obd;
365 struct timeval start, end;
366 struct lvfs_run_ctxt saved;
367 struct niobuf_local *lnb;
368 struct dentry *dentry = NULL;
369 struct inode *inode = NULL;
371 int rc = 0, i, tot_bytes = 0;
372 unsigned long now = jiffies;
377 /* We are currently not supporting multi-obj BRW_READ RPCS at all.
378 * When we do this function's dentry cleanup will need to be fixed.
379 * These values are verified in ost_brw_write() from the wire. */
380 LASSERTF(objcount == 1, "%d\n", objcount);
381 LASSERTF(obj->ioo_bufcnt > 0, "%d\n", obj->ioo_bufcnt);
383 rc = filter_auth_capa(exp, NULL, oa->o_seq, capa,
/* Consume any grant announcement the client piggybacked on the read. */
388 if (oa && oa->o_valid & OBD_MD_FLGRANT) {
389 cfs_spin_lock(&obd->obd_osfs_lock);
390 filter_grant_incoming(exp, oa);
392 if (!(oa->o_valid & OBD_MD_FLFLAGS) ||
393 !(oa->o_flags & OBD_FL_SHRINK_GRANT))
395 cfs_spin_unlock(&obd->obd_osfs_lock);
398 iobuf = filter_iobuf_get(&obd->u.filter, oti);
400 RETURN(PTR_ERR(iobuf));
402 push_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
403 dentry = filter_oa2dentry(obd, &oa->o_oi);
404 if (IS_ERR(dentry)) {
405 rc = PTR_ERR(dentry);
410 inode = dentry->d_inode;
411 isize = i_size_read(inode);
/* Read updates atime only. */
413 obdo_to_inode(inode, oa, OBD_MD_FLATIME);
415 rc = filter_map_remote_to_local(objcount, obj, nb, npages, res);
419 fsfilt_check_slow(obd, now, "preprw_read setup");
421 /* find pages for all segments, fill array with them */
422 cfs_gettimeofday(&start);
423 for (i = 0, lnb = res; i < *npages; i++, lnb++) {
425 lnb->dentry = dentry;
427 if (isize <= lnb->lnb_file_offset)
428 /* If there's no more data, abort early. lnb->rc == 0,
429 * so it's easy to detect later. */
432 lnb->page = filter_get_page(obd, inode, lnb->lnb_file_offset,
434 if (lnb->page == NULL)
435 GOTO(cleanup, rc = -ENOMEM);
437 lprocfs_counter_add(obd->obd_stats, LPROC_FILTER_CACHE_ACCESS, 1);
/* Short read at EOF: clamp this niobuf's byte count to file size. */
439 if (isize < lnb->lnb_file_offset + lnb->len - 1)
440 lnb->rc = isize - lnb->lnb_file_offset;
444 tot_bytes += lnb->rc;
/* Up-to-date pages are cache hits; others are queued for disk I/O. */
446 if (PageUptodate(lnb->page)) {
447 lprocfs_counter_add(obd->obd_stats,
448 LPROC_FILTER_CACHE_HIT, 1);
452 lprocfs_counter_add(obd->obd_stats, LPROC_FILTER_CACHE_MISS, 1);
453 filter_iobuf_add_page(obd, iobuf, inode, lnb->page);
455 cfs_gettimeofday(&end);
456 timediff = cfs_timeval_sub(&end, &start, NULL);
457 lprocfs_counter_add(obd->obd_stats, LPROC_FILTER_GET_PAGE, timediff);
459 if (OBD_FAIL_CHECK(OBD_FAIL_OST_NOMEM))
460 GOTO(cleanup, rc = -ENOMEM);
462 fsfilt_check_slow(obd, now, "start_page_read");
464 rc = filter_direct_io(OBD_BRW_READ, dentry, iobuf,
465 exp, NULL, NULL, NULL);
469 lprocfs_counter_add(obd->obd_stats, LPROC_FILTER_READ_BYTES, tot_bytes);
471 if (exp->exp_nid_stats && exp->exp_nid_stats->nid_stats)
472 lprocfs_counter_add(exp->exp_nid_stats->nid_stats,
473 LPROC_FILTER_READ_BYTES, tot_bytes);
474 filter_counter_incr(exp, LPROC_FILTER_STATS_READ,
475 oti ? oti->oti_jobid : NULL, tot_bytes);
479 /* unlock pages to allow access from concurrent OST_READ */
480 for (i = 0, lnb = res; i < *npages; i++, lnb++) {
482 LASSERT(PageLocked(lnb->page));
483 unlock_page(lnb->page);
/* NOTE(review): this page_cache_release() appears to be on an error/
 * cleanup path (omitted lines around it) — confirm against full source. */
486 page_cache_release(lnb->page);
497 filter_iobuf_put(&obd->u.filter, iobuf, oti);
499 pop_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
501 CERROR("io error %d\n", rc);
506 /* When clients have dirtied as much space as they've been granted they
507 * fall through to sync writes. These sync writes haven't been expressed
508 * in grants and need to error with ENOSPC when there isn't room in the
509 * filesystem for them after grants are taken into account. However,
510 * writeback of the dirty data that was already granted space can write
513 * Caller must hold obd_osfs_lock. */
/* Returns 0 if every niobuf is covered (by grant or free space), -ENOSPC
 * otherwise; per-niobuf coverage is recorded in lnb[n].flags/lnb_grant_used.
 * NOTE(review): excerpt omits lines (locals `bytes`/`tmp`, early RETURN for
 * resends, final RETURN); visible text is not the full function. */
514 static int filter_grant_check(struct obd_export *exp, struct obdo *oa,
515 int objcount, struct fsfilt_objinfo *fso,
516 int niocount, struct niobuf_local *lnb,
517 obd_size *left, struct inode *inode)
519 struct filter_export_data *fed = &exp->exp_filter_data;
520 int blocksize = exp->exp_obd->u.obt.obt_sb->s_blocksize;
521 unsigned long used = 0, ungranted = 0, using;
522 int i, rc = -ENOSPC, obj, n = 0;
/* A recoverable resend was already accounted the first time around. */
525 if ((oa->o_valid & OBD_MD_FLFLAGS) &&
526 (oa->o_flags & OBD_FL_RECOV_RESEND)) {
528 CDEBUG(D_CACHE, "Recoverable resend arrived, skipping "
532 LASSERT_SPIN_LOCKED(&exp->exp_obd->obd_osfs_lock);
534 for (obj = 0; obj < objcount; obj++) {
535 for (i = 0; i < fso[obj].fso_bufcnt; i++, n++) {
538 /* should match the code in osc_exit_cache */
/* Round the I/O out to full filesystem blocks, the unit grants
 * are charged in. */
540 bytes += lnb[n].lnb_file_offset & (blocksize - 1);
541 tmp = (lnb[n].lnb_file_offset + lnb[n].len) &
544 bytes += blocksize - tmp;
547 if ((lnb[n].flags & OBD_BRW_FROM_GRANT) &&
548 (oa->o_valid & OBD_MD_FLGRANT)) {
550 /* this is a recoverable resent */
551 lnb[n].flags |= OBD_BRW_GRANTED;
554 } else if (fed->fed_grant < used + bytes) {
556 "%s: cli %s/%p claims %ld+%d "
557 "GRANT, real grant %lu idx %d\n",
558 exp->exp_obd->obd_name,
559 exp->exp_client_uuid.uuid, exp,
560 used, bytes, fed->fed_grant, n);
563 lnb[n].flags |= OBD_BRW_GRANTED;
564 lnb[n].lnb_grant_used = bytes;
565 CDEBUG(0, "idx %d used=%lu\n", n, used);
/* Ungranted (sync) write: cover it from free space if any remains. */
570 if (*left > ungranted + bytes) {
571 /* if enough space, pretend it was granted */
573 lnb[n].flags |= OBD_BRW_GRANTED;
574 lnb[n].lnb_grant_used = bytes;
575 CDEBUG(0, "idx %d ungranted=%lu\n",n,ungranted);
580 /* We can't check for already-mapped blocks here, as
581 * it requires dropping the osfs lock to do the bmap.
582 * Instead, we return ENOSPC and in that case we need
583 * to go through and verify if all of the blocks not
584 * marked BRW_GRANTED are already mapped and we can
585 * ignore this error. */
587 lnb[n].flags &= ~OBD_BRW_GRANTED;
588 CDEBUG(D_CACHE,"%s: cli %s/%p idx %d no space for %d\n",
589 exp->exp_obd->obd_name,
590 exp->exp_client_uuid.uuid, exp, n, bytes);
594 /* Now subtract what the client has used already. We don't subtract
595 * this from the tot_granted yet, so that other clients can't grab
596 * that space before we have actually allocated our blocks. That
597 * happens in filter_grant_commit() after the writes are done. */
599 fed->fed_grant -= used;
600 fed->fed_pending += used + ungranted;
601 exp->exp_obd->u.filter.fo_tot_granted += ungranted;
602 exp->exp_obd->u.filter.fo_tot_pending += used + ungranted;
605 "%s: cli %s/%p used: %lu ungranted: %lu grant: %lu dirty: %lu\n",
606 exp->exp_obd->obd_name, exp->exp_client_uuid.uuid, exp, used,
607 ungranted, fed->fed_grant, fed->fed_dirty;
609 /* Rough calc in case we don't refresh cached statfs data */
610 using = (used + ungranted + 1 ) >>
611 exp->exp_obd->u.obt.obt_sb->s_blocksize_bits;
612 if (exp->exp_obd->obd_osfs.os_bavail > using)
613 exp->exp_obd->obd_osfs.os_bavail -= using;
615 exp->exp_obd->obd_osfs.os_bavail = 0;
/* fed_dirty is approximate (out-of-order RPCs); clamp rather than assert. */
617 if (fed->fed_dirty < used) {
618 CERROR("%s: cli %s/%p claims used %lu > fed_dirty %lu\n",
619 exp->exp_obd->obd_name, exp->exp_client_uuid.uuid, exp,
620 used, fed->fed_dirty);
621 used = fed->fed_dirty;
623 exp->exp_obd->u.filter.fo_tot_dirty -= used;
624 fed->fed_dirty -= used;
626 if (fed->fed_dirty < 0 || fed->fed_grant < 0 || fed->fed_pending < 0) {
627 CERROR("%s: cli %s/%p dirty %ld pend %ld grant %ld\n",
628 exp->exp_obd->obd_name, exp->exp_client_uuid.uuid, exp,
629 fed->fed_dirty, fed->fed_pending, fed->fed_grant);
630 cfs_spin_unlock(&exp->exp_obd->obd_osfs_lock);
636 /* If we ever start to support multi-object BRW RPCs, we will need to get locks
637 * on multiple inodes. That isn't all, because there still exists the
638 * possibility of a truncate starting a new transaction while holding the ext3
639 * rwsem = write while some writes (which have started their transactions here)
640 * blocking on the ext3 rwsem = read => lock inversion.
642 * The handling gets very ugly when dealing with locked pages. It may be easier
643 * to just get rid of the locked page code (which has problems of its own) and
644 * either discover we do not need it anymore (i.e. it was a symptom of another
645 * bug) or ensure we get the page locks in an appropriate order. */
/* Prepare a bulk write: resolve (or recreate during recovery) the object,
 * process incoming grant info, perform the grant check, grab and lock the
 * pagecache pages, zero the tails of partial extending pages, and pre-read
 * existing partial pages. Pages stay LOCKED until commitrw. On -ENOSPC the
 * pending journal is committed once and the whole sequence retried.
 * NOTE(review): excerpt omits many lines (locals iobuf/left/retries/handle/
 * maxidx/off, a `retry:` label presumably near line 752, GOTO targets, the
 * final RETURN); the visible text is not the complete function. */
646 static int filter_preprw_write(int cmd, struct obd_export *exp, struct obdo *oa,
647 int objcount, struct obd_ioobj *obj,
648 struct niobuf_remote *nb, int *npages,
649 struct niobuf_local *res,
650 struct obd_trans_info *oti,
651 struct lustre_capa *capa)
653 struct obd_device *obd = exp->exp_obd;
654 struct timeval start, end;
655 struct lvfs_run_ctxt saved;
656 struct niobuf_local *lnb = res;
657 struct fsfilt_objinfo fso;
658 struct filter_mod_data *fmd;
659 struct dentry *dentry = NULL;
662 unsigned long now = jiffies, timediff;
663 int rc = 0, i, tot_bytes = 0, cleanup_phase = 0, localreq = 0;
666 LASSERT(objcount == 1);
667 LASSERT(obj->ioo_bufcnt > 0);
669 rc = filter_auth_capa(exp, NULL, oa->o_seq, capa,
/* A request from ourselves (peer nid == self) is "local" — this selects
 * the gentler GFP_NOFS allocation in filter_get_page(). */
674 if (exp->exp_connection &&
675 exp->exp_connection->c_peer.nid == exp->exp_connection->c_self)
678 push_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
679 iobuf = filter_iobuf_get(&obd->u.filter, oti);
681 GOTO(cleanup, rc = PTR_ERR(iobuf));
684 dentry = filter_fid2dentry(obd, NULL, obj->ioo_seq,
687 GOTO(cleanup, rc = PTR_ERR(dentry));
/* Object missing: during recovery a write may arrive before the replayed
 * create, so recreate the object with the requested id. */
690 if (dentry->d_inode == NULL) {
691 if (exp->exp_obd->obd_recovering) {
692 struct obdo *noa = oa;
697 GOTO(recreate_out, rc = -ENOMEM);
698 noa->o_id = obj->ioo_id;
699 noa->o_valid = OBD_MD_FLID;
702 if (filter_create(NULL, exp, noa, NULL, oti) == 0) {
704 dentry = filter_fid2dentry(exp->exp_obd, NULL,
712 if (IS_ERR(dentry) || dentry->d_inode == NULL) {
713 CERROR("%s: BRW to missing obj "LPU64"/"LPU64":rc %d\n",
714 exp->exp_obd->obd_name,
715 obj->ioo_id, obj->ioo_seq,
716 IS_ERR(dentry) ? (int)PTR_ERR(dentry) : -ENOENT);
719 GOTO(cleanup, rc = -ENOENT);
/* setuid/setgid file: re-check capability before letting a chown-ish
 * obdo attribute change through. */
723 if (oa->o_valid & (OBD_MD_FLUID | OBD_MD_FLGID) &&
724 dentry->d_inode->i_mode & (S_ISUID | S_ISGID)) {
725 rc = filter_capa_fixoa(exp, oa, oa->o_seq, capa);
730 rc = filter_map_remote_to_local(objcount, obj, nb, npages, res);
734 fsfilt_check_slow(obd, now, "preprw_write setup");
736 /* Filter truncate first locks i_mutex then partially truncated
737 * page, filter write code first locks pages then take
738 * i_mutex. To avoid a deadlock in case of concurrent
739 * punch/write requests from one client, filter writes and
740 * filter truncates are serialized by i_alloc_sem, allowing
741 * multiple writes or single truncate. */
742 down_read(&dentry->d_inode->i_alloc_sem);
743 fsfilt_check_slow(obd, now, "i_alloc_sem");
745 /* Don't update inode timestamps if this write is older than a
746 * setattr which modifies the timestamps. b=10150 */
747 /* XXX when we start having persistent reservations this needs to
748 * be changed to filter_fmd_get() to create the fmd if it doesn't
749 * already exist so we can store the reservation handle there. */
750 fmd = filter_fmd_find(exp, obj->ioo_id, obj->ioo_seq);
754 cfs_spin_lock(&obd->obd_osfs_lock);
756 filter_grant_incoming(exp, oa);
757 if (fmd && fmd->fmd_mactime_xid > oti->oti_xid)
758 oa->o_valid &= ~(OBD_MD_FLMTIME | OBD_MD_FLCTIME |
761 obdo_to_inode(dentry->d_inode, oa, OBD_MD_FLATIME |
762 OBD_MD_FLMTIME | OBD_MD_FLCTIME);
765 left = filter_grant_space_left(exp);
767 fso.fso_dentry = dentry;
768 fso.fso_bufcnt = *npages;
770 rc = filter_grant_check(exp, oa, objcount, &fso, *npages, res,
771 &left, dentry->d_inode);
773 /* do not zero out oa->o_valid as it is used in filter_commitrw_write()
774 * for setting UID/GID and fid EA in first write time. */
775 /* If OBD_FL_SHRINK_GRANT is set, the client just returned us some grant
776 * so no sense in allocating it some more. We either return the grant
777 * back to the client if we have plenty of space or we don't return
778 * anything if we are short. This was decided in filter_grant_incoming. */
779 if ((retries == 0) && (oa->o_valid & OBD_MD_FLGRANT) &&
780 (!(oa->o_valid & OBD_MD_FLFLAGS) ||
781 !(oa->o_flags & OBD_FL_SHRINK_GRANT)))
782 oa->o_grant = filter_grant(exp, oa->o_grant, oa->o_undirty,
785 cfs_spin_unlock(&obd->obd_osfs_lock);
/* One-shot ENOSPC retry: commit pending journal transactions (which may
 * free reserved blocks) and redo the grant check. */
787 if (rc == -ENOSPC && retries == 0) {
790 CDEBUG(D_INODE, "retry after commit pending journals");
793 handle = fsfilt_start(obd, dentry->d_inode,
794 FSFILT_OP_SETATTR, NULL);
795 if (handle != NULL &&
796 fsfilt_commit(obd, dentry->d_inode, handle, 1) == 0)
800 filter_fmd_put(exp, fmd);
802 OBD_FAIL_TIMEOUT(OBD_FAIL_OST_BRW_PAUSE_BULK2, (obd_timeout + 1) / 4);
808 cfs_gettimeofday(&start);
809 for (i = 0, lnb = res; i < *npages; i++, lnb++) {
811 /* We still set up for ungranted pages so that granted pages
812 * can be written to disk as they were promised, and portals
813 * needs to keep the pages all aligned properly. */
814 lnb->dentry = dentry;
816 lnb->page = filter_get_page(obd, dentry->d_inode,
817 lnb->lnb_file_offset, localreq);
818 if (lnb->page == NULL)
819 GOTO(cleanup, rc = -ENOMEM);
821 /* DLM locking protects us from write and truncate competing
822 * for same region, but truncate can leave dirty page in the
823 * cache. it's possible the writeout on such a page is in
824 * progress when we access it. it's also possible that during
825 * this writeout we put new (partial) data, but then won't
826 * be able to proceed in filter_commitrw_write(). thus let's
827 * just wait for writeout completion, should be rare enough.
829 wait_on_page_writeback(lnb->page);
830 BUG_ON(PageWriteback(lnb->page));
832 /* If the filter writes a partial page, then has the file
833 * extended, the client will read in the whole page. the
834 * filter has to be careful to zero the rest of the partial
835 * page on disk. we do it by hand for partial extending
836 * writes, send_bio() is responsible for zeroing pages when
837 * asked to read unmapped blocks -- brw_kiovec() does this. */
838 if (lnb->len != CFS_PAGE_SIZE) {
841 maxidx = ((i_size_read(dentry->d_inode) +
842 CFS_PAGE_SIZE - 1) >> CFS_PAGE_SHIFT) - 1;
/* Partial page inside current EOF: pre-read it from disk so the
 * unwritten part keeps its existing data. */
843 if (maxidx >= lnb->page->index) {
844 LL_CDEBUG_PAGE(D_PAGE, lnb->page, "write %u @ "
845 LPU64" flg %x before EOF %llu\n",
846 lnb->len, lnb->lnb_file_offset,
848 i_size_read(dentry->d_inode));
849 filter_iobuf_add_page(obd, iobuf,
/* Partial page beyond EOF: zero the tail by hand. */
854 char *p = kmap(lnb->page);
856 off = lnb->lnb_file_offset & ~CFS_PAGE_MASK;
859 off = (lnb->lnb_file_offset + lnb->len) &
862 memset(p + off, 0, CFS_PAGE_SIZE - off);
867 tot_bytes += lnb->len;
869 cfs_gettimeofday(&end);
870 timediff = cfs_timeval_sub(&end, &start, NULL);
871 lprocfs_counter_add(obd->obd_stats, LPROC_FILTER_GET_PAGE, timediff);
873 if (OBD_FAIL_CHECK(OBD_FAIL_OST_NOMEM))
874 GOTO(cleanup, rc = -ENOMEM);
876 /* don't unlock pages to prevent any access */
877 rc = filter_direct_io(OBD_BRW_READ, dentry, iobuf, exp,
880 fsfilt_check_slow(obd, now, "start_page_write");
882 lprocfs_counter_add(obd->obd_stats, LPROC_FILTER_WRITE_BYTES,
885 if (exp->exp_nid_stats && exp->exp_nid_stats->nid_stats)
886 lprocfs_counter_add(exp->exp_nid_stats->nid_stats,
887 LPROC_FILTER_WRITE_BYTES, tot_bytes);
888 filter_counter_incr(exp, LPROC_FILTER_STATS_WRITE,
889 oti ? oti->oti_jobid : NULL, tot_bytes);
/* Phased error cleanup: release pages, drop i_alloc_sem, put the iobuf
 * and pop the context, according to how far we got. */
892 switch(cleanup_phase) {
895 for (i = 0, lnb = res; i < *npages; i++, lnb++) {
896 if (lnb->page != NULL) {
897 unlock_page(lnb->page);
898 page_cache_release(lnb->page);
905 up_read(&dentry->d_inode->i_alloc_sem);
907 filter_iobuf_put(&obd->u.filter, iobuf, oti);
909 pop_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
914 filter_iobuf_put(&obd->u.filter, iobuf, oti);
/* Even on failure, the client's incoming grant info must be consumed. */
916 cfs_spin_lock(&obd->obd_osfs_lock);
918 filter_grant_incoming(exp, oa);
919 cfs_spin_unlock(&obd->obd_osfs_lock);
920 pop_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
/* Top-level preprw entry point: dispatches to the write or read preparation
 * path based on cmd. (The fall-through for any other cmd is in omitted
 * lines — presumably an LBUG/error return.) */
927 int filter_preprw(const struct lu_env *env, int cmd, struct obd_export *exp,
928 struct obdo *oa, int objcount, struct obd_ioobj *obj,
929 struct niobuf_remote *nb, int *npages,
930 struct niobuf_local *res, struct obd_trans_info *oti,
931 struct lustre_capa *capa)
933 if (cmd == OBD_BRW_WRITE)
934 return filter_preprw_write(cmd, exp, oa, objcount, obj,
935 nb, npages, res, oti, capa);
936 if (cmd == OBD_BRW_READ)
937 return filter_preprw_read(cmd, exp, oa, objcount, obj,
938 nb, npages, res, oti, capa);
/* Finish a bulk read: refresh the DLM resource LVB (so glimpses see the
 * atime update done in preprw), release the page references taken in
 * filter_preprw_read, and optionally drop the pages from cache for files
 * that bypass the read cache. Returns the rc it was handed (return is in
 * omitted lines). */
943 static int filter_commitrw_read(struct obd_export *exp, struct obdo *oa,
944 int objcount, struct obd_ioobj *obj,
945 struct niobuf_remote *rnb,
946 int npages, struct niobuf_local *res,
947 struct obd_trans_info *oti, int rc)
949 struct filter_obd *fo = &exp->exp_obd->u.filter;
950 struct inode *inode = NULL;
951 struct ldlm_res_id res_id;
952 struct ldlm_resource *resource = NULL;
953 struct ldlm_namespace *ns = exp->exp_obd->obd_namespace;
954 struct niobuf_local *lnb;
958 osc_build_res_name(obj->ioo_id, obj->ioo_seq, &res_id);
959 /* If oa != NULL then filter_preprw_read updated the inode atime
960 * and we should update the lvb so that other glimpses will also
961 * get the updated value. bug 5972 */
962 if (oa && ns && ns->ns_lvbo && ns->ns_lvbo->lvbo_update) {
963 resource = ldlm_resource_get(ns, NULL, &res_id, LDLM_EXTENT, 0);
965 if (resource != NULL) {
966 LDLM_RESOURCE_ADDREF(resource);
967 ns->ns_lvbo->lvbo_update(resource, NULL, 1);
968 LDLM_RESOURCE_DELREF(resource);
969 ldlm_resource_putref(resource);
973 if (res->dentry != NULL)
974 inode = res->dentry->d_inode;
/* Drop the per-page references taken during preprw. */
976 for (i = 0, lnb = res; i < npages; i++, lnb++) {
977 if (lnb->page != NULL) {
978 page_cache_release(lnb->page);
/* Uncached-read policy: evict pages when read caching is off or the
 * file exceeds the configured readcache size limit. */
982 if (inode && (fo->fo_read_cache == 0 ||
983 i_size_read(inode) > fo->fo_readcache_max_filesize))
984 filter_release_cache(exp->exp_obd, obj, rnb, inode);
/* (dput of res->dentry is on the omitted line following this check.) */
986 if (res->dentry != NULL)
/* Retire the "pending" grant reserved in filter_grant_check once the writes
 * have actually hit disk: sum lnb_grant_used over all niobufs and subtract
 * it from the export's fed_pending and the filter-wide tot_granted and
 * tot_pending counters. Takes obd_osfs_lock itself (unlike the other grant
 * helpers, which require the caller to hold it). */
991 void filter_grant_commit(struct obd_export *exp, int niocount,
992 struct niobuf_local *res)
994 struct filter_obd *filter = &exp->exp_obd->u.filter;
995 struct niobuf_local *lnb = res;
996 unsigned long pending = 0;
999 cfs_spin_lock(&exp->exp_obd->obd_osfs_lock);
1000 for (i = 0, lnb = res; i < niocount; i++, lnb++)
1001 pending += lnb->lnb_grant_used;
1003 LASSERTF(exp->exp_filter_data.fed_pending >= pending,
1004 "%s: cli %s/%p fed_pending: %lu grant_used: %lu\n",
1005 exp->exp_obd->obd_name, exp->exp_client_uuid.uuid, exp,
1006 exp->exp_filter_data.fed_pending, pending);
1007 exp->exp_filter_data.fed_pending -= pending;
1008 LASSERTF(filter->fo_tot_granted >= pending,
1009 "%s: cli %s/%p tot_granted: "LPU64" grant_used: %lu\n",
1010 exp->exp_obd->obd_name, exp->exp_client_uuid.uuid, exp,
1011 exp->exp_obd->u.filter.fo_tot_granted, pending);
1012 filter->fo_tot_granted -= pending;
1013 LASSERTF(filter->fo_tot_pending >= pending,
1014 "%s: cli %s/%p tot_pending: "LPU64" grant_used: %lu\n",
1015 exp->exp_obd->obd_name, exp->exp_client_uuid.uuid, exp,
1016 filter->fo_tot_pending, pending);
1017 filter->fo_tot_pending -= pending;
1019 cfs_spin_unlock(&exp->exp_obd->obd_osfs_lock);
/* Top-level commitrw entry point: dispatches to the write or read commit
 * path based on cmd. (The fall-through for any other cmd is in omitted
 * lines — presumably an LBUG/error return.) */
1022 int filter_commitrw(const struct lu_env *env, int cmd, struct obd_export *exp,
1023 struct obdo *oa, int objcount, struct obd_ioobj *obj,
1024 struct niobuf_remote *nb, int npages,
1025 struct niobuf_local *res, struct obd_trans_info *oti,
1028 if (cmd == OBD_BRW_WRITE)
1029 return filter_commitrw_write(exp, oa, objcount, obj,
1030 nb, npages, res, oti, rc);
1031 if (cmd == OBD_BRW_READ)
1032 return filter_commitrw_read(exp, oa, objcount, obj,
1033 nb, npages, res, oti, rc);