* Author: Andreas Dilger <adilger@clusterfs.com>
* Author: Phil Schwan <phil@clusterfs.com>
*
- * This file is part of Lustre, http://www.lustre.org.
+ * This file is part of the Lustre file system, http://www.lustre.org
+ * Lustre is a trademark of Cluster File Systems, Inc.
*
- * Lustre is free software; you can redistribute it and/or
- * modify it under the terms of version 2 of the GNU General Public
- * License as published by the Free Software Foundation.
+ * You may have signed or agreed to another license before downloading
+ * this software. If so, you are bound by the terms and conditions
+ * of that agreement, and the following does not apply to you. See the
+ * LICENSE file included with this distribution for more information.
*
- * Lustre is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
+ * If you did not agree to a different license, then this copy of Lustre
+ * is open source software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
*
- * You should have received a copy of the GNU General Public License
- * along with Lustre; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ * In either case, Lustre is distributed in the hope that it will be
+ * useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * license text for more details.
*/
#define DEBUG_SUBSYSTEM S_FILTER
+#ifndef AUTOCONF_INCLUDED
#include <linux/config.h>
+#endif
#include <linux/module.h>
#include <linux/pagemap.h> // XXX kill me soon
#include <linux/version.h>
-#include <asm/div64.h>
-#include <linux/obd_class.h>
-#include <linux/lustre_fsfilt.h>
+#include <obd_class.h>
+#include <lustre_fsfilt.h>
#include "filter_internal.h"
-static int filter_start_page_read(struct inode *inode, struct niobuf_local *lnb)
+int *obdfilter_created_scratchpad;
+
+static int filter_alloc_dio_page(struct obd_device *obd, struct inode *inode,
+ struct niobuf_local *lnb)
{
- struct address_space *mapping = inode->i_mapping;
struct page *page;
- unsigned long index = lnb->offset >> PAGE_SHIFT;
- int rc;
-
- page = grab_cache_page(mapping, index); /* locked page */
- if (page == NULL)
- return lnb->rc = -ENOMEM;
-
- LASSERT(page->mapping == mapping);
-
- lnb->page = page;
-
- if (inode->i_size < lnb->offset + lnb->len - 1)
- lnb->rc = inode->i_size - lnb->offset;
- else
- lnb->rc = lnb->len;
- if (PageUptodate(page)) {
- unlock_page(page);
- return 0;
- }
+ LASSERT(lnb->page != NULL);
- rc = mapping->a_ops->readpage(NULL, page);
- if (rc < 0) {
- CERROR("page index %lu, rc = %d\n", index, rc);
- lnb->page = NULL;
- page_cache_release(page);
- return lnb->rc = rc;
+ page = lnb->page;
+#if 0
+ POISON_PAGE(page, 0xf1);
+ if (lnb->len != CFS_PAGE_SIZE) {
+ memset(kmap(page) + lnb->len, 0, CFS_PAGE_SIZE - lnb->len);
+ kunmap(page);
}
+#endif
+ page->index = lnb->offset >> CFS_PAGE_SHIFT;
- return 0;
+ RETURN(0);
}
-static int filter_finish_page_read(struct niobuf_local *lnb)
+static void filter_free_dio_pages(int objcount, struct obd_ioobj *obj,
+ int niocount, struct niobuf_local *res)
{
- if (lnb->page == NULL)
- return 0;
-
- if (PageUptodate(lnb->page))
- return 0;
+ int i, j;
- wait_on_page(lnb->page);
- if (!PageUptodate(lnb->page)) {
- CERROR("page index %lu/offset "LPX64" not uptodate\n",
- lnb->page->index, lnb->offset);
- GOTO(err_page, lnb->rc = -EIO);
- }
- if (PageError(lnb->page)) {
- CERROR("page index %lu/offset "LPX64" has error\n",
- lnb->page->index, lnb->offset);
- GOTO(err_page, lnb->rc = -EIO);
+ for (i = 0; i < objcount; i++, obj++) {
+ for (j = 0 ; j < obj->ioo_bufcnt ; j++, res++)
+ res->page = NULL;
}
-
- return 0;
-
-err_page:
- page_cache_release(lnb->page);
- lnb->page = NULL;
- return lnb->rc;
}
-/* See if there are unallocated parts in given file region */
-static int filter_inode_has_holes(struct inode *inode, obd_size start,
- int len)
-{
- int j;
-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0))
- sector_t (*fs_bmap)(struct address_space *,
- sector_t);
-#else
- int (*fs_bmap)(struct address_space *, long);
-#endif
- fs_bmap = inode->i_mapping->a_ops->bmap;
- if (fs_bmap) {
- for (j = 0; j <= len ; j++) {
- if (!fs_bmap(inode->i_mapping, start+j)) {
- return 1;
- }
- }
- return 0;
- } else {
- /* Return -1 in case that caller cares about bmap availability.
- */
- return -1;
- }
-}
-
/* Grab the dirty and seen grant announcements from the incoming obdo.
- * We will later calculate the clients new grant and return it. */
+ * We will later calculate the client's new grant and return it.
+ * Caller must hold osfs lock */
static void filter_grant_incoming(struct obd_export *exp, struct obdo *oa)
{
struct filter_export_data *fed;
struct obd_device *obd = exp->exp_obd;
- obd_size client_cached;
ENTRY;
- if (!oa || (oa->o_valid & (OBD_MD_FLBLOCKS|OBD_MD_FLGRANT)) !=
- (OBD_MD_FLBLOCKS|OBD_MD_FLGRANT)) {
- if (oa)
- oa->o_valid &= ~OBD_MD_FLGRANT;
+ LASSERT_SPIN_LOCKED(&obd->obd_osfs_lock);
+
+ if ((oa->o_valid & (OBD_MD_FLBLOCKS|OBD_MD_FLGRANT)) !=
+ (OBD_MD_FLBLOCKS|OBD_MD_FLGRANT)) {
+ oa->o_valid &= ~OBD_MD_FLGRANT;
EXIT;
return;
}
- client_cached = oa->o_blocks;
fed = &exp->exp_filter_data;
- if (client_cached > fed->fed_grant)
- CERROR("client %s claims "LPU64" granted, > "LPU64" granted\n",
- obd->obd_name, client_cached, fed->fed_grant);
-
- spin_lock(&obd->obd_osfs_lock);
- /* update our accounting now so that statfs takes it into account */
- obd->u.filter.fo_tot_cached += client_cached - fed->fed_cached;
- fed->fed_cached = client_cached;
-
- /* Acknowledgement that the client has seen our published grant.
- * If the client has met our shrinking target we can reuse its
- * difference from the previous grant. It is reasonable to announce
- * more dirty that cached as it tries to purge its previously granted
- * dirty data down to its newly received target. */
- if (fed->fed_grant_waiting && (oa->o_grant <= fed->fed_grant_sent)) {
- if (fed->fed_grant_sent < fed->fed_grant) {
- if (client_cached <= fed->fed_grant_sent) {
- obd->u.filter.fo_tot_granted -=
- fed->fed_grant - oa->o_grant;
- CDEBUG(D_SUPER, "reduced grant from "LPU64" to "
- LPU64", total grant now "LPU64"\n",
- fed->fed_grant, oa->o_grant,
- obd->u.filter.fo_tot_granted);
- fed->fed_grant = oa->o_grant;
- fed->fed_grant_waiting = 0;
- }
- } else {
- fed->fed_grant_waiting = 0;
- }
+ /* Add some margin, since there is a small race if other RPCs arrive
+ * out-of-order and have already consumed some grant. We want to
+ * leave this here in case there is a large error in accounting. */
+ CDEBUG(D_CACHE,
+ "%s: cli %s/%p reports grant: "LPU64" dropped: %u, local: %lu\n",
+ obd->obd_name, exp->exp_client_uuid.uuid, exp, oa->o_grant,
+ oa->o_dropped, fed->fed_grant);
+
+ /* Update our accounting now so that statfs takes it into account.
+ * Note that fed_dirty is only approximate and can become incorrect
+ * if RPCs arrive out-of-order. No important calculations depend
+ * on fed_dirty however, but we must check sanity to not assert. */
+ if ((long long)oa->o_dirty < 0)
+ oa->o_dirty = 0;
+ else if (oa->o_dirty > fed->fed_grant + 4 * FILTER_GRANT_CHUNK)
+ oa->o_dirty = fed->fed_grant + 4 * FILTER_GRANT_CHUNK;
+ obd->u.filter.fo_tot_dirty += oa->o_dirty - fed->fed_dirty;
+ if (fed->fed_grant < oa->o_dropped) {
+ CDEBUG(D_CACHE,"%s: cli %s/%p reports %u dropped > grant %lu\n",
+ obd->obd_name, exp->exp_client_uuid.uuid, exp,
+ oa->o_dropped, fed->fed_grant);
+ oa->o_dropped = 0;
+ }
+ if (obd->u.filter.fo_tot_granted < oa->o_dropped) {
+ CERROR("%s: cli %s/%p reports %u dropped > tot_grant "LPU64"\n",
+ obd->obd_name, exp->exp_client_uuid.uuid, exp,
+ oa->o_dropped, obd->u.filter.fo_tot_granted);
+ oa->o_dropped = 0;
+ }
+ obd->u.filter.fo_tot_granted -= oa->o_dropped;
+ fed->fed_grant -= oa->o_dropped;
+ fed->fed_dirty = oa->o_dirty;
+ if (fed->fed_dirty < 0 || fed->fed_grant < 0 || fed->fed_pending < 0) {
+ CERROR("%s: cli %s/%p dirty %ld pend %ld grant %ld\n",
+ obd->obd_name, exp->exp_client_uuid.uuid, exp,
+ fed->fed_dirty, fed->fed_pending, fed->fed_grant);
+ spin_unlock(&obd->obd_osfs_lock);
+ LBUG();
}
- spin_unlock(&obd->obd_osfs_lock);
- oa->o_valid &= ~(OBD_MD_FLGRANT|OBD_MD_FLBLOCKS);
EXIT;
}
* Caller must hold obd_osfs_lock. */
obd_size filter_grant_space_left(struct obd_export *exp)
{
- obd_size left = 0;
struct obd_device *obd = exp->exp_obd;
- int blockbits = obd->u.filter.fo_sb->s_blocksize_bits;
- /* XXX I disabled statfs caching as it only creates extra problems now.
- -- green*/
- unsigned long max_age = jiffies/* - HZ*/+1;
- struct filter_export_data *fed = &exp->exp_filter_data;
- int rc;
+ int blockbits = obd->u.obt.obt_sb->s_blocksize_bits;
+ obd_size tot_granted = obd->u.filter.fo_tot_granted, avail, left = 0;
+ int rc, statfs_done = 0;
-restat:
- rc = fsfilt_statfs(obd, obd->u.filter.fo_sb, max_age);
- if (rc) /* N.B. statfs can't really fail, just for correctness */
- RETURN(0);
-
- left = obd->obd_osfs.os_bavail << blockbits;
- left -= (left >> (blockbits - 2)) + (left >> (2 * blockbits - 2));
- /* We cannot afford having absolutely no space, we need some for
- llog stuff */
- if ( left >= PAGE_SIZE * 10)
- left -= PAGE_SIZE * 10;
- else
- left = 0;
+ LASSERT_SPIN_LOCKED(&obd->obd_osfs_lock);
- /* If fed->fed_grant_waiting is set, this means
- obd->u.filter.fo_tot_granted does not represent actual granted
- amount and client is supposedly actively shrinks its cache, so
- no point in printing this warning */
- if (left < obd->u.filter.fo_tot_granted && !fed->fed_grant_waiting)
- CERROR("granted space "LPU64" more than available "LPU64"\n",
- obd->u.filter.fo_tot_granted, left);
-
- left -= min(left, obd->u.filter.fo_tot_granted);
- if (left < FILTER_GRANT_CHUNK && time_after(jiffies,obd->obd_osfs_age)){
- CDEBUG(D_SUPER, "fs has no space left and statfs too old\n");
- max_age = jiffies;
- goto restat;
+ if (cfs_time_before_64(obd->obd_osfs_age, cfs_time_current_64() - HZ)) {
+restat:
+ rc = fsfilt_statfs(obd, obd->u.obt.obt_sb,
+ cfs_time_current_64() + HZ);
+ if (rc) /* N.B. statfs can't really fail */
+ RETURN(0);
+ statfs_done = 1;
}
- CDEBUG(D_SUPER, "free: "LPU64" avail: "LPU64" grant left: "LPU64"\n",
- obd->obd_osfs.os_bfree << blockbits,
- obd->obd_osfs.os_bavail << blockbits, left);
-
- return left;
-}
-
-/* When clients have dirtied as much space as they've been granted they
- * fall through to sync writes. These sync writes haven't been expressed
- * in grants and need to error with ENOSPC when there isn't room in the
- * filesystem for them after grants are taken into account. However,
- * writeback of the dirty data that was already granted space can write
- * right on through. We have no need to stop writes that won't allocate
- * new space, so we bmap to calculate how much this io is going to consume.
- *
- * Caller must hold obd_osfs_lock. */
-static int filter_check_space(struct obd_export *exp, int objcount,
- struct fsfilt_objinfo *fso, int niocount,
- struct niobuf_remote *rnb,
- struct niobuf_local *lnb, obd_size *left,
- obd_size *consumed, struct inode *inode)
-{
- int blocksize = exp->exp_obd->u.filter.fo_sb->s_blocksize;
- obd_size bytes, ungranted = 0;
- int i, rc = -ENOSPC, obj, n = 0;
-
- *consumed = 0;
-
- for (obj = 0; obj < objcount; obj++) {
- for (i = 0; i < fso[obj].fso_bufcnt; i++, n++) {
- obd_size tmp;
+ avail = obd->obd_osfs.os_bavail;
+ left = avail - (avail >> (blockbits - 3)); /* (d)indirect */
+ if (left > GRANT_FOR_LLOG(obd)) {
+ left = (left - GRANT_FOR_LLOG(obd)) << blockbits;
+ } else {
+ left = 0 /* << blockbits */;
+ }
- bytes = rnb[n].len;
- tmp = rnb[n].offset & (blocksize - 1);
- bytes += tmp;
- tmp = (rnb[n].offset + rnb[n].len) & (blocksize - 1);
- if (tmp)
- bytes += blocksize - tmp;
+ if (!statfs_done && left < 32 * FILTER_GRANT_CHUNK + tot_granted) {
+ CDEBUG(D_CACHE, "fs has no space left and statfs too old\n");
+ goto restat;
+ }
- if (rnb[n].flags & OBD_BRW_FROM_GRANT) {
- *consumed += bytes;
- rc = 0;
- continue;
- }
- if (*left - *consumed >= bytes) {
- /* if enough space, pretend it was granted */
- exp->exp_obd->u.filter.fo_tot_granted += bytes;
- exp->exp_filter_data.fed_grant += bytes;
- *consumed += bytes;
- *left -= bytes;
- rc = 0;
- continue;
- }
- spin_unlock(&exp->exp_obd->obd_osfs_lock);
- if (!filter_inode_has_holes(inode,
- rnb[n].offset >>
- inode->i_blkbits,
- rnb[n].len >>
- inode->i_blkbits)) {
- rc = 0;
- } else {
- rc = lnb[n].rc = -ENOSPC;
- }
- spin_lock(&exp->exp_obd->obd_osfs_lock);
- if (rc)
- goto leave;
+ if (left >= tot_granted) {
+ left -= tot_granted;
+ } else {
+ if (left < tot_granted - obd->u.filter.fo_tot_pending) {
+ CERROR("%s: cli %s/%p grant "LPU64" > available "
+ LPU64" and pending "LPU64"\n", obd->obd_name,
+ exp->exp_client_uuid.uuid, exp, tot_granted,
+ left, obd->u.filter.fo_tot_pending);
}
+ left = 0;
}
- CDEBUG((*consumed != 0 && ungranted != 0) ? D_ERROR : D_SUPER,
- "consumed: "LPU64" ungranted: "LPU64"\n", *consumed, ungranted);
+ CDEBUG(D_CACHE, "%s: cli %s/%p free: "LPU64" avail: "LPU64" grant "LPU64
+ " left: "LPU64" pending: "LPU64"\n", obd->obd_name,
+ exp->exp_client_uuid.uuid, exp,
+ obd->obd_osfs.os_bfree << blockbits, avail << blockbits,
+ tot_granted, left, obd->u.filter.fo_tot_pending);
- if (*consumed > exp->exp_filter_data.fed_grant)
- CERROR("request sent from cache, but not enough grant ("LPU64
- ","LPU64")\n", *consumed,
- exp->exp_filter_data.fed_grant);
-leave:
- return rc;
+ return left;
}
/* Calculate how much grant space to allocate to this client, based on how
* much space is currently free and how much of that is already granted.
*
* Caller must hold obd_osfs_lock. */
-static void filter_grant(struct obd_export *exp, struct obdo *oa,
- obd_size left, obd_size from_grant)
+long filter_grant(struct obd_export *exp, obd_size current_grant,
+ obd_size want, obd_size fs_space_left)
{
struct obd_device *obd = exp->exp_obd;
struct filter_export_data *fed = &exp->exp_filter_data;
- obd_size grant, extra;
- int blockbits;
-
- blockbits = obd->u.filter.fo_sb->s_blocksize_bits;
-
- /* if things go wrong conservatively try to clamp them from
- * generating more dirty data until things are better on our end */
- grant = fed->fed_cached;
-
- extra = min(FILTER_GRANT_CHUNK, left / 2);
-
- if (grant > fed->fed_grant) {
- /* If client has screwed up, force basic grant until fixed */
- CERROR("client %s cached more "LPU64" than granted "LPU64"\n",
- exp->exp_client_uuid.uuid, fed->fed_cached,
- fed->fed_grant);
- grant = extra;
- } else if (fed->fed_grant_waiting) {
- /* KISS: only one grant change in flight at a time. We
- * could move it in the "same direction" easily,
- * but changing directions (e.g. grow then shrink
- * before client ACKs) would be bad. */
- grant = fed->fed_grant_sent;
- } else {
- /* grant will shrink or grow as client cache/extra changes */
- grant = fed->fed_cached + extra;
- }
-
- /* If we've granted all we're willing, we have to revoke
- * the grant covering what the client just wrote. */
- if (left == 0) {
- grant -= min(from_grant, grant);
- }
-
- if (!fed->fed_grant_waiting && grant + from_grant > left ) {
- if (from_grant < left)
- grant = left - from_grant;
- else
- grant = 0;
- }
-
- if (grant != fed->fed_grant) {
- fed->fed_grant_waiting = 1;
- fed->fed_grant_sent = grant;
- if (grant > fed->fed_grant) {
- obd->u.filter.fo_tot_granted += grant - fed->fed_grant;
- fed->fed_grant = grant;
+ int blockbits = obd->u.obt.obt_sb->s_blocksize_bits;
+ __u64 grant = 0;
+
+ LASSERT_SPIN_LOCKED(&obd->obd_osfs_lock);
+
+ /* Grant some fraction of the client's requested grant space so that
+ * they are not always waiting for write credits (not all of it to
+ * avoid overgranting in face of multiple RPCs in flight). This
+ * essentially will be able to control the OSC_MAX_RIF for a client.
+ *
+ * If we do have a large disparity between what the client thinks it
+ * has and what we think it has, don't grant very much and let the
+ * client consume its grant first. Either it just has lots of RPCs
+ * in flight, or it was evicted and its grants will soon be used up. */
+ if (want > 0x7fffffff) {
+ CERROR("%s: client %s/%p requesting > 2GB grant "LPU64"\n",
+ obd->obd_name, exp->exp_client_uuid.uuid, exp, want);
+ } else if (current_grant < want &&
+ current_grant < fed->fed_grant + FILTER_GRANT_CHUNK) {
+ grant = min((want >> blockbits),
+ (fs_space_left >> blockbits) / 8);
+ grant <<= blockbits;
+
+ if (grant) {
+ /* Allow >FILTER_GRANT_CHUNK size when clients
+ * reconnect due to a server reboot.
+ */
+ if ((grant > FILTER_GRANT_CHUNK) &&
+ (!obd->obd_recovering))
+ grant = FILTER_GRANT_CHUNK;
+
+ obd->u.filter.fo_tot_granted += grant;
+ fed->fed_grant += grant;
+ if (fed->fed_grant < 0) {
+ CERROR("%s: cli %s/%p grant %ld want "LPU64
+ "current"LPU64"\n",
+ obd->obd_name, exp->exp_client_uuid.uuid,
+ exp, fed->fed_grant, want,current_grant);
+ spin_unlock(&obd->obd_osfs_lock);
+ LBUG();
+ }
}
}
- CDEBUG(D_SUPER,"cli %s cache:"LPU64" grant:"LPU64", granting:"LPU64"\n",
- exp->exp_connection->c_remote_uuid.uuid, oa->o_blocks,
- oa->o_grant, grant);
- CDEBUG(D_SUPER, "fed sent:"LPU64" wt:%d grant:"LPU64"\n",
- fed->fed_grant_sent, fed->fed_grant_waiting,
- fed->fed_grant);
- CDEBUG(D_SUPER, "tot cached:"LPU64" granted:"LPU64" num_exports: %d\n",
- obd->u.filter.fo_tot_cached,
- obd->u.filter.fo_tot_granted, obd->obd_num_exports);
-
- oa->o_valid |= OBD_MD_FLGRANT;
- oa->o_grant = grant;
+ CDEBUG(D_CACHE,
+ "%s: cli %s/%p wants: "LPU64" current grant "LPU64
+ " granting: "LPU64"\n", obd->obd_name, exp->exp_client_uuid.uuid,
+ exp, want, current_grant, grant);
+ CDEBUG(D_CACHE,
+ "%s: cli %s/%p tot cached:"LPU64" granted:"LPU64
+ " num_exports: %d\n", obd->obd_name, exp->exp_client_uuid.uuid,
+ exp, obd->u.filter.fo_tot_dirty,
+ obd->u.filter.fo_tot_granted, obd->obd_num_exports);
+
+ return grant;
}
static int filter_preprw_read(int cmd, struct obd_export *exp, struct obdo *oa,
int objcount, struct obd_ioobj *obj,
int niocount, struct niobuf_remote *nb,
struct niobuf_local *res,
- struct obd_trans_info *oti)
+ struct obd_trans_info *oti,
+ struct lustre_capa *capa)
{
struct obd_device *obd = exp->exp_obd;
- struct obd_run_ctxt saved;
- struct obd_ioobj *o;
+ struct lvfs_run_ctxt saved;
struct niobuf_remote *rnb;
- struct niobuf_local *lnb = NULL;
- struct fsfilt_objinfo *fso;
- struct dentry *dentry;
+ struct niobuf_local *lnb;
+ struct dentry *dentry = NULL;
struct inode *inode;
- int rc = 0, i, j, tot_bytes = 0, cleanup_phase = 0;
+ void *iobuf = NULL;
+ int rc = 0, i, tot_bytes = 0;
unsigned long now = jiffies;
ENTRY;
/* We are currently not supporting multi-obj BRW_READ RPCS at all.
- * When we do this function's dentry cleanup will need to be fixed */
- LASSERT(objcount == 1);
-
- OBD_ALLOC(fso, objcount * sizeof(*fso));
- if (fso == NULL)
- RETURN(-ENOMEM);
+ * When we do this function's dentry cleanup will need to be fixed.
+ * These values are verified in ost_brw_write() from the wire. */
+ LASSERTF(objcount == 1, "%d\n", objcount);
+ LASSERTF(obj->ioo_bufcnt > 0, "%d\n", obj->ioo_bufcnt);
- memset(res, 0, niocount * sizeof(*res));
+ rc = filter_auth_capa(exp, NULL, obdo_mdsno(oa), capa,
+ CAPA_OPC_OSS_READ);
+ if (rc)
+ RETURN(rc);
- push_ctxt(&saved, &exp->exp_obd->obd_ctxt, NULL);
- for (i = 0, o = obj; i < objcount; i++, o++) {
- LASSERT(o->ioo_bufcnt);
+ if (oa && oa->o_valid & OBD_MD_FLGRANT) {
+ spin_lock(&obd->obd_osfs_lock);
+ filter_grant_incoming(exp, oa);
- dentry = filter_oa2dentry(obd, oa);
- if (IS_ERR(dentry))
- GOTO(cleanup, rc = PTR_ERR(dentry));
+ oa->o_grant = 0;
+ spin_unlock(&obd->obd_osfs_lock);
+ }
- if (dentry->d_inode == NULL) {
- CERROR("trying to BRW to non-existent file "LPU64"\n",
- o->ioo_id);
- f_dput(dentry);
- GOTO(cleanup, rc = -ENOENT);
- }
+ iobuf = filter_iobuf_get(&obd->u.filter, oti);
+ if (IS_ERR(iobuf))
+ RETURN(PTR_ERR(iobuf));
- fso[i].fso_dentry = dentry;
- fso[i].fso_bufcnt = o->ioo_bufcnt;
+ push_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
+ dentry = filter_oa2dentry(obd, oa);
+ if (IS_ERR(dentry)) {
+ rc = PTR_ERR(dentry);
+ dentry = NULL;
+ GOTO(cleanup, rc);
}
- if (time_after(jiffies, now + 15 * HZ))
- CERROR("slow preprw_read setup %lus\n", (jiffies - now) / HZ);
- else
- CDEBUG(D_INFO, "preprw_read setup: %lu jiffies\n",
- (jiffies - now));
+ inode = dentry->d_inode;
- if (oa) {
- spin_lock(&obd->obd_osfs_lock);
- filter_grant(exp, oa, filter_grant_space_left(exp), 0);
- spin_unlock(&obd->obd_osfs_lock);
- }
+ obdo_to_inode(inode, oa, OBD_MD_FLATIME);
+ fsfilt_check_slow(obd, now, obd_timeout, "preprw_read setup");
- for (i = 0, o = obj, rnb = nb, lnb = res; i < objcount; i++, o++) {
- dentry = fso[i].fso_dentry;
- inode = dentry->d_inode;
-
- for (j = 0; j < o->ioo_bufcnt; j++, rnb++, lnb++) {
- lnb->dentry = dentry;
- lnb->offset = rnb->offset;
- lnb->len = rnb->len;
- lnb->flags = rnb->flags;
- lnb->start = jiffies;
-
- if (inode->i_size <= rnb->offset) {
- /* If there's no more data, abort early.
- * lnb->page == NULL and lnb->rc == 0, so it's
- * easy to detect later. */
- break;
- } else {
- rc = filter_start_page_read(inode, lnb);
- }
+ for (i = 0, lnb = res, rnb = nb; i < obj->ioo_bufcnt;
+ i++, rnb++, lnb++) {
+ lnb->dentry = dentry;
+ lnb->offset = rnb->offset;
+ lnb->len = rnb->len;
+ lnb->flags = rnb->flags;
- if (rc) {
- CDEBUG(rc == -ENOSPC ? D_INODE : D_ERROR,
- "page err %u@"LPU64" %u/%u %p: rc %d\n",
- lnb->len, lnb->offset, j, o->ioo_bufcnt,
- dentry, rc);
- cleanup_phase = 1;
- GOTO(cleanup, rc);
- }
+ /*
+ * ost_brw_write()->ost_nio_pages_get() already initialized
+ * lnb->page to point to the page from the per-thread page
+ * pool (bug 5137), initialize page.
+ */
+ LASSERT(lnb->page != NULL);
- tot_bytes += lnb->rc;
- if (lnb->rc < lnb->len) {
- /* short read, be sure to wait on it */
- lnb++;
- break;
- }
- }
+ if (i_size_read(inode) <= rnb->offset)
+ /* If there's no more data, abort early. lnb->rc == 0,
+ * so it's easy to detect later. */
+ break;
+ else
+ filter_alloc_dio_page(obd, inode, lnb);
+
+ if (i_size_read(inode) < lnb->offset + lnb->len - 1)
+ lnb->rc = i_size_read(inode) - lnb->offset;
+ else
+ lnb->rc = lnb->len;
+
+ tot_bytes += lnb->rc;
+
+ filter_iobuf_add_page(obd, iobuf, inode, lnb->page);
}
- if (time_after(jiffies, now + 15 * HZ))
- CERROR("slow start_page_read %lus\n", (jiffies - now) / HZ);
- else
- CDEBUG(D_INFO, "start_page_read: %lu jiffies\n",
- (jiffies - now));
+ fsfilt_check_slow(obd, now, obd_timeout, "start_page_read");
+
+ rc = filter_direct_io(OBD_BRW_READ, dentry, iobuf,
+ exp, NULL, NULL, NULL);
+ if (rc)
+ GOTO(cleanup, rc);
lprocfs_counter_add(obd->obd_stats, LPROC_FILTER_READ_BYTES, tot_bytes);
- while (lnb-- > res) {
- rc = filter_finish_page_read(lnb);
- if (rc) {
- CERROR("error page %u@"LPU64" %u %p: rc %d\n", lnb->len,
- lnb->offset, (int)(lnb - res), lnb->dentry, rc);
- cleanup_phase = 1;
- GOTO(cleanup, rc);
- }
- }
- if (time_after(jiffies, now + 15 * HZ))
- CERROR("slow finish_page_read %lus\n", (jiffies - now) / HZ);
- else
- CDEBUG(D_INFO, "finish_page_read: %lu jiffies\n",
- (jiffies - now));
+ lprocfs_counter_add(exp->exp_ops_stats, LPROC_FILTER_READ_BYTES,
+ tot_bytes);
EXIT;
cleanup:
- switch (cleanup_phase) {
- case 1:
- for (lnb = res; lnb < (res + niocount); lnb++) {
- if (lnb->page)
- page_cache_release(lnb->page);
- }
- if (res->dentry != NULL)
- f_dput(res->dentry);
- else
- CERROR("NULL dentry in cleanup -- tell CFS\n");
- case 0:
- OBD_FREE(fso, objcount * sizeof(*fso));
- pop_ctxt(&saved, &exp->exp_obd->obd_ctxt, NULL);
+ if (rc != 0) {
+ filter_free_dio_pages(objcount, obj, niocount, res);
+
+ if (dentry != NULL)
+ f_dput(dentry);
}
+
+ filter_iobuf_put(&obd->u.filter, iobuf, oti);
+
+ pop_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
+ if (rc)
+ CERROR("io error %d\n", rc);
+
return rc;
}
-static int filter_start_page_write(struct inode *inode,
- struct niobuf_local *lnb)
+/* When clients have dirtied as much space as they've been granted they
+ * fall through to sync writes. These sync writes haven't been expressed
+ * in grants and need to error with ENOSPC when there isn't room in the
+ * filesystem for them after grants are taken into account. However,
+ * writeback of the dirty data that was already granted space can write
+ * right on through.
+ *
+ * Caller must hold obd_osfs_lock. */
+static int filter_grant_check(struct obd_export *exp, struct obdo *oa,
+ int objcount, struct fsfilt_objinfo *fso,
+ int niocount, struct niobuf_remote *rnb,
+ struct niobuf_local *lnb, obd_size *left,
+ struct inode *inode)
{
- struct page *page = alloc_pages(GFP_HIGHUSER, 0);
- if (page == NULL) {
- CERROR("no memory for a temp page\n");
- RETURN(lnb->rc = -ENOMEM);
+ struct filter_export_data *fed = &exp->exp_filter_data;
+ int blocksize = exp->exp_obd->u.obt.obt_sb->s_blocksize;
+ unsigned long used = 0, ungranted = 0, using;
+ int i, rc = -ENOSPC, obj, n = 0;
+
+ LASSERT_SPIN_LOCKED(&exp->exp_obd->obd_osfs_lock);
+
+ for (obj = 0; obj < objcount; obj++) {
+ for (i = 0; i < fso[obj].fso_bufcnt; i++, n++) {
+ int tmp, bytes;
+
+ /* should match the code in osc_exit_cache */
+ bytes = rnb[n].len;
+ bytes += rnb[n].offset & (blocksize - 1);
+ tmp = (rnb[n].offset + rnb[n].len) & (blocksize - 1);
+ if (tmp)
+ bytes += blocksize - tmp;
+
+ if ((rnb[n].flags & OBD_BRW_FROM_GRANT) &&
+ (oa->o_valid & OBD_MD_FLGRANT)) {
+ if (fed->fed_grant < used + bytes) {
+ CDEBUG(D_CACHE,
+ "%s: cli %s/%p claims %ld+%d "
+ "GRANT, real grant %lu idx %d\n",
+ exp->exp_obd->obd_name,
+ exp->exp_client_uuid.uuid, exp,
+ used, bytes, fed->fed_grant, n);
+ } else {
+ used += bytes;
+ rnb[n].flags |= OBD_BRW_GRANTED;
+ lnb[n].lnb_grant_used = bytes;
+ CDEBUG(0, "idx %d used=%lu\n", n, used);
+ rc = 0;
+ continue;
+ }
+ }
+ if (*left > ungranted + bytes) {
+ /* if enough space, pretend it was granted */
+ ungranted += bytes;
+ rnb[n].flags |= OBD_BRW_GRANTED;
+ lnb[n].lnb_grant_used = bytes;
+ CDEBUG(0, "idx %d ungranted=%lu\n",n,ungranted);
+ rc = 0;
+ continue;
+ }
+
+ /* We can't check for already-mapped blocks here, as
+ * it requires dropping the osfs lock to do the bmap.
+ * Instead, we return ENOSPC and in that case we need
+ * to go through and verify if all of the blocks not
+ * marked BRW_GRANTED are already mapped and we can
+ * ignore this error. */
+ lnb[n].rc = -ENOSPC;
+ rnb[n].flags &= ~OBD_BRW_GRANTED;
+ CDEBUG(D_CACHE,"%s: cli %s/%p idx %d no space for %d\n",
+ exp->exp_obd->obd_name,
+ exp->exp_client_uuid.uuid, exp, n, bytes);
+ }
}
- POISON_PAGE(page, 0xf1);
- page->index = lnb->offset >> PAGE_SHIFT;
- lnb->page = page;
- return 0;
+ /* Now subtract what the client has used already. We don't subtract
+ * this from the tot_granted yet, so that other client's can't grab
+ * that space before we have actually allocated our blocks. That
+ * happens in filter_grant_commit() after the writes are done. */
+ *left -= ungranted;
+ fed->fed_grant -= used;
+ fed->fed_pending += used + ungranted;
+ exp->exp_obd->u.filter.fo_tot_granted += ungranted;
+ exp->exp_obd->u.filter.fo_tot_pending += used + ungranted;
+
+ CDEBUG(D_CACHE,
+ "%s: cli %s/%p used: %lu ungranted: %lu grant: %lu dirty: %lu\n",
+ exp->exp_obd->obd_name, exp->exp_client_uuid.uuid, exp, used,
+ ungranted, fed->fed_grant, fed->fed_dirty);
+
+ /* Rough calc in case we don't refresh cached statfs data */
+ using = (used + ungranted + 1 ) >>
+ exp->exp_obd->u.obt.obt_sb->s_blocksize_bits;
+ if (exp->exp_obd->obd_osfs.os_bavail > using)
+ exp->exp_obd->obd_osfs.os_bavail -= using;
+ else
+ exp->exp_obd->obd_osfs.os_bavail = 0;
+
+ if (fed->fed_dirty < used) {
+ CERROR("%s: cli %s/%p claims used %lu > fed_dirty %lu\n",
+ exp->exp_obd->obd_name, exp->exp_client_uuid.uuid, exp,
+ used, fed->fed_dirty);
+ used = fed->fed_dirty;
+ }
+ exp->exp_obd->u.filter.fo_tot_dirty -= used;
+ fed->fed_dirty -= used;
+
+ if (fed->fed_dirty < 0 || fed->fed_grant < 0 || fed->fed_pending < 0) {
+ CERROR("%s: cli %s/%p dirty %ld pend %ld grant %ld\n",
+ exp->exp_obd->obd_name, exp->exp_client_uuid.uuid, exp,
+ fed->fed_dirty, fed->fed_pending, fed->fed_grant);
+ spin_unlock(&exp->exp_obd->obd_osfs_lock);
+ LBUG();
+ }
+ return rc;
}
/* If we ever start to support multi-object BRW RPCs, we will need to get locks
int objcount, struct obd_ioobj *obj,
int niocount, struct niobuf_remote *nb,
struct niobuf_local *res,
- struct obd_trans_info *oti)
+ struct obd_trans_info *oti,
+ struct lustre_capa *capa)
{
- struct obd_device *obd = exp->exp_obd;
- struct obd_run_ctxt saved;
- struct niobuf_remote *rnb = nb;
+ struct lvfs_run_ctxt saved;
+ struct niobuf_remote *rnb;
struct niobuf_local *lnb = res;
struct fsfilt_objinfo fso;
- struct dentry *dentry;
- int rc = 0, i, tot_bytes = 0;
- obd_size consumed = 0, left;
+ struct filter_mod_data *fmd;
+ struct dentry *dentry = NULL;
+ void *iobuf;
+ obd_size left;
unsigned long now = jiffies;
+ int rc = 0, i, tot_bytes = 0, cleanup_phase = 0;
ENTRY;
LASSERT(objcount == 1);
LASSERT(obj->ioo_bufcnt > 0);
- filter_grant_incoming(exp, oa);
+ rc = filter_auth_capa(exp, NULL, obdo_mdsno(oa), capa,
+ CAPA_OPC_OSS_WRITE);
+ if (rc)
+ RETURN(rc);
- memset(res, 0, niocount * sizeof(*res));
+ push_ctxt(&saved, &exp->exp_obd->obd_lvfs_ctxt, NULL);
+ iobuf = filter_iobuf_get(&exp->exp_obd->u.filter, oti);
+ if (IS_ERR(iobuf))
+ GOTO(cleanup, rc = PTR_ERR(iobuf));
+ cleanup_phase = 1;
- push_ctxt(&saved, &obd->obd_ctxt, NULL);
- dentry = filter_fid2dentry(obd, NULL, obj->ioo_gr, obj->ioo_id);
+ dentry = filter_fid2dentry(exp->exp_obd, NULL, obj->ioo_gr,
+ obj->ioo_id);
if (IS_ERR(dentry))
GOTO(cleanup, rc = PTR_ERR(dentry));
+ cleanup_phase = 2;
if (dentry->d_inode == NULL) {
- CERROR("trying to BRW to non-existent file "LPU64"\n",
- obj->ioo_id);
- f_dput(dentry);
+ CERROR("%s: trying to BRW to non-existent file "LPU64"\n",
+ exp->exp_obd->obd_name, obj->ioo_id);
GOTO(cleanup, rc = -ENOENT);
}
fso.fso_dentry = dentry;
fso.fso_bufcnt = obj->ioo_bufcnt;
- if (time_after(jiffies, now + 15 * HZ))
- CERROR("slow preprw_write setup %lus\n", (jiffies - now) / HZ);
+ fsfilt_check_slow(exp->exp_obd, now, obd_timeout, "preprw_write setup");
+
+ /* Don't update inode timestamps if this write is older than a
+ * setattr which modifies the timestamps. b=10150 */
+ /* XXX when we start having persistent reservations this needs to
+ * be changed to filter_fmd_get() to create the fmd if it doesn't
+ * already exist so we can store the reservation handle there. */
+ fmd = filter_fmd_find(exp, obj->ioo_id, obj->ioo_gr);
+
+ LASSERT(oa != NULL);
+ spin_lock(&exp->exp_obd->obd_osfs_lock);
+ filter_grant_incoming(exp, oa);
+ if (fmd && fmd->fmd_mactime_xid > oti->oti_xid)
+ oa->o_valid &= ~(OBD_MD_FLMTIME | OBD_MD_FLCTIME |
+ OBD_MD_FLATIME);
else
- CDEBUG(D_INFO, "preprw_write setup: %lu jiffies\n",
- (jiffies - now));
+ obdo_to_inode(dentry->d_inode, oa, OBD_MD_FLATIME |
+ OBD_MD_FLMTIME | OBD_MD_FLCTIME);
+ cleanup_phase = 3;
- spin_lock(&obd->obd_osfs_lock);
left = filter_grant_space_left(exp);
- rc = filter_check_space(exp, objcount, &fso, niocount, rnb, lnb,
- &left, &consumed, dentry->d_inode);
- if (oa)
- filter_grant(exp, oa, left, consumed);
+ rc = filter_grant_check(exp, oa, objcount, &fso, niocount, nb, res,
+ &left, dentry->d_inode);
- spin_unlock(&obd->obd_osfs_lock);
+ /* do not zero out oa->o_valid as it is used in filter_commitrw_write()
+ * for setting UID/GID and fid EA in first write time. */
+ if (oa->o_valid & OBD_MD_FLGRANT)
+ oa->o_grant = filter_grant(exp,oa->o_grant,oa->o_undirty,left);
- if (rc) {
- f_dput(dentry);
+ spin_unlock(&exp->exp_obd->obd_osfs_lock);
+ filter_fmd_put(exp, fmd);
+
+ if (rc)
GOTO(cleanup, rc);
- }
for (i = 0, rnb = nb, lnb = res; i < obj->ioo_bufcnt;
i++, lnb++, rnb++) {
-
- /* If there were any granting failures, we should not have
- come here */
- LASSERT (lnb->rc == 0);
-
+ /* We still set up for ungranted pages so that granted pages
+ * can be written to disk as they were promised, and portals
+ * needs to keep the pages all aligned properly. */
lnb->dentry = dentry;
lnb->offset = rnb->offset;
lnb->len = rnb->len;
lnb->flags = rnb->flags;
- lnb->start = jiffies;
-
- rc = filter_start_page_write(dentry->d_inode, lnb);
- if (rc) {
- CDEBUG(rc == -ENOSPC ? D_INODE : D_ERROR, "page err %u@"
- LPU64" %u/%u %p: rc %d\n", lnb->len, lnb->offset,
- i, obj->ioo_bufcnt, dentry, rc);
- while (lnb-- > res)
- __free_pages(lnb->page, 0);
- f_dput(dentry);
- GOTO(cleanup, rc);
+
+                /*
+                 * ost_brw_write()->ost_nio_pages_get() already initialized
+                 * lnb->page to point to a page from the per-thread page
+                 * pool (bug 5137); here we only initialize its contents.
+                 */
+ LASSERT(lnb->page != NULL);
+ if (lnb->len != CFS_PAGE_SIZE) {
+ memset(kmap(lnb->page) + lnb->len,
+ 0, CFS_PAGE_SIZE - lnb->len);
+ kunmap(lnb->page);
}
- tot_bytes += lnb->len;
+ lnb->page->index = lnb->offset >> CFS_PAGE_SHIFT;
+
+ cleanup_phase = 4;
+
+                /* If the filter writes a partial page and the file is then
+                 * extended, the client will read in the whole page.  The
+                 * filter has to be careful to zero the rest of the partial
+                 * page on disk.  We do it by hand for partial extending
+                 * writes; send_bio() is responsible for zeroing pages when
+                 * asked to read unmapped blocks -- brw_kiovec() does this. */
+ if (lnb->len != CFS_PAGE_SIZE) {
+ __s64 maxidx;
+
+ maxidx = ((i_size_read(dentry->d_inode) +
+ CFS_PAGE_SIZE - 1) >> CFS_PAGE_SHIFT) - 1;
+ if (maxidx >= lnb->page->index) {
+ LL_CDEBUG_PAGE(D_PAGE, lnb->page, "write %u @ "
+ LPU64" flg %x before EOF %llu\n",
+ lnb->len, lnb->offset,lnb->flags,
+ i_size_read(dentry->d_inode));
+ filter_iobuf_add_page(exp->exp_obd, iobuf,
+ dentry->d_inode,
+ lnb->page);
+ } else {
+ long off;
+ char *p = kmap(lnb->page);
+
+ off = lnb->offset & ~CFS_PAGE_MASK;
+ if (off)
+ memset(p, 0, off);
+ off = (lnb->offset + lnb->len) & ~CFS_PAGE_MASK;
+ if (off)
+ memset(p + off, 0, CFS_PAGE_SIZE - off);
+ kunmap(lnb->page);
+ }
+ }
+ if (lnb->rc == 0)
+ tot_bytes += lnb->len;
}
- if (time_after(jiffies, now + 15 * HZ))
- CERROR("slow start_page_write %lus\n", (jiffies - now) / HZ);
- else
- CDEBUG(D_INFO, "start_page_write: %lu jiffies\n",
- (jiffies - now));
+ rc = filter_direct_io(OBD_BRW_READ, dentry, iobuf, exp,
+ NULL, NULL, NULL);
- lprocfs_counter_add(obd->obd_stats, LPROC_FILTER_WRITE_BYTES, tot_bytes);
+ fsfilt_check_slow(exp->exp_obd, now, obd_timeout, "start_page_write");
+
+ lprocfs_counter_add(exp->exp_ops_stats, LPROC_FILTER_WRITE_BYTES,
+ tot_bytes);
EXIT;
cleanup:
- pop_ctxt(&saved, &obd->obd_ctxt, NULL);
+ switch(cleanup_phase) {
+ case 4:
+ case 3:
+ filter_iobuf_put(&exp->exp_obd->u.filter, iobuf, oti);
+ case 2:
+ pop_ctxt(&saved, &exp->exp_obd->obd_lvfs_ctxt, NULL);
+ if (rc)
+ f_dput(dentry);
+ break;
+ case 1:
+ filter_iobuf_put(&exp->exp_obd->u.filter, iobuf, oti);
+ case 0:
+ spin_lock(&exp->exp_obd->obd_osfs_lock);
+ if (oa)
+ filter_grant_incoming(exp, oa);
+ spin_unlock(&exp->exp_obd->obd_osfs_lock);
+ pop_ctxt(&saved, &exp->exp_obd->obd_lvfs_ctxt, NULL);
+ break;
+ default:;
+ }
return rc;
}
int filter_preprw(int cmd, struct obd_export *exp, struct obdo *oa,
int objcount, struct obd_ioobj *obj, int niocount,
struct niobuf_remote *nb, struct niobuf_local *res,
- struct obd_trans_info *oti)
+ struct obd_trans_info *oti, struct lustre_capa *capa)
{
if (cmd == OBD_BRW_WRITE)
return filter_preprw_write(cmd, exp, oa, objcount, obj,
- niocount, nb, res, oti);
-
+ niocount, nb, res, oti, capa);
if (cmd == OBD_BRW_READ)
return filter_preprw_read(cmd, exp, oa, objcount, obj,
- niocount, nb, res, oti);
-
+ niocount, nb, res, oti, capa);
LBUG();
return -EPROTO;
}
+void filter_release_read_page(struct filter_obd *filter, struct inode *inode,
+ struct page *page)
+{
+ int drop = 0;
+
+ if (inode != NULL &&
+ (i_size_read(inode) > filter->fo_readcache_max_filesize))
+ drop = 1;
+
+ /* drop from cache like truncate_list_pages() */
+ if (drop && !TryLockPage(page)) {
+ if (page->mapping)
+ ll_truncate_complete_page(page);
+ unlock_page(page);
+ }
+ page_cache_release(page);
+}
+
static int filter_commitrw_read(struct obd_export *exp, struct obdo *oa,
int objcount, struct obd_ioobj *obj,
int niocount, struct niobuf_local *res,
- struct obd_trans_info *oti)
+ struct obd_trans_info *oti, int rc)
{
- struct obd_ioobj *o;
- struct niobuf_local *lnb;
- int i, j;
+ struct inode *inode = NULL;
+ struct ldlm_res_id res_id = { .name = { obj->ioo_id, 0,
+ obj->ioo_gr, 0} };
+ struct ldlm_resource *resource = NULL;
+ struct ldlm_namespace *ns = exp->exp_obd->obd_namespace;
ENTRY;
- for (i = 0, o = obj, lnb = res; i < objcount; i++, o++) {
- for (j = 0 ; j < o->ioo_bufcnt ; j++, lnb++) {
- if (lnb->page != NULL)
- page_cache_release(lnb->page);
+ /* If oa != NULL then filter_preprw_read updated the inode atime
+ * and we should update the lvb so that other glimpses will also
+ * get the updated value. bug 5972 */
+ if (oa && ns && ns->ns_lvbo && ns->ns_lvbo->lvbo_update) {
+ resource = ldlm_resource_get(ns, NULL, &res_id, LDLM_EXTENT, 0);
+
+ if (resource != NULL) {
+ ns->ns_lvbo->lvbo_update(resource, NULL, 0, 1);
+ ldlm_resource_putref(resource);
}
}
+
+ if (res->dentry != NULL)
+ inode = res->dentry->d_inode;
+
+ filter_free_dio_pages(objcount, obj, niocount, res);
+
if (res->dentry != NULL)
f_dput(res->dentry);
- RETURN(0);
+ RETURN(rc);
}
void flip_into_page_cache(struct inode *inode, struct page *new_page)
/* the dlm is protecting us from read/write concurrency, so we
* expect this find_lock_page to return quickly. even if we
* race with another writer it won't be doing much work with
- * the page locked. we do this 'cause t_c_p expects a
+ * the page locked. we do this 'cause t_c_p expects a
* locked page, and it wants to grab the pagecache lock
* as well. */
old_page = find_lock_page(inode->i_mapping, new_page->index);
if (old_page) {
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
- truncate_complete_page(old_page);
-#else
- truncate_complete_page(old_page->mapping, old_page);
-#endif
+ ll_truncate_complete_page(old_page);
unlock_page(old_page);
page_cache_release(old_page);
}
/* racing o_directs (no locking ioctl) could race adding
* their pages, so we repeat the page invalidation unless
* we successfully added our new page */
- rc = add_to_page_cache_unique(new_page, inode->i_mapping,
+ rc = add_to_page_cache_unique(new_page, inode->i_mapping,
new_page->index,
- page_hash(inode->i_mapping,
+ page_hash(inode->i_mapping,
new_page->index));
if (rc == 0) {
/* add_to_page_cache clears uptodate|dirty and locks
SetPageUptodate(new_page);
unlock_page(new_page);
}
-#else
+#else
rc = 0;
#endif
} while (rc != 0);
}
-/* XXX needs to trickle its oa down */
+void filter_grant_commit(struct obd_export *exp, int niocount,
+ struct niobuf_local *res)
+{
+ struct filter_obd *filter = &exp->exp_obd->u.filter;
+ struct niobuf_local *lnb = res;
+ unsigned long pending = 0;
+ int i;
+
+ spin_lock(&exp->exp_obd->obd_osfs_lock);
+ for (i = 0, lnb = res; i < niocount; i++, lnb++)
+ pending += lnb->lnb_grant_used;
+
+ LASSERTF(exp->exp_filter_data.fed_pending >= pending,
+ "%s: cli %s/%p fed_pending: %lu grant_used: %lu\n",
+ exp->exp_obd->obd_name, exp->exp_client_uuid.uuid, exp,
+ exp->exp_filter_data.fed_pending, pending);
+ exp->exp_filter_data.fed_pending -= pending;
+ LASSERTF(filter->fo_tot_granted >= pending,
+ "%s: cli %s/%p tot_granted: "LPU64" grant_used: %lu\n",
+ exp->exp_obd->obd_name, exp->exp_client_uuid.uuid, exp,
+ exp->exp_obd->u.filter.fo_tot_granted, pending);
+ filter->fo_tot_granted -= pending;
+ LASSERTF(filter->fo_tot_pending >= pending,
+ "%s: cli %s/%p tot_pending: "LPU64" grant_used: %lu\n",
+ exp->exp_obd->obd_name, exp->exp_client_uuid.uuid, exp,
+ filter->fo_tot_pending, pending);
+ filter->fo_tot_pending -= pending;
+
+ spin_unlock(&exp->exp_obd->obd_osfs_lock);
+}
+
int filter_commitrw(int cmd, struct obd_export *exp, struct obdo *oa,
int objcount, struct obd_ioobj *obj, int niocount,
- struct niobuf_local *res, struct obd_trans_info *oti)
+ struct niobuf_local *res, struct obd_trans_info *oti,
+ int rc)
{
if (cmd == OBD_BRW_WRITE)
return filter_commitrw_write(exp, oa, objcount, obj, niocount,
- res, oti);
+ res, oti, rc);
if (cmd == OBD_BRW_READ)
return filter_commitrw_read(exp, oa, objcount, obj, niocount,
- res, oti);
+ res, oti, rc);
LBUG();
return -EPROTO;
}
-int filter_brw(int cmd, struct obd_export *exp, struct obdo *oa,
- struct lov_stripe_md *lsm, obd_count oa_bufs,
- struct brw_page *pga, struct obd_trans_info *oti)
+int filter_brw(int cmd, struct obd_export *exp, struct obd_info *oinfo,
+ obd_count oa_bufs, struct brw_page *pga,
+ struct obd_trans_info *oti)
{
struct obd_ioobj ioo;
struct niobuf_local *lnb;
GOTO(out, ret = -ENOMEM);
for (i = 0; i < oa_bufs; i++) {
+ lnb[i].page = pga[i].pg;
rnb[i].offset = pga[i].off;
rnb[i].len = pga[i].count;
}
- obdo_to_ioobj(oa, &ioo);
+ obdo_to_ioobj(oinfo->oi_oa, &ioo);
ioo.ioo_bufcnt = oa_bufs;
- ret = filter_preprw(cmd, exp, oa, 1, &ioo, oa_bufs, rnb, lnb, oti);
+ ret = filter_preprw(cmd, exp, oinfo->oi_oa, 1, &ioo,
+ oa_bufs, rnb, lnb, oti, oinfo_capa(oinfo));
if (ret != 0)
GOTO(out, ret);
- for (i = 0; i < oa_bufs; i++) {
- void *virt = kmap(pga[i].pg);
- obd_off off = pga[i].off & ~PAGE_MASK;
- void *addr = kmap(lnb[i].page);
-
- /* 2 kmaps == vanishingly small deadlock opportunity */
-
- if (cmd & OBD_BRW_WRITE)
- memcpy(addr + off, virt + off, pga[i].count);
- else
- memcpy(virt + off, addr + off, pga[i].count);
-
- kunmap(lnb[i].page);
- kunmap(pga[i].pg);
- }
-
- ret = filter_commitrw(cmd, exp, oa, 1, &ioo, oa_bufs, lnb, oti);
+ ret = filter_commitrw(cmd, exp, oinfo->oi_oa, 1, &ioo,
+ oa_bufs, lnb, oti, ret);
out:
if (lnb)