#include <linux/module.h>
#include <linux/pagemap.h> // XXX kill me soon
#include <linux/version.h>
-#include <asm/div64.h>
#include <linux/obd_class.h>
#include <linux/lustre_fsfilt.h>
return lnb->rc;
}
-/* See if there are unallocated parts in given file region */
-static int filter_inode_has_holes(struct inode *inode, obd_size start,
- int len)
-{
- int j;
-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0))
- sector_t (*fs_bmap)(struct address_space *,
- sector_t);
-#else
- int (*fs_bmap)(struct address_space *, long);
-#endif
- fs_bmap = inode->i_mapping->a_ops->bmap;
- if (fs_bmap) {
- for (j = 0; j <= len ; j++) {
- if (!fs_bmap(inode->i_mapping, start+j)) {
- return 1;
- }
- }
- return 0;
- } else {
- /* Return -1 in case that caller cares about bmap availability.
- */
- return -1;
- }
-}
-
/* Grab the dirty and seen grant announcements from the incoming obdo.
- * We will later calculate the clients new grant and return it. */
+ * We will later calculate the client's new grant and return it.
+ * Caller must hold osfs lock */
static void filter_grant_incoming(struct obd_export *exp, struct obdo *oa)
{
struct filter_export_data *fed;
struct obd_device *obd = exp->exp_obd;
- obd_size client_cached;
ENTRY;
- if (!oa || (oa->o_valid & (OBD_MD_FLBLOCKS|OBD_MD_FLGRANT)) !=
- (OBD_MD_FLBLOCKS|OBD_MD_FLGRANT)) {
- if (oa)
- oa->o_valid &= ~OBD_MD_FLGRANT;
+ LASSERT_SPIN_LOCKED(&obd->obd_osfs_lock);
+
+ if ((oa->o_valid & (OBD_MD_FLBLOCKS|OBD_MD_FLGRANT)) !=
+ (OBD_MD_FLBLOCKS|OBD_MD_FLGRANT)) {
+ oa->o_valid &= ~OBD_MD_FLGRANT;
EXIT;
return;
}
- client_cached = oa->o_blocks;
fed = &exp->exp_filter_data;
- if (client_cached > fed->fed_grant)
- CERROR("client %s claims "LPU64" granted, > "LPU64" granted\n",
- obd->obd_name, client_cached, fed->fed_grant);
-
- spin_lock(&obd->obd_osfs_lock);
- /* update our accounting now so that statfs takes it into account */
- obd->u.filter.fo_tot_cached += client_cached - fed->fed_cached;
- fed->fed_cached = client_cached;
-
- /* Acknowledgement that the client has seen our published grant.
- * If the client has met our shrinking target we can reuse its
- * difference from the previous grant. It is reasonable to announce
- * more dirty that cached as it tries to purge its previously granted
- * dirty data down to its newly received target. */
- if (fed->fed_grant_waiting && (oa->o_grant <= fed->fed_grant_sent)) {
- if (fed->fed_grant_sent < fed->fed_grant) {
- if (client_cached <= fed->fed_grant_sent) {
- obd->u.filter.fo_tot_granted -=
- fed->fed_grant - oa->o_grant;
- CDEBUG(D_SUPER, "reduced grant from "LPU64" to "
- LPU64", total grant now "LPU64"\n",
- fed->fed_grant, oa->o_grant,
- obd->u.filter.fo_tot_granted);
- fed->fed_grant = oa->o_grant;
- fed->fed_grant_waiting = 0;
- }
- } else {
- fed->fed_grant_waiting = 0;
- }
+ /* Add some margin, since there is a small race if other RPCs arrive
+ * out-of-order and have already consumed some grant. We want to
+ * leave this here in case there is a large error in accounting. */
+ CDEBUG(oa->o_grant > fed->fed_grant + FILTER_GRANT_CHUNK ?
+ D_ERROR : D_CACHE,
+ "%s: cli %s/%p reports grant: "LPU64" dropped: %u, local: %lu\n",
+ obd->obd_name, exp->exp_client_uuid.uuid, exp, oa->o_grant,
+ oa->o_dropped, fed->fed_grant);
+
+ /* Update our accounting now so that statfs takes it into account.
+ * Note that fed_dirty is only approximate and can become incorrect
+ * if RPCs arrive out-of-order. No important calculations depend
+ * on fed_dirty however. */
+ obd->u.filter.fo_tot_dirty += oa->o_dirty - fed->fed_dirty;
+ if (fed->fed_grant < oa->o_dropped) {
+ CERROR("%s: cli %s/%p reports %u dropped > fed_grant %lu\n",
+ obd->obd_name, exp->exp_client_uuid.uuid, exp,
+ oa->o_dropped, fed->fed_grant);
+ oa->o_dropped = 0;
}
- spin_unlock(&obd->obd_osfs_lock);
- oa->o_valid &= ~(OBD_MD_FLGRANT|OBD_MD_FLBLOCKS);
+ if (obd->u.filter.fo_tot_granted < oa->o_dropped) {
+ CERROR("%s: cli %s/%p reports %u dropped > tot_grant "LPU64"\n",
+ obd->obd_name, exp->exp_client_uuid.uuid, exp,
+ oa->o_dropped, obd->u.filter.fo_tot_granted);
+ oa->o_dropped = 0;
+ }
+ /* NOTE(review): o_dropped appears to be grant the client released;
+ * it is debited from both the per-export and filter-wide totals —
+ * confirm this matches the client-side protocol. */
+ obd->u.filter.fo_tot_granted -= oa->o_dropped;
+ fed->fed_grant -= oa->o_dropped;
+ fed->fed_dirty = oa->o_dirty;
EXIT;
}
+#define GRANT_FOR_LLOG(obd) 16
+
/* Figure out how much space is available between what we've granted
* and what remains in the filesystem. Compensate for ext3 indirect
* block overhead when computing how much free space is left ungranted.
* Caller must hold obd_osfs_lock. */
obd_size filter_grant_space_left(struct obd_export *exp)
{
- obd_size left = 0;
struct obd_device *obd = exp->exp_obd;
int blockbits = obd->u.filter.fo_sb->s_blocksize_bits;
- /* XXX I disabled statfs caching as it only creates extra problems now.
- -- green*/
- unsigned long max_age = jiffies/* - HZ*/+1;
- struct filter_export_data *fed = &exp->exp_filter_data;
- int rc;
+ obd_size tot_granted = obd->u.filter.fo_tot_granted, avail, left = 0;
+ int rc, statfs_done = 0;
-restat:
- rc = fsfilt_statfs(obd, obd->u.filter.fo_sb, max_age);
- if (rc) /* N.B. statfs can't really fail, just for correctness */
- RETURN(0);
-
- left = obd->obd_osfs.os_bavail << blockbits;
- left -= (left >> (blockbits - 2)) + (left >> (2 * blockbits - 2));
- /* We cannot afford having absolutely no space, we need some for
- llog stuff */
- if ( left >= PAGE_SIZE * 10)
- left -= PAGE_SIZE * 10;
- else
- left = 0;
+ LASSERT_SPIN_LOCKED(&obd->obd_osfs_lock);
- /* If fed->fed_grant_waiting is set, this means
- obd->u.filter.fo_tot_granted does not represent actual granted
- amount and client is supposedly actively shrinks its cache, so
- no point in printing this warning */
- if (left < obd->u.filter.fo_tot_granted && !fed->fed_grant_waiting)
- CERROR("granted space "LPU64" more than available "LPU64"\n",
- obd->u.filter.fo_tot_granted, left);
-
- left -= min(left, obd->u.filter.fo_tot_granted);
- if (left < FILTER_GRANT_CHUNK && time_after(jiffies,obd->obd_osfs_age)){
- CDEBUG(D_SUPER, "fs has no space left and statfs too old\n");
- max_age = jiffies;
- goto restat;
+ if (time_before(obd->obd_osfs_age, jiffies - HZ)) {
+restat:
+ rc = fsfilt_statfs(obd, obd->u.filter.fo_sb, jiffies + 1);
+ if (rc) /* N.B. statfs can't really fail */
+ RETURN(0);
+ statfs_done = 1;
}
- CDEBUG(D_SUPER, "free: "LPU64" avail: "LPU64" grant left: "LPU64"\n",
- obd->obd_osfs.os_bfree << blockbits,
- obd->obd_osfs.os_bavail << blockbits, left);
-
- return left;
-}
-
-/* When clients have dirtied as much space as they've been granted they
- * fall through to sync writes. These sync writes haven't been expressed
- * in grants and need to error with ENOSPC when there isn't room in the
- * filesystem for them after grants are taken into account. However,
- * writeback of the dirty data that was already granted space can write
- * right on through. We have no need to stop writes that won't allocate
- * new space, so we bmap to calculate how much this io is going to consume.
- *
- * Caller must hold obd_osfs_lock. */
-static int filter_check_space(struct obd_export *exp, int objcount,
- struct fsfilt_objinfo *fso, int niocount,
- struct niobuf_remote *rnb,
- struct niobuf_local *lnb, obd_size *left,
- obd_size *consumed, struct inode *inode)
-{
- int blocksize = exp->exp_obd->u.filter.fo_sb->s_blocksize;
- obd_size bytes, ungranted = 0;
- int i, rc = -ENOSPC, obj, n = 0;
-
- *consumed = 0;
-
- for (obj = 0; obj < objcount; obj++) {
- for (i = 0; i < fso[obj].fso_bufcnt; i++, n++) {
- obd_size tmp;
+ avail = obd->obd_osfs.os_bavail;
+ left = avail - (avail >> (blockbits - 3)); /* (d)indirect */
+ if (left > GRANT_FOR_LLOG(obd)) {
+ left = (left - GRANT_FOR_LLOG(obd)) << blockbits;
+ } else {
+ left = 0 /* << blockbits */;
+ }
- bytes = rnb[n].len;
- tmp = rnb[n].offset & (blocksize - 1);
- bytes += tmp;
- tmp = (rnb[n].offset + rnb[n].len) & (blocksize - 1);
- if (tmp)
- bytes += blocksize - tmp;
+ /* If the cached statfs data was not refreshed above and space looks
+ * too tight to cover outstanding grants, refresh it once and retry. */
+ if (!statfs_done && left < 32 * FILTER_GRANT_CHUNK + tot_granted) {
+ CDEBUG(D_CACHE, "fs has no space left and statfs too old\n");
+ goto restat;
+ }
- if (rnb[n].flags & OBD_BRW_FROM_GRANT) {
- *consumed += bytes;
- rc = 0;
- continue;
- }
- if (*left - *consumed >= bytes) {
- /* if enough space, pretend it was granted */
- exp->exp_obd->u.filter.fo_tot_granted += bytes;
- exp->exp_filter_data.fed_grant += bytes;
- *consumed += bytes;
- *left -= bytes;
- rc = 0;
- continue;
- }
- spin_unlock(&exp->exp_obd->obd_osfs_lock);
- if (!filter_inode_has_holes(inode,
- rnb[n].offset >>
- inode->i_blkbits,
- rnb[n].len >>
- inode->i_blkbits)) {
- rc = 0;
- } else {
- rc = lnb[n].rc = -ENOSPC;
- }
- spin_lock(&exp->exp_obd->obd_osfs_lock);
- if (rc)
- goto leave;
+ if (left >= tot_granted) {
+ left -= tot_granted;
+ } else {
+ static unsigned long next;
+ if (left < tot_granted - obd->u.filter.fo_tot_pending &&
+ time_after(jiffies, next)) {
+ spin_unlock(&obd->obd_osfs_lock);
+ CERROR("%s: cli %s/%p grant "LPU64" > available "
+ LPU64" and pending "LPU64"\n", obd->obd_name,
+ exp->exp_client_uuid.uuid, exp, tot_granted,
+ left, obd->u.filter.fo_tot_pending);
+ if (next == 0)
+ portals_debug_dumplog();
+ next = jiffies + 20 * HZ;
+ spin_lock(&obd->obd_osfs_lock);
}
+ left = 0;
}
- CDEBUG((*consumed != 0 && ungranted != 0) ? D_ERROR : D_SUPER,
- "consumed: "LPU64" ungranted: "LPU64"\n", *consumed, ungranted);
+ CDEBUG(D_CACHE, "%s: cli %s/%p free: "LPU64" avail: "LPU64" grant "LPU64
+ " left: "LPU64" pending: "LPU64"\n", obd->obd_name,
+ exp->exp_client_uuid.uuid, exp,
+ obd->obd_osfs.os_bfree << blockbits, avail << blockbits,
+ tot_granted, left, obd->u.filter.fo_tot_pending);
- if (*consumed > exp->exp_filter_data.fed_grant)
- CERROR("request sent from cache, but not enough grant ("LPU64
- ","LPU64")\n", *consumed,
- exp->exp_filter_data.fed_grant);
-leave:
- return rc;
+ return left;
}
/* Calculate how much grant space to allocate to this client, based on how
* much space is currently free and how much of that is already granted.
*
* Caller must hold obd_osfs_lock. */
-static void filter_grant(struct obd_export *exp, struct obdo *oa,
- obd_size left, obd_size from_grant)
+long filter_grant(struct obd_export *exp, obd_size current_grant,
+ obd_size want, obd_size fs_space_left)
{
struct obd_device *obd = exp->exp_obd;
struct filter_export_data *fed = &exp->exp_filter_data;
- obd_size grant, extra;
- int blockbits;
-
- blockbits = obd->u.filter.fo_sb->s_blocksize_bits;
-
- /* if things go wrong conservatively try to clamp them from
- * generating more dirty data until things are better on our end */
- grant = fed->fed_cached;
-
- extra = min(FILTER_GRANT_CHUNK, left / 2);
-
- if (grant > fed->fed_grant) {
- /* If client has screwed up, force basic grant until fixed */
- CERROR("client %s cached more "LPU64" than granted "LPU64"\n",
- exp->exp_client_uuid.uuid, fed->fed_cached,
- fed->fed_grant);
- grant = extra;
- } else if (fed->fed_grant_waiting) {
- /* KISS: only one grant change in flight at a time. We
- * could move it in the "same direction" easily,
- * but changing directions (e.g. grow then shrink
- * before client ACKs) would be bad. */
- grant = fed->fed_grant_sent;
- } else {
- /* grant will shrink or grow as client cache/extra changes */
- grant = fed->fed_cached + extra;
- }
-
- /* If we've granted all we're willing, we have to revoke
- * the grant covering what the client just wrote. */
- if (left == 0) {
- grant -= min(from_grant, grant);
- }
-
- if (!fed->fed_grant_waiting && grant + from_grant > left ) {
- if (from_grant < left)
- grant = left - from_grant;
- else
- grant = 0;
- }
-
- if (grant != fed->fed_grant) {
- fed->fed_grant_waiting = 1;
- fed->fed_grant_sent = grant;
- if (grant > fed->fed_grant) {
- obd->u.filter.fo_tot_granted += grant - fed->fed_grant;
- fed->fed_grant = grant;
+ int blockbits = obd->u.filter.fo_sb->s_blocksize_bits;
+ __u64 grant = 0;
+
+ LASSERT_SPIN_LOCKED(&obd->obd_osfs_lock);
+
+ /* Grant some fraction of the client's requested grant space so that
+ * they are not always waiting for write credits (not all of it to
+ * avoid overgranting in face of multiple RPCs in flight). This
+ * essentially will be able to control the OSC_MAX_RIF for a client.
+ *
+ * If we do have a large disparity between what the client thinks it
+ * has and what we think it has, don't grant very much and let the
+ * client consume its grant first. Either it just has lots of RPCs
+ * in flight, or it was evicted and its grants will soon be used up. */
+ if (current_grant < want) {
+ /* NOTE(review): 65536 is a magic fallback (64KB) target used
+ * when the client's view of its grant disagrees with ours by
+ * more than one chunk — confirm this constant's intent. */
+ if (current_grant > fed->fed_grant + FILTER_GRANT_CHUNK)
+ want = 65536;
+ grant = min((want >> blockbits) / 2,
+ (fs_space_left >> blockbits) / 8);
+ grant <<= blockbits;
+
+ if (grant) {
+ if (grant > FILTER_GRANT_CHUNK)
+ grant = FILTER_GRANT_CHUNK;
+
+ obd->u.filter.fo_tot_granted += grant;
+ fed->fed_grant += grant;
}
}
- CDEBUG(D_SUPER,"cli %s cache:"LPU64" grant:"LPU64", granting:"LPU64"\n",
- exp->exp_connection->c_remote_uuid.uuid, oa->o_blocks,
- oa->o_grant, grant);
- CDEBUG(D_SUPER, "fed sent:"LPU64" wt:%d grant:"LPU64"\n",
- fed->fed_grant_sent, fed->fed_grant_waiting,
- fed->fed_grant);
- CDEBUG(D_SUPER, "tot cached:"LPU64" granted:"LPU64" num_exports: %d\n",
- obd->u.filter.fo_tot_cached,
- obd->u.filter.fo_tot_granted, obd->obd_num_exports);
-
- oa->o_valid |= OBD_MD_FLGRANT;
- oa->o_grant = grant;
+ CDEBUG(D_CACHE,"%s: cli %s/%p wants: "LPU64" granting: "LPU64"\n",
+ obd->obd_name, exp->exp_client_uuid.uuid, exp, want, grant);
+ CDEBUG(D_CACHE,
+ "%s: cli %s/%p tot cached:"LPU64" granted:"LPU64
+ " num_exports: %d\n", obd->obd_name, exp->exp_client_uuid.uuid,
+ exp, obd->u.filter.fo_tot_dirty,
+ obd->u.filter.fo_tot_granted, obd->obd_num_exports);
+
+ return grant;
}
static int filter_preprw_read(int cmd, struct obd_export *exp, struct obdo *oa,
/* We are currently not supporting multi-obj BRW_READ RPCS at all.
* When we do this function's dentry cleanup will need to be fixed */
LASSERT(objcount == 1);
+ LASSERT(obj->ioo_bufcnt > 0);
+
+ if (oa && oa->o_valid & OBD_MD_FLGRANT) {
+ spin_lock(&obd->obd_osfs_lock);
+ filter_grant_incoming(exp, oa);
+
+#if 0
+ /* Reads do not increase grants */
+ oa->o_grant = filter_grant(exp, oa->o_grant, oa->o_undirty,
+ filter_grant_space_left(exp));
+#else
+ oa->o_grant = 0;
+#endif
+ spin_unlock(&obd->obd_osfs_lock);
+ }
OBD_ALLOC(fso, objcount * sizeof(*fso));
if (fso == NULL)
CDEBUG(D_INFO, "preprw_read setup: %lu jiffies\n",
(jiffies - now));
- if (oa) {
- spin_lock(&obd->obd_osfs_lock);
- filter_grant(exp, oa, filter_grant_space_left(exp), 0);
- spin_unlock(&obd->obd_osfs_lock);
- }
-
for (i = 0, o = obj, rnb = nb, lnb = res; i < objcount; i++, o++) {
dentry = fso[i].fso_dentry;
inode = dentry->d_inode;
lnb->offset = rnb->offset;
lnb->len = rnb->len;
lnb->flags = rnb->flags;
- lnb->start = jiffies;
if (inode->i_size <= rnb->offset) {
/* If there's no more data, abort early.
return rc;
}
+/* When clients have dirtied as much space as they've been granted they
+ * fall through to sync writes. These sync writes haven't been expressed
+ * in grants and need to error with ENOSPC when there isn't room in the
+ * filesystem for them after grants are taken into account. However,
+ * writeback of the dirty data that was already granted space can write
+ * right on through.
+ *
+ * Caller must hold obd_osfs_lock. */
+static int filter_grant_check(struct obd_export *exp, int objcount,
+ struct fsfilt_objinfo *fso, int niocount,
+ struct niobuf_remote *rnb,
+ struct niobuf_local *lnb, obd_size *left,
+ struct inode *inode)
+{
+ struct filter_export_data *fed = &exp->exp_filter_data;
+ int blocksize = exp->exp_obd->u.filter.fo_sb->s_blocksize;
+ unsigned long used = 0, ungranted = 0, using;
+ int i, rc = -ENOSPC, obj, n = 0, mask = D_CACHE;
+
+ LASSERT_SPIN_LOCKED(&exp->exp_obd->obd_osfs_lock);
+
+ for (obj = 0; obj < objcount; obj++) {
+ for (i = 0; i < fso[obj].fso_bufcnt; i++, n++) {
+ int tmp, bytes;
+
+ /* FIXME: this is calculated with PAGE_SIZE on client */
+ bytes = rnb[n].len;
+ bytes += rnb[n].offset & (blocksize - 1);
+ tmp = (rnb[n].offset + rnb[n].len) & (blocksize - 1);
+ if (tmp)
+ bytes += blocksize - tmp;
+
+ if (rnb[n].flags & OBD_BRW_FROM_GRANT) {
+ if (fed->fed_grant < used + bytes) {
+ CDEBUG(D_CACHE,
+ "%s: cli %s/%p claims %ld+%d "
+ "GRANT, real grant %lu idx %d\n",
+ exp->exp_obd->obd_name,
+ exp->exp_client_uuid.uuid, exp,
+ used, bytes, fed->fed_grant, n);
+ mask = D_ERROR;
+ } else {
+ used += bytes;
+ rnb[n].flags |= OBD_BRW_GRANTED;
+ lnb[n].lnb_grant_used = bytes;
+ CDEBUG(0, "idx %d used=%lu\n", n, used);
+ rc = 0;
+ continue;
+ }
+ }
+ if (*left > ungranted) {
+ /* if enough space, pretend it was granted */
+ ungranted += bytes;
+ rnb[n].flags |= OBD_BRW_GRANTED;
+ CDEBUG(0, "idx %d ungranted=%lu\n",n,ungranted);
+ rc = 0;
+ continue;
+ }
+
+ /* We can't check for already-mapped blocks here, as
+ * it requires dropping the osfs lock to do the bmap.
+ * Instead, we return ENOSPC and in that case we need
+ * to go through and verify if all of the blocks not
+ * marked BRW_GRANTED are already mapped and we can
+ * ignore this error. */
+ lnb[n].rc = -ENOSPC;
+ /* Clear GRANTED (was "&= OBD_BRW_GRANTED", which kept
+ * only that bit and wiped all other flags — the
+ * opposite of the intent described above). */
+ rnb[n].flags &= ~OBD_BRW_GRANTED;
+ CDEBUG(D_CACHE,"%s: cli %s/%p idx %d no space for %d\n",
+ exp->exp_obd->obd_name,
+ exp->exp_client_uuid.uuid, exp, n, bytes);
+ }
+ }
+
+ /* Now subtract what the client has used already. We don't subtract
+ * this from the tot_granted yet, so that other client's can't grab
+ * that space before we have actually allocated our blocks. That
+ * happens in filter_grant_commit() after the writes are done. */
+ *left -= ungranted;
+ fed->fed_grant -= used;
+ fed->fed_pending += used;
+ exp->exp_obd->u.filter.fo_tot_pending += used;
+
+ CDEBUG(mask,
+ "%s: cli %s/%p used: %lu ungranted: %lu grant: %lu dirty: %lu\n",
+ exp->exp_obd->obd_name, exp->exp_client_uuid.uuid, exp, used,
+ ungranted, fed->fed_grant, fed->fed_dirty);
+
+ /* Rough calc in case we don't refresh cached statfs data */
+ using = (used + ungranted + 1) >>
+ exp->exp_obd->u.filter.fo_sb->s_blocksize_bits;
+ if (exp->exp_obd->obd_osfs.os_bavail > using)
+ exp->exp_obd->obd_osfs.os_bavail -= using;
+ else
+ exp->exp_obd->obd_osfs.os_bavail = 0;
+
+ if (fed->fed_dirty < used) {
+ CERROR("%s: cli %s/%p claims used %lu > fed_dirty %lu\n",
+ exp->exp_obd->obd_name, exp->exp_client_uuid.uuid, exp,
+ used, fed->fed_dirty);
+ used = fed->fed_dirty;
+ }
+ exp->exp_obd->u.filter.fo_tot_dirty -= used;
+ fed->fed_dirty -= used;
+
+ return rc;
+}
+
static int filter_start_page_write(struct inode *inode,
struct niobuf_local *lnb)
{
RETURN(lnb->rc = -ENOMEM);
}
POISON_PAGE(page, 0xf1);
+ if (lnb->len != PAGE_SIZE) {
+ memset(kmap(page) + lnb->len, 0, PAGE_SIZE - lnb->len);
+ kunmap(page);
+ }
page->index = lnb->offset >> PAGE_SHIFT;
lnb->page = page;
struct niobuf_local *res,
struct obd_trans_info *oti)
{
- struct obd_device *obd = exp->exp_obd;
struct obd_run_ctxt saved;
- struct niobuf_remote *rnb = nb;
- struct niobuf_local *lnb = res;
+ struct niobuf_remote *rnb;
+ struct niobuf_local *lnb;
struct fsfilt_objinfo fso;
struct dentry *dentry;
- int rc = 0, i, tot_bytes = 0;
- obd_size consumed = 0, left;
+ obd_size left;
unsigned long now = jiffies;
+ int rc = 0, i, tot_bytes = 0, cleanup_phase = 1;
ENTRY;
LASSERT(objcount == 1);
LASSERT(obj->ioo_bufcnt > 0);
- filter_grant_incoming(exp, oa);
-
memset(res, 0, niocount * sizeof(*res));
- push_ctxt(&saved, &obd->obd_ctxt, NULL);
- dentry = filter_fid2dentry(obd, NULL, obj->ioo_gr, obj->ioo_id);
+ push_ctxt(&saved, &exp->exp_obd->obd_ctxt, NULL);
+ dentry = filter_fid2dentry(exp->exp_obd, NULL, obj->ioo_gr,
+ obj->ioo_id);
if (IS_ERR(dentry))
GOTO(cleanup, rc = PTR_ERR(dentry));
CDEBUG(D_INFO, "preprw_write setup: %lu jiffies\n",
(jiffies - now));
- spin_lock(&obd->obd_osfs_lock);
+ spin_lock(&exp->exp_obd->obd_osfs_lock);
+ if (oa)
+ filter_grant_incoming(exp, oa);
+ cleanup_phase = 0;
+
left = filter_grant_space_left(exp);
- rc = filter_check_space(exp, objcount, &fso, niocount, rnb, lnb,
- &left, &consumed, dentry->d_inode);
- if (oa)
- filter_grant(exp, oa, left, consumed);
+ rc = filter_grant_check(exp, objcount, &fso, niocount, nb, res,
+ &left, dentry->d_inode);
+ if (oa && oa->o_valid & OBD_MD_FLGRANT)
+ oa->o_grant = filter_grant(exp,oa->o_grant,oa->o_undirty,left);
- spin_unlock(&obd->obd_osfs_lock);
+ spin_unlock(&exp->exp_obd->obd_osfs_lock);
if (rc) {
f_dput(dentry);
for (i = 0, rnb = nb, lnb = res; i < obj->ioo_bufcnt;
i++, lnb++, rnb++) {
-
- /* If there were any granting failures, we should not have
- come here */
- LASSERT (lnb->rc == 0);
-
+ /* We still set up for ungranted pages so that granted pages
+ * can be written to disk as they were promised, and portals
+ * needs to keep the pages all aligned properly. */
lnb->dentry = dentry;
lnb->offset = rnb->offset;
lnb->len = rnb->len;
lnb->flags = rnb->flags;
- lnb->start = jiffies;
rc = filter_start_page_write(dentry->d_inode, lnb);
if (rc) {
- CDEBUG(rc == -ENOSPC ? D_INODE : D_ERROR, "page err %u@"
- LPU64" %u/%u %p: rc %d\n", lnb->len, lnb->offset,
+ CDEBUG(D_ERROR, "page err %u@"LPU64" %u/%u %p: rc %d\n",
+ lnb->len, lnb->offset,
i, obj->ioo_bufcnt, dentry, rc);
while (lnb-- > res)
__free_pages(lnb->page, 0);
f_dput(dentry);
GOTO(cleanup, rc);
}
- tot_bytes += lnb->len;
+ if (lnb->rc == 0)
+ tot_bytes += lnb->len;
}
if (time_after(jiffies, now + 15 * HZ))
CDEBUG(D_INFO, "start_page_write: %lu jiffies\n",
(jiffies - now));
- lprocfs_counter_add(obd->obd_stats, LPROC_FILTER_WRITE_BYTES, tot_bytes);
+ lprocfs_counter_add(exp->exp_obd->obd_stats, LPROC_FILTER_WRITE_BYTES,
+ tot_bytes);
EXIT;
cleanup:
- pop_ctxt(&saved, &obd->obd_ctxt, NULL);
+ switch(cleanup_phase) {
+ case 1:
+ spin_lock(&exp->exp_obd->obd_osfs_lock);
+ if (oa)
+ filter_grant_incoming(exp, oa);
+ spin_unlock(&exp->exp_obd->obd_osfs_lock);
+ default: ;
+ }
+ pop_ctxt(&saved, &exp->exp_obd->obd_ctxt, NULL);
return rc;
}
static int filter_commitrw_read(struct obd_export *exp, struct obdo *oa,
int objcount, struct obd_ioobj *obj,
int niocount, struct niobuf_local *res,
- struct obd_trans_info *oti)
+ struct obd_trans_info *oti, int rc)
{
struct obd_ioobj *o;
struct niobuf_local *lnb;
- int i, j;
+ int i, j, drop = 0;
ENTRY;
+ /* Pages of files larger than fo_readcache_max_filesize are dropped
+ * from the page cache after the read, bounding read-cache memory. */
+ if (res->dentry != NULL)
+ drop = (res->dentry->d_inode->i_size >
+ exp->exp_obd->u.filter.fo_readcache_max_filesize);
+
for (i = 0, o = obj, lnb = res; i < objcount; i++, o++) {
for (j = 0 ; j < o->ioo_bufcnt ; j++, lnb++) {
- if (lnb->page != NULL)
- page_cache_release(lnb->page);
+ if (lnb->page == NULL)
+ continue;
+ /* drop from cache like truncate_list_pages() */
+ if (drop && !TryLockPage(lnb->page)) {
+ if (lnb->page->mapping)
+ ll_truncate_complete_page(lnb->page);
+ unlock_page(lnb->page);
+ }
+ page_cache_release(lnb->page);
}
}
+
+ if (res->dentry != NULL)
+ f_dput(res->dentry);
- RETURN(0);
+ RETURN(rc);
}
void flip_into_page_cache(struct inode *inode, struct page *new_page)
/* the dlm is protecting us from read/write concurrency, so we
* expect this find_lock_page to return quickly. even if we
* race with another writer it won't be doing much work with
- * the page locked. we do this 'cause t_c_p expects a
+ * the page locked. we do this 'cause t_c_p expects a
* locked page, and it wants to grab the pagecache lock
* as well. */
old_page = find_lock_page(inode->i_mapping, new_page->index);
if (old_page) {
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
- truncate_complete_page(old_page);
-#else
- truncate_complete_page(old_page->mapping, old_page);
-#endif
+ ll_truncate_complete_page(old_page);
unlock_page(old_page);
page_cache_release(old_page);
}
/* racing o_directs (no locking ioctl) could race adding
* their pages, so we repeat the page invalidation unless
* we successfully added our new page */
- rc = add_to_page_cache_unique(new_page, inode->i_mapping,
+ rc = add_to_page_cache_unique(new_page, inode->i_mapping,
new_page->index,
- page_hash(inode->i_mapping,
+ page_hash(inode->i_mapping,
new_page->index));
if (rc == 0) {
/* add_to_page_cache clears uptodate|dirty and locks
SetPageUptodate(new_page);
unlock_page(new_page);
}
-#else
+#else
rc = 0;
#endif
} while (rc != 0);
}
-/* XXX needs to trickle its oa down */
+/* Release the "pending" grant accounting charged by filter_grant_check()
+ * once the writes are done: each niobuf_local records the grant it
+ * consumed in lnb_grant_used, and the sum is removed from the per-export
+ * and filter-wide pending/granted totals. Takes obd_osfs_lock itself,
+ * so the caller must not hold it. */
+void filter_grant_commit(struct obd_export *exp, int niocount,
+ struct niobuf_local *res)
+{
+ struct filter_obd *filter = &exp->exp_obd->u.filter;
+ struct niobuf_local *lnb = res;
+ unsigned long pending = 0;
+ int i;
+
+ spin_lock(&exp->exp_obd->obd_osfs_lock);
+ for (i = 0, lnb = res; i < niocount; i++, lnb++)
+ pending += lnb->lnb_grant_used;
+
+ LASSERTF(exp->exp_filter_data.fed_pending >= pending,
+ "%s: cli %s/%p fed_pending: %lu grant_used: %lu\n",
+ exp->exp_obd->obd_name, exp->exp_client_uuid.uuid, exp,
+ exp->exp_filter_data.fed_pending, pending);
+ exp->exp_filter_data.fed_pending -= pending;
+ LASSERTF(filter->fo_tot_granted >= pending,
+ "%s: cli %s/%p tot_granted: "LPU64" grant_used: %lu\n",
+ exp->exp_obd->obd_name, exp->exp_client_uuid.uuid, exp,
+ exp->exp_obd->u.filter.fo_tot_granted, pending);
+ filter->fo_tot_granted -= pending;
+ LASSERTF(filter->fo_tot_pending >= pending,
+ "%s: cli %s/%p tot_pending: "LPU64" grant_used: %lu\n",
+ exp->exp_obd->obd_name, exp->exp_client_uuid.uuid, exp,
+ filter->fo_tot_pending, pending);
+ filter->fo_tot_pending -= pending;
+
+ spin_unlock(&exp->exp_obd->obd_osfs_lock);
+}
+
int filter_commitrw(int cmd, struct obd_export *exp, struct obdo *oa,
int objcount, struct obd_ioobj *obj, int niocount,
- struct niobuf_local *res, struct obd_trans_info *oti)
+ struct niobuf_local *res, struct obd_trans_info *oti,int rc)
{
+ /* Dispatch bulk-I/O completion to the read or write handler;
+ * rc is the transfer status and is passed straight through. */
if (cmd == OBD_BRW_WRITE)
return filter_commitrw_write(exp, oa, objcount, obj, niocount,
- res, oti);
+ res, oti, rc);
if (cmd == OBD_BRW_READ)
return filter_commitrw_read(exp, oa, objcount, obj, niocount,
- res, oti);
+ res, oti, rc);
LBUG();
return -EPROTO;
}
kunmap(pga[i].pg);
}
- ret = filter_commitrw(cmd, exp, oa, 1, &ioo, oa_bufs, lnb, oti);
+ ret = filter_commitrw(cmd, exp, oa, 1, &ioo, oa_bufs, lnb, oti, ret);
out:
if (lnb)