#include <linux/obd_class.h>
#include <linux/lustre_fsfilt.h>
+#include <linux/lustre_smfs.h>
+#include <linux/lustre_snap.h>
#include "filter_internal.h"
-static int filter_start_page_read(struct inode *inode, struct niobuf_local *lnb)
+static int filter_start_page_read(struct obd_device *obd, struct inode *inode,
+ struct niobuf_local *lnb)
{
- struct address_space *mapping = inode->i_mapping;
struct page *page;
unsigned long index = lnb->offset >> PAGE_SHIFT;
- int rc;
-
- page = grab_cache_page(mapping, index); /* locked page */
- if (page == NULL)
- return lnb->rc = -ENOMEM;
-
- LASSERT(page->mapping == mapping);
- lnb->page = page;
-
- if (inode->i_size < lnb->offset + lnb->len - 1)
- lnb->rc = inode->i_size - lnb->offset;
- else
- lnb->rc = lnb->len;
+ page = fsfilt_getpage(obd, inode, index);
+ if (IS_ERR(page)) {
+ CERROR("page index %lu, rc = %ld\n", index, PTR_ERR(page));
- if (PageUptodate(page)) {
- unlock_page(page);
- return 0;
- }
-
- rc = mapping->a_ops->readpage(NULL, page);
- if (rc < 0) {
- CERROR("page index %lu, rc = %d\n", index, rc);
lnb->page = NULL;
- page_cache_release(page);
- return lnb->rc = rc;
+ lnb->rc = PTR_ERR(page);
+ return lnb->rc;
}
+ lnb->page = page;
+
return 0;
}
return lnb->rc;
}
+/* Grab the dirty and seen grant announcements from the incoming obdo.
+ * We will later calculate the clients new grant and return it.
+ * Caller must hold osfs lock */
+static void filter_grant_incoming(struct obd_export *exp, struct obdo *oa)
+{
+ struct filter_export_data *fed;
+ struct obd_device *obd = exp->exp_obd;
+ /* static: rate-limit state shared across all exports/calls */
+ static unsigned long last_msg;
+ static int last_count;
+ int mask = D_CACHE;
+ ENTRY;
+
+ LASSERT_SPIN_LOCKED(&obd->obd_osfs_lock);
+
+ /* Client must announce both its dirty count and its grant state;
+ * otherwise drop FLGRANT so no grant processing happens for this RPC. */
+ if ((oa->o_valid & (OBD_MD_FLBLOCKS|OBD_MD_FLGRANT)) !=
+ (OBD_MD_FLBLOCKS|OBD_MD_FLGRANT)) {
+ oa->o_valid &= ~OBD_MD_FLGRANT;
+ EXIT;
+ return;
+ }
+
+ fed = &exp->exp_filter_data;
+
+ /* Don't print this to the console the first time it happens, since
+ * it can happen legitimately on occasion, but only rarely. */
+ if (time_after(jiffies, last_msg + 60 * HZ)) {
+ last_count = 0;
+ last_msg = jiffies;
+ }
+ /* (x & -x) == x only for 0 and powers of two, so within each 60s
+ * window the warning fires on the 1st, 2nd, 4th, 8th, ... occurrence */
+ if ((last_count & (-last_count)) == last_count)
+ mask = D_WARNING;
+ last_count++;
+
+ /* Add some margin, since there is a small race if other RPCs arrive
+ * out-of-order and have already consumed some grant. We want to
+ * leave this here in case there is a large error in accounting. */
+ CDEBUG(oa->o_grant > fed->fed_grant + FILTER_GRANT_CHUNK ? mask:D_CACHE,
+ "%s: cli %s/%p reports grant: "LPU64" dropped: %u, local: %lu\n",
+ obd->obd_name, exp->exp_client_uuid.uuid, exp, oa->o_grant,
+ oa->o_dropped, fed->fed_grant);
+
+ /* Update our accounting now so that statfs takes it into account.
+ * Note that fed_dirty is only approximate and can become incorrect
+ * if RPCs arrive out-of-order. No important calculations depend
+ * on fed_dirty however. */
+ obd->u.filter.fo_tot_dirty += oa->o_dirty - fed->fed_dirty;
+ /* Clamp o_dropped so a bogus client value can't underflow either the
+ * per-export grant or the filter-wide total below. */
+ if (fed->fed_grant < oa->o_dropped) {
+ CERROR("%s: cli %s/%p reports %u dropped > fed_grant %lu\n",
+ obd->obd_name, exp->exp_client_uuid.uuid, exp,
+ oa->o_dropped, fed->fed_grant);
+ oa->o_dropped = 0;
+ }
+ if (obd->u.filter.fo_tot_granted < oa->o_dropped) {
+ CERROR("%s: cli %s/%p reports %u dropped > tot_grant "LPU64"\n",
+ obd->obd_name, exp->exp_client_uuid.uuid, exp,
+ oa->o_dropped, obd->u.filter.fo_tot_granted);
+ oa->o_dropped = 0;
+ }
+ obd->u.filter.fo_tot_granted -= oa->o_dropped;
+ fed->fed_grant -= oa->o_dropped;
+ fed->fed_dirty = oa->o_dirty;
+ EXIT;
+}
+
+#define GRANT_FOR_LLOG(obd) 16
+
+/* Figure out how much space is available between what we've granted
+ * and what remains in the filesystem. Compensate for ext3 indirect
+ * block overhead when computing how much free space is left ungranted.
+ *
+ * Caller must hold obd_osfs_lock. */
+obd_size filter_grant_space_left(struct obd_export *exp)
+{
+ struct obd_device *obd = exp->exp_obd;
+ int blockbits = obd->u.filter.fo_sb->s_blocksize_bits;
+ obd_size tot_granted = obd->u.filter.fo_tot_granted, avail, left = 0;
+ int rc, statfs_done = 0;
+
+ LASSERT_SPIN_LOCKED(&obd->obd_osfs_lock);
+
+ /* Refresh the cached statfs data if it is more than 1s old.
+ * max_age of jiffies + 1 forces an actual (non-cached) statfs call. */
+ if (time_before(obd->obd_osfs_age, jiffies - HZ)) {
+restat:
+ rc = fsfilt_statfs(obd, obd->u.filter.fo_sb, jiffies + 1);
+ if (rc) /* N.B. statfs can't really fail */
+ RETURN(0);
+ statfs_done = 1;
+ }
+
+ avail = obd->obd_osfs.os_bavail;
+ /* avail >> (blockbits - 3) == avail / (block pointers per block):
+ * an estimate of the (d)indirect block overhead for avail blocks */
+ left = avail - (avail >> (blockbits - 3)); /* (d)indirect */
+ if (left > GRANT_FOR_LLOG(obd)) {
+ /* reserve a few blocks for llog use; convert blocks -> bytes */
+ left = (left - GRANT_FOR_LLOG(obd)) << blockbits;
+ } else {
+ left = 0 /* << blockbits */;
+ }
+
+ /* If we look nearly full but the statfs data was cached, force a
+ * fresh statfs before believing it. statfs_done guarantees we only
+ * loop back once. */
+ if (!statfs_done && left < 32 * FILTER_GRANT_CHUNK + tot_granted) {
+ CDEBUG(D_CACHE, "fs has no space left and statfs too old\n");
+ goto restat;
+ }
+
+ if (left >= tot_granted) {
+ left -= tot_granted;
+ } else {
+ /* Granted more than is available: complain (at most once per
+ * 20s, tracked by the static 'next'). The lock is dropped
+ * around CERROR, so the values printed are snapshots and may
+ * be stale by the time they appear. */
+ static unsigned long next;
+ if (left < tot_granted - obd->u.filter.fo_tot_pending &&
+ time_after(jiffies, next)) {
+ spin_unlock(&obd->obd_osfs_lock);
+ CERROR("%s: cli %s/%p grant "LPU64" > available "
+ LPU64" and pending "LPU64"\n", obd->obd_name,
+ exp->exp_client_uuid.uuid, exp, tot_granted,
+ left, obd->u.filter.fo_tot_pending);
+ if (next == 0)
+ portals_debug_dumplog();
+ next = jiffies + 20 * HZ;
+ spin_lock(&obd->obd_osfs_lock);
+ }
+ left = 0;
+ }
+
+ CDEBUG(D_CACHE, "%s: cli %s/%p free: "LPU64" avail: "LPU64" grant "LPU64
+ " left: "LPU64" pending: "LPU64"\n", obd->obd_name,
+ exp->exp_client_uuid.uuid, exp,
+ obd->obd_osfs.os_bfree << blockbits, avail << blockbits,
+ tot_granted, left, obd->u.filter.fo_tot_pending);
+
+ return left;
+}
+
+/* Calculate how much grant space to allocate to this client, based on how
+ * much space is currently free and how much of that is already granted.
+ *
+ * Returns the number of bytes newly granted (0 if none); also updates
+ * fed_grant and fo_tot_granted for any grant given.
+ * Caller must hold obd_osfs_lock. */
+long filter_grant(struct obd_export *exp, obd_size current_grant,
+ obd_size want, obd_size fs_space_left)
+{
+ struct obd_device *obd = exp->exp_obd;
+ struct filter_export_data *fed = &exp->exp_filter_data;
+ int blockbits = obd->u.filter.fo_sb->s_blocksize_bits;
+ __u64 grant = 0;
+
+ LASSERT_SPIN_LOCKED(&obd->obd_osfs_lock);
+
+ /* Grant some fraction of the client's requested grant space so that
+ * they are not always waiting for write credits (not all of it to
+ * avoid overgranting in face of multiple RPCs in flight). This
+ * essentially will be able to control the OSC_MAX_RIF for a client.
+ *
+ * If we do have a large disparity between what the client thinks it
+ * has and what we think it has, don't grant very much and let the
+ * client consume its grant first. Either it just has lots of RPCs
+ * in flight, or it was evicted and its grants will soon be used up. */
+ if (current_grant < want &&
+ current_grant < fed->fed_grant + FILTER_GRANT_CHUNK) {
+ /* min of half the request and 1/8 of remaining free space,
+ * computed in whole blocks then converted back to bytes */
+ grant = min((want >> blockbits) / 2,
+ (fs_space_left >> blockbits) / 8);
+ grant <<= blockbits;
+
+ if (grant) {
+ /* never hand out more than one chunk per RPC */
+ if (grant > FILTER_GRANT_CHUNK)
+ grant = FILTER_GRANT_CHUNK;
+
+ obd->u.filter.fo_tot_granted += grant;
+ fed->fed_grant += grant;
+ }
+ }
+
+ CDEBUG(D_CACHE,"%s: cli %s/%p wants: "LPU64" granting: "LPU64"\n",
+ obd->obd_name, exp->exp_client_uuid.uuid, exp, want, grant);
+ CDEBUG(D_CACHE,
+ "%s: cli %s/%p tot cached:"LPU64" granted:"LPU64
+ " num_exports: %d\n", obd->obd_name, exp->exp_client_uuid.uuid,
+ exp, obd->u.filter.fo_tot_dirty,
+ obd->u.filter.fo_tot_granted, obd->obd_num_exports);
+
+ return grant;
+}
+
static int filter_preprw_read(int cmd, struct obd_export *exp, struct obdo *oa,
int objcount, struct obd_ioobj *obj,
int niocount, struct niobuf_remote *nb,
struct niobuf_local *res,
struct obd_trans_info *oti)
{
- struct obd_run_ctxt saved;
+ struct obd_device *obd = exp->exp_obd;
+ struct lvfs_run_ctxt saved;
struct obd_ioobj *o;
struct niobuf_remote *rnb;
struct niobuf_local *lnb = NULL;
/* We are currently not supporting multi-obj BRW_READ RPCS at all.
* When we do this function's dentry cleanup will need to be fixed */
LASSERT(objcount == 1);
+ LASSERT(obj->ioo_bufcnt > 0);
+
+ if (oa && oa->o_valid & OBD_MD_FLGRANT) {
+ spin_lock(&obd->obd_osfs_lock);
+ filter_grant_incoming(exp, oa);
+
+#if 0
+ /* Reads do not increase grants */
+ oa->o_grant = filter_grant(exp, oa->o_grant, oa->o_undirty,
+ filter_grant_space_left(exp));
+#else
+ oa->o_grant = 0;
+#endif
+ spin_unlock(&obd->obd_osfs_lock);
+ }
OBD_ALLOC(fso, objcount * sizeof(*fso));
if (fso == NULL)
memset(res, 0, niocount * sizeof(*res));
- push_ctxt(&saved, &exp->exp_obd->obd_ctxt, NULL);
+ push_ctxt(&saved, &exp->exp_obd->obd_lvfs_ctxt, NULL);
for (i = 0, o = obj; i < objcount; i++, o++) {
LASSERT(o->ioo_bufcnt);
- dentry = filter_oa2dentry(exp->exp_obd, oa);
+ dentry = filter_oa2dentry(obd, oa);
if (IS_ERR(dentry))
GOTO(cleanup, rc = PTR_ERR(dentry));
lnb->offset = rnb->offset;
lnb->len = rnb->len;
lnb->flags = rnb->flags;
- lnb->start = jiffies;
if (inode->i_size <= rnb->offset) {
/* If there's no more data, abort early.
* easy to detect later. */
break;
} else {
- rc = filter_start_page_read(inode, lnb);
+ rc = filter_start_page_read(obd, inode, lnb);
}
if (rc) {
GOTO(cleanup, rc);
}
+ if (inode->i_size < lnb->offset + lnb->len - 1)
+ lnb->rc = inode->i_size - lnb->offset;
+ else
+ lnb->rc = lnb->len;
+
tot_bytes += lnb->rc;
if (lnb->rc < lnb->len) {
/* short read, be sure to wait on it */
CDEBUG(D_INFO, "start_page_read: %lu jiffies\n",
(jiffies - now));
- lprocfs_counter_add(exp->exp_obd->obd_stats, LPROC_FILTER_READ_BYTES,
- tot_bytes);
+ lprocfs_counter_add(obd->obd_stats, LPROC_FILTER_READ_BYTES, tot_bytes);
while (lnb-- > res) {
rc = filter_finish_page_read(lnb);
if (rc) {
CERROR("NULL dentry in cleanup -- tell CFS\n");
case 0:
OBD_FREE(fso, objcount * sizeof(*fso));
- pop_ctxt(&saved, &exp->exp_obd->obd_ctxt, NULL);
+ pop_ctxt(&saved, &exp->exp_obd->obd_lvfs_ctxt, NULL);
+ }
+ return rc;
+}
+
+/* When clients have dirtied as much space as they've been granted they
+ * fall through to sync writes. These sync writes haven't been expressed
+ * in grants and need to error with ENOSPC when there isn't room in the
+ * filesystem for them after grants are taken into account. However,
+ * writeback of the dirty data that was already granted space can write
+ * right on through.
+ *
+ * Returns 0 if at least one buffer can proceed, -ENOSPC if none can;
+ * per-buffer failures are recorded in lnb[n].rc.
+ * Caller must hold obd_osfs_lock. */
+static int filter_grant_check(struct obd_export *exp, int objcount,
+ struct fsfilt_objinfo *fso, int niocount,
+ struct niobuf_remote *rnb,
+ struct niobuf_local *lnb, obd_size *left,
+ struct inode *inode)
+{
+ struct filter_export_data *fed = &exp->exp_filter_data;
+ int blocksize = exp->exp_obd->u.filter.fo_sb->s_blocksize;
+ unsigned long used = 0, ungranted = 0, using;
+ int i, rc = -ENOSPC, obj, n = 0, mask = D_CACHE;
+
+ LASSERT_SPIN_LOCKED(&exp->exp_obd->obd_osfs_lock);
+
+ for (obj = 0; obj < objcount; obj++) {
+ for (i = 0; i < fso[obj].fso_bufcnt; i++, n++) {
+ int tmp, bytes;
+
+ /* FIXME: this is calculated with PAGE_SIZE on client */
+ /* round the request out to whole filesystem blocks,
+ * since that is what will actually be consumed */
+ bytes = rnb[n].len;
+ bytes += rnb[n].offset & (blocksize - 1);
+ tmp = (rnb[n].offset + rnb[n].len) & (blocksize - 1);
+ if (tmp)
+ bytes += blocksize - tmp;
+
+ if (rnb[n].flags & OBD_BRW_FROM_GRANT) {
+ if (fed->fed_grant < used + bytes) {
+ CDEBUG(D_CACHE,
+ "%s: cli %s/%p claims %ld+%d "
+ "GRANT, real grant %lu idx %d\n",
+ exp->exp_obd->obd_name,
+ exp->exp_client_uuid.uuid, exp,
+ used, bytes, fed->fed_grant, n);
+ mask = D_ERROR;
+ } else {
+ used += bytes;
+ rnb[n].flags |= OBD_BRW_GRANTED;
+ lnb[n].lnb_grant_used = bytes;
+ CDEBUG(0, "idx %d used=%lu\n", n, used);
+ rc = 0;
+ continue;
+ }
+ }
+ if (*left > ungranted) {
+ /* if enough space, pretend it was granted */
+ ungranted += bytes;
+ rnb[n].flags |= OBD_BRW_GRANTED;
+ CDEBUG(0, "idx %d ungranted=%lu\n",n,ungranted);
+ rc = 0;
+ continue;
+ }
+
+ /* We can't check for already-mapped blocks here, as
+ * it requires dropping the osfs lock to do the bmap.
+ * Instead, we return ENOSPC and in that case we need
+ * to go through and verify if all of the blocks not
+ * marked BRW_GRANTED are already mapped and we can
+ * ignore this error. */
+ lnb[n].rc = -ENOSPC;
+ /* clear only GRANTED; "&= OBD_BRW_GRANTED" would have
+ * wiped every other flag (and kept GRANTED if set) */
+ rnb[n].flags &= ~OBD_BRW_GRANTED;
+ CDEBUG(D_CACHE,"%s: cli %s/%p idx %d no space for %d\n",
+ exp->exp_obd->obd_name,
+ exp->exp_client_uuid.uuid, exp, n, bytes);
+ }
}
+
+ /* Now subtract what the client has used already. We don't subtract
+ * this from the tot_granted yet, so that other clients can't grab
+ * that space before we have actually allocated our blocks. That
+ * happens in filter_grant_commit() after the writes are done. */
+ *left -= ungranted;
+ fed->fed_grant -= used;
+ fed->fed_pending += used;
+ exp->exp_obd->u.filter.fo_tot_pending += used;
+
+ CDEBUG(mask,
+ "%s: cli %s/%p used: %lu ungranted: %lu grant: %lu dirty: %lu\n",
+ exp->exp_obd->obd_name, exp->exp_client_uuid.uuid, exp, used,
+ ungranted, fed->fed_grant, fed->fed_dirty);
+
+ /* Rough calc in case we don't refresh cached statfs data */
+ using = (used + ungranted + 1 ) >>
+ exp->exp_obd->u.filter.fo_sb->s_blocksize_bits;
+ if (exp->exp_obd->obd_osfs.os_bavail > using)
+ exp->exp_obd->obd_osfs.os_bavail -= using;
+ else
+ exp->exp_obd->obd_osfs.os_bavail = 0;
+
+ /* clamp so fed_dirty/fo_tot_dirty can't underflow on bad input */
+ if (fed->fed_dirty < used) {
+ CERROR("%s: cli %s/%p claims used %lu > fed_dirty %lu\n",
+ exp->exp_obd->obd_name, exp->exp_client_uuid.uuid, exp,
+ used, fed->fed_dirty);
+ used = fed->fed_dirty;
+ }
+ exp->exp_obd->u.filter.fo_tot_dirty -= used;
+ fed->fed_dirty -= used;
+
return rc;
}
-static int filter_start_page_write(struct inode *inode,
+static int filter_start_page_write(struct obd_device *obd, struct inode *inode,
struct niobuf_local *lnb)
{
- struct page *page = alloc_pages(GFP_HIGHUSER, 0);
+ struct page *page;
+
+ if (lnb->len != PAGE_SIZE)
+ return filter_start_page_read(obd, inode, lnb);
+
+ page = alloc_pages(GFP_HIGHUSER, 0);
if (page == NULL) {
CERROR("no memory for a temp page\n");
RETURN(lnb->rc = -ENOMEM);
}
+#if 0
POISON_PAGE(page, 0xf1);
+ if (lnb->len != PAGE_SIZE) {
+ memset(kmap(page) + lnb->len, 0, PAGE_SIZE - lnb->len);
+ kunmap(page);
+ }
+#endif
page->index = lnb->offset >> PAGE_SHIFT;
lnb->page = page;
return 0;
}
+/* Undo filter_start_page_write() for one buffer: a partial page came
+ * from the page cache via the read path and just needs its reference
+ * dropped; a full page is a private allocation we free outright. */
+static void filter_abort_page_write(struct niobuf_local *lnb)
+{
+ LASSERT(lnb->page != NULL);
+
+ if (lnb->len == PAGE_SIZE)
+ __free_pages(lnb->page, 0);
+ else
+ page_cache_release(lnb->page);
+}
+
+/* a helper for both the 2.4 and 2.6 commitrw paths which are both built
+ * up by our shared filter_preprw_write(). Full pages are private
+ * allocations: on success they are flipped into the page cache before
+ * being freed; partial pages go back through the read-page path. */
+void filter_release_write_page(struct filter_obd *filter, struct inode *inode,
+ struct niobuf_local *lnb, int rc)
+{
+ if (lnb->len == PAGE_SIZE) {
+ if (rc == 0)
+ flip_into_page_cache(inode, lnb->page);
+ __free_page(lnb->page);
+ return;
+ }
+
+ /* partial page: set up by filter_start_page_read() */
+ filter_release_read_page(filter, inode, lnb->page);
+}
+
/* If we ever start to support multi-object BRW RPCs, we will need to get locks
* on multiple inodes. That isn't all, because there still exists the
* possibility of a truncate starting a new transaction while holding the ext3
struct niobuf_local *res,
struct obd_trans_info *oti)
{
- struct obd_run_ctxt saved;
+ struct lvfs_run_ctxt saved;
struct niobuf_remote *rnb;
- struct niobuf_local *lnb = NULL;
+ struct niobuf_local *lnb;
struct fsfilt_objinfo fso;
struct dentry *dentry;
- int rc = 0, i, tot_bytes = 0;
+ obd_size left;
unsigned long now = jiffies;
+ int rc = 0, i, tot_bytes = 0, cleanup_phase = 1;
ENTRY;
LASSERT(objcount == 1);
LASSERT(obj->ioo_bufcnt > 0);
memset(res, 0, niocount * sizeof(*res));
- push_ctxt(&saved, &exp->exp_obd->obd_ctxt, NULL);
- dentry = filter_fid2dentry(exp->exp_obd, NULL, obj->ioo_gr,
+ push_ctxt(&saved, &exp->exp_obd->obd_lvfs_ctxt, NULL);
+ dentry = filter_fid2dentry(exp->exp_obd, NULL, obj->ioo_gr,
obj->ioo_id);
if (IS_ERR(dentry))
GOTO(cleanup, rc = PTR_ERR(dentry));
CDEBUG(D_INFO, "preprw_write setup: %lu jiffies\n",
(jiffies - now));
+ spin_lock(&exp->exp_obd->obd_osfs_lock);
+ if (oa)
+ filter_grant_incoming(exp, oa);
+ cleanup_phase = 0;
+
+ left = filter_grant_space_left(exp);
+
+ rc = filter_grant_check(exp, objcount, &fso, niocount, nb, res,
+ &left, dentry->d_inode);
+ if (oa && oa->o_valid & OBD_MD_FLGRANT)
+ oa->o_grant = filter_grant(exp,oa->o_grant,oa->o_undirty,left);
+
+ spin_unlock(&exp->exp_obd->obd_osfs_lock);
+
+ if (rc) {
+ f_dput(dentry);
+ GOTO(cleanup, rc);
+ }
+
for (i = 0, rnb = nb, lnb = res; i < obj->ioo_bufcnt;
i++, lnb++, rnb++) {
+ /* We still set up for ungranted pages so that granted pages
+ * can be written to disk as they were promised, and portals
+ * needs to keep the pages all aligned properly. */
lnb->dentry = dentry;
lnb->offset = rnb->offset;
lnb->len = rnb->len;
lnb->flags = rnb->flags;
- lnb->start = jiffies;
- rc = filter_start_page_write(dentry->d_inode, lnb);
+ rc = filter_start_page_write(exp->exp_obd, dentry->d_inode,lnb);
if (rc) {
- CDEBUG(rc == -ENOSPC ? D_INODE : D_ERROR, "page err %u@"
- LPU64" %u/%u %p: rc %d\n", lnb->len, lnb->offset,
+ CERROR("page err %u@"LPU64" %u/%u %p: rc %d\n",
+ lnb->len, lnb->offset,
i, obj->ioo_bufcnt, dentry, rc);
while (lnb-- > res)
- __free_pages(lnb->page, 0);
+ filter_abort_page_write(lnb);
f_dput(dentry);
GOTO(cleanup, rc);
}
- tot_bytes += lnb->len;
+ if (lnb->rc == 0)
+ tot_bytes += lnb->len;
+ }
+
+ while (lnb-- > res) {
+ if (lnb->len == PAGE_SIZE)
+ continue;
+ rc = filter_finish_page_read(lnb);
+ if (rc) {
+ CERROR("error page %u@"LPU64" %u %p: rc %d\n", lnb->len,
+ lnb->offset, (int)(lnb - res), lnb->dentry, rc);
+ GOTO(cleanup, rc);
+ }
}
if (time_after(jiffies, now + 15 * HZ))
tot_bytes);
EXIT;
cleanup:
- pop_ctxt(&saved, &exp->exp_obd->obd_ctxt, NULL);
+ switch(cleanup_phase) {
+ case 1:
+ spin_lock(&exp->exp_obd->obd_osfs_lock);
+ if (oa)
+ filter_grant_incoming(exp, oa);
+ spin_unlock(&exp->exp_obd->obd_osfs_lock);
+ default: ;
+ }
+ pop_ctxt(&saved, &exp->exp_obd->obd_lvfs_ctxt, NULL);
return rc;
}
return -EPROTO;
}
+/* Drop our reference on a page obtained for a read. For files larger
+ * than fo_readcache_max_filesize the page is also evicted from the page
+ * cache (like truncate_list_pages()) so large streaming reads do not
+ * push out more useful cached data. */
+void filter_release_read_page(struct filter_obd *filter, struct inode *inode,
+ struct page *page)
+{
+ int evict = (inode != NULL &&
+ inode->i_size > filter->fo_readcache_max_filesize);
+
+ if (evict && !TryLockPage(page)) {
+ /* only pages still attached to a mapping can be truncated */
+ if (page->mapping)
+ ll_truncate_complete_page(page);
+ unlock_page(page);
+ }
+ page_cache_release(page);
+}
+
static int filter_commitrw_read(struct obd_export *exp, struct obdo *oa,
int objcount, struct obd_ioobj *obj,
int niocount, struct niobuf_local *res,
- struct obd_trans_info *oti)
+ struct obd_trans_info *oti, int rc)
{
struct obd_ioobj *o;
struct niobuf_local *lnb;
int i, j;
+ struct inode *inode = NULL;
ENTRY;
+ if (res->dentry != NULL)
+ inode = res->dentry->d_inode;
+
for (i = 0, o = obj, lnb = res; i < objcount; i++, o++) {
for (j = 0 ; j < o->ioo_bufcnt ; j++, lnb++) {
- if (lnb->page != NULL)
- page_cache_release(lnb->page);
+ if (lnb->page == NULL)
+ continue;
+ filter_release_read_page(&exp->exp_obd->u.filter,
+ inode, lnb->page);
}
}
+
if (res->dentry != NULL)
f_dput(res->dentry);
- RETURN(0);
+ RETURN(rc);
}
void flip_into_page_cache(struct inode *inode, struct page *new_page)
/* the dlm is protecting us from read/write concurrency, so we
* expect this find_lock_page to return quickly. even if we
* race with another writer it won't be doing much work with
- * the page locked. we do this 'cause t_c_p expects a
+ * the page locked. we do this 'cause t_c_p expects a
* locked page, and it wants to grab the pagecache lock
* as well. */
old_page = find_lock_page(inode->i_mapping, new_page->index);
if (old_page) {
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
- truncate_complete_page(old_page);
-#else
- truncate_complete_page(old_page->mapping, old_page);
-#endif
+ ll_truncate_complete_page(old_page);
unlock_page(old_page);
page_cache_release(old_page);
}
/* racing o_directs (no locking ioctl) could race adding
* their pages, so we repeat the page invalidation unless
* we successfully added our new page */
- rc = add_to_page_cache_unique(new_page, inode->i_mapping,
+ rc = add_to_page_cache_unique(new_page, inode->i_mapping,
new_page->index,
- page_hash(inode->i_mapping,
+ page_hash(inode->i_mapping,
new_page->index));
if (rc == 0) {
/* add_to_page_cache clears uptodate|dirty and locks
SetPageUptodate(new_page);
unlock_page(new_page);
}
-#else
+#else
rc = 0;
#endif
} while (rc != 0);
}
-/* XXX needs to trickle its oa down */
+/* Retire the grant bookkeeping for completed writes: sum the per-buffer
+ * lnb_grant_used amounts recorded by filter_grant_check() and remove the
+ * total from the per-export pending count and the filter-wide
+ * granted/pending totals. Takes obd_osfs_lock itself, so the caller
+ * must NOT already hold it. */
+void filter_grant_commit(struct obd_export *exp, int niocount,
+ struct niobuf_local *res)
+{
+ struct filter_obd *filter = &exp->exp_obd->u.filter;
+ struct niobuf_local *lnb = res;
+ unsigned long pending = 0;
+ int i;
+
+ spin_lock(&exp->exp_obd->obd_osfs_lock);
+ for (i = 0, lnb = res; i < niocount; i++, lnb++)
+ pending += lnb->lnb_grant_used;
+
+ /* each LASSERTF guards the subtraction below it from underflow */
+ LASSERTF(exp->exp_filter_data.fed_pending >= pending,
+ "%s: cli %s/%p fed_pending: %lu grant_used: %lu\n",
+ exp->exp_obd->obd_name, exp->exp_client_uuid.uuid, exp,
+ exp->exp_filter_data.fed_pending, pending);
+ exp->exp_filter_data.fed_pending -= pending;
+ LASSERTF(filter->fo_tot_granted >= pending,
+ "%s: cli %s/%p tot_granted: "LPU64" grant_used: %lu\n",
+ exp->exp_obd->obd_name, exp->exp_client_uuid.uuid, exp,
+ exp->exp_obd->u.filter.fo_tot_granted, pending);
+ filter->fo_tot_granted -= pending;
+ LASSERTF(filter->fo_tot_pending >= pending,
+ "%s: cli %s/%p tot_pending: "LPU64" grant_used: %lu\n",
+ exp->exp_obd->obd_name, exp->exp_client_uuid.uuid, exp,
+ filter->fo_tot_pending, pending);
+ filter->fo_tot_pending -= pending;
+
+ spin_unlock(&exp->exp_obd->obd_osfs_lock);
+}
+/* Perform snapshot copy-on-write for an object about to be written, if
+ * its filesystem flags include SM_DO_COW. Collects the non-empty remote
+ * niobufs into a write_extents array and passes them to
+ * fsfilt_do_write_cow(). Returns 0 on success or when COW is not
+ * needed, negative errno on failure. Only single-object RPCs are
+ * supported (nioo == 1). */
+int filter_do_cow(struct obd_export *exp, struct obd_ioobj *obj,
+ int nioo, struct niobuf_remote *rnb)
+{
+ struct dentry *dentry;
+ struct lvfs_run_ctxt saved;
+ struct write_extents *extents = NULL;
+ int j, rc = 0, numexts = 0, flags = 0;
+
+ ENTRY;
+
+ LASSERT(nioo == 1);
+
+ push_ctxt(&saved, &exp->exp_obd->obd_lvfs_ctxt, NULL);
+
+ dentry = filter_fid2dentry(exp->exp_obd, NULL, obj->ioo_gr,
+ obj->ioo_id);
+ if (IS_ERR(dentry)) {
+ /* can't use the cleanup path: no dentry to dput */
+ pop_ctxt(&saved, &exp->exp_obd->obd_lvfs_ctxt, NULL);
+ RETURN (PTR_ERR(dentry));
+ }
+
+ if (dentry->d_inode == NULL) {
+ CERROR("trying to write extents to non-existent file "LPU64"\n",
+ obj->ioo_id);
+ GOTO(cleanup, rc = -ENOENT);
+ }
+
+ flags = fsfilt_get_fs_flags(exp->exp_obd, dentry);
+ if (!(flags & SM_DO_COW)) {
+ /* no snapshot COW required for this object */
+ GOTO(cleanup, rc);
+ }
+ OBD_ALLOC(extents, obj->ioo_bufcnt * sizeof(struct write_extents));
+ if (!extents) {
+ CERROR("No Memory\n");
+ GOTO(cleanup, rc = -ENOMEM);
+ }
+ /* one extent per non-empty niobuf; numexts <= ioo_bufcnt */
+ for (j = 0; j < obj->ioo_bufcnt; j++) {
+ if (rnb[j].len != 0) {
+ extents[numexts].w_count = rnb[j].len;
+ extents[numexts].w_pos = rnb[j].offset;
+ numexts++;
+ }
+ }
+ rc = fsfilt_do_write_cow(exp->exp_obd, dentry, extents, numexts);
+ if (rc) {
+ CERROR("Do cow error id "LPU64" rc:%d \n",
+ obj->ioo_id, rc);
+ GOTO(cleanup, rc);
+ }
+
+cleanup:
+ if (extents) {
+ OBD_FREE(extents, obj->ioo_bufcnt * sizeof(struct write_extents));
+ }
+ f_dput(dentry);
+ pop_ctxt(&saved, &exp->exp_obd->obd_lvfs_ctxt, NULL);
+ RETURN(rc);
+
+}
+/* Record the extents of a completed write for objects whose filesystem
+ * flags include SM_DO_REC. Adjacent local niobufs are coalesced into a
+ * single (offset, len) extent before each fsfilt_write_extents() call to
+ * minimize the number of records. Returns the incoming rc on success
+ * (or when recording is not needed), negative errno on failure.
+ * Only single-object RPCs are supported (nobj == 1). */
+int filter_write_extents(struct obd_export *exp, struct obd_ioobj *obj, int nobj,
+ int niocount, struct niobuf_local *local, int rc)
+{
+ struct lvfs_run_ctxt saved;
+ struct dentry *dentry;
+ struct niobuf_local *lnb;
+ __u64 offset = 0;
+ __u32 len = 0;
+ int i, flags;
+
+ ENTRY;
+
+ LASSERT(nobj == 1);
+
+ push_ctxt(&saved, &exp->exp_obd->obd_lvfs_ctxt, NULL);
+
+ dentry = filter_fid2dentry(exp->exp_obd, NULL, obj->ioo_gr,
+ obj->ioo_id);
+ if (IS_ERR(dentry)) {
+ /* can't use the cleanup path: no dentry to dput */
+ pop_ctxt(&saved, &exp->exp_obd->obd_lvfs_ctxt, NULL);
+ RETURN (PTR_ERR(dentry));
+ }
+
+ if (dentry->d_inode == NULL) {
+ CERROR("trying to write extents to non-existent file "LPU64"\n",
+ obj->ioo_id);
+ GOTO(cleanup, rc = -ENOENT);
+ }
+
+ flags = fsfilt_get_fs_flags(exp->exp_obd, dentry);
+ if (!(flags & SM_DO_REC)) {
+ /* extent recording not enabled for this object */
+ GOTO(cleanup, rc);
+ }
+
+ /* accumulate contiguous niobufs into (offset, len); flush the
+ * pending extent whenever a gap is found, then start a new one */
+ for (i = 0, lnb = local; i < obj->ioo_bufcnt; i++, lnb++) {
+ if (len == 0) {
+ offset = lnb->offset;
+ len = lnb->len;
+ } else if (lnb->offset == (offset + len)) {
+ len += lnb->len;
+ } else {
+ rc = fsfilt_write_extents(exp->exp_obd, dentry,
+ offset, len);
+ if (rc) {
+ CERROR("write exts off "LPU64" num %u rc:%d\n",
+ offset, len, rc);
+ GOTO(cleanup, rc);
+ }
+ offset = lnb->offset;
+ len = lnb->len;
+ }
+ }
+ /* flush the final pending extent, if any */
+ if (len > 0) {
+ rc = fsfilt_write_extents(exp->exp_obd, dentry,
+ offset, len);
+ if (rc) {
+ CERROR("write exts off "LPU64" num %u rc:%d\n",
+ offset, len, rc);
+ GOTO(cleanup, rc);
+ }
+ }
+cleanup:
+ f_dput(dentry);
+ pop_ctxt(&saved, &exp->exp_obd->obd_lvfs_ctxt, NULL);
+ RETURN(rc);
+}
+
int filter_commitrw(int cmd, struct obd_export *exp, struct obdo *oa,
int objcount, struct obd_ioobj *obj, int niocount,
- struct niobuf_local *res, struct obd_trans_info *oti)
+ struct niobuf_local *res, struct obd_trans_info *oti,int rc)
{
if (cmd == OBD_BRW_WRITE)
return filter_commitrw_write(exp, oa, objcount, obj, niocount,
- res, oti);
+ res, oti, rc);
if (cmd == OBD_BRW_READ)
return filter_commitrw_read(exp, oa, objcount, obj, niocount,
- res, oti);
+ res, oti, rc);
LBUG();
return -EPROTO;
}
kunmap(pga[i].pg);
}
- ret = filter_commitrw(cmd, exp, oa, 1, &ioo, oa_bufs, lnb, oti);
+ ret = filter_commitrw(cmd, exp, oa, 1, &ioo, oa_bufs, lnb, oti, ret);
out:
if (lnb)