/* Grab the dirty and seen grant announcements from the incoming obdo.
* We will later calculate the client's new grant and return it.
* Caller must hold osfs lock */
-static void filter_grant_incoming(struct obd_export *exp, struct obdo *oa)
+void filter_grant_incoming(struct obd_export *exp, struct obdo *oa)
{
struct filter_export_data *fed;
struct obd_device *obd = exp->exp_obd;
obd->u.filter.fo_tot_granted -= oa->o_dropped;
fed->fed_grant -= oa->o_dropped;
fed->fed_dirty = oa->o_dirty;
+
+ if (oa->o_flags & OBD_FL_SHRINK_GRANT) {
+ obd_size left_space = filter_grant_space_left(exp);
+ struct filter_obd *filter = &exp->exp_obd->u.filter;
+
+ /* The grant space can only be shrunk when
+  * left_space < fo_tot_granted_clients * 32MB */
+ if (left_space < filter->fo_tot_granted_clients *
+ FILTER_GRANT_SHRINK_LIMIT) {
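+ /* the client is handing back o_grant bytes, so drop them from
+  * both the per-export grant and the filter-wide total */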
+ fed->fed_grant -= oa->o_grant;
+ filter->fo_tot_granted -= oa->o_grant;
+ CDEBUG(D_CACHE, "%s: cli %s/%p shrink "LPU64
+ " fed_grant %ld total "LPU64"\n",
+ obd->obd_name, exp->exp_client_uuid.uuid,
+ exp, oa->o_grant, fed->fed_grant,
+ filter->fo_tot_granted);
+ oa->o_grant = 0;
+ }
+ }
+
if (fed->fed_dirty < 0 || fed->fed_grant < 0 || fed->fed_pending < 0) {
CERROR("%s: cli %s/%p dirty %ld pend %ld grant %ld\n",
obd->obd_name, exp->exp_client_uuid.uuid, exp,
/*
* the routine is used to request pages from pagecache
*
- * use GFP_NOFS not allowing to enter FS as the client can run on this node
- * and we might end waiting on a page he sent in the request we're serving.
- *
+ * use GFP_NOFS for requests from a local client so we do not re-enter the
+ * filesystem, as we might end up waiting on a page the client sent in the
+ * request we're serving.
* use __GFP_HIGHMEM so that the pages can use all of the available memory
* on 32-bit machines
+ * use the more aggressive GFP_HIGHUSER flags for non-local clients to be able
+ * to generate more memory pressure, but at the same time use __GFP_NOMEMALLOC
+ * so as not to exhaust the emergency reserves.
+ *
+ * See Bug 19529 and Bug 19917 for details.
*/
static struct page *filter_get_page(struct obd_device *obd,
struct inode *inode,
- obd_off offset)
+ obd_off offset,
+ int localreq)
{
struct page *page;
page = find_or_create_page(inode->i_mapping, offset >> CFS_PAGE_SHIFT,
- GFP_NOFS | __GFP_HIGHMEM);
+ (localreq ? (GFP_NOFS | __GFP_HIGHMEM)
+ : (GFP_HIGHUSER | __GFP_NOMEMALLOC)));
if (unlikely(page == NULL))
lprocfs_counter_add(obd->obd_stats, LPROC_FILTER_NO_PAGE, 1);
}
}
+/*
+ * The invalidate above doesn't work during reads because LNET pins the pages,
+ * so truncate is used here instead to drop the pages from the cache.
+ */
+void filter_truncate_cache(struct obd_device *obd, struct obd_ioobj *obj,
+ struct niobuf_remote *nb, int pages,
+ struct niobuf_local *res, struct inode *inode)
+{
+ struct niobuf_remote *rnb;
+ struct niobuf_local *lnb; /* used by the truncate_complete_page branch */
+ int i;
+
+ LASSERT(inode != NULL);
+#ifdef HAVE_TRUNCATE_RANGE
+ for (i = 0, rnb = nb; i < obj->ioo_bufcnt; i++, rnb++) {
+ /* drop the pages covered by this remote niobuf's range */
+ truncate_inode_pages_range(inode->i_mapping,
+ rnb->offset & CFS_PAGE_MASK,
+ (rnb->offset + rnb->len - 1) |
+ ~CFS_PAGE_MASK);
+ }
+#elif defined(HAVE_TRUNCATE_COMPLETE)
+ for (i = 0, lnb = res; i < pages; i++, lnb++)
+ truncate_complete_page(inode->i_mapping, lnb->page);
+#else
+#error "Neither truncate_inode_pages_range nor truncate_complete_page is supported"
+#endif
+}
+
static int filter_preprw_read(int cmd, struct obd_export *exp, struct obdo *oa,
int objcount, struct obd_ioobj *obj,
struct niobuf_remote *nb,
spin_lock(&obd->obd_osfs_lock);
filter_grant_incoming(exp, oa);
- oa->o_grant = 0;
+ if (!(oa->o_flags & OBD_FL_SHRINK_GRANT))
+ oa->o_grant = 0;
spin_unlock(&obd->obd_osfs_lock);
}
* so it's easy to detect later. */
break;
- lnb->page = filter_get_page(obd, inode, lnb->offset);
+ lnb->page = filter_get_page(obd, inode, lnb->offset, 0);
if (lnb->page == NULL)
GOTO(cleanup, rc = -ENOMEM);
void *iobuf;
obd_size left;
unsigned long now = jiffies, timediff;
- int rc = 0, i, tot_bytes = 0, cleanup_phase = 0;
+ int rc = 0, i, tot_bytes = 0, cleanup_phase = 0, localreq = 0;
ENTRY;
LASSERT(objcount == 1);
LASSERT(obj->ioo_bufcnt > 0);
if (rc)
RETURN(rc);
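+
+ /* a request is local when the peer NID matches our own NID, i.e. the
+  * client is running on this OST node; see the GFP comment above
+  * filter_get_page() */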
+ if (exp->exp_connection->c_peer.nid == exp->exp_connection->c_self)
+ localreq = 1;
+
push_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
iobuf = filter_iobuf_get(&obd->u.filter, oti);
if (IS_ERR(iobuf))
if (rc)
GOTO(cleanup, rc);
+ cleanup_phase = 4;
+
+ /* Filter truncate first locks i_mutex and then the partially
+  * truncated page, while the filter write code first locks the
+  * pages and then takes i_mutex.  To avoid a deadlock on concurrent
+  * punch/write requests from one client, filter writes and filter
+  * truncates are serialized by i_alloc_sem, allowing either
+  * multiple writes or a single truncate. */
+ down_read(&dentry->d_inode->i_alloc_sem);
do_gettimeofday(&start);
for (i = 0, lnb = res; i < *npages; i++, lnb++) {
* needs to keep the pages all aligned properly. */
lnb->dentry = dentry;
- lnb->page = filter_get_page(obd, dentry->d_inode, lnb->offset);
+ lnb->page = filter_get_page(obd, dentry->d_inode, lnb->offset,
+ localreq);
if (lnb->page == NULL)
GOTO(cleanup, rc = -ENOMEM);
- cleanup_phase = 4;
/* DLM locking protects us from write and truncate competing
* for same region, but truncate can leave dirty page in the
lnb->page = NULL;
}
}
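+ /* the write path is done taking pages; release i_alloc_sem taken above */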
+ up_read(&dentry->d_inode->i_alloc_sem);
}
case 3:
filter_iobuf_put(&obd->u.filter, iobuf, oti);
if (res->dentry != NULL)
inode = res->dentry->d_inode;
- for (i = 0, lnb = res; i < npages; i++, lnb++) {
- if (lnb->page != NULL) {
+ for (i = 0, lnb = res; i < npages; i++, lnb++)
+ if (lnb->page != NULL)
page_cache_release(lnb->page);
- lnb->page = NULL;
- }
- }
if (inode && (fo->fo_read_cache == 0 ||
- i_size_read(inode) > fo->fo_readcache_max_filesize))
- filter_invalidate_cache(exp->exp_obd, obj, rnb, inode);
+ i_size_read(inode) > fo->fo_readcache_max_filesize))
+ filter_truncate_cache(exp->exp_obd, obj, rnb, npages, res,
+ inode);
+
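+ /* filter_truncate_cache() above may still need lnb->page (the
+  * truncate_complete_page case), so only clear the pointers here */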
+ for (i = 0, lnb = res; i < npages; i++, lnb++)
+ lnb->page = NULL;
if (res->dentry != NULL)
f_dput(res->dentry);