/* Grab the dirty and seen grant announcements from the incoming obdo.
* We will later calculate the clients new grant and return it.
* Caller must hold osfs lock */
-static void filter_grant_incoming(struct obd_export *exp, struct obdo *oa)
+void filter_grant_incoming(struct obd_export *exp, struct obdo *oa)
{
struct filter_export_data *fed;
struct obd_device *obd = exp->exp_obd;
obd->u.filter.fo_tot_granted -= oa->o_dropped;
fed->fed_grant -= oa->o_dropped;
fed->fed_dirty = oa->o_dirty;
+
+ if (oa->o_flags & OBD_FL_SHRINK_GRANT) {
+ obd_size left_space = filter_grant_space_left(exp);
+ struct filter_obd *filter = &exp->exp_obd->u.filter;
+
+                /* The grant space can only be shrunk when
+                 * left_space < fo_tot_granted_clients * 32M */
+ if (left_space < filter->fo_tot_granted_clients *
+ FILTER_GRANT_SHRINK_LIMIT) {
+ fed->fed_grant -= oa->o_grant;
+ filter->fo_tot_granted -= oa->o_grant;
+ CDEBUG(D_CACHE, "%s: cli %s/%p shrink "LPU64
+ "fed_grant %ld total "LPU64"\n",
+ obd->obd_name, exp->exp_client_uuid.uuid,
+ exp, oa->o_grant, fed->fed_grant,
+ filter->fo_tot_granted);
+ oa->o_grant = 0;
+ }
+ }
+
if (fed->fed_dirty < 0 || fed->fed_grant < 0 || fed->fed_pending < 0) {
CERROR("%s: cli %s/%p dirty %ld pend %ld grant %ld\n",
obd->obd_name, exp->exp_client_uuid.uuid, exp,
/* Calculate how much grant space to allocate to this client, based on how
* much space is currently free and how much of that is already granted.
*
+ * if @conservative != 0, we limit the maximum grant to FILTER_GRANT_CHUNK;
+ * otherwise we try to satisfy the requested amount as far as possible,
+ * which is usually the case on client reconnect.
+ *
* Caller must hold obd_osfs_lock. */
long filter_grant(struct obd_export *exp, obd_size current_grant,
- obd_size want, obd_size fs_space_left)
+ obd_size want, obd_size fs_space_left, int conservative)
{
struct obd_device *obd = exp->exp_obd;
struct filter_export_data *fed = &exp->exp_filter_data;
obd->obd_name, exp->exp_client_uuid.uuid, exp, want);
} else if (current_grant < want &&
current_grant < fed->fed_grant + FILTER_GRANT_CHUNK) {
- grant = min((want >> blockbits),
- (fs_space_left >> blockbits) / 8);
- grant <<= blockbits;
+ grant = min(want + (1 << blockbits) - 1, fs_space_left / 8);
+ grant &= ~((1ULL << blockbits) - 1);
if (grant) {
- /* Allow >FILTER_GRANT_CHUNK size when clients
- * reconnect due to a server reboot.
- */
- if ((grant > FILTER_GRANT_CHUNK) &&
- (!obd->obd_recovering))
+ if (grant > FILTER_GRANT_CHUNK && conservative)
grant = FILTER_GRANT_CHUNK;
obd->u.filter.fo_tot_granted += grant;
}
CDEBUG(D_CACHE,
- "%s: cli %s/%p wants: "LPU64" current grant "LPU64
+ "%s: cli %s/%p wants: "LPU64" current grant "LPU64
" granting: "LPU64"\n", obd->obd_name, exp->exp_client_uuid.uuid,
exp, want, current_grant, grant);
CDEBUG(D_CACHE,
/*
* the routine is used to request pages from pagecache
*
- * use GFP_NOFS not allowing to enter FS as the client can run on this node
- * and we might end waiting on a page he sent in the request we're serving.
+ * use GFP_NOFS for requests from a local client, which must not re-enter the
+ * FS, as we might end up waiting on a page it sent in the request we're
+ * serving.
+ * use __GFP_HIGHMEM so that the pages can use all of the available memory
+ * on 32-bit machines.
+ * use the more aggressive GFP_HIGHUSER flags for non-local clients to be
+ * able to generate more memory pressure, but at the same time use
+ * __GFP_NOMEMALLOC so as not to exhaust the emergency reserves.
*
- * use NORETRY so that the allocator doesn't go crazy: chance to more lucky
- * thread have enough memory to complete his request. for our request client
- * will do resend hopefully -bzzz
+ * See Bug 19529 and Bug 19917 for details.
*/
-static struct page * filter_get_page(struct obd_device *obd,
- struct inode *inode,
- obd_off offset)
+static struct page *filter_get_page(struct obd_device *obd,
+ struct inode *inode,
+ obd_off offset,
+ int localreq)
{
struct page *page;
page = find_or_create_page(inode->i_mapping, offset >> CFS_PAGE_SHIFT,
- GFP_NOFS | __GFP_NORETRY);
+ (localreq ? (GFP_NOFS | __GFP_HIGHMEM)
+ : GFP_HIGHUSER));
if (unlikely(page == NULL))
lprocfs_counter_add(obd->obd_stats, LPROC_FILTER_NO_PAGE, 1);
}
/*
- * the function is used to free all pages used for request
- * just to mimic cacheless OSS which don't occupy much memory
+ * the invalidate path above does not work during reads because LNET pins
+ * the pages; truncate is used here instead to drop the pages from the cache
*/
-void filter_invalidate_cache(struct obd_device *obd, struct obd_ioobj *obj,
- struct niobuf_remote *nb, struct inode *inode)
+void filter_release_cache(struct obd_device *obd, struct obd_ioobj *obj,
+ struct niobuf_remote *rnb, struct inode *inode)
{
- struct niobuf_remote *rnb;
int i;
LASSERT(inode != NULL);
-
- for (i = 0, rnb = nb; i < obj->ioo_bufcnt; i++, rnb++) {
- obd_off start = rnb->offset >> CFS_PAGE_SHIFT;
- obd_off end = (rnb->offset + rnb->len) >> CFS_PAGE_SHIFT;
- invalidate_mapping_pages(inode->i_mapping, start, end);
- /* just to avoid warnings */
- start = 0;
- end = 0;
+ for (i = 0; i < obj->ioo_bufcnt; i++, rnb++) {
+#ifdef HAVE_TRUNCATE_RANGE
+                /* remove the pages that cover this range */
+ truncate_inode_pages_range(inode->i_mapping,
+ rnb->offset & CFS_PAGE_MASK,
+ (rnb->offset + rnb->len - 1) |
+ ~CFS_PAGE_MASK);
+#else
+ /* use invalidate for old kernels */
+ invalidate_mapping_pages(inode->i_mapping,
+ rnb->offset >> CFS_PAGE_SHIFT,
+ (rnb->offset + rnb->len) >>
+ CFS_PAGE_SHIFT);
+#endif
}
-
}
static int filter_preprw_read(int cmd, struct obd_export *exp, struct obdo *oa,
int objcount, struct obd_ioobj *obj,
struct niobuf_remote *nb,
- int *pages, struct niobuf_local *res,
+ int *npages, struct niobuf_local *res,
struct obd_trans_info *oti,
struct lustre_capa *capa)
{
struct obd_device *obd = exp->exp_obd;
- struct filter_obd *fo = &obd->u.filter;
struct timeval start, end;
struct lvfs_run_ctxt saved;
struct niobuf_local *lnb;
int rc = 0, i, tot_bytes = 0;
unsigned long now = jiffies;
long timediff;
+ loff_t isize;
ENTRY;
/* We are currently not supporting multi-obj BRW_READ RPCS at all.
LASSERTF(objcount == 1, "%d\n", objcount);
LASSERTF(obj->ioo_bufcnt > 0, "%d\n", obj->ioo_bufcnt);
- rc = filter_auth_capa(exp, NULL, obdo_mdsno(oa), capa,
+ LASSERT(oa->o_valid & OBD_MD_FLGROUP);
+ rc = filter_auth_capa(exp, NULL, oa->o_gr, capa,
CAPA_OPC_OSS_READ);
if (rc)
RETURN(rc);
spin_lock(&obd->obd_osfs_lock);
filter_grant_incoming(exp, oa);
- oa->o_grant = 0;
+ if (!(oa->o_flags & OBD_FL_SHRINK_GRANT))
+ oa->o_grant = 0;
spin_unlock(&obd->obd_osfs_lock);
}
}
inode = dentry->d_inode;
+ isize = i_size_read(inode);
obdo_to_inode(inode, oa, OBD_MD_FLATIME);
- rc = filter_map_remote_to_local(objcount, obj, nb, pages, res);
+ rc = filter_map_remote_to_local(objcount, obj, nb, npages, res);
if (rc)
GOTO(cleanup, rc);
/* find pages for all segments, fill array with them */
do_gettimeofday(&start);
- for (i = 0, lnb = res; i < *pages; i++, lnb++) {
+ for (i = 0, lnb = res; i < *npages; i++, lnb++) {
lnb->dentry = dentry;
- if (i_size_read(inode) <= lnb->offset)
+ if (isize <= lnb->offset)
/* If there's no more data, abort early. lnb->rc == 0,
* so it's easy to detect later. */
break;
- lnb->page = filter_get_page(obd, inode, lnb->offset);
+ lnb->page = filter_get_page(obd, inode, lnb->offset, 0);
if (lnb->page == NULL)
GOTO(cleanup, rc = -ENOMEM);
lprocfs_counter_add(obd->obd_stats, LPROC_FILTER_CACHE_ACCESS, 1);
- if (i_size_read(inode) < lnb->offset + lnb->len - 1)
- lnb->rc = i_size_read(inode) - lnb->offset;
+ if (isize < lnb->offset + lnb->len - 1)
+ lnb->rc = isize - lnb->offset;
else
lnb->rc = lnb->len;
cleanup:
/* unlock pages to allow access from concurrent OST_READ */
- for (i = 0, lnb = res; i < *pages; i++, lnb++) {
+ for (i = 0, lnb = res; i < *npages; i++, lnb++) {
if (lnb->page) {
LASSERT(PageLocked(lnb->page));
unlock_page(lnb->page);
}
}
- if (inode && (fo->fo_read_cache == 0 ||
- i_size_read(inode) > fo->fo_readcache_max_filesize))
- filter_invalidate_cache(obd, obj, nb, inode);
-
if (rc != 0) {
if (dentry != NULL)
f_dput(dentry);
* right on through.
*
* Caller must hold obd_osfs_lock. */
-static int filter_grant_check(struct obd_export *exp, struct obdo *oa,
- int objcount, struct fsfilt_objinfo *fso,
+static int filter_grant_check(struct obd_export *exp, struct obdo *oa,
+ int objcount, struct fsfilt_objinfo *fso,
int niocount, struct niobuf_local *lnb,
obd_size *left, struct inode *inode)
{
* bug) or ensure we get the page locks in an appropriate order. */
static int filter_preprw_write(int cmd, struct obd_export *exp, struct obdo *oa,
int objcount, struct obd_ioobj *obj,
- struct niobuf_remote *nb, int *pages,
+ struct niobuf_remote *nb, int *npages,
struct niobuf_local *res,
struct obd_trans_info *oti,
struct lustre_capa *capa)
void *iobuf;
obd_size left;
unsigned long now = jiffies, timediff;
- int rc = 0, i, tot_bytes = 0, cleanup_phase = 0;
+ int rc = 0, i, tot_bytes = 0, cleanup_phase = 0, localreq = 0;
ENTRY;
LASSERT(objcount == 1);
LASSERT(obj->ioo_bufcnt > 0);
- rc = filter_auth_capa(exp, NULL, obdo_mdsno(oa), capa,
+ LASSERT(oa->o_valid & OBD_MD_FLGROUP);
+ rc = filter_auth_capa(exp, NULL, oa->o_gr, capa,
CAPA_OPC_OSS_WRITE);
if (rc)
RETURN(rc);
- push_ctxt(&saved, &exp->exp_obd->obd_lvfs_ctxt, NULL);
- iobuf = filter_iobuf_get(&exp->exp_obd->u.filter, oti);
+ if (exp->exp_connection &&
+ exp->exp_connection->c_peer.nid == exp->exp_connection->c_self)
+ localreq = 1;
+
+ push_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
+ iobuf = filter_iobuf_get(&obd->u.filter, oti);
if (IS_ERR(iobuf))
GOTO(cleanup, rc = PTR_ERR(iobuf));
cleanup_phase = 1;
- dentry = filter_fid2dentry(exp->exp_obd, NULL, obj->ioo_gr,
+ dentry = filter_fid2dentry(obd, NULL, obj->ioo_gr,
obj->ioo_id);
if (IS_ERR(dentry))
GOTO(cleanup, rc = PTR_ERR(dentry));
if (dentry->d_inode == NULL) {
CERROR("%s: trying to BRW to non-existent file "LPU64"\n",
- exp->exp_obd->obd_name, obj->ioo_id);
+ obd->obd_name, obj->ioo_id);
GOTO(cleanup, rc = -ENOENT);
}
- rc = filter_map_remote_to_local(objcount, obj, nb, pages, res);
+ if (oa->o_valid & (OBD_MD_FLUID | OBD_MD_FLGID) &&
+ dentry->d_inode->i_mode & (S_ISUID | S_ISGID)) {
+ rc = filter_capa_fixoa(exp, oa, oa->o_gr, capa);
+ if (rc)
+ GOTO(cleanup, rc);
+ }
+
+ rc = filter_map_remote_to_local(objcount, obj, nb, npages, res);
if (rc)
GOTO(cleanup, rc);
- fsfilt_check_slow(exp->exp_obd, now, "preprw_write setup");
+ fsfilt_check_slow(obd, now, "preprw_write setup");
/* Don't update inode timestamps if this write is older than a
* setattr which modifies the timestamps. b=10150 */
fmd = filter_fmd_find(exp, obj->ioo_id, obj->ioo_gr);
LASSERT(oa != NULL);
- spin_lock(&exp->exp_obd->obd_osfs_lock);
+ spin_lock(&obd->obd_osfs_lock);
filter_grant_incoming(exp, oa);
if (fmd && fmd->fmd_mactime_xid > oti->oti_xid)
oa->o_valid &= ~(OBD_MD_FLMTIME | OBD_MD_FLCTIME |
left = filter_grant_space_left(exp);
fso.fso_dentry = dentry;
- fso.fso_bufcnt = *pages;
+ fso.fso_bufcnt = *npages;
- rc = filter_grant_check(exp, oa, objcount, &fso, *pages, res,
+ rc = filter_grant_check(exp, oa, objcount, &fso, *npages, res,
&left, dentry->d_inode);
/* do not zero out oa->o_valid as it is used in filter_commitrw_write()
* for setting UID/GID and fid EA in first write time. */
if (oa->o_valid & OBD_MD_FLGRANT)
- oa->o_grant = filter_grant(exp,oa->o_grant,oa->o_undirty,left);
+ oa->o_grant = filter_grant(exp, oa->o_grant, oa->o_undirty,
+ left, 1);
- spin_unlock(&exp->exp_obd->obd_osfs_lock);
+ spin_unlock(&obd->obd_osfs_lock);
filter_fmd_put(exp, fmd);
if (rc)
GOTO(cleanup, rc);
+ cleanup_phase = 4;
+
+        /* Filter truncate first locks i_mutex and then the partially
+         * truncated page; the filter write code first locks the pages and
+         * then takes i_mutex. To avoid a deadlock between concurrent
+         * punch/write requests from one client, filter writes and
+         * filter truncates are serialized by i_alloc_sem, allowing
+         * multiple writes or a single truncate at a time. */
+ down_read(&dentry->d_inode->i_alloc_sem);
do_gettimeofday(&start);
- for (i = 0, lnb = res; i < *pages; i++, lnb++) {
+ for (i = 0, lnb = res; i < *npages; i++, lnb++) {
/* We still set up for ungranted pages so that granted pages
* can be written to disk as they were promised, and portals
* needs to keep the pages all aligned properly. */
lnb->dentry = dentry;
- lnb->page = filter_get_page(obd, dentry->d_inode, lnb->offset);
+ lnb->page = filter_get_page(obd, dentry->d_inode, lnb->offset,
+ localreq);
if (lnb->page == NULL)
GOTO(cleanup, rc = -ENOMEM);
- cleanup_phase = 4;
/* DLM locking protects us from write and truncate competing
* for same region, but truncate can leave dirty page in the
* just wait for writeout completion, should be rare enough.
* -bzzz */
wait_on_page_writeback(lnb->page);
+ BUG_ON(PageWriteback(lnb->page));
/* If the filter writes a partial page, then has the file
* extended, the client will read in the whole page. the
LPU64" flg %x before EOF %llu\n",
lnb->len, lnb->offset,lnb->flags,
i_size_read(dentry->d_inode));
- filter_iobuf_add_page(exp->exp_obd, iobuf,
+ filter_iobuf_add_page(obd, iobuf,
dentry->d_inode,
lnb->page);
} else {
rc = filter_direct_io(OBD_BRW_READ, dentry, iobuf, exp,
NULL, NULL, NULL);
- fsfilt_check_slow(exp->exp_obd, now, "start_page_write");
+ fsfilt_check_slow(obd, now, "start_page_write");
if (exp->exp_nid_stats && exp->exp_nid_stats->nid_stats)
lprocfs_counter_add(exp->exp_nid_stats->nid_stats,
switch(cleanup_phase) {
case 4:
if (rc) {
- for (i = 0, lnb = res; i < *pages; i++, lnb++) {
+ for (i = 0, lnb = res; i < *npages; i++, lnb++) {
if (lnb->page != NULL) {
unlock_page(lnb->page);
page_cache_release(lnb->page);
lnb->page = NULL;
}
}
+ up_read(&dentry->d_inode->i_alloc_sem);
}
case 3:
- filter_iobuf_put(&exp->exp_obd->u.filter, iobuf, oti);
+ filter_iobuf_put(&obd->u.filter, iobuf, oti);
case 2:
- pop_ctxt(&saved, &exp->exp_obd->obd_lvfs_ctxt, NULL);
+ pop_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
if (rc)
f_dput(dentry);
break;
case 1:
- filter_iobuf_put(&exp->exp_obd->u.filter, iobuf, oti);
+ filter_iobuf_put(&obd->u.filter, iobuf, oti);
case 0:
- spin_lock(&exp->exp_obd->obd_osfs_lock);
+ spin_lock(&obd->obd_osfs_lock);
if (oa)
filter_grant_incoming(exp, oa);
- spin_unlock(&exp->exp_obd->obd_osfs_lock);
- pop_ctxt(&saved, &exp->exp_obd->obd_lvfs_ctxt, NULL);
+ spin_unlock(&obd->obd_osfs_lock);
+ pop_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
break;
default:;
}
int filter_preprw(int cmd, struct obd_export *exp, struct obdo *oa,
int objcount, struct obd_ioobj *obj,
- struct niobuf_remote *nb, int *pages,
+ struct niobuf_remote *nb, int *npages,
struct niobuf_local *res, struct obd_trans_info *oti,
struct lustre_capa *capa)
{
if (cmd == OBD_BRW_WRITE)
return filter_preprw_write(cmd, exp, oa, objcount, obj,
- nb, pages, res, oti, capa);
+ nb, npages, res, oti, capa);
if (cmd == OBD_BRW_READ)
return filter_preprw_read(cmd, exp, oa, objcount, obj,
- nb, pages, res, oti, capa);
+ nb, npages, res, oti, capa);
LBUG();
return -EPROTO;
}
static int filter_commitrw_read(struct obd_export *exp, struct obdo *oa,
int objcount, struct obd_ioobj *obj,
struct niobuf_remote *rnb,
- int pages, struct niobuf_local *res,
+ int npages, struct niobuf_local *res,
struct obd_trans_info *oti, int rc)
{
+ struct filter_obd *fo = &exp->exp_obd->u.filter;
struct inode *inode = NULL;
struct ldlm_res_id res_id;
struct ldlm_resource *resource = NULL;
resource = ldlm_resource_get(ns, NULL, &res_id, LDLM_EXTENT, 0);
if (resource != NULL) {
- ns->ns_lvbo->lvbo_update(resource, NULL, 0, 1);
+ LDLM_RESOURCE_ADDREF(resource);
+ ns->ns_lvbo->lvbo_update(resource, NULL, 1);
+ LDLM_RESOURCE_DELREF(resource);
ldlm_resource_putref(resource);
}
}
if (res->dentry != NULL)
inode = res->dentry->d_inode;
- for (i = 0, lnb = res; i < pages; i++, lnb++) {
+ for (i = 0, lnb = res; i < npages; i++, lnb++) {
if (lnb->page != NULL) {
page_cache_release(lnb->page);
lnb->page = NULL;
}
}
+ if (inode && (fo->fo_read_cache == 0 ||
+ i_size_read(inode) > fo->fo_readcache_max_filesize))
+ filter_release_cache(exp->exp_obd, obj, rnb, inode);
if (res->dentry != NULL)
f_dput(res->dentry);
int filter_commitrw(int cmd, struct obd_export *exp, struct obdo *oa,
int objcount, struct obd_ioobj *obj,
- struct niobuf_remote *nb, int pages,
+ struct niobuf_remote *nb, int npages,
struct niobuf_local *res, struct obd_trans_info *oti,
int rc)
{
if (cmd == OBD_BRW_WRITE)
return filter_commitrw_write(exp, oa, objcount, obj,
- nb, pages, res, oti, rc);
+ nb, npages, res, oti, rc);
if (cmd == OBD_BRW_READ)
return filter_commitrw_read(exp, oa, objcount, obj,
- nb, pages, res, oti, rc);
+ nb, npages, res, oti, rc);
LBUG();
return -EPROTO;
}