The current read-on-open implementation allocates cl_page structures
after the data has been piggybacked on the open request, which is
expensive and unnecessary.
This patch improves that case by adding the pages directly into the
page cache.
Since those pages will be discarded at lock revocation, there should
be no concerns.
Signed-off-by: Jinshan Xiong <jinshan.xiong@uber.com>
Change-Id: Idef1b70483e3780790ba5b95c26ef2d4141add5f
Reviewed-on: https://review.whamcloud.com/33234
Tested-by: Jenkins
Reviewed-by: Mike Pershin <mpershin@whamcloud.com>
Tested-by: Maloo <hpdd-maloo@intel.com>
Reviewed-by: Andreas Dilger <adilger@whamcloud.com>
Reviewed-by: Oleg Drokin <green@whamcloud.com>
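
The new path amounts to grabbing each page from the inode's mapping, copying
the piggybacked bytes into it, and marking it up to date so a later read is
served from the cache. A minimal sketch of that idea, assuming a hypothetical
helper dom_add_page() and simplified error handling rather than the patch's
actual code:

#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>

/* Illustrative sketch only: insert one page of piggybacked reply data
 * straight into the inode's page cache, without allocating cl_page. */
static int dom_add_page(struct inode *inode, loff_t offset,
			const char *data, size_t len)
{
	struct page *vmpage;
	void *kaddr;

	/* find or create the locked page at this file offset */
	vmpage = grab_cache_page(inode->i_mapping, offset >> PAGE_SHIFT);
	if (vmpage == NULL)
		return -ENOMEM;

	/* copy the reply data and zero-fill the tail of a partial page */
	kaddr = kmap_atomic(vmpage);
	memcpy(kaddr, data, len);
	if (len < PAGE_SIZE)
		memset(kaddr + len, 0, PAGE_SIZE - len);
	kunmap_atomic(kaddr);

	/* mark it up to date; like any cached page it is simply dropped
	 * when the covering lock is revoked */
	SetPageUptodate(vmpage);
	unlock_page(vmpage);
	put_page(vmpage);
	return 0;
}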
struct page *vmpage;
struct niobuf_remote *rnb;
char *data;
- struct lu_env *env;
- struct cl_io *io;
- __u16 refcheck;
struct lustre_handle lockh;
struct ldlm_lock *lock;
unsigned long index, start;
struct niobuf_local lnb;
bool dom_lock = false;
ENTRY;
dom_lock = ldlm_has_dom(lock);
LDLM_LOCK_PUT(lock);
}
if (!dom_lock)
RETURN_EXIT;
- env = cl_env_get(&refcheck);
- if (IS_ERR(env))
- RETURN_EXIT;
-
if (!req_capsule_has_field(&req->rq_pill, &RMF_NIOBUF_INLINE,
RCL_SERVER))
- GOTO(out_env, rc = -ENODATA);
rnb = req_capsule_server_get(&req->rq_pill, &RMF_NIOBUF_INLINE);
- data = (char *)rnb + sizeof(*rnb);
-
if (rnb == NULL || rnb->rnb_len == 0)
CDEBUG(D_INFO, "Get data buffer along with open, len %i, i_size %llu\n",
rnb->rnb_len, i_size_read(inode));
- io = vvp_env_thread_io(env);
- io->ci_obj = obj;
- io->ci_ignore_layout = 1;
- rc = cl_io_init(env, io, CIT_MISC, obj);
- if (rc)
- GOTO(out_io, rc);
+ data = (char *)rnb + sizeof(*rnb);
lnb.lnb_file_offset = rnb->rnb_offset;
start = lnb.lnb_file_offset / PAGE_SIZE;
LASSERT(lnb.lnb_file_offset % PAGE_SIZE == 0);
lnb.lnb_page_offset = 0;
do {
lnb.lnb_data = data + (index << PAGE_SHIFT);
lnb.lnb_len = rnb->rnb_len - (index << PAGE_SHIFT);
if (lnb.lnb_len > PAGE_SIZE)
PTR_ERR(vmpage));
break;
}
- lock_page(vmpage);
- if (vmpage->mapping == NULL) {
- unlock_page(vmpage);
- put_page(vmpage);
- /* page was truncated */
- GOTO(out_io, rc = -ENODATA);
- }
- clp = cl_page_find(env, obj, vmpage->index, vmpage,
- CPT_CACHEABLE);
- if (IS_ERR(clp)) {
- unlock_page(vmpage);
- put_page(vmpage);
- GOTO(out_io, rc = PTR_ERR(clp));
- }
-
- /* export page */
- cl_page_export(env, clp, 1);
- cl_page_put(env, clp);
- unlock_page(vmpage);
put_page(vmpage);
index++;
} while (rnb->rnb_len > (index << PAGE_SHIFT));
-out_io:
- cl_io_fini(env, io);
-out_env:
- cl_env_put(env, &refcheck);
}
static int ll_intent_file_open(struct dentry *de, void *lmm, int lmmsize,
+ if (!lli->lli_clob) {
+ /* due to DoM read on open, there may exist pages for Lustre
+ * regular file even though cl_object is not set up yet. */
+ truncate_inode_pages(inode->i_mapping, 0);
env = cl_env_get(&refcheck);
if (IS_ERR(env))