/* Operation type */
__u32 op_opc;
+
+ /* Used by readdir: number of pages to fill in one readpage call and
+  * the directory hash offset (as produced by hash_x_index()) to start
+  * reading from. */
+ __u32 op_npages;
+ __u64 op_offset;
};
struct md_enqueue_info;
struct md_open_data **mod);
int (*m_sync)(struct obd_export *, const struct lu_fid *,
struct obd_capa *, struct ptlrpc_request **);
- int (*m_readpage)(struct obd_export *, const struct lu_fid *,
- struct obd_capa *, __u64, struct page **,
- unsigned, struct ptlrpc_request **);
+ /* Directory readpage: the dir fid, capability, start hash and page
+  * count now travel in struct md_op_data (op_fid1, op_capa1,
+  * op_offset, op_npages) instead of explicit arguments. */
+ int (*m_readpage)(struct obd_export *, struct md_op_data *,
+ struct page **, struct ptlrpc_request **);
int (*m_unlink)(struct obd_export *, struct md_op_data *,
struct ptlrpc_request **);
RETURN(rc);
}
-static inline int md_readpage(struct obd_export *exp, const struct lu_fid *fid,
- struct obd_capa *oc, __u64 offset,
- struct page **pages, unsigned npages,
+/* Dispatch to the md layer's ->m_readpage().  The directory fid,
+ * capability, start hash and page count are packed in @opdata
+ * (op_fid1, op_capa1, op_offset, op_npages). */
+static inline int md_readpage(struct obd_export *exp, struct md_op_data *opdata,
+ struct page **pages,
struct ptlrpc_request **request)
{
int rc;
ENTRY;
EXP_CHECK_MD_OP(exp, readpage);
EXP_MD_COUNTER_INCREMENT(exp, readpage);
- rc = MDP(exp->exp_obd, readpage)(exp, fid, oc, offset, pages, npages,
- request);
+ rc = MDP(exp->exp_obd, readpage)(exp, opdata, pages, request);
RETURN(rc);
}
struct lookup_intent it = { .it_op = IT_READDIR };
struct md_op_data op_data = {{ 0 }};
ldlm_policy_data_t policy = { .l_inodebits = { MDS_INODELOCK_UPDATE } };
- __u64 offset;
int rc = 0;
ENTRY;
+ /* Prepared up front (previously inside the !rc branch) because
+  * op_data is now also needed by the md_readpage() call below. */
+ llu_prep_md_op_data(&op_data, inode, NULL, NULL, 0, 0, LUSTRE_OPC_ANY);
rc = md_lock_match(sbi->ll_md_exp, LDLM_FL_BLOCK_GRANTED,
&lli->lli_fid, LDLM_IBITS, &policy, LCK_CR, &lockh);
if (!rc) {
llu_md_blocking_ast, ldlm_completion_ast, NULL, NULL,
inode};
- llu_prep_md_op_data(&op_data, inode, NULL, NULL, 0, 0,
- LUSTRE_OPC_ANY);
-
rc = md_enqueue(sbi->ll_md_exp, &einfo, &it,
&op_data, &lockh, NULL, 0, NULL,
LDLM_FL_CANCEL_ON_BLOCK);
}
ldlm_lock_dump_handle(D_OTHER, &lockh);
- offset = (__u64)hash_x_index(page->index, 0);
- rc = md_readpage(sbi->ll_md_exp, &lli->lli_fid, NULL,
- offset, &page, 1, &request);
+ /* Pack the start hash and single-page count into op_data.
+  * NOTE(review): this relies on llu_prep_md_op_data() above having
+  * filled op_fid1 with the dir fid (previously passed explicitly as
+  * &lli->lli_fid) — confirm. */
+ op_data.op_offset = (__u64)hash_x_index(page->index, 0);
+ op_data.op_npages = 1;
+ rc = md_readpage(sbi->ll_md_exp, &op_data, &page, &request);
if (!rc) {
body = req_capsule_server_get(&request->rq_pill, &RMF_MDT_BODY);
LASSERT(body != NULL); /* checked by md_readpage() */
op_data->op_data = NULL;
}
-void llu_finish_md_op_data(struct md_op_data *op_data)
-{
- OBD_FREE_PTR(op_data);
-}
-
void obdo_refresh_inode(struct inode *dst,
struct obdo *src,
obd_flag valid)
void llu_prep_md_op_data(struct md_op_data *op_data, struct inode *i1,
struct inode *i2, const char *name, int namelen,
int mode, __u32 opc);
-void llu_finish_md_op_data(struct md_op_data *op_data);
int llu_create(struct inode *dir, struct pnode_base *pnode, int mode);
int llu_local_open(struct llu_inode_info *lli, struct lookup_intent *it);
int llu_iop_open(struct pnode *pnode, int flags, mode_t mode);
struct obd_export *exp = ll_i2sbi(inode)->ll_md_exp;
struct ptlrpc_request *request;
struct mdt_body *body;
- struct obd_capa *oc;
+ struct md_op_data *op_data;
__u64 hash;
struct page **page_pool;
struct page *page;
page_pool[npages] = page;
}
- oc = ll_mdscapa_get(inode);
- rc = md_readpage(exp, ll_inode2fid(inode), oc, hash, page_pool, npages,
- &request);
- capa_put(oc);
+ /* fid/capa/offset/npages are now packed into an md_op_data. */
+ op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0, 0,
+ LUSTRE_OPC_ANY, NULL);
+ if (IS_ERR(op_data)) {
+ /* ll_prep_md_op_data() returns ERR_PTR() on failure; do not
+  * dereference it.  Clear request so the error path below
+  * never touches an uninitialized pointer. */
+ request = NULL;
+ rc = PTR_ERR(op_data);
+ } else {
+ op_data->op_npages = npages;
+ op_data->op_offset = hash;
+ rc = md_readpage(exp, op_data, page_pool, &request);
+ ll_finish_md_op_data(op_data);
+ }
if (rc == 0) {
body = req_capsule_server_get(&request->rq_pill, &RMF_MDT_BODY);
/* Checked by mdc_readpage() */
return id ^ (id >> 32);
}
-static int lmv_readpage(struct obd_export *exp, const struct lu_fid *fid,
- struct obd_capa *oc, __u64 offset64,
- struct page **pages, unsigned npages,
- struct ptlrpc_request **request)
+/* Readpage for a (possibly striped) directory.  The target fid, start
+ * hash and page count arrive in @op_data; local copies are taken
+ * because the stripe logic below rewrites them. */
+static int lmv_readpage(struct obd_export *exp, struct md_op_data *op_data,
+ struct page **pages, struct ptlrpc_request **request)
{
struct obd_device *obd = exp->exp_obd;
struct lmv_obd *lmv = &obd->u.lmv;
- struct lu_fid rid = *fid;
struct lmv_object *obj;
- __u64 offset;
+ struct lu_fid rid = op_data->op_fid1;
+ __u64 offset = op_data->op_offset;
__u64 hash_adj = 0;
__u32 rank = 0;
__u64 seg_size = 0;
struct lu_dirent *ent;
ENTRY;
- offset = offset64;
-
rc = lmv_check_connect(obd);
if (rc)
RETURN(rc);
* [R*MAX_HASH/N ... (R + 1)*MAX_HASH/N] there for we do hash_adj
* on hash values that we get.
*/
- obj = lmv_object_find_lock(obd, fid);
+ obj = lmv_object_find_lock(obd, &rid);
if (obj) {
nr = obj->lo_objcount;
LASSERT(nr > 0);
do_div(seg_size, nr);
los = obj->lo_stripes;
tgt = lmv_get_target(lmv, los[0].ls_mds);
- rank = lmv_node_rank(tgt->ltd_exp, fid) % nr;
+ rank = lmv_node_rank(tgt->ltd_exp, &rid) % nr;
tgt_tmp = offset;
do_div(tgt_tmp, seg_size);
tgt0_idx = do_div(tgt_tmp, nr);
if (IS_ERR(tgt))
GOTO(cleanup, rc = PTR_ERR(tgt));
- rc = md_readpage(tgt->ltd_exp, &rid, oc, offset, pages, npages,
- request);
+ /* Re-point op_data at the selected target fid before forwarding.
+  * NOTE(review): the old call passed the locally hash-adjusted
+  * `offset`, but the new call forwards op_data, whose op_offset still
+  * holds the caller's unadjusted value unless the (unseen) preceding
+  * lines write the adjusted offset back — confirm they do. */
+ op_data->op_fid1 = rid;
+ rc = md_readpage(tgt->ltd_exp, op_data, pages, request);
if (rc)
GOTO(cleanup, rc);
>> CFS_PAGE_SHIFT;
nlupgs = (*request)->rq_bulk->bd_nob_transferred >> LU_PAGE_SHIFT;
LASSERT(!((*request)->rq_bulk->bd_nob_transferred & ~LU_PAGE_MASK));
- LASSERT(nrdpgs > 0 && nrdpgs <= npages);
+ LASSERT(nrdpgs > 0 && nrdpgs <= op_data->op_npages);
- CDEBUG(D_INODE, "read %d(%d)/%d pages\n", nrdpgs, nlupgs, npages);
+ CDEBUG(D_INODE, "read %d(%d)/%d pages\n", nrdpgs, nlupgs,
+ op_data->op_npages);
for (i = 0; i < nrdpgs; i++) {
#if CFS_PAGE_SIZE > LU_PAGE_SIZE
if (obj) {
lmv_hash_adjust(&dp->ldp_hash_start, hash_adj);
lmv_hash_adjust(&dp->ldp_hash_end, hash_adj);
- LASSERT(le64_to_cpu(dp->ldp_hash_start) <= offset64);
+ LASSERT(le64_to_cpu(dp->ldp_hash_start) <=
+ op_data->op_offset);
if ((tgt0_idx != nr - 1) &&
(le64_to_cpu(dp->ldp_hash_end) == MDS_DIR_END_OFF))
EXPORT_SYMBOL(mdc_sendpage);
#endif
-int mdc_readpage(struct obd_export *exp, const struct lu_fid *fid,
- struct obd_capa *oc, __u64 offset, struct page **pages,
- unsigned npages, struct ptlrpc_request **request)
+/* Fetch directory pages from the MDT via a bulk RPC.  @op_data
+ * supplies the dir fid (op_fid1), capability (op_capa1), start hash
+ * (op_offset) and number of bulk pages (op_npages). */
+int mdc_readpage(struct obd_export *exp, struct md_op_data *op_data,
+ struct page **pages, struct ptlrpc_request **request)
{
struct ptlrpc_request *req;
struct ptlrpc_bulk_desc *desc;
if (req == NULL)
RETURN(-ENOMEM);
- mdc_set_capa_size(req, &RMF_CAPA1, oc);
+ mdc_set_capa_size(req, &RMF_CAPA1, op_data->op_capa1);
rc = ptlrpc_request_pack(req, LUSTRE_MDS_VERSION, MDS_READPAGE);
if (rc) {
req->rq_request_portal = MDS_READPAGE_PORTAL;
ptlrpc_at_set_req_timeout(req);
- desc = ptlrpc_prep_bulk_imp(req, npages, BULK_PUT_SINK,
+ desc = ptlrpc_prep_bulk_imp(req, op_data->op_npages, BULK_PUT_SINK,
MDS_BULK_PORTAL);
if (desc == NULL) {
ptlrpc_request_free(req);
}
/* NB req now owns desc and will free it when it gets freed */
- for (i = 0; i < npages; i++)
+ for (i = 0; i < op_data->op_npages; i++)
ptlrpc_prep_bulk_page(desc, pages[i], 0, CFS_PAGE_SIZE);
- mdc_readdir_pack(req, offset, CFS_PAGE_SIZE * npages, fid, oc);
+ mdc_readdir_pack(req, op_data->op_offset,
+ CFS_PAGE_SIZE * op_data->op_npages,
+ &op_data->op_fid1, op_data->op_capa1);
ptlrpc_request_set_replen(req);
rc = ptlrpc_queue_wait(req);
if (req->rq_bulk->bd_nob_transferred & ~LU_PAGE_MASK) {
CERROR("Unexpected # bytes transferred: %d (%ld expected)\n",
req->rq_bulk->bd_nob_transferred,
- CFS_PAGE_SIZE * npages);
+ CFS_PAGE_SIZE * op_data->op_npages);
ptlrpc_req_finished(req);
RETURN(-EPROTO);
}