#endif
#include <lustre_acl.h>
+#include <lustre_ioctl.h>
#include <obd_class.h>
#include <lustre_lmv.h>
#include <lustre_fid.h>
struct client_obd *cli = &req->rq_import->imp_obd->u.cli;
int rc;
- /* mdc_enter_request() ensures that this client has no more
+ /* obd_get_request_slot() ensures that this client has no more
- * than cl_max_rpcs_in_flight RPCs simultaneously inf light
+ * than cl_max_rpcs_in_flight RPCs simultaneously in flight
 * against an MDT. */
- rc = mdc_enter_request(cli);
+ rc = obd_get_request_slot(cli);
if (rc != 0)
return rc;
rc = ptlrpc_queue_wait(req);
- mdc_exit_request(cli);
+ obd_put_request_slot(cli);
return rc;
}
/* Flush local XATTR locks to get rid of a possible cancel RPC */
if (opcode == MDS_REINT && fid_is_sane(fid) &&
exp->exp_connect_data.ocd_ibits_known & MDS_INODELOCK_XATTR) {
- CFS_LIST_HEAD(cancels);
+ struct list_head cancels = LIST_HEAD_INIT(cancels);
int count;
/* Without that packing would fail */
sizeof(struct mdt_rec_reint));
rec = req_capsule_client_get(&req->rq_pill, &RMF_REC_REINT);
rec->sx_opcode = REINT_SETXATTR;
- rec->sx_fsuid = current_fsuid();
- rec->sx_fsgid = current_fsgid();
+ rec->sx_fsuid = from_kuid(&init_user_ns, current_fsuid());
+ rec->sx_fsgid = from_kgid(&init_user_ns, current_fsgid());
rec->sx_cap = cfs_curproc_cap_pack();
rec->sx_suppgid1 = suppgid;
rec->sx_suppgid2 = -1;
req_fmt = &RQF_MDS_RELEASE_CLOSE;
/* allocate a FID for volatile file */
- rc = mdc_fid_alloc(exp, &op_data->op_fid2, op_data);
+ rc = mdc_fid_alloc(NULL, exp, &op_data->op_fid2, op_data);
if (rc < 0) {
CERROR("%s: "DFID" failed to allocate FID: %d\n",
obd->obd_name, PFID(&op_data->op_fid1), rc);
LASSERTF(*start <= *hash, "start = "LPX64
",end = "LPX64",hash = "LPX64"\n",
*start, *end, *hash);
- CDEBUG(D_VFSTRACE, "page%lu [%llu %llu], hash"LPU64"\n",
- offset, *start, *end, *hash);
+ CDEBUG(D_VFSTRACE, "offset %lx ["LPX64" "LPX64"],"
+ " hash "LPX64"\n", offset, *start, *end, *hash);
if (*hash > *end) {
kunmap(page);
mdc_release_page(page, 0);
{
struct lookup_intent it = { .it_op = IT_READDIR };
struct page *page;
- struct inode *dir = NULL;
+ struct inode *dir = op_data->op_data;
struct address_space *mapping;
struct lu_dirpage *dp;
__u64 start = 0;
*ppage = NULL;
- if (op_data->op_mea1 != NULL) {
- __u32 index = op_data->op_stripe_offset;
-
- dir = op_data->op_mea1->lsm_md_oinfo[index].lmo_root;
- } else {
- dir = op_data->op_data;
- }
LASSERT(dir != NULL);
-
mapping = dir->i_mapping;
- rc = mdc_intent_lock(exp, op_data, NULL, 0, &it, 0, &enq_req,
+ rc = mdc_intent_lock(exp, op_data, &it, &enq_req,
cb_op->md_blocking_ast, 0);
if (enq_req != NULL)
ptlrpc_req_finished(enq_req);
struct lu_dirpage *dp;
struct lu_dirent *ent;
int rc = 0;
+ __u32 same_hash_count;
+ __u64 hash_offset = op_data->op_hash_offset;
ENTRY;
- CDEBUG(D_INFO, DFID "offset = "LPU64"\n", PFID(&op_data->op_fid1),
- op_data->op_hash_offset);
+ CDEBUG(D_INFO, DFID " offset = "LPU64", flags %#x\n",
+ PFID(&op_data->op_fid1), op_data->op_hash_offset,
+ op_data->op_cli_flags);
*ppage = NULL;
*entp = NULL;
if (rc != 0)
RETURN(rc);
+ /* same_hash_count means how many entries with this
+ * hash value have been read */
+ same_hash_count = op_data->op_same_hash_offset + 1;
dp = page_address(page);
for (ent = lu_dirent_start(dp); ent != NULL;
ent = lu_dirent_next(ent)) {
if (le16_to_cpu(ent->lde_namelen) == 0)
continue;
- if (le64_to_cpu(ent->lde_hash) > op_data->op_hash_offset)
- break;
+ if (le64_to_cpu(ent->lde_hash) <
+ op_data->op_hash_offset)
+ continue;
+
+ if (unlikely(le64_to_cpu(ent->lde_hash) ==
+ op_data->op_hash_offset)) {
+ /* If it is not for the next entry, which usually comes
+ * from ll_dir_entry_start, return this entry. */
+ if (!(op_data->op_cli_flags & CLI_NEXT_ENTRY))
+ break;
+
+ /* Keep reading until all of the entries already
+ * read are skipped. */
+ if (same_hash_count > 0) {
+ same_hash_count--;
+ continue;
+ }
+ }
+ break;
}
/* If it can not find entry in current page, try next page. */
if (ent == NULL) {
- __u64 orig_offset = op_data->op_hash_offset;
-
if (le64_to_cpu(dp->ldp_hash_end) == MDS_DIR_END_OFF) {
- mdc_release_page(page, 0);
+ op_data->op_same_hash_offset = 0;
+ mdc_release_page(page,
+ le32_to_cpu(dp->ldp_flags) & LDF_COLLIDE);
RETURN(0);
}
dp = page_address(page);
ent = lu_dirent_start(dp);
}
+ }
- op_data->op_hash_offset = orig_offset;
+ /* If the next hash is the same as the current hash, increase
+ * op_same_hash_offset to resolve the hash collision */
+ if (ent != NULL && op_data->op_cli_flags & CLI_NEXT_ENTRY) {
+ if (unlikely(le64_to_cpu(ent->lde_hash) == hash_offset))
+ op_data->op_same_hash_offset++;
+ else
+ op_data->op_same_hash_offset = 0;
}
*ppage = page;
*entp = ent;
-
RETURN(rc);
}
static int mdc_ioc_changelog_send(struct obd_device *obd,
struct ioc_changelog *icc)
{
- struct changelog_show *cs;
- int rc;
+ struct changelog_show *cs;
+ struct task_struct *task;
+ int rc;
/* Freed in mdc_changelog_send_thread */
OBD_ALLOC_PTR(cs);
* New thread because we should return to user app before
* writing into our pipe
*/
- rc = PTR_ERR(kthread_run(mdc_changelog_send_thread, cs,
- "mdc_clg_send_thread"));
- if (!IS_ERR_VALUE(rc)) {
- CDEBUG(D_CHANGELOG, "start changelog thread\n");
- return 0;
+ task = kthread_run(mdc_changelog_send_thread, cs,
+ "mdc_clg_send_thread");
+ if (IS_ERR(task)) {
+ rc = PTR_ERR(task);
+ CERROR("%s: cannot start changelog thread: rc = %d\n",
+ obd->obd_name, rc);
+ OBD_FREE_PTR(cs);
+ } else {
+ rc = 0;
+ CDEBUG(D_CHANGELOG, "%s: started changelog thread\n",
+ obd->obd_name);
}
- CERROR("Failed to start changelog thread: %d\n", rc);
- OBD_FREE_PTR(cs);
- return rc;
+ return rc;
}
static int mdc_ioc_hsm_ct_start(struct obd_export *exp,
static int mdc_ioc_swap_layouts(struct obd_export *exp,
struct md_op_data *op_data)
{
- CFS_LIST_HEAD(cancels);
+ struct list_head cancels = LIST_HEAD_INIT(cancels);
struct ptlrpc_request *req;
int rc, count;
struct mdc_swap_layouts *msl, *payload;
RETURN(rc);
}
-int mdc_fid_alloc(struct obd_export *exp, struct lu_fid *fid,
- struct md_op_data *op_data)
+int mdc_fid_alloc(const struct lu_env *env, struct obd_export *exp,
+ struct lu_fid *fid, struct md_op_data *op_data)
{
- struct client_obd *cli = &exp->exp_obd->u.cli;
- struct lu_client_seq *seq = cli->cl_seq;
- ENTRY;
- RETURN(seq_client_alloc_fid(NULL, seq, fid));
+ struct client_obd *cli = &exp->exp_obd->u.cli;
+ struct lu_client_seq *seq = cli->cl_seq;
+ ENTRY;
+ RETURN(seq_client_alloc_fid(env, seq, fid));
}
struct obd_uuid *mdc_get_uuid(struct obd_export *exp) {
}
struct ldlm_valblock_ops inode_lvbo = {
- lvbo_free: mdc_resource_inode_free
+ .lvbo_free = mdc_resource_inode_free
};
static int mdc_setup(struct obd_device *obd, struct lustre_cfg *cfg)
int __init mdc_init(void)
{
- return class_register_type(&mdc_obd_ops, &mdc_md_ops, NULL,
+ return class_register_type(&mdc_obd_ops, &mdc_md_ops, true, NULL,
#ifndef HAVE_ONLY_PROCFS_SEQ
- NULL,
+ NULL,
#endif
- LUSTRE_MDC_NAME, NULL);
+ LUSTRE_MDC_NAME, NULL);
}
#ifdef __KERNEL__