*/
/*
* This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
*
* lustre/mdt/mdt_handler.c
*
lh->mlh_type = MDT_REG_LOCK;
}
+void mdt_lh_reg_init(struct mdt_lock_handle *lh, struct ldlm_lock *lock)
+{
+ mdt_lock_reg_init(lh, lock->l_req_mode);
+ if (lock->l_req_mode == LCK_GROUP)
+ lh->mlh_gid = lock->l_policy_data.l_inodebits.li_gid;
+}
+
void mdt_lock_pdo_init(struct mdt_lock_handle *lh, enum ldlm_mode lock_mode,
const struct lu_name *lname)
{
EXIT;
}
+/**
+ * Check whether \a o is directory stripe object.
+ *
+ * \param[in] info thread environment
+ * \param[in] o MDT object
+ *
+ * \retval 1 is directory stripe.
+ * \retval 0 isn't directory stripe.
+ * \retval < 1 error code
+ */
+static int mdt_is_dir_stripe(struct mdt_thread_info *info,
+ struct mdt_object *o)
+{
+ struct md_attr *ma = &info->mti_attr;
+ struct lmv_mds_md_v1 *lmv;
+ int rc;
+
+ rc = mdt_stripe_get(info, o, ma, XATTR_NAME_LMV);
+ if (rc < 0)
+ return rc;
+
+ if (!(ma->ma_valid & MA_LMV))
+ return 0;
+
+ lmv = &ma->ma_lmv->lmv_md_v1;
+
+ if (!lmv_is_sane2(lmv))
+ return -EBADF;
+
+ if (le32_to_cpu(lmv->lmv_magic) == LMV_MAGIC_STRIPE)
+ return 1;
+
+ return 0;
+}
+
static int mdt_lookup_fileset(struct mdt_thread_info *info, const char *fileset,
struct lu_fid *fid)
{
struct mdt_device *mdt = info->mti_mdt;
struct lu_name *lname = &info->mti_name;
+ const char *start = fileset;
char *filename = info->mti_filename;
struct mdt_object *parent;
u32 mode;
*/
*fid = mdt->mdt_md_root_fid;
- while (rc == 0 && fileset != NULL && *fileset != '\0') {
- const char *s1 = fileset;
+ while (rc == 0 && start != NULL && *start != '\0') {
+ const char *s1 = start;
const char *s2;
while (*++s1 == '/')
if (s2 == s1)
break;
- fileset = s2;
+ start = s2;
lname->ln_namelen = s2 - s1;
if (lname->ln_namelen > NAME_MAX) {
rc = PTR_ERR(parent);
else {
mode = lu_object_attr(&parent->mot_obj);
- mdt_object_put(info->mti_env, parent);
- if (!S_ISDIR(mode))
+ if (!S_ISDIR(mode)) {
rc = -ENOTDIR;
+ } else if (mdt_is_remote_object(info, parent, parent)) {
+ if (!mdt->mdt_enable_remote_subdir_mount) {
+ rc = -EREMOTE;
+ LCONSOLE_WARN("%s: subdir mount '%s' refused because 'enable_remote_subdir_mount=0': rc = %d\n",
+ mdt_obd_name(mdt),
+ fileset, rc);
+ } else {
+ LCONSOLE_INFO("%s: subdir mount '%s' is remote and may be slow\n",
+ mdt_obd_name(mdt),
+ fileset);
+ }
+ }
+ mdt_object_put(info->mti_env, parent);
}
}
struct mdt_body *reqbody = NULL;
struct mdt_statfs_cache *msf;
ktime_t kstart = ktime_get();
+ int current_blockbits;
int rc;
ENTRY;
spin_unlock(&mdt->mdt_lock);
}
+ /* tgd_blockbit is recordsize bits set during mkfs.
+ * This once set does not change. However, 'zfs set'
+ * can be used to change the MDT blocksize. Instead
+ * of using cached value of 'tgd_blockbit' always
+ * calculate the blocksize bits which may have
+ * changed.
+ */
+ current_blockbits = fls64(osfs->os_bsize) - 1;
+
/* at least try to account for cached pages. its still racy and
* might be under-reporting if clients haven't announced their
* caches with brw recently */
" pending %llu free %llu avail %llu\n",
tgd->tgd_tot_dirty, tgd->tgd_tot_granted,
tgd->tgd_tot_pending,
- osfs->os_bfree << tgd->tgd_blockbits,
- osfs->os_bavail << tgd->tgd_blockbits);
+ osfs->os_bfree << current_blockbits,
+ osfs->os_bavail << current_blockbits);
osfs->os_bavail -= min_t(u64, osfs->os_bavail,
((tgd->tgd_tot_dirty + tgd->tgd_tot_pending +
- osfs->os_bsize - 1) >> tgd->tgd_blockbits));
+ osfs->os_bsize - 1) >> current_blockbits));
tgt_grant_sanity_check(mdt->mdt_lu_dev.ld_obd, __func__);
CDEBUG(D_CACHE, "%llu blocks: %llu free, %llu avail; "
osfs->os_files, osfs->os_ffree, osfs->os_state);
if (!exp_grant_param_supp(tsi->tsi_exp) &&
- tgd->tgd_blockbits > COMPAT_BSIZE_SHIFT) {
+ current_blockbits > COMPAT_BSIZE_SHIFT) {
/* clients which don't support OBD_CONNECT_GRANT_PARAM
* should not see a block size > page size, otherwise
* cl_lost_grant goes mad. Therefore, we emulate a 4KB (=2^12)
* block size which is the biggest block size known to work
* with all client's page size. */
- osfs->os_blocks <<= tgd->tgd_blockbits - COMPAT_BSIZE_SHIFT;
- osfs->os_bfree <<= tgd->tgd_blockbits - COMPAT_BSIZE_SHIFT;
- osfs->os_bavail <<= tgd->tgd_blockbits - COMPAT_BSIZE_SHIFT;
+ osfs->os_blocks <<= current_blockbits - COMPAT_BSIZE_SHIFT;
+ osfs->os_bfree <<= current_blockbits - COMPAT_BSIZE_SHIFT;
+ osfs->os_bavail <<= current_blockbits - COMPAT_BSIZE_SHIFT;
osfs->os_bsize = 1 << COMPAT_BSIZE_SHIFT;
}
if (rc == 0)
if (buf->lb_len == 0)
RETURN(0);
+ LASSERT(!info->mti_big_acl_used);
again:
rc = mo_xattr_get(env, next, buf, XATTR_NAME_ACL_ACCESS);
if (rc < 0) {
rc = 0;
} else if (rc == -EOPNOTSUPP) {
rc = 0;
- } else {
- if (rc == -ERANGE &&
- exp_connect_large_acl(info->mti_exp) &&
- buf->lb_buf != info->mti_big_acl) {
+ } else if (rc == -ERANGE) {
+ if (exp_connect_large_acl(info->mti_exp) &&
+ !info->mti_big_acl_used) {
if (info->mti_big_acl == NULL) {
info->mti_big_aclsize =
min_t(unsigned int,
buf->lb_buf = info->mti_big_acl;
buf->lb_len = info->mti_big_aclsize;
-
+ info->mti_big_acl_used = 1;
goto again;
}
-
+ /* FS has ACL bigger that our limits */
+ CDEBUG(D_INODE, "%s: "DFID" ACL can't fit into %d\n",
+ mdt_obd_name(mdt), PFID(mdt_object_fid(o)),
+ info->mti_big_aclsize);
+ rc = -E2BIG;
+ } else {
CERROR("%s: unable to read "DFID" ACL: rc = %d\n",
mdt_obd_name(mdt), PFID(mdt_object_fid(o)), rc);
}
} else {
- int client;
- int server;
- int acl_buflen;
- int lmm_buflen = 0;
- int lmmsize = 0;
-
- acl_buflen = req_capsule_get_size(pill, &RMF_ACL, RCL_SERVER);
- if (acl_buflen >= rc)
- goto map;
-
- /* If LOV/LMA EA is small, we can reuse part of their buffer */
- client = ptlrpc_req_get_repsize(pill->rc_req);
- server = lustre_packed_msg_size(pill->rc_req->rq_repmsg);
- if (req_capsule_has_field(pill, &RMF_MDT_MD, RCL_SERVER)) {
- lmm_buflen = req_capsule_get_size(pill, &RMF_MDT_MD,
- RCL_SERVER);
- lmmsize = repbody->mbo_eadatasize;
- }
-
- if (client < server - acl_buflen - lmm_buflen + rc + lmmsize) {
- CDEBUG(D_INODE, "%s: client prepared buffer size %d "
- "is not big enough with the ACL size %d (%d)\n",
- mdt_obd_name(mdt), client, rc,
- server - acl_buflen - lmm_buflen + rc + lmmsize);
- repbody->mbo_aclsize = 0;
- repbody->mbo_valid &= ~OBD_MD_FLACL;
- RETURN(-ERANGE);
- }
-
-map:
- if (buf->lb_buf == info->mti_big_acl)
- info->mti_big_acl_used = 1;
-
rc = nodemap_map_acl(nodemap, buf->lb_buf,
rc, NODEMAP_FS_TO_CLIENT);
/* if all ACLs mapped out, rc is still >= 0 */
rc = mdt_attr_get_complex(info, o, ma);
if (unlikely(rc)) {
- CDEBUG(rc == -ENOENT ? D_OTHER : D_ERROR,
- "%s: getattr error for "DFID": rc = %d\n",
- mdt_obd_name(info->mti_mdt),
- PFID(mdt_object_fid(o)), rc);
+ CDEBUG_LIMIT(rc == -ENOENT ? D_OTHER : D_ERROR,
+ "%s: getattr error for "DFID": rc = %d\n",
+ mdt_obd_name(info->mti_mdt),
+ PFID(mdt_object_fid(o)), rc);
RETURN(rc);
}
if (mdt_body_has_lov(la, reqbody)) {
u32 stripe_count = 1;
+ bool fixed_layout = false;
if (ma->ma_valid & MA_LOV) {
LASSERT(ma->ma_lmm_size);
repbody->mbo_valid |= (OBD_MD_FLDIREA|OBD_MD_MEA);
stripe_count = le32_to_cpu(lmv->lmv_stripe_count);
+ fixed_layout = lmv_is_fixed(lmv);
if (magic == LMV_MAGIC_STRIPE && lmv_is_restriping(lmv))
mdt_restripe_migrate_add(info, o);
else if (magic == LMV_MAGIC_V1 &&
!fid_is_root(mdt_object_fid(o)) &&
mdt->mdt_enable_dir_auto_split &&
!o->mot_restriping &&
- stripe_count < atomic_read(&mdt->mdt_mds_mds_conns) + 1)
+ stripe_count < atomic_read(&mdt->mdt_mds_mds_conns) + 1 &&
+ !fixed_layout)
mdt_auto_split_add(info, o);
} else if (S_ISLNK(la->la_mode) &&
reqbody->mbo_valid & OBD_MD_LINKNAME) {
static int mdt_raw_lookup(struct mdt_thread_info *info,
struct mdt_object *parent,
- const struct lu_name *lname,
- struct ldlm_reply *ldlm_rep)
+ const struct lu_name *lname)
{
- struct lu_fid *child_fid = &info->mti_tmp_fid1;
- int rc;
+ struct lu_fid *fid = &info->mti_tmp_fid1;
+ struct mdt_body *repbody;
+ bool is_dotdot = false;
+ bool is_old_parent_stripe = false;
+ bool is_new_parent_checked = false;
+ int rc;
+
ENTRY;
LASSERT(!info->mti_cross_ref);
+ /* Always allow to lookup ".." */
+ if (lname->ln_namelen == 2 &&
+ lname->ln_name[0] == '.' && lname->ln_name[1] == '.') {
+ info->mti_spec.sp_permitted = 1;
+ is_dotdot = true;
+ if (mdt_is_dir_stripe(info, parent) == 1)
+ is_old_parent_stripe = true;
+ }
+ mdt_object_get(info->mti_env, parent);
+lookup:
/* Only got the fid of this obj by name */
- fid_zero(child_fid);
- rc = mdo_lookup(info->mti_env, mdt_object_child(info->mti_object),
- lname, child_fid, &info->mti_spec);
- if (rc == 0) {
- struct mdt_body *repbody;
+ fid_zero(fid);
+ rc = mdo_lookup(info->mti_env, mdt_object_child(parent), lname, fid,
+ &info->mti_spec);
+ mdt_object_put(info->mti_env, parent);
+ if (rc)
+ RETURN(rc);
+
+ /* getattr_name("..") should return master object FID for striped dir */
+ if (is_dotdot && (is_old_parent_stripe || !is_new_parent_checked)) {
+ parent = mdt_object_find(info->mti_env, info->mti_mdt, fid);
+ if (IS_ERR(parent))
+ RETURN(PTR_ERR(parent));
+
+ /* old client getattr_name("..") with stripe FID */
+ if (unlikely(is_old_parent_stripe)) {
+ is_old_parent_stripe = false;
+ goto lookup;
+ }
+
+ /* ".." may be a stripe */
+ if (unlikely(mdt_is_dir_stripe(info, parent) == 1)) {
+ is_new_parent_checked = true;
+ goto lookup;
+ }
- repbody = req_capsule_server_get(info->mti_pill, &RMF_MDT_BODY);
- repbody->mbo_fid1 = *child_fid;
- repbody->mbo_valid = OBD_MD_FLID;
- mdt_set_disposition(info, ldlm_rep, DISP_LOOKUP_POS);
- } else if (rc == -ENOENT) {
- mdt_set_disposition(info, ldlm_rep, DISP_LOOKUP_NEG);
+ mdt_object_put(info->mti_env, parent);
}
+ repbody = req_capsule_server_get(info->mti_pill, &RMF_MDT_BODY);
+ repbody->mbo_fid1 = *fid;
+ repbody->mbo_valid = OBD_MD_FLID;
+
RETURN(rc);
}
}
if (lu_name_is_valid(lname)) {
- /* Always allow to lookup ".." */
- if (unlikely(lname->ln_namelen == 2 &&
- lname->ln_name[0] == '.' &&
- lname->ln_name[1] == '.'))
- info->mti_spec.sp_permitted = 1;
-
if (info->mti_body->mbo_valid == OBD_MD_FLID) {
- rc = mdt_raw_lookup(info, parent, lname, ldlm_rep);
+ rc = mdt_raw_lookup(info, parent, lname);
RETURN(rc);
}
PLDLMRES(lock->l_resource),
PFID(mdt_object_fid(child)));
+ if (unlikely(OBD_FAIL_PRECHECK(OBD_FAIL_PTLRPC_ENQ_RESEND))) {
+ if (!(lustre_msg_get_flags(req->rq_reqmsg) & MSG_RESENT))
+ OBD_FAIL_TIMEOUT(OBD_FAIL_PTLRPC_ENQ_RESEND,
+ req->rq_deadline -
+ req->rq_arrival_time.tv_sec +
+ cfs_fail_val ?: 3);
+ /* Put the lock to the waiting list and force the cancel */
+ ldlm_set_ast_sent(lock);
+ }
+
if (S_ISREG(lu_object_attr(&child->mot_obj)) &&
!mdt_object_remote(child) && child != parent) {
mdt_object_put(info->mti_env, child);
if (la->la_flags & LUSTRE_IMMUTABLE_FL)
rc = -EACCES;
- if (md_capable(uc, CFS_CAP_DAC_OVERRIDE))
+ if (cap_raised(uc->uc_cap, CAP_DAC_OVERRIDE))
RETURN(0);
if (uc->uc_fsuid == la->la_uid) {
if ((la->la_mode & S_IWUSR) == 0)
tgt_name(tsi->tsi_tgt), vallen);
RETURN(-EINVAL);
}
- if (ptlrpc_req_need_swab(req)) {
+ if (req_capsule_req_need_swab(&req->rq_pill)) {
__swab64s(&cs->cs_recno);
__swab32s(&cs->cs_id);
}
exp_max_brw_size(tsi->tsi_exp));
rdpg->rp_npages = (rdpg->rp_count + PAGE_SIZE - 1) >>
PAGE_SHIFT;
- OBD_ALLOC_PTR_ARRAY(rdpg->rp_pages, rdpg->rp_npages);
+ OBD_ALLOC_PTR_ARRAY_LARGE(rdpg->rp_pages, rdpg->rp_npages);
if (rdpg->rp_pages == NULL)
RETURN(-ENOMEM);
for (i = 0; i < rdpg->rp_npages; i++)
if (rdpg->rp_pages[i] != NULL)
__free_page(rdpg->rp_pages[i]);
- OBD_FREE_PTR_ARRAY(rdpg->rp_pages, rdpg->rp_npages);
+ OBD_FREE_PTR_ARRAY_LARGE(rdpg->rp_pages, rdpg->rp_npages);
if (OBD_FAIL_CHECK(OBD_FAIL_MDS_SENDPAGE))
RETURN(0);
case LUSTRE_Q_SETDEFAULT:
case LUSTRE_Q_SETQUOTAPOOL:
case LUSTRE_Q_SETINFOPOOL:
+ case LUSTRE_Q_SETDEFAULT_POOL:
if (!nodemap_can_setquota(nodemap))
GOTO(out_nodemap, rc = -EPERM);
/* fallthrough */
case LUSTRE_Q_GETDEFAULT:
case LUSTRE_Q_GETQUOTAPOOL:
case LUSTRE_Q_GETINFOPOOL:
+ case LUSTRE_Q_GETDEFAULT_POOL:
if (qmt == NULL)
GOTO(out_nodemap, rc = -EOPNOTSUPP);
/* slave quotactl */
case LUSTRE_Q_GETQUOTAPOOL:
case LUSTRE_Q_SETINFOPOOL:
case LUSTRE_Q_GETINFOPOOL:
+ case LUSTRE_Q_SETDEFAULT_POOL:
+ case LUSTRE_Q_GETDEFAULT_POOL:
/* forward quotactl request to QMT */
rc = qmt_hdls.qmth_quotactl(tsi->tsi_env, qmt, oqctl);
break;
policy->l_inodebits.bits = *ibits;
policy->l_inodebits.try_bits = trybits;
+ policy->l_inodebits.li_gid = lh->mlh_gid;
/*
* Use LDLM_FL_LOCAL_ONLY for this lock. We do not know yet if it is
} else {
rc = err_serious(-EFAULT);
}
+ } else if (ldesc->l_resource.lr_type == LDLM_IBITS &&
+ ldesc->l_policy_data.l_inodebits.bits == MDS_INODELOCK_DOM) {
+ struct ldlm_reply *rep;
+
+ /* No intent was provided but INTENT flag is set along with
+ * DOM bit, this is considered as GLIMPSE request.
+ * This logic is common for MDT and OST glimpse
+ */
+ mdt_ptlrpc_stats_update(req, IT_GLIMPSE);
+ rc = mdt_glimpse_enqueue(info, ns, lockp, flags);
+ /* Check whether the reply has been packed successfully. */
+ if (req->rq_repmsg != NULL) {
+ rep = req_capsule_server_get(info->mti_pill,
+ &RMF_DLM_REP);
+ rep->lock_policy_res2 =
+ ptlrpc_status_hton(rep->lock_policy_res2);
+ }
} else {
/* No intent was provided */
req_capsule_set_size(pill, &RMF_DLM_LVB, RCL_SERVER, 0);
OST_PUNCH, mdt_punch_hdl,
mdt_hp_punch),
TGT_OST_HDL(HAS_BODY | HAS_REPLY, OST_SYNC, mdt_data_sync),
+TGT_OST_HDL(HAS_BODY | HAS_REPLY | IS_MUTABLE, OST_FALLOCATE,
+ mdt_fallocate_hdl),
+TGT_OST_HDL(HAS_BODY | HAS_REPLY, OST_SEEK, tgt_lseek),
};
static struct tgt_handler mdt_sec_ctx_ops[] = {
m->mdt_skip_lfsck = 1;
}
- /* DoM files get IO lock at open optionally by default */
- m->mdt_opts.mo_dom_lock = ALWAYS_DOM_LOCK_ON_OPEN;
+ /* Just try to get a DoM lock by default. Otherwise, having a group
+ * lock granted, it may get blocked for a long time. */
+ m->mdt_opts.mo_dom_lock = TRYLOCK_DOM_ON_OPEN;
/* DoM files are read at open and data is packed in the reply */
m->mdt_opts.mo_dom_read_open = 1;
m->mdt_enable_chprojid_gid = 0;
m->mdt_enable_remote_rename = 1;
m->mdt_dir_restripe_nsonly = 1;
+ m->mdt_enable_remote_subdir_mount = 1;
atomic_set(&m->mdt_mds_mds_conns, 0);
atomic_set(&m->mdt_async_commit_count, 0);
LDLM_NAMESPACE_SERVER,
LDLM_NAMESPACE_GREEDY,
LDLM_NS_TYPE_MDT);
- if (m->mdt_namespace == NULL)
- GOTO(err_fini_seq, rc = -ENOMEM);
+ if (IS_ERR(m->mdt_namespace)) {
+ rc = PTR_ERR(m->mdt_namespace);
+ CERROR("%s: unable to create server namespace: rc = %d\n",
+ obd->obd_name, rc);
+ m->mdt_namespace = NULL;
+ GOTO(err_fini_seq, rc);
+ }
m->mdt_namespace->ns_lvbp = m;
m->mdt_namespace->ns_lvbo = &mdt_lvbo;
if (OCD_HAS_FLAG(data, CKSUM)) {
__u32 cksum_types = data->ocd_cksum_types;
- /* The client set in ocd_cksum_types the checksum types it
- * supports. We have to mask off the algorithms that we don't
- * support */
- data->ocd_cksum_types &=
- obd_cksum_types_supported_server(obd_name);
+ tgt_mask_cksum_types(&mdt->mdt_lut, &data->ocd_cksum_types);
if (unlikely(data->ocd_cksum_types == 0)) {
CERROR("%s: Connect with checksum support but no "
mdt_enable_slc(mdt);
}
+ if (!mdt->mdt_lut.lut_dt_conf.ddp_has_lseek_data_hole)
+ data->ocd_connect_flags2 &= ~OBD_CONNECT2_LSEEK;
+
return 0;
}
rc = mdt_ctxt_add_dirty_flag(&env, info, mfd);
/* Don't unlink orphan on failover umount, LU-184 */
- if (exp->exp_flags & OBD_OPT_FAILOVER) {
+ if (exp->exp_flags & OBD_OPT_FAILOVER ||
+ exp->exp_obd->obd_stopping) {
ma->ma_valid = MA_FLAGS;
ma->ma_attr_flags |= MDS_KEEP_ORPHAN;
}
struct getinfo_fid2path *fp,
struct lu_fid *root_fid)
{
- struct mdt_device *mdt = info->mti_mdt;
- struct mdt_object *mdt_obj;
- struct link_ea_header *leh;
- struct link_ea_entry *lee;
- struct lu_name *tmpname = &info->mti_name;
- struct lu_fid *tmpfid = &info->mti_tmp_fid1;
- struct lu_buf *buf = &info->mti_big_buf;
- char *ptr;
- int reclen;
- struct linkea_data ldata = { NULL };
- int rc = 0;
- bool first = true;
+ struct mdt_device *mdt = info->mti_mdt;
+ struct lu_name *tmpname = &info->mti_name;
+ struct lu_fid *tmpfid = &info->mti_tmp_fid1;
+ struct lu_buf *buf = &info->mti_big_buf;
+ struct linkea_data ldata = { NULL };
+ bool first = true;
+ struct mdt_object *mdt_obj;
+ struct link_ea_header *leh;
+ struct link_ea_entry *lee;
+ char *ptr;
+ int reclen;
+ int rc = 0;
+
ENTRY;
/* temp buffer for path element, the buffer will be finally freed
*tmpfid = fp->gf_fid = *mdt_object_fid(obj);
while (!lu_fid_eq(root_fid, &fp->gf_fid)) {
- struct lu_buf lmv_buf;
-
if (!lu_fid_eq(root_fid, &mdt->mdt_md_root_fid) &&
lu_fid_eq(&mdt->mdt_md_root_fid, &fp->gf_fid))
GOTO(out, rc = -ENOENT);
fp->gf_linkno++;
}
- lmv_buf.lb_buf = info->mti_xattr_buf;
- lmv_buf.lb_len = sizeof(info->mti_xattr_buf);
/* Check if it is slave stripes */
- rc = mo_xattr_get(info->mti_env, mdt_object_child(mdt_obj),
- &lmv_buf, XATTR_NAME_LMV);
+ rc = mdt_is_dir_stripe(info, mdt_obj);
mdt_object_put(info->mti_env, mdt_obj);
- if (rc > 0) {
- union lmv_mds_md *lmm = lmv_buf.lb_buf;
-
- /* For slave stripes, get its master */
- if (le32_to_cpu(lmm->lmv_magic) == LMV_MAGIC_STRIPE) {
- fp->gf_fid = *tmpfid;
- continue;
- }
- } else if (rc < 0 && rc != -ENODATA) {
+ if (rc < 0)
GOTO(out, rc);
+ if (rc == 1) {
+ fp->gf_fid = *tmpfid;
+ continue;
}
/* Pack the name in the end of the buffer */
fpin = key + cfs_size_round(sizeof(KEY_FID2PATH));
fpout = val;
- if (ptlrpc_req_need_swab(info->mti_pill->rc_req))
+ if (req_capsule_req_need_swab(info->mti_pill))
lustre_swab_fid2path(fpin);
memcpy(fpout, fpin, sizeof(*fpin));
sizeof(struct lu_fid)) {
/* client sent its root FID, which is normally fileset FID */
root_fid = fpin->gf_u.gf_root_fid;
- if (ptlrpc_req_need_swab(info->mti_pill->rc_req))
+ if (req_capsule_req_need_swab(info->mti_pill))
lustre_swab_lu_fid(root_fid);
if (root_fid != NULL && !fid_is_sane(root_fid))
if (rc == 0)
rc = dt_ro(&env, dt);
break;
- case OBD_IOC_ABORT_RECOVERY:
+ case OBD_IOC_ABORT_RECOVERY: {
+ struct obd_ioctl_data *data = karg;
+
CERROR("%s: Aborting recovery for device\n", mdt_obd_name(mdt));
- obd->obd_abort_recovery = 1;
- target_stop_recovery_thread(obd);
+ if (data->ioc_type & OBD_FLG_ABORT_RECOV_MDT) {
+ obd->obd_abort_recov_mdt = 1;
+ wake_up(&obd->obd_next_transno_waitq);
+ } else { /* if (data->ioc_type & OBD_FLG_ABORT_RECOV_OST) */
+ /* lctl didn't set OBD_FLG_ABORT_RECOV_OST < 2.13.57 */
+ obd->obd_abort_recovery = 1;
+ target_stop_recovery_thread(obd);
+ }
rc = 0;
break;
+ }
case OBD_IOC_CHANGELOG_REG:
case OBD_IOC_CHANGELOG_DEREG:
case OBD_IOC_CHANGELOG_CLEAR:
return mdt->mdt_opts.mo_cos != 0;
}
-static struct lu_device_type_operations mdt_device_type_ops = {
- .ldto_device_alloc = mdt_device_alloc,
- .ldto_device_free = mdt_device_free,
- .ldto_device_fini = mdt_device_fini
+static const struct lu_device_type_operations mdt_device_type_ops = {
+ .ldto_device_alloc = mdt_device_alloc,
+ .ldto_device_free = mdt_device_free,
+ .ldto_device_fini = mdt_device_fini
};
static struct lu_device_type mdt_device_type = {
if (rc)
GOTO(lu_fini, rc);
- rc = class_register_type(&mdt_obd_device_ops, NULL, true, NULL,
+ rc = class_register_type(&mdt_obd_device_ops, NULL, true,
LUSTRE_MDT_NAME, &mdt_device_type);
if (rc)
GOTO(mds_fini, rc);