* GPL HEADER END
*/
/*
- * Copyright 2008 Sun Microsystems, Inc. All rights reserved
+ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
*/
/*
/*
* Initialized in mdt_mod_init().
*/
-unsigned long mdt_num_threads;
+static unsigned long mdt_num_threads;
+static unsigned long mdt_min_threads;
+static unsigned long mdt_max_threads;
/* ptlrpc request handler for MDT. All handlers are
* grouped into several slices - struct mdt_opc_slice,
RETURN(rc);
}
-void mdt_pack_size2body(struct mdt_thread_info *info, struct mdt_object *o)
+/**
+ * Pack SOM attributes into the reply.
+ * Call under a DLM UPDATE lock.
+ */
+static void mdt_pack_size2body(struct mdt_thread_info *info,
+ struct mdt_object *mo)
{
struct mdt_body *b;
- struct lu_attr *attr = &info->mti_attr.ma_attr;
+ struct md_attr *ma = &info->mti_attr;
+ LASSERT(ma->ma_attr.la_valid & LA_MODE);
b = req_capsule_server_get(info->mti_pill, &RMF_MDT_BODY);
- /* Check if Size-on-MDS is enabled. */
- if ((mdt_conn_flags(info) & OBD_CONNECT_SOM) &&
- S_ISREG(attr->la_mode) && mdt_sizeonmds_enabled(o)) {
- b->valid |= (OBD_MD_FLSIZE | OBD_MD_FLBLOCKS);
- b->size = attr->la_size;
- b->blocks = attr->la_blocks;
- }
+ /* Check if Size-on-MDS is supported, if this is a regular file,
+ * if SOM is enabled on the object and if the SOM cache exists and is valid.
+ * Otherwise do not pack Size-on-MDS attributes to the reply. */
+ if (!(mdt_conn_flags(info) & OBD_CONNECT_SOM) ||
+ !S_ISREG(ma->ma_attr.la_mode) ||
+ !mdt_object_is_som_enabled(mo) ||
+ !(ma->ma_valid & MA_SOM))
+ return;
+
+ b->valid |= OBD_MD_FLSIZE | OBD_MD_FLBLOCKS;
+ b->size = ma->ma_som->msd_size;
+ b->blocks = ma->ma_som->msd_blocks;
}
void mdt_pack_attr2body(struct mdt_thread_info *info, struct mdt_body *b,
const struct lu_attr *attr, const struct lu_fid *fid)
{
- /*XXX should pack the reply body according to lu_valid*/
- b->valid |= OBD_MD_FLCTIME | OBD_MD_FLUID |
- OBD_MD_FLGID | OBD_MD_FLTYPE |
- OBD_MD_FLMODE | OBD_MD_FLNLINK | OBD_MD_FLFLAGS |
- OBD_MD_FLATIME | OBD_MD_FLMTIME ;
+ struct md_attr *ma = &info->mti_attr;
- if (!S_ISREG(attr->la_mode))
- b->valid |= OBD_MD_FLSIZE | OBD_MD_FLBLOCKS | OBD_MD_FLRDEV;
+ LASSERT(ma->ma_valid & MA_INODE);
b->atime = attr->la_atime;
b->mtime = attr->la_mtime;
b->nlink = attr->la_nlink;
b->rdev = attr->la_rdev;
+ /*XXX should pack the reply body according to lu_valid*/
+ b->valid |= OBD_MD_FLCTIME | OBD_MD_FLUID |
+ OBD_MD_FLGID | OBD_MD_FLTYPE |
+ OBD_MD_FLMODE | OBD_MD_FLNLINK | OBD_MD_FLFLAGS |
+ OBD_MD_FLATIME | OBD_MD_FLMTIME ;
+
+ if (!S_ISREG(attr->la_mode)) {
+ b->valid |= OBD_MD_FLSIZE | OBD_MD_FLBLOCKS | OBD_MD_FLRDEV;
+ } else if (ma->ma_need & MA_LOV && ma->ma_lmm_size == 0) {
+ /* means that no objects are allocated on OSTs. */
+ LASSERT(!(ma->ma_valid & MA_LOV));
+ /* just ignore blocks occupied by extended attributes on the MDS */
+ b->blocks = 0;
+ /* if no objects are allocated on OSTs, the size on the MDS is valid. b=22272 */
+ b->valid |= OBD_MD_FLSIZE | OBD_MD_FLBLOCKS;
+ }
+
if (fid) {
b->fid1 = *fid;
b->valid |= OBD_MD_FLID;
if (info)
mdt_body_reverse_idmap(info, b);
+
+ if (b->valid & OBD_MD_FLSIZE)
+ CDEBUG(D_VFSTRACE, DFID": returning size %llu\n",
+ PFID(fid), b->size);
}
static inline int mdt_body_has_lov(const struct lu_attr *la,
}
static int mdt_getattr_internal(struct mdt_thread_info *info,
- struct mdt_object *o)
+ struct mdt_object *o, int ma_need)
{
struct md_object *next = mdt_object_child(o);
const struct mdt_body *reqbody = info->mti_body;
/* get default stripe info for this dir. */
ma->ma_need |= MA_LOV_DEF;
}
+ ma->ma_need |= ma_need;
+ if (ma->ma_need & MA_SOM)
+ ma->ma_som = &info->mti_u.som.data;
+
rc = mo_attr_get(env, next, ma);
if (unlikely(rc)) {
CERROR("getattr error for "DFID": %d\n",
repbody->eadatasize = ma->ma_lmv_size;
repbody->valid |= (OBD_MD_FLDIREA|OBD_MD_MEA);
}
- if (!(ma->ma_valid & MA_LOV) && !(ma->ma_valid & MA_LMV)) {
- repbody->valid |= OBD_MD_FLSIZE | OBD_MD_FLBLOCKS;
- }
} else if (S_ISLNK(la->la_mode) &&
reqbody->valid & OBD_MD_LINKNAME) {
buffer->lb_buf = ma->ma_lmm;
CERROR("readlink failed: %d\n", rc);
rc = -EFAULT;
} else {
+ if (OBD_FAIL_CHECK(OBD_FAIL_MDS_READLINK_EPROTO))
+ rc -= 2;
repbody->valid |= OBD_MD_LINKNAME;
repbody->eadatasize = rc;
/* NULL terminate */
* remote obj, and at that time no capability is available.
*/
mdt_set_capainfo(info, 1, &reqbody->fid1, BYPASS_CAPA);
- rc = mdt_getattr_internal(info, obj);
+ rc = mdt_getattr_internal(info, obj, 0);
if (reqbody->valid & OBD_MD_FLRMTPERM)
mdt_exit_ucred(info);
EXIT;
struct ldlm_lock *lock;
struct ldlm_res_id *res_id;
int is_resent;
+ int ma_need = 0;
int rc;
ENTRY;
/* Finally, we can get attr for child. */
mdt_set_capainfo(info, 0, mdt_object_fid(child),
BYPASS_CAPA);
- rc = mdt_getattr_internal(info, child);
+ rc = mdt_getattr_internal(info, child, 0);
if (unlikely(rc != 0))
mdt_object_unlock(info, child, lhc, 1);
}
relock:
ma = &info->mti_attr;
+ OBD_FAIL_TIMEOUT(OBD_FAIL_MDS_RESEND, obd_timeout*2);
mdt_lock_handle_init(lhc);
mdt_lock_reg_init(lhc, LCK_PR);
GOTO(out_child, rc);
}
+ lock = ldlm_handle2lock(&lhc->mlh_reg_lh);
+ /* Get MA_SOM attributes if update lock is given. */
+ if (lock &&
+ lock->l_policy_data.l_inodebits.bits & MDS_INODELOCK_UPDATE &&
+ S_ISREG(lu_object_attr(&mdt_object_child(child)->mo_lu)))
+ ma_need = MA_SOM;
+
/* finally, we can get attr for child. */
mdt_set_capainfo(info, 1, child_fid, BYPASS_CAPA);
- rc = mdt_getattr_internal(info, child);
+ rc = mdt_getattr_internal(info, child, ma_need);
if (unlikely(rc != 0)) {
mdt_object_unlock(info, child, lhc, 1);
- } else {
- lock = ldlm_handle2lock(&lhc->mlh_reg_lh);
- if (lock) {
- struct mdt_body *repbody;
+ } else if (lock) {
+ /* Debugging code. */
+ res_id = &lock->l_resource->lr_name;
+ LDLM_DEBUG(lock, "Returning lock to client");
+ LASSERTF(fid_res_name_eq(mdt_object_fid(child),
+ &lock->l_resource->lr_name),
+ "Lock res_id: %lu/%lu/%lu, Fid: "DFID".\n",
+ (unsigned long)res_id->name[0],
+ (unsigned long)res_id->name[1],
+ (unsigned long)res_id->name[2],
+ PFID(mdt_object_fid(child)));
+ mdt_pack_size2body(info, child);
+ }
+ if (lock)
+ LDLM_LOCK_PUT(lock);
- /* Debugging code. */
- res_id = &lock->l_resource->lr_name;
- LDLM_DEBUG(lock, "Returning lock to client\n");
- LASSERTF(fid_res_name_eq(mdt_object_fid(child),
- &lock->l_resource->lr_name),
- "Lock res_id: %lu/%lu/%lu, Fid: "DFID".\n",
- (unsigned long)res_id->name[0],
- (unsigned long)res_id->name[1],
- (unsigned long)res_id->name[2],
- PFID(mdt_object_fid(child)));
- /*
- * Pack Size-on-MDS inode attributes to the body if
- * update lock is given.
- */
- repbody = req_capsule_server_get(info->mti_pill,
- &RMF_MDT_BODY);
- if (lock->l_policy_data.l_inodebits.bits &
- MDS_INODELOCK_UPDATE)
- mdt_pack_size2body(info, child);
- LDLM_LOCK_PUT(lock);
- }
- }
EXIT;
out_child:
mdt_object_put(info->mti_env, child);
vallen = req_capsule_get_size(info->mti_pill, &RMF_SETINFO_VAL,
RCL_CLIENT);
+ /* Swab any parts of val that need swabbing here */
if (KEY_IS(KEY_READ_ONLY)) {
req->rq_status = 0;
lustre_msg_set_status(req->rq_repmsg, 0);
- spin_lock(&req->rq_export->exp_lock);
+ cfs_spin_lock(&req->rq_export->exp_lock);
if (*(__u32 *)val)
req->rq_export->exp_connect_flags |= OBD_CONNECT_RDONLY;
else
req->rq_export->exp_connect_flags &=~OBD_CONNECT_RDONLY;
- spin_unlock(&req->rq_export->exp_lock);
+ cfs_spin_unlock(&req->rq_export->exp_lock);
} else if (KEY_IS(KEY_CHANGELOG_CLEAR)) {
struct changelog_setinfo *cs =
CERROR("Bad changelog_clear setinfo size %d\n", vallen);
RETURN(-EINVAL);
}
- if (lustre_msg_swabbed(req->rq_reqmsg)) {
+ if (ptlrpc_req_need_swab(req)) {
__swab64s(&cs->cs_recno);
__swab32s(&cs->cs_id);
}
struct lu_rdpg *rdpg)
{
struct ptlrpc_request *req = mdt_info_req(info);
+ struct obd_export *exp = req->rq_export;
struct ptlrpc_bulk_desc *desc;
struct l_wait_info *lwi = &info->mti_u.rdpg.mti_wait_info;
int tmpcount;
if (OBD_FAIL_CHECK(OBD_FAIL_MDS_SENDPAGE))
GOTO(abort_bulk, rc = 0);
- timeout = (int) req->rq_deadline - cfs_time_current_sec();
- if (timeout < 0)
- CERROR("Req deadline already passed %lu (now: %lu)\n",
- req->rq_deadline, cfs_time_current_sec());
- *lwi = LWI_TIMEOUT(cfs_time_seconds(max(timeout, 1)), NULL, NULL);
- rc = l_wait_event(desc->bd_waitq, !ptlrpc_server_bulk_active(desc), lwi);
- LASSERT (rc == 0 || rc == -ETIMEDOUT);
+ do {
+ timeout = (int) req->rq_deadline - cfs_time_current_sec();
+ if (timeout < 0)
+ CERROR("Req deadline already passed %lu (now: %lu)\n",
+ req->rq_deadline, cfs_time_current_sec());
+ *lwi = LWI_TIMEOUT_INTERVAL(cfs_time_seconds(max(timeout, 1)),
+ cfs_time_seconds(1), NULL, NULL);
+ rc = l_wait_event(desc->bd_waitq,
+ !ptlrpc_server_bulk_active(desc) ||
+ exp->exp_failed ||
+ exp->exp_abort_active_req, lwi);
+ LASSERT (rc == 0 || rc == -ETIMEDOUT);
+ } while ((rc == -ETIMEDOUT) &&
+ (req->rq_deadline > cfs_time_current_sec()));
if (rc == 0) {
if (desc->bd_success &&
desc->bd_nob_transferred == rdpg->rp_count)
GOTO(free_desc, rc);
- rc = -ETIMEDOUT; /* XXX should this be a different errno? */
+ rc = -ETIMEDOUT;
+ if (exp->exp_abort_active_req || exp->exp_failed)
+ GOTO(abort_bulk, rc);
}
DEBUG_REQ(D_ERROR, req, "bulk failed: %s %d(%d), evicting %s@%s",
(rc == -ETIMEDOUT) ? "timeout" : "network error",
desc->bd_nob_transferred, rdpg->rp_count,
- req->rq_export->exp_client_uuid.uuid,
- req->rq_export->exp_connection->c_remote_uuid.uuid);
+ exp->exp_client_uuid.uuid,
+ exp->exp_connection->c_remote_uuid.uuid);
- class_fail_export(req->rq_export);
+ class_fail_export(exp);
EXIT;
abort_bulk:
else
rc = ptlrpc_start_bulk_transfer (desc);
if (rc == 0) {
- *lwi = LWI_TIMEOUT_INTERVAL(obd_timeout * HZ / 4, HZ,
+ *lwi = LWI_TIMEOUT_INTERVAL(obd_timeout * CFS_HZ / 4, CFS_HZ,
mdt_bulk_timeout, desc);
rc = l_wait_event(desc->bd_waitq, !ptlrpc_bulk_active(desc) ||
desc->bd_export->exp_failed, lwi);
{
struct req_capsule *pill = info->mti_pill;
struct mdt_device *mdt = info->mti_mdt;
+ struct md_quota *mq = md_quota(info->mti_env);
struct mdt_body *repbody;
int rc = 0;
ENTRY;
rc = lustre_msg_get_status(mdt_info_req(info)->rq_repmsg);
GOTO(out_ucred, rc);
}
+ mq->mq_exp = info->mti_exp;
rc = mdt_reint_rec(info, lhc);
EXIT;
out_ucred:
struct obd_quotactl *oqctl;
struct req_capsule *pill = info->mti_pill;
struct obd_export *exp = info->mti_exp;
+ struct md_quota *mq = md_quota(info->mti_env);
struct md_device *next = info->mti_mdt->mdt_child;
int rc;
ENTRY;
- if (OBD_FAIL_CHECK(OBD_FAIL_MDS_QUOTACHECK_NET))
- RETURN(0);
-
oqctl = req_capsule_client_get(pill, &RMF_OBD_QUOTACTL);
if (oqctl == NULL)
RETURN(-EPROTO);
if (rc)
RETURN(rc);
- rc = next->md_ops->mdo_quota.mqo_check(info->mti_env, next, exp,
+ mq->mq_exp = exp;
+ rc = next->md_ops->mdo_quota.mqo_check(info->mti_env, next,
oqctl->qc_type);
RETURN(rc);
}
struct obd_quotactl *oqctl, *repoqc;
struct req_capsule *pill = info->mti_pill;
struct obd_export *exp = info->mti_exp;
+ struct md_quota *mq = md_quota(info->mti_env);
struct md_device *next = info->mti_mdt->mdt_child;
const struct md_quota_operations *mqo = &next->md_ops->mdo_quota;
int id, rc;
ENTRY;
- if (OBD_FAIL_CHECK(OBD_FAIL_MDS_QUOTACTL_NET))
- RETURN(0);
-
oqctl = req_capsule_client_get(pill, &RMF_OBD_QUOTACTL);
if (oqctl == NULL)
RETURN(-EPROTO);
repoqc = req_capsule_server_get(pill, &RMF_OBD_QUOTACTL);
LASSERT(repoqc != NULL);
+ mq->mq_exp = exp;
switch (oqctl->qc_cmd) {
case Q_QUOTAON:
- if (info->mti_mdt->mdt_som_conf) {
- /* Quota cannot be used together with SOM while
- * SOM stored blocks in i_blocks but not in SOM EA. */
- LCONSOLE_ERROR("Fail to turn Quota on: SOM is enabled "
- "and temporary conflicts with quota.\n");
- RETURN(-ENOTSUPP);
- }
rc = mqo->mqo_on(info->mti_env, next, oqctl->qc_type);
break;
case Q_QUOTAOFF:
static int mdt_enqueue(struct mdt_thread_info *info)
{
struct ptlrpc_request *req;
- __u64 req_bits;
int rc;
/*
LASSERT(info->mti_dlm_req != NULL);
req = mdt_info_req(info);
-
- /*
- * Lock without inodebits makes no sense and will oops later in
- * ldlm. Let's check it now to see if we have wrong lock from client or
- * bits get corrupted somewhere in mdt_intent_policy().
- */
- req_bits = info->mti_dlm_req->lock_desc.l_policy_data.l_inodebits.bits;
- /* This is disabled because we need to support liblustre flock.
- * LASSERT(req_bits != 0);
- */
-
rc = ldlm_handle_enqueue0(info->mti_mdt->mdt_namespace,
req, info->mti_dlm_req, &cbs);
info->mti_fail_id = OBD_FAIL_LDLM_REPLY;
res_id, LDLM_FL_LOCAL_ONLY | LDLM_FL_ATOMIC_CB,
&info->mti_exp->exp_handle.h_cookie);
if (rc)
- GOTO(out, rc);
-
-out:
- if (rc)
mdt_object_unlock(info, o, lh, 1);
-
+ else if (unlikely(OBD_FAIL_PRECHECK(OBD_FAIL_MDS_PDO_LOCK)) &&
+ lh->mlh_pdo_hash != 0 &&
+ (lh->mlh_reg_mode == LCK_PW || lh->mlh_reg_mode == LCK_EX)) {
+ OBD_FAIL_TIMEOUT(OBD_FAIL_MDS_PDO_LOCK, 10);
+ }
RETURN(rc);
}
*
* \param info thread info object
* \param o mdt object
- * \param h mdt lock handle referencing regular and PDO locks
+ * \param lh mdt lock handle referencing regular and PDO locks
* \param decref force immediate lock releasing
*/
void mdt_object_unlock(struct mdt_thread_info *info, struct mdt_object *o,
* Checking for various OBD_FAIL_$PREF_$OPC_NET codes. _Do_ not try
* to put same checks into handlers like mdt_close(), mdt_reint(),
* etc., without talking to mdt authors first. Checking same thing
- * there again is useless and returning 0 error wihtout packing reply
+ * there again is useless and returning 0 error without packing reply
* is buggy! Handlers either pack reply or return error.
*
* We return 0 here and do not send any reply in order to emulate
dlm_req = req_capsule_client_get(info->mti_pill, &RMF_DLM_REQ);
if (dlm_req != NULL) {
- if (info->mti_mdt->mdt_opts.mo_compat_resname)
- rc = mdt_lock_resname_compat(info->mti_mdt,
- dlm_req);
- info->mti_dlm_req = dlm_req;
+ if (unlikely(dlm_req->lock_desc.l_resource.lr_type ==
+ LDLM_IBITS &&
+ dlm_req->lock_desc.l_policy_data.\
+ l_inodebits.bits == 0)) {
+ /*
+ * Lock without inodebits makes no sense and
+ * will oops later in ldlm. If the client fails to
+ * set such bits, do not trigger an ASSERTION.
+ *
+ * For the liblustre flock case, it may be zero.
+ */
+ rc = -EPROTO;
+ } else {
+ if (info->mti_mdt->mdt_opts.mo_compat_resname)
+ rc = mdt_lock_resname_compat(
+ info->mti_mdt,
+ dlm_req);
+ info->mti_dlm_req = dlm_req;
+ }
} else {
rc = -EFAULT;
}
if (likely(rc == 0 && req->rq_export && h->mh_opc != MDS_DISCONNECT))
target_committed_to_req(req);
- if (unlikely((lustre_msg_get_flags(req->rq_reqmsg) & MSG_REPLAY) &&
+ if (unlikely(req_is_replay(req) &&
lustre_msg_get_transno(req->rq_reqmsg) == 0)) {
DEBUG_REQ(D_ERROR, req, "transno is 0 during REPLAY");
LBUG();
info->mti_fail_id = OBD_FAIL_MDS_ALL_REPLY_NET;
info->mti_transno = lustre_msg_get_transno(req->rq_reqmsg);
- info->mti_mos[0] = NULL;
- info->mti_mos[1] = NULL;
- info->mti_mos[2] = NULL;
- info->mti_mos[3] = NULL;
+ info->mti_mos = NULL;
memset(&info->mti_attr, 0, sizeof(info->mti_attr));
info->mti_body = NULL;
static int mdt_recovery(struct mdt_thread_info *info)
{
struct ptlrpc_request *req = mdt_info_req(info);
- int recovering;
struct obd_device *obd;
ENTRY;
}
}
- if (unlikely(req->rq_export == NULL)) {
+ if (unlikely(!class_connected_export(req->rq_export))) {
CERROR("operation %d on unconnected MDS from %s\n",
lustre_msg_get_opc(req->rq_reqmsg),
libcfs_id2str(req->rq_peer));
obd = req->rq_export->exp_obd;
/* Check for aborted recovery... */
- spin_lock_bh(&obd->obd_processing_task_lock);
- recovering = obd->obd_recovering;
- spin_unlock_bh(&obd->obd_processing_task_lock);
- if (unlikely(recovering)) {
+ if (unlikely(obd->obd_recovering)) {
int rc;
int should_process;
DEBUG_REQ(D_INFO, req, "Got new replay");
if (likely(rc == 0)) {
rc = mdt_recovery(info);
if (likely(rc == +1)) {
- switch (lustre_msg_get_opc(msg)) {
- case MDS_READPAGE:
- req->rq_bulk_read = 1;
- break;
- case MDS_WRITEPAGE:
- req->rq_bulk_write = 1;
- break;
- }
-
h = mdt_handler_find(lustre_msg_get_opc(msg),
supported);
if (likely(h != NULL)) {
new_lock->l_writers--;
}
- new_lock->l_export = class_export_get(req->rq_export);
- atomic_inc(&lock->l_export->exp_locks_count);
-
+ new_lock->l_export = class_export_lock_get(req->rq_export, new_lock);
new_lock->l_blocking_ast = lock->l_blocking_ast;
new_lock->l_completion_ast = lock->l_completion_ast;
new_lock->l_remote_handle = lock->l_remote_handle;
unlock_res_and_lock(new_lock);
- lustre_hash_add(new_lock->l_export->exp_lock_hash,
- &new_lock->l_remote_handle,
- &new_lock->l_exp_hash);
+ cfs_hash_add(new_lock->l_export->exp_lock_hash,
+ &new_lock->l_remote_handle,
+ &new_lock->l_exp_hash);
LDLM_LOCK_RELEASE(new_lock);
lh->mlh_reg_lh.cookie = 0;
dlmreq = req_capsule_client_get(info->mti_pill, &RMF_DLM_REQ);
remote_hdl = dlmreq->lock_handle[0];
- lock = lustre_hash_lookup(exp->exp_lock_hash, &remote_hdl);
+ lock = cfs_hash_lookup(exp->exp_lock_hash, &remote_hdl);
if (lock) {
if (lock != new_lock) {
lh->mlh_reg_lh.cookie = lock->l_handle.h_cookie;
lh->mlh_reg_lh.cookie);
if (old_lock)
*old_lock = LDLM_LOCK_GET(lock);
- lh_put(exp->exp_lock_hash, &lock->l_exp_hash);
+ cfs_hash_put(exp->exp_lock_hash, &lock->l_exp_hash);
return;
}
- lh_put(exp->exp_lock_hash, &lock->l_exp_hash);
+ cfs_hash_put(exp->exp_lock_hash, &lock->l_exp_hash);
}
/*
* checked here.
*/
if (lustre_handle_is_used(&lhc->mlh_reg_lh)) {
+ LASSERTF(rc == 0, "Error occurred but lock handle "
+ "is still in use\n");
rep->lock_policy_res2 = 0;
rc = mdt_intent_lock_replace(info, lockp, NULL, lhc, flags);
RETURN(rc);
req_capsule_extend(pill, &RQF_LDLM_INTENT);
it = req_capsule_client_get(pill, &RMF_LDLM_INTENT);
if (it != NULL) {
- const struct ldlm_request *dlmreq;
- __u64 req_bits;
-
rc = mdt_intent_opc(it->opc, info, lockp, flags);
if (rc == 0)
rc = ELDLM_OK;
- /*
- * Lock without inodebits makes no sense and will oops
+ /* Lock without inodebits makes no sense and will oops
* later in ldlm. Let's check it now to see if we have
- * wrong lock from client or bits get corrupted
- * somewhere in mdt_intent_opc().
- */
- dlmreq = info->mti_dlm_req;
- req_bits = dlmreq->lock_desc.l_policy_data.l_inodebits.bits;
- LASSERT(req_bits != 0);
-
+ * ibits corrupted somewhere in mdt_intent_opc().
+ * The case where the client fails to set ibits has
+ * been handled elsewhere. */
+ LASSERT(ergo(info->mti_dlm_req->lock_desc.l_resource.\
+ lr_type == LDLM_IBITS,
+ info->mti_dlm_req->lock_desc.\
+ l_policy_data.l_inodebits.bits != 0));
} else
rc = err_serious(-EFAULT);
} else {
RETURN(rc);
}
-/*
- * Seq wrappers
- */
-static void mdt_seq_adjust(const struct lu_env *env,
- struct mdt_device *m, int lost)
-{
- struct md_site *ms = mdt_md_site(m);
- struct lu_seq_range out;
- ENTRY;
-
- LASSERT(ms && ms->ms_server_seq);
- LASSERT(lost >= 0);
- /* get extra seq from seq_server, moving it's range up */
- while (lost-- > 0) {
- seq_server_alloc_meta(ms->ms_server_seq, NULL, &out, env);
- }
- EXIT;
-}
-
static int mdt_seq_fini(const struct lu_env *env,
struct mdt_device *m)
{
* We'd like to have a mechanism to set this on a per-device
* basis, but alas...
*/
- .psc_min_threads = min(max(mdt_num_threads, MDT_MIN_THREADS),
- MDT_MAX_THREADS),
- .psc_max_threads = MDT_MAX_THREADS,
+ .psc_min_threads = mdt_min_threads,
+ .psc_max_threads = mdt_max_threads,
.psc_ctx_tags = LCT_MD_THREAD
};
.psc_req_portal = MDS_READPAGE_PORTAL,
.psc_rep_portal = MDC_REPLY_PORTAL,
.psc_watchdog_factor = MDT_SERVICE_WATCHDOG_FACTOR,
- .psc_min_threads = min(max(mdt_num_threads, MDT_MIN_THREADS),
- MDT_MAX_THREADS),
- .psc_max_threads = MDT_MAX_THREADS,
+ .psc_min_threads = mdt_min_threads,
+ .psc_max_threads = mdt_max_threads,
.psc_ctx_tags = LCT_MD_THREAD
};
m->mdt_readpage_service =
.psc_req_portal = MDS_SETATTR_PORTAL,
.psc_rep_portal = MDC_REPLY_PORTAL,
.psc_watchdog_factor = MDT_SERVICE_WATCHDOG_FACTOR,
- .psc_min_threads = min(max(mdt_num_threads, MDT_MIN_THREADS),
- MDT_MAX_THREADS),
- .psc_max_threads = MDT_MAX_THREADS,
+ .psc_min_threads = mdt_min_threads,
+ .psc_max_threads = mdt_max_threads,
.psc_ctx_tags = LCT_MD_THREAD
};
.psc_req_portal = SEQ_CONTROLLER_PORTAL,
.psc_rep_portal = MDC_REPLY_PORTAL,
.psc_watchdog_factor = MDT_SERVICE_WATCHDOG_FACTOR,
- .psc_min_threads = SEQ_NUM_THREADS,
- .psc_max_threads = SEQ_NUM_THREADS,
+ .psc_min_threads = mdt_min_threads,
+ .psc_max_threads = mdt_max_threads,
.psc_ctx_tags = LCT_MD_THREAD|LCT_DT_THREAD
};
.psc_req_portal = SEQ_METADATA_PORTAL,
.psc_rep_portal = MDC_REPLY_PORTAL,
.psc_watchdog_factor = MDT_SERVICE_WATCHDOG_FACTOR,
- .psc_min_threads = SEQ_NUM_THREADS,
- .psc_max_threads = SEQ_NUM_THREADS,
+ .psc_min_threads = mdt_min_threads,
+ .psc_max_threads = mdt_max_threads,
.psc_ctx_tags = LCT_MD_THREAD|LCT_DT_THREAD
};
.psc_req_portal = SEQ_DATA_PORTAL,
.psc_rep_portal = OSC_REPLY_PORTAL,
.psc_watchdog_factor = MDT_SERVICE_WATCHDOG_FACTOR,
- .psc_min_threads = SEQ_NUM_THREADS,
- .psc_max_threads = SEQ_NUM_THREADS,
+ .psc_min_threads = mdt_min_threads,
+ .psc_max_threads = mdt_max_threads,
.psc_ctx_tags = LCT_MD_THREAD|LCT_DT_THREAD
};
.psc_req_portal = FLD_REQUEST_PORTAL,
.psc_rep_portal = MDC_REPLY_PORTAL,
.psc_watchdog_factor = MDT_SERVICE_WATCHDOG_FACTOR,
- .psc_min_threads = FLD_NUM_THREADS,
- .psc_max_threads = FLD_NUM_THREADS,
+ .psc_min_threads = mdt_min_threads,
+ .psc_max_threads = mdt_max_threads,
.psc_ctx_tags = LCT_DT_THREAD|LCT_MD_THREAD
};
.psc_req_portal = MDS_MDS_PORTAL,
.psc_rep_portal = MDC_REPLY_PORTAL,
.psc_watchdog_factor = MDT_SERVICE_WATCHDOG_FACTOR,
- .psc_min_threads = min(max(mdt_num_threads, MDT_MIN_THREADS),
- MDT_MAX_THREADS),
- .psc_max_threads = MDT_MAX_THREADS,
+ .psc_min_threads = mdt_min_threads,
+ .psc_max_threads = mdt_max_threads,
.psc_ctx_tags = LCT_MD_THREAD
};
m->mdt_xmds_service =
procfs_entry, target_print_req,"mdt_xmds");
if (m->mdt_xmds_service == NULL) {
- CERROR("failed to start readpage service\n");
+ CERROR("failed to start xmds service\n");
GOTO(err_mdt_svc, rc = -ENOMEM);
}
m->mdt_identity_cache = NULL;
if (m->mdt_namespace != NULL) {
- ldlm_namespace_free(m->mdt_namespace, NULL, d->ld_obd->obd_force);
+ ldlm_namespace_free(m->mdt_namespace, NULL,
+ d->ld_obd->obd_force);
d->ld_obd->obd_namespace = m->mdt_namespace = NULL;
}
*/
mdt_stack_fini(env, m, md2lu_dev(m->mdt_child));
- mdt_procfs_fini(m);
- if (obd->obd_proc_exports_entry) {
- lprocfs_remove_proc_entry("clear", obd->obd_proc_exports_entry);
- obd->obd_proc_exports_entry = NULL;
- }
lprocfs_free_per_client_stats(obd);
lprocfs_free_obd_stats(obd);
- ptlrpc_lprocfs_unregister_obd(obd);
- lprocfs_obd_cleanup(obd);
+ mdt_procfs_fini(m);
if (ls) {
struct md_site *mite;
OBD_FREE_PTR(mite);
d->ld_site = NULL;
}
- LASSERT(atomic_read(&d->ld_ref) == 0);
+ LASSERT(cfs_atomic_read(&d->ld_ref) == 0);
EXIT;
}
sptlrpc_target_update_exp_flavor(obd, &tmp_rset);
- write_lock(&m->mdt_sptlrpc_lock);
+ cfs_write_lock(&m->mdt_sptlrpc_lock);
sptlrpc_rule_set_free(&m->mdt_sptlrpc_rset);
m->mdt_sptlrpc_rset = tmp_rset;
- write_unlock(&m->mdt_sptlrpc_lock);
+ cfs_write_unlock(&m->mdt_sptlrpc_lock);
return 0;
}
LCONSOLE_INFO("Disabling ACL\n");
}
+ if (!*p)
+ break;
+
options = ++p;
}
}
static int mdt_init0(const struct lu_env *env, struct mdt_device *m,
struct lu_device_type *ldt, struct lustre_cfg *cfg)
{
- struct lprocfs_static_vars lvars;
struct mdt_thread_info *info;
struct obd_device *obd;
const char *dev = lustre_cfg_string(cfg, 0);
obd = class_name2obd(dev);
LASSERT(obd != NULL);
- spin_lock_init(&m->mdt_transno_lock);
-
m->mdt_max_mdsize = MAX_MD_SIZE;
m->mdt_max_cookiesize = sizeof(struct llog_cookie);
m->mdt_som_conf = 0;
CERROR("CMD Operation not allowed in IOP mode\n");
GOTO(err_lmi, rc = -EINVAL);
}
+ /* Read recovery timeouts */
+ if (lsi->lsi_lmd && lsi->lsi_lmd->lmd_recovery_time_soft)
+ obd->obd_recovery_timeout =
+ lsi->lsi_lmd->lmd_recovery_time_soft;
+
+ if (lsi->lsi_lmd && lsi->lsi_lmd->lmd_recovery_time_hard)
+ obd->obd_recovery_time_hard =
+ lsi->lsi_lmd->lmd_recovery_time_hard;
}
- rwlock_init(&m->mdt_sptlrpc_lock);
+ cfs_rwlock_init(&m->mdt_sptlrpc_lock);
sptlrpc_rule_set_init(&m->mdt_sptlrpc_rset);
- spin_lock_init(&m->mdt_ioepoch_lock);
+ cfs_spin_lock_init(&m->mdt_ioepoch_lock);
m->mdt_opts.mo_compat_resname = 0;
m->mdt_capa_timeout = CAPA_TIMEOUT;
m->mdt_capa_alg = CAPA_HMAC_ALG_SHA1;
CFS_INIT_LIST_HEAD(&m->mdt_nosquash_nids);
m->mdt_nosquash_str = NULL;
m->mdt_nosquash_strlen = 0;
- init_rwsem(&m->mdt_squash_sem);
-
- spin_lock_init(&m->mdt_client_bitmap_lock);
+ cfs_init_rwsem(&m->mdt_squash_sem);
OBD_ALLOC_PTR(mite);
if (mite == NULL)
GOTO(err_free_site, rc);
}
- lprocfs_mdt_init_vars(&lvars);
- rc = lprocfs_obd_setup(obd, lvars.obd_vars);
- if (rc) {
- CERROR("Can't init lprocfs, rc %d\n", rc);
- GOTO(err_fini_site, rc);
- }
- ptlrpc_lprocfs_register_obd(obd);
-
rc = mdt_procfs_init(m, dev);
if (rc) {
CERROR("Can't init MDT lprocfs, rc %d\n", rc);
GOTO(err_fini_proc, rc);
}
- obd->obd_proc_exports_entry = proc_mkdir("exports",
- obd->obd_proc_entry);
- if (obd->obd_proc_exports_entry)
- lprocfs_add_simple(obd->obd_proc_exports_entry,
- "clear", lprocfs_nid_stats_clear_read,
- lprocfs_nid_stats_clear_write, obd, NULL);
-
/* set server index */
lu_site2md(s)->ms_node_id = node_id;
mdt_stack_fini(env, m, md2lu_dev(m->mdt_child));
err_fini_proc:
mdt_procfs_fini(m);
- if (obd->obd_proc_exports_entry) {
- lprocfs_remove_proc_entry("clear", obd->obd_proc_exports_entry);
- obd->obd_proc_exports_entry = NULL;
- }
- ptlrpc_lprocfs_unregister_obd(obd);
- lprocfs_obd_cleanup(obd);
-err_fini_site:
lu_site_fini(s);
err_free_site:
OBD_FREE_PTR(mite);
lu_object_init(o, h, d);
lu_object_add_top(h, o);
o->lo_ops = &mdt_obj_ops;
+ cfs_sema_init(&mo->mot_ioepoch_sem, 1);
RETURN(o);
} else
RETURN(NULL);
lu_printer_t p, const struct lu_object *o)
{
struct mdt_object *mdto = mdt_obj((struct lu_object *)o);
- return (*p)(env, cookie, LUSTRE_MDT_NAME"-object@%p(ioepoch=%llu "
- "flags=%llx, epochcount=%d, writecount=%d)",
+ return (*p)(env, cookie, LUSTRE_MDT_NAME"-object@%p(ioepoch="LPU64" "
+ "flags="LPX64", epochcount=%d, writecount=%d)",
mdto, mdto->mot_ioepoch, mdto->mot_flags,
- mdto->mot_epochcount, mdto->mot_writecount);
+ mdto->mot_ioepoch_count, mdto->mot_writecount);
}
static const struct lu_device_operations mdt_lu_ops = {
if (!mdt->mdt_som_conf)
data->ocd_connect_flags &= ~OBD_CONNECT_SOM;
-
- spin_lock(&exp->exp_lock);
+
+ cfs_spin_lock(&exp->exp_lock);
exp->exp_connect_flags = data->ocd_connect_flags;
- spin_unlock(&exp->exp_lock);
+ cfs_spin_unlock(&exp->exp_lock);
data->ocd_version = LUSTRE_VERSION_CODE;
exp->exp_mdt_data.med_ibits_known = data->ocd_ibits_known;
}
return -EBADE;
}
- if (mdt->mdt_som_conf &&
- !(exp->exp_connect_flags & OBD_CONNECT_MDS_MDS) &&
- !(exp->exp_connect_flags & OBD_CONNECT_SOM)) {
+ if (mdt->mdt_som_conf && !exp_connect_som(exp) &&
+ !(exp->exp_connect_flags & OBD_CONNECT_MDS_MDS)) {
CWARN("%s: MDS has SOM enabled, but client does not support "
"it\n", mdt->mdt_md_dev.md_lu_dev.ld_obd->obd_name);
return -EBADE;
int rc = 0;
if (exp->exp_flvr.sf_rpc == SPTLRPC_FLVR_INVALID) {
- read_lock(&mdt->mdt_sptlrpc_lock);
+ cfs_read_lock(&mdt->mdt_sptlrpc_lock);
sptlrpc_target_choose_flavor(&mdt->mdt_sptlrpc_rset,
req->rq_sp_from,
req->rq_peer.nid,
&flvr);
- read_unlock(&mdt->mdt_sptlrpc_lock);
+ cfs_read_unlock(&mdt->mdt_sptlrpc_lock);
- spin_lock(&exp->exp_lock);
+ cfs_spin_lock(&exp->exp_lock);
exp->exp_sp_peer = req->rq_sp_from;
exp->exp_flvr = flvr;
rc = -EACCES;
}
- spin_unlock(&exp->exp_lock);
+ cfs_spin_unlock(&exp->exp_lock);
} else {
if (exp->exp_sp_peer != req->rq_sp_from) {
CERROR("RPC source %s doesn't match %s\n",
void *localdata)
{
struct mdt_thread_info *info;
- struct lsd_client_data *lcd;
struct obd_export *lexp;
struct lustre_handle conn = { 0 };
struct mdt_device *mdt;
rc = mdt_connect_internal(lexp, mdt, data);
if (rc == 0) {
- OBD_ALLOC_PTR(lcd);
- if (lcd != NULL) {
- struct mdt_thread_info *mti;
- mti = lu_context_key_get(&env->le_ctx,
- &mdt_thread_key);
- LASSERT(mti != NULL);
- mti->mti_exp = lexp;
- memcpy(lcd->lcd_uuid, cluuid, sizeof lcd->lcd_uuid);
- lexp->exp_mdt_data.med_lcd = lcd;
- rc = mdt_client_new(env, mdt);
- if (rc != 0) {
- OBD_FREE_PTR(lcd);
- lexp->exp_mdt_data.med_lcd = NULL;
- } else {
- mdt_export_stats_init(obd, lexp, localdata);
- }
- } else
- rc = -ENOMEM;
+ struct mdt_thread_info *mti;
+ struct lsd_client_data *lcd = lexp->exp_target_data.ted_lcd;
+ LASSERT(lcd);
+ mti = lu_context_key_get(&env->le_ctx, &mdt_thread_key);
+ LASSERT(mti != NULL);
+ mti->mti_exp = lexp;
+ memcpy(lcd->lcd_uuid, cluuid, sizeof lcd->lcd_uuid);
+ rc = mdt_client_new(env, mdt);
+ if (rc == 0)
+ mdt_export_stats_init(obd, lexp, 0, localdata);
}
out:
- if (rc != 0)
+ if (rc != 0) {
class_disconnect(lexp);
- else
+ *exp = NULL;
+ } else {
*exp = lexp;
+ }
RETURN(rc);
}
rc = mdt_connect_internal(exp, mdt_dev(obd->obd_lu_dev), data);
if (rc == 0)
- mdt_export_stats_init(obd, exp, localdata);
+ mdt_export_stats_init(obd, exp, 1, localdata);
RETURN(rc);
}
-static int mdt_mfd_cleanup(struct obd_export *exp)
+static int mdt_export_cleanup(struct obd_export *exp)
{
struct mdt_export_data *med = &exp->exp_mdt_data;
struct obd_device *obd = exp->exp_obd;
int rc = 0;
ENTRY;
- spin_lock(&med->med_open_lock);
- while (!list_empty(&med->med_open_head)) {
- struct list_head *tmp = med->med_open_head.next;
- mfd = list_entry(tmp, struct mdt_file_data, mfd_list);
+ cfs_spin_lock(&med->med_open_lock);
+ while (!cfs_list_empty(&med->med_open_head)) {
+ cfs_list_t *tmp = med->med_open_head.next;
+ mfd = cfs_list_entry(tmp, struct mdt_file_data, mfd_list);
/* Remove mfd handle so it can't be found again.
* We are consuming the mfd_list reference here. */
class_handle_unhash(&mfd->mfd_handle);
- list_move_tail(&mfd->mfd_list, &closing_list);
+ cfs_list_move_tail(&mfd->mfd_list, &closing_list);
}
- spin_unlock(&med->med_open_lock);
+ cfs_spin_unlock(&med->med_open_lock);
mdt = mdt_dev(obd->obd_lu_dev);
LASSERT(mdt != NULL);
info->mti_mdt = mdt;
info->mti_exp = exp;
- if (!list_empty(&closing_list)) {
+ if (!cfs_list_empty(&closing_list)) {
struct md_attr *ma = &info->mti_attr;
int lmm_size;
int cookie_size;
GOTO(out_cookie, rc = -ENOMEM);
/* Close any open files (which may also cause orphan unlinking). */
- list_for_each_entry_safe(mfd, n, &closing_list, mfd_list) {
- list_del_init(&mfd->mfd_list);
+ cfs_list_for_each_entry_safe(mfd, n, &closing_list, mfd_list) {
+ cfs_list_del_init(&mfd->mfd_list);
memset(&ma->ma_attr, 0, sizeof(ma->ma_attr));
ma->ma_lmm_size = lmm_size;
ma->ma_cookie_size = cookie_size;
ma->ma_valid = MA_FLAGS;
mdt_mfd_close(info, mfd);
}
- info->mti_mdt = NULL;
OBD_FREE(ma->ma_cookie, cookie_size);
ma->ma_cookie = NULL;
out_cookie:
ma->ma_lmm = NULL;
}
out_lmm:
+ info->mti_mdt = NULL;
+ /* cleanup client slot early */
+ /* Do not erase record for recoverable client. */
+ if (!obd->obd_fail || exp->exp_failed)
+ mdt_client_del(&env, mdt);
lu_env_fini(&env);
RETURN(rc);
static int mdt_obd_disconnect(struct obd_export *exp)
{
- struct mdt_device *mdt = mdt_dev(exp->exp_obd->obd_lu_dev);
int rc;
ENTRY;
LASSERT(exp);
class_export_get(exp);
- /* Disconnect early so that clients can't keep using export */
- rc = class_disconnect(exp);
- if (mdt->mdt_namespace != NULL || exp->exp_obd->obd_namespace != NULL)
- ldlm_cancel_locks_for_export(exp);
-
- /* release nid stat refererence */
- lprocfs_exp_cleanup(exp);
-
- /* complete all outstanding replies */
- spin_lock(&exp->exp_lock);
- while (!list_empty(&exp->exp_outstanding_replies)) {
- struct ptlrpc_reply_state *rs =
- list_entry(exp->exp_outstanding_replies.next,
- struct ptlrpc_reply_state, rs_exp_list);
- struct ptlrpc_service *svc = rs->rs_service;
-
- spin_lock(&svc->srv_lock);
- list_del_init(&rs->rs_exp_list);
- spin_lock(&rs->rs_lock);
- ptlrpc_schedule_difficult_reply(rs);
- spin_unlock(&rs->rs_lock);
- spin_unlock(&svc->srv_lock);
- }
- spin_unlock(&exp->exp_lock);
- rc = mdt_mfd_cleanup(exp);
+ rc = server_disconnect_export(exp);
+ if (rc != 0)
+ CDEBUG(D_IOCTL, "server disconnect error: %d\n", rc);
+
+ rc = mdt_export_cleanup(exp);
class_export_put(exp);
RETURN(rc);
}
ENTRY;
CFS_INIT_LIST_HEAD(&med->med_open_head);
- spin_lock_init(&med->med_open_lock);
- sema_init(&med->med_idmap_sem, 1);
+ cfs_spin_lock_init(&med->med_open_lock);
+ cfs_sema_init(&med->med_idmap_sem, 1);
med->med_idmap = NULL;
- spin_lock(&exp->exp_lock);
+ cfs_spin_lock(&exp->exp_lock);
exp->exp_connecting = 1;
- spin_unlock(&exp->exp_lock);
- rc = ldlm_init_export(exp);
+ cfs_spin_unlock(&exp->exp_lock);
+ rc = lut_client_alloc(exp);
+ if (rc == 0)
+ rc = ldlm_init_export(exp);
+
if (rc)
CERROR("Error %d while initializing export\n", rc);
RETURN(rc);
}
-static int mdt_destroy_export(struct obd_export *export)
+static int mdt_destroy_export(struct obd_export *exp)
{
struct mdt_export_data *med;
- struct obd_device *obd = export->exp_obd;
- struct mdt_device *mdt;
- struct mdt_thread_info *info;
- struct lu_env env;
int rc = 0;
ENTRY;
- med = &export->exp_mdt_data;
- if (exp_connect_rmtclient(export))
- mdt_cleanup_idmap(med);
+ med = &exp->exp_mdt_data;
+ if (exp_connect_rmtclient(exp))
+ mdt_cleanup_idmap(&exp->exp_mdt_data);
- target_destroy_export(export);
- ldlm_destroy_export(export);
+ target_destroy_export(exp);
+ ldlm_destroy_export(exp);
+ lut_client_free(exp);
- LASSERT(list_empty(&export->exp_outstanding_replies));
- LASSERT(list_empty(&med->med_open_head));
- if (obd_uuid_equals(&export->exp_client_uuid, &obd->obd_uuid))
+ LASSERT(cfs_list_empty(&exp->exp_outstanding_replies));
+ LASSERT(cfs_list_empty(&exp->exp_mdt_data.med_open_head));
+ if (obd_uuid_equals(&exp->exp_client_uuid, &exp->exp_obd->obd_uuid))
RETURN(0);
- mdt = mdt_dev(obd->obd_lu_dev);
- LASSERT(mdt != NULL);
-
- rc = lu_env_init(&env, LCT_MD_THREAD);
- if (rc)
- RETURN(rc);
-
- info = lu_context_key_get(&env.le_ctx, &mdt_thread_key);
- LASSERT(info != NULL);
- memset(info, 0, sizeof *info);
- info->mti_env = &env;
- info->mti_exp = export;
- info->mti_mdt = NULL;
- mdt_client_del(&env, mdt);
-
- lu_env_fini(&env);
RETURN(rc);
}
static void mdt_allow_cli(struct mdt_device *m, unsigned int flag)
{
if (flag & CONFIG_LOG)
- m->mdt_fl_cfglog = 1;
+ cfs_set_bit(MDT_FL_CFGLOG, &m->mdt_state);
/* also notify active event */
if (flag & CONFIG_SYNC)
- m->mdt_fl_synced = 1;
+ cfs_set_bit(MDT_FL_SYNCED, &m->mdt_state);
- if (m->mdt_fl_cfglog && m->mdt_fl_synced)
+ if (cfs_test_bit(MDT_FL_CFGLOG, &m->mdt_state) &&
+ cfs_test_bit(MDT_FL_SYNCED, &m->mdt_state)) {
+ struct obd_device *obd = m->mdt_md_dev.md_lu_dev.ld_obd;
+
/* Open for clients */
- m->mdt_md_dev.md_lu_dev.ld_obd->obd_no_conn = 0;
+ if (obd->obd_no_conn) {
+ cfs_spin_lock_bh(&obd->obd_processing_task_lock);
+ obd->obd_no_conn = 0;
+ cfs_spin_unlock_bh(&obd->obd_processing_task_lock);
+ }
+ }
}
static int mdt_upcall(const struct lu_env *env, struct md_device *md,
m->mdt_max_mdsize, m->mdt_max_cookiesize);
mdt_allow_cli(m, CONFIG_SYNC);
if (data)
- (*(__u64 *)data) = m->mdt_mount_count;
+ (*(__u64 *)data) =
+ m->mdt_lut.lut_obd->u.obt.obt_mount_count;
break;
case MD_NO_TRANS:
mti = lu_context_key_get(&env->le_ctx, &mdt_thread_key);
break;
case MD_LOV_CONFIG:
/* Check that MDT is not yet configured */
- LASSERT(!m->mdt_fl_cfglog);
+ LASSERT(!cfs_test_bit(MDT_FL_CFGLOG, &m->mdt_state));
break;
#ifdef HAVE_QUOTA_SUPPORT
case MD_LOV_QUOTA:
struct getinfo_fid2path *fpout, *fpin;
int rc = 0;
- fpin = key + size_round(sizeof(KEY_FID2PATH));
+ fpin = key + cfs_size_round(sizeof(KEY_FID2PATH));
fpout = val;
- if (lustre_msg_swabbed(mdt_info_req(info)->rq_reqmsg))
+ if (ptlrpc_req_need_swab(info->mti_pill->rc_req))
lustre_swab_fid2path(fpin);
memcpy(fpout, fpin, sizeof(*fpin));
RETURN(rc);
}
+static int mdt_ioc_version_get(struct mdt_thread_info *mti, void *karg)
+{
+ struct obd_ioctl_data *data = karg;
+ struct lu_fid *fid = (struct lu_fid *)data->ioc_inlbuf1;
+ __u64 version;
+ struct mdt_object *obj;
+ struct mdt_lock_handle *lh;
+ int rc;
+ ENTRY;
+ CDEBUG(D_IOCTL, "getting version for "DFID"\n", PFID(fid));
+ if (!fid_is_sane(fid))
+ RETURN(-EINVAL);
+
+ lh = &mti->mti_lh[MDT_LH_PARENT];
+ mdt_lock_reg_init(lh, LCK_CR);
+
+ obj = mdt_object_find_lock(mti, fid, lh, MDS_INODELOCK_UPDATE);
+ if (IS_ERR(obj))
+ RETURN(PTR_ERR(obj));
+
+ rc = mdt_object_exists(obj);
+ if (rc < 0) {
+ rc = -EREMOTE;
+ /**
+ * The caller should have located the correct MDS for this
+ * fid before requesting the version; finding a remote
+ * object here is therefore an error.
+ */
+ CERROR("nonlocal object "DFID"\n", PFID(fid));
+ } else {
+ version = mo_version_get(mti->mti_env, mdt_object_child(obj));
+ *(__u64 *)data->ioc_inlbuf2 = version;
+ rc = 0;
+ }
+ mdt_object_unlock_put(mti, obj, lh, 1);
+ RETURN(rc);
+}
+
/* ioctls on obd dev */
static int mdt_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
void *karg, void *uarg)
case OBD_IOC_CHANGELOG_CLEAR:
rc = mdt_ioc_child(&env, mdt, cmd, len, karg);
break;
+ case OBD_IOC_GET_OBJ_VERSION: {
+ struct mdt_thread_info *mti;
+ mti = lu_context_key_get(&env.le_ctx, &mdt_thread_key);
+ memset(mti, 0, sizeof *mti);
+ mti->mti_env = &env;
+ mti->mti_mdt = mdt;
+ mti->mti_exp = exp;
+
+ rc = mdt_ioc_version_get(mti, karg);
+ break;
+ }
default:
CERROR("Not supported cmd = %d for device %s\n",
cmd, obd->obd_name);
int mdt_postrecov(const struct lu_env *env, struct mdt_device *mdt)
{
struct lu_device *ld = md2lu_dev(mdt->mdt_child);
- struct obd_device *obd = mdt2obd_dev(mdt);
#ifdef HAVE_QUOTA_SUPPORT
+ struct obd_device *obd = mdt2obd_dev(mdt);
struct md_device *next = mdt->mdt_child;
#endif
- int rc, lost;
+ int rc;
ENTRY;
- /* if some clients didn't participate in recovery then we can possibly
- * lost sequence. Now we should increase sequence for safe value */
- lost = obd->obd_max_recoverable_clients - obd->obd_connected_clients;
- mdt_seq_adjust(env, mdt, lost);
rc = ld->ld_ops->ldo_recovery_complete(env, ld);
#ifdef HAVE_QUOTA_SUPPORT
return rc;
}
+/**
+ * Send a copytool req to a client
+ * Note this sends a request RPC from a server (MDT) to a client (MDC),
+ * the reverse of the normal client-to-server communication direction.
+ */
+int mdt_hsm_copytool_send(struct obd_export *exp)
+{
+ struct kuc_hdr *lh;
+ struct hsm_action_list *hal;
+ struct hsm_action_item *hai;
+ int rc, len;
+ ENTRY;
+
+ CWARN("%s: writing to mdc at %s\n", exp->exp_obd->obd_name,
+ libcfs_nid2str(exp->exp_connection->c_peer.nid));
+
+ len = sizeof(*lh) + sizeof(*hal) + MTI_NAME_MAXLEN +
+ /* for mockup below */ 2 * cfs_size_round(sizeof(*hai));
+ OBD_ALLOC(lh, len);
+ if (lh == NULL)
+ RETURN(-ENOMEM);
+
+ lh->kuc_magic = KUC_MAGIC;
+ lh->kuc_transport = KUC_TRANSPORT_HSM;
+ lh->kuc_msgtype = HMT_ACTION_LIST;
+ lh->kuc_msglen = len;
+
+ hal = (struct hsm_action_list *)(lh + 1);
+ hal->hal_version = HAL_VERSION;
+ hal->hal_archive_num = 1;
+ obd_uuid2fsname(hal->hal_fsname, exp->exp_obd->obd_name,
+ MTI_NAME_MAXLEN);
+
+ /* mock up an action list */
+ hal->hal_count = 2;
+ hai = hai_zero(hal);
+ hai->hai_action = HSMA_ARCHIVE;
+ hai->hai_fid.f_oid = 0xA00A;
+ hai->hai_len = sizeof(*hai);
+ hai = hai_next(hai);
+ hai->hai_action = HSMA_RESTORE;
+ hai->hai_fid.f_oid = 0xB00B;
+ hai->hai_len = sizeof(*hai);
+
+ /* Uses the ldlm reverse import; this rpc will be seen by
+ the ldlm_callback_handler */
+ rc = do_set_info_async(exp->exp_imp_reverse,
+ LDLM_SET_INFO, LUSTRE_OBD_VERSION,
+ sizeof(KEY_HSM_COPYTOOL_SEND),
+ KEY_HSM_COPYTOOL_SEND,
+ len, lh, NULL);
+
+ OBD_FREE(lh, len);
+
+ RETURN(rc);
+}
+
static struct obd_ops mdt_obd_device_ops = {
.o_owner = THIS_MODULE,
.o_set_info_async = mdt_obd_set_info_async,
}
/**
- * Enable/disable COS.
+ * Enable/disable COS (Commit On Sharing).
*
* Set/Clear the COS flag in mdt options.
*
}
/**
- * Check COS status.
+ * Check COS (Commit On Sharing) status.
*
- * Return COS flag status/
+ * Return COS flag status.
*
* \param mdt mdt device
*/
llo_local_obj_register(&mdt_last_recv);
- mdt_num_threads = MDT_NUM_THREADS;
+ if (mdt_num_threads > 0) {
+ if (mdt_num_threads > MDT_MAX_THREADS)
+ mdt_num_threads = MDT_MAX_THREADS;
+ if (mdt_num_threads < MDT_MIN_THREADS)
+ mdt_num_threads = MDT_MIN_THREADS;
+ mdt_max_threads = mdt_min_threads = mdt_num_threads;
+ } else {
+ mdt_max_threads = MDT_MAX_THREADS;
+ mdt_min_threads = MDT_MIN_THREADS;
+ if (mdt_min_threads < MDT_NUM_THREADS)
+ mdt_min_threads = MDT_NUM_THREADS;
+ }
+
lprocfs_mdt_init_vars(&lvars);
rc = class_register_type(&mdt_obd_device_ops, NULL,
lvars.module_vars, LUSTRE_MDT_NAME,
static struct mdt_handler mdt_mds_ops[] = {
DEF_MDT_HNDL_F(0, CONNECT, mdt_connect),
DEF_MDT_HNDL_F(0, DISCONNECT, mdt_disconnect),
-DEF_MDT_HNDL_F(0, SET_INFO, mdt_set_info),
+DEF_MDT_HNDL (0, SET_INFO, mdt_set_info,
+ &RQF_OBD_SET_INFO),
DEF_MDT_HNDL_F(0, GET_INFO, mdt_get_info),
DEF_MDT_HNDL_F(0 |HABEO_REFERO, GETSTATUS, mdt_getstatus),
DEF_MDT_HNDL_F(HABEO_CORPUS, GETATTR, mdt_getattr),