unsigned long rs_committed:1;/* the transaction was committed
and the rs was dispatched
by ptlrpc_commit_replies */
- unsigned long rs_convert_lock:1; /* need to convert saved
- * locks to COS mode */
atomic_t rs_refcount; /* number of users */
/** Number of locks awaiting client ACK */
int rs_nlocks;
* @{
*/
void ptlrpc_save_lock(struct ptlrpc_request *req, struct lustre_handle *lock,
- int mode, bool no_ack, bool convert_lock);
+ int mode, bool no_ack);
void ptlrpc_commit_replies(struct obd_export *exp);
void ptlrpc_dispatch_difficult_reply(struct ptlrpc_reply_state *rs);
void ptlrpc_schedule_difficult_reply(struct ptlrpc_reply_state *rs);
list_del_init(&rs->rs_exp_list);
spin_lock(&rs->rs_lock);
- /* clear rs_convert_lock to make sure rs is handled and put */
- rs->rs_convert_lock = 0;
ptlrpc_schedule_difficult_reply(rs);
spin_unlock(&rs->rs_lock);
bld.bl_same_client = lock->l_client_cookie ==
lock->l_blocking_lock->l_client_cookie;
/* if two locks are initiated from the same MDT, transactions are
- * independent, or the request lock mode is CR|PR|CW, no need to trigger
+ * independent, or the request lock mode isn't EX|PW, no need to trigger
* CoS because current lock will be downgraded to TXN mode soon, then
* the blocking lock can be granted.
*/
if (lock->l_blocking_lock->l_policy_data.l_inodebits.li_initiator_id ==
lock->l_policy_data.l_inodebits.li_initiator_id ||
- lock->l_blocking_lock->l_req_mode & (LCK_CR | LCK_PR | LCK_CW))
+ !(lock->l_blocking_lock->l_req_mode & (LCK_EX | LCK_PW)))
bld.bl_txn_dependent = false;
else
bld.bl_txn_dependent = true;
break;
case LDLM_IBITS:
- libcfs_debug_msg(msgdata,
- "%pV ns: %s lock: %p/%#llx lrc: %d/%d,%d mode: %s/%s res: " DLDLMRES " bits %#llx/%#llx rrc: %d type: %s gid %llu flags: %#llx nid: %s remote: %#llx expref: %d pid: %u timeout: %lld lvb_type: %d initiator: MDT%d\n",
+ if (!lock->l_remote_handle.cookie)
+ libcfs_debug_msg(msgdata,
+ "%pV ns: %s lock: %p/%#llx lrc: %d/%d,%d mode: %s/%s res: " DLDLMRES " bits %#llx/%#llx rrc: %d type: %s flags: %#llx pid: %u initiator: MDT%d\n",
+ &vaf,
+ ldlm_lock_to_ns_name(lock),
+ lock, lock->l_handle.h_cookie,
+ refcount_read(&lock->l_handle.h_ref),
+ lock->l_readers, lock->l_writers,
+ ldlm_lockname[lock->l_granted_mode],
+ ldlm_lockname[lock->l_req_mode],
+ PLDLMRES(resource),
+ lock->l_policy_data.l_inodebits.bits,
+ lock->l_policy_data.l_inodebits.try_bits,
+ atomic_read(&resource->lr_refcount),
+ ldlm_typename[resource->lr_type],
+ lock->l_flags, lock->l_pid,
+ lock->l_policy_data.l_inodebits.li_initiator_id);
+ else
+ libcfs_debug_msg(msgdata,
+ "%pV ns: %s lock: %p/%#llx lrc: %d/%d,%d mode: %s/%s res: " DLDLMRES " bits %#llx/%#llx rrc: %d type: %s gid %llu flags: %#llx nid: %s remote: %#llx expref: %d pid: %u timeout: %lld lvb_type: %d\n",
&vaf,
ldlm_lock_to_ns_name(lock),
lock, lock->l_handle.h_cookie,
lock->l_remote_handle.cookie,
exp ? refcount_read(&exp->exp_handle.h_ref) : -99,
lock->l_pid, lock->l_callback_timestamp,
- lock->l_lvb_type,
- lock->l_policy_data.l_inodebits.li_initiator_id);
+ lock->l_lvb_type);
break;
default:
* The 'data' parameter is l_ast_data in the first case and
* callback arguments in the second one. Distinguish them by that.
*/
- if (!data || data == lock->l_ast_data || !arg->bl_desc)
- goto skip_cos_checks;
-
- if (lock->l_req_mode & (LCK_PW | LCK_EX)) {
- if (mdt_cos_is_enabled(mdt) &&
- !arg->bl_desc->bl_same_client) {
- mdt_set_lock_sync(lock);
- } else if (mdt_slc_is_enabled(mdt) &&
- arg->bl_desc->bl_txn_dependent) {
- mdt_set_lock_sync(lock);
- /* we may do extra commit here, because there is a small
- * window to miss a commit:
- * 1. lock was unlocked (saved), but not downgraded to
- * TXN mode yet (REP-ACK not received).
- * 2. a conflict lock enqueued and we come herej if we
- * don't trigger commit now, the enqueued lock will wait
- * untill system periodic commit.
- *
- * Fortunately this window is quite small, not to say
- * distributed operation is rare too.
- */
+ if (data && data != lock->l_ast_data && arg->bl_desc) {
+ if (lock->l_req_mode & (LCK_COS | LCK_TXN))
commit_async = true;
- }
- } else if (lock->l_req_mode == LCK_COS || lock->l_req_mode == LCK_TXN) {
- commit_async = true;
+ else if ((lock->l_req_mode & (LCK_PW | LCK_EX)) &&
+ ((mdt_cos_is_enabled(mdt) &&
+ !arg->bl_desc->bl_same_client) ||
+ (mdt_slc_is_enabled(mdt) &&
+ arg->bl_desc->bl_txn_dependent)))
+ mdt_set_lock_sync(lock);
}
-skip_cos_checks:
rc = ldlm_blocking_ast_nocheck(lock);
if (commit_async) {
struct mdt_device *mdt = info->mti_mdt;
struct ldlm_lock *lock = ldlm_handle2lock(h);
struct ptlrpc_request *req = mdt_info_req(info);
- bool cos = mdt_cos_is_enabled(mdt);
- bool convert_lock = !cos && mdt_slc_is_enabled(mdt);
+ bool no_ack = false;
LASSERTF(lock != NULL, "no lock for cookie %#llx\n",
h->cookie);
LDLM_DEBUG(lock, "save lock request %p reply "
"state %p transno %lld\n", req,
req->rq_reply_state, req->rq_transno);
- if (cos) {
- ldlm_lock_mode_downgrade(lock, LCK_COS);
+ if (mdt_cos_is_enabled(mdt)) {
mode = LCK_COS;
+ no_ack = true;
+ ldlm_lock_mode_downgrade(lock, mode);
+ } else if (mdt_slc_is_enabled(mdt)) {
+ no_ack = true;
+ if (mode != LCK_TXN) {
+ mode = LCK_TXN;
+ ldlm_lock_mode_downgrade(lock,
+ mode);
+ }
}
if (req->rq_export->exp_disconnected)
mdt_fid_unlock(h, mode);
else
- ptlrpc_save_lock(req, h, mode, cos,
- convert_lock);
+ ptlrpc_save_lock(req, h, mode, no_ack);
} else {
mdt_fid_unlock(h, mode);
}
ldlm_lock_decref(lh, mode);
}
+/* SLC in SYNC_LOCK_CANCEL_BLOCKING mode effectively means this is a DNE system */
static inline bool mdt_slc_is_enabled(struct mdt_device *mdt)
{
return mdt->mdt_lut.lut_sync_lock_cancel == SYNC_LOCK_CANCEL_BLOCKING;
spin_lock(&rs->rs_lock);
for (i = 0; i < rs->rs_nlocks; i++)
ptlrpc_save_lock(req, &rs->rs_locks[i],
- rs->rs_modes[i], rs->rs_no_ack,
- rs->rs_convert_lock);
+ rs->rs_modes[i], rs->rs_no_ack);
rs->rs_nlocks = 0;
DEBUG_REQ(D_HA, req, "stole locks for");
nrs->rs_difficult = 1;
nrs->rs_no_ack = rs->rs_no_ack;
- nrs->rs_convert_lock = rs->rs_convert_lock;
for (i = 0; i < rs->rs_nlocks; i++) {
nrs->rs_locks[i] = rs->rs_locks[i];
nrs->rs_modes[i] = rs->rs_modes[i];
 * Puts a lock and its mode into the reply state associated with the request reply.
*/
void ptlrpc_save_lock(struct ptlrpc_request *req, struct lustre_handle *lock,
- int mode, bool no_ack, bool convert_lock)
+ int mode, bool no_ack)
{
struct ptlrpc_reply_state *rs = req->rq_reply_state;
int idx;
rs->rs_modes[idx] = mode;
rs->rs_difficult = 1;
rs->rs_no_ack = no_ack;
- rs->rs_convert_lock = convert_lock;
}
EXPORT_SYMBOL(ptlrpc_save_lock);
* rs_lock, which we do right next.
*/
if (!rs->rs_committed) {
- /*
- * if rs was commited, no need to convert locks, don't check
- * rs_committed here because rs may never be added into
- * exp_uncommitted_replies and this flag never be set, see
- * target_send_reply()
- */
- if (rs->rs_convert_lock &&
- rs->rs_transno > exp->exp_last_committed) {
- struct ldlm_lock *lock;
- struct ldlm_lock *ack_locks[RS_MAX_LOCKS] = { NULL };
-
- spin_lock(&rs->rs_lock);
- if (rs->rs_convert_lock &&
- rs->rs_transno > exp->exp_last_committed) {
- nlocks = rs->rs_nlocks;
- while (nlocks-- > 0) {
- /*
- * NB don't assume rs is always handled
- * by the same service thread (see
- * ptlrpc_hr_select, so REP-ACK hr may
- * race with trans commit, while the
- * latter will release locks, get locks
- * here early to downgrade to TXN mode
- * safely.
- */
- lock = ldlm_handle2lock(
- &rs->rs_locks[nlocks]);
- LASSERT(lock);
- ack_locks[nlocks] = lock;
- rs->rs_modes[nlocks] = LCK_TXN;
- }
- nlocks = rs->rs_nlocks;
- rs->rs_convert_lock = 0;
- /*
- * clear rs_scheduled so that commit callback
- * can schedule again
- */
- rs->rs_scheduled = 0;
- spin_unlock(&rs->rs_lock);
-
- while (nlocks-- > 0) {
- lock = ack_locks[nlocks];
- ldlm_lock_mode_downgrade(lock, LCK_TXN);
- LDLM_LOCK_PUT(lock);
- }
- RETURN(0);
- }
- spin_unlock(&rs->rs_lock);
- }
-
spin_lock(&exp->exp_uncommitted_replies_lock);
list_del_init(&rs->rs_obd_list);
spin_unlock(&exp->exp_uncommitted_replies_lock);
}
rs->rs_scheduled = 0;
- rs->rs_convert_lock = 0;
if (rs->rs_unlinked) {
/* Off the net */