struct inode *lr_lvb_inode;
};
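+/*
+ * Namespace accessors: wrap the lr_namespace back-pointer so that
+ * callers need not dereference it directly.  If the way a resource
+ * finds its namespace ever changes, only these helpers need updating.
+ */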
+static inline struct ldlm_namespace *
+ldlm_res_to_ns(struct ldlm_resource *res)
+{
+        return res->lr_namespace;
+}
+
+static inline struct ldlm_namespace *
+ldlm_lock_to_ns(struct ldlm_lock *lock)
+{
+        return ldlm_res_to_ns(lock->l_resource);
+}
+
+static inline char *
+ldlm_lock_to_ns_name(struct ldlm_lock *lock)
+{
+        return ldlm_lock_to_ns(lock)->ns_name;
+}
+
+static inline struct adaptive_timeout *
+ldlm_lock_to_ns_at(struct ldlm_lock *lock)
+{
+        return &ldlm_lock_to_ns(lock)->ns_at_estimate;
+}
+
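+/*
+ * Conversion pattern (illustrative): code that used to reach the
+ * namespace through the back-pointer, e.g.
+ *
+ *         ns = lock->l_resource->lr_namespace;
+ *
+ * should now go through the accessor:
+ *
+ *         ns = ldlm_lock_to_ns(lock);
+ */
+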
struct ldlm_ast_work {
struct ldlm_lock *w_lock;
int w_blocking;
static inline int ldlm_res_lvbo_update(struct ldlm_resource *res,
struct ptlrpc_request *r, int increase)
{
-        if (res->lr_namespace->ns_lvbo &&
-            res->lr_namespace->ns_lvbo->lvbo_update) {
-                return res->lr_namespace->ns_lvbo->lvbo_update(res, r,
-                                                               increase);
+        struct ldlm_namespace *ns = ldlm_res_to_ns(res);
+
+        if (ns->ns_lvbo && ns->ns_lvbo->lvbo_update) {
+                return ns->ns_lvbo->lvbo_update(res, r, increase);
}
return 0;
}
cfs_spin_lock(&lock->l_lock);
res = lock->l_resource;
- if (ns_is_server(res->lr_namespace))
+ if (ns_is_server(ldlm_res_to_ns(res)))
/* on server-side resource of lock doesn't change */
cfs_spin_unlock(&lock->l_lock);
{
struct ldlm_resource *res = lock->l_resource;
- if (ns_is_server(res->lr_namespace)) {
+ if (ns_is_server(ldlm_res_to_ns(res))) {
/* on server-side resource of lock doesn't change */
unlock_res(res);
return;
return 1;
CDEBUG(D_DLMTRACE, "contended locks = %d\n", contended_locks);
- if (contended_locks > res->lr_namespace->ns_contended_locks)
+ if (contended_locks > ldlm_res_to_ns(res)->ns_contended_locks)
res->lr_contention_time = now;
return cfs_time_before(now, cfs_time_add(res->lr_contention_time,
- cfs_time_seconds(res->lr_namespace->ns_contention_time)));
+ cfs_time_seconds(ldlm_res_to_ns(res)->ns_contention_time)));
}
struct ldlm_extent_compat_args {
(*flags & LDLM_FL_DENY_ON_CONTENTION) &&
req->l_req_mode != LCK_GROUP &&
req_end - req_start <=
- req->l_resource->lr_namespace->ns_max_nolock_size)
+ ldlm_res_to_ns(req->l_resource)->ns_max_nolock_size)
GOTO(destroylock, compat = -EUSERS);
RETURN(compat);
rc = ldlm_run_ast_work(&rpc_list, LDLM_WORK_BL_AST);
if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_OST_FAIL_RACE) &&
- !ns_is_client(res->lr_namespace))
+ !ns_is_client(ldlm_res_to_ns(res)))
class_fail_export(lock->l_export);
lock_res(res);
ldlm_error_t *err, cfs_list_t *work_list)
{
struct ldlm_resource *res = req->l_resource;
- struct ldlm_namespace *ns = res->lr_namespace;
+ struct ldlm_namespace *ns = ldlm_res_to_ns(res);
cfs_list_t *tmp;
cfs_list_t *ownlocks = NULL;
struct ldlm_lock *lock = NULL;
LASSERT(lock);
LASSERT(flag == LDLM_CB_CANCELING);
- ns = lock->l_resource->lr_namespace;
+ ns = ldlm_lock_to_ns(lock);
/* take lock off the deadlock detection waitq. */
cfs_spin_lock(&ldlm_flock_waitq_lock);
LASSERT(cfs_list_empty(&lock->l_res_link));
LASSERT(cfs_list_empty(&lock->l_pending_chain));
- cfs_atomic_dec(&res->lr_namespace->ns_locks);
+ cfs_atomic_dec(&ldlm_res_to_ns(res)->ns_locks);
lu_ref_del(&res->lr_reference, "lock", lock);
ldlm_resource_putref(res);
lock->l_resource = NULL;
{
int rc = 0;
if (!cfs_list_empty(&lock->l_lru)) {
- struct ldlm_namespace *ns = lock->l_resource->lr_namespace;
+ struct ldlm_namespace *ns = ldlm_lock_to_ns(lock);
+
LASSERT(lock->l_resource->lr_type != LDLM_FLOCK);
cfs_list_del_init(&lock->l_lru);
if (lock->l_flags & LDLM_FL_SKIPPED)
int ldlm_lock_remove_from_lru(struct ldlm_lock *lock)
{
- struct ldlm_namespace *ns = lock->l_resource->lr_namespace;
+ struct ldlm_namespace *ns = ldlm_lock_to_ns(lock);
int rc;
+
ENTRY;
cfs_spin_lock(&ns->ns_unused_lock);
rc = ldlm_lock_remove_from_lru_nolock(lock);
void ldlm_lock_add_to_lru_nolock(struct ldlm_lock *lock)
{
- struct ldlm_namespace *ns = lock->l_resource->lr_namespace;
+ struct ldlm_namespace *ns = ldlm_lock_to_ns(lock);
+
lock->l_last_used = cfs_time_current();
LASSERT(cfs_list_empty(&lock->l_lru));
LASSERT(lock->l_resource->lr_type != LDLM_FLOCK);
void ldlm_lock_add_to_lru(struct ldlm_lock *lock)
{
- struct ldlm_namespace *ns = lock->l_resource->lr_namespace;
+ struct ldlm_namespace *ns = ldlm_lock_to_ns(lock);
+
ENTRY;
cfs_spin_lock(&ns->ns_unused_lock);
ldlm_lock_add_to_lru_nolock(lock);
void ldlm_lock_touch_in_lru(struct ldlm_lock *lock)
{
- struct ldlm_namespace *ns = lock->l_resource->lr_namespace;
+ struct ldlm_namespace *ns = ldlm_lock_to_ns(lock);
+
ENTRY;
cfs_spin_lock(&ns->ns_unused_lock);
if (!cfs_list_empty(&lock->l_lru)) {
CFS_INIT_LIST_HEAD(&lock->l_sl_policy);
CFS_INIT_HLIST_NODE(&lock->l_exp_hash);
- cfs_atomic_inc(&resource->lr_namespace->ns_locks);
+ cfs_atomic_inc(&ldlm_res_to_ns(resource)->ns_locks);
CFS_INIT_LIST_HEAD(&lock->l_handle.h_link);
class_handle_hash(&lock->l_handle, lock_handle_addref);
RETURN(NULL);
LASSERT(lock->l_resource != NULL);
- ns = lock->l_resource->lr_namespace;
+ ns = ldlm_lock_to_ns(lock);
LASSERT(ns != NULL);
lu_ref_add_atomic(&lock->l_reference, "handle", cfs_current());
lock_res_and_lock(lock);
- ns = lock->l_resource->lr_namespace;
+ ns = ldlm_lock_to_ns(lock);
ldlm_lock_decref_internal_nolock(lock, mode);
if (work_list && lock->l_completion_ast != NULL)
ldlm_add_ast_work_item(lock, NULL, work_list);
- ldlm_pool_add(&res->lr_namespace->ns_pool, lock);
+ ldlm_pool_add(&ldlm_res_to_ns(res)->ns_pool, lock);
EXIT;
}
old_lock = ldlm_handle2lock(lockh);
LASSERT(old_lock);
- ns = old_lock->l_resource->lr_namespace;
+ ns = ldlm_lock_to_ns(old_lock);
res_id = &old_lock->l_resource->lr_name;
type = old_lock->l_resource->lr_type;
mode = old_lock->l_req_mode;
{
struct ldlm_lock *lock = *lockp;
struct ldlm_resource *res = lock->l_resource;
- int local = ns_is_client(res->lr_namespace);
+ int local = ns_is_client(ldlm_res_to_ns(res));
ldlm_processing_policy policy;
ldlm_error_t rc = ELDLM_OK;
struct ldlm_interval *node = NULL;
ENTRY;
/* Local lock trees don't get reprocessed. */
- if (ns_is_client(res->lr_namespace)) {
+ if (ns_is_client(ldlm_res_to_ns(res))) {
EXIT;
return;
}
lock_res_and_lock(lock);
res = lock->l_resource;
- ns = res->lr_namespace;
+ ns = ldlm_res_to_ns(res);
/* Please do not, no matter how tempting, remove this LBUG without
* talking to me first. -phik */
* Remove the lock from pool as it will be added again in
* ldlm_grant_lock() called below.
*/
- ns = lock->l_resource->lr_namespace;
+ ns = ldlm_lock_to_ns(lock);
ldlm_pool_del(&ns->ns_pool, lock);
lock->l_req_mode = new_mode;
lock_res_and_lock(lock);
res = lock->l_resource;
- ns = res->lr_namespace;
+ ns = ldlm_res_to_ns(res);
old_mode = lock->l_req_mode;
lock->l_req_mode = new_mode;
ldlm_pool_del(&ns->ns_pool, lock);
/* If this is a local resource, put it on the appropriate list. */
- if (ns_is_client(res->lr_namespace)) {
+ if (ns_is_client(ldlm_res_to_ns(res))) {
if (*flags & (LDLM_FL_BLOCK_CONV | LDLM_FL_BLOCK_GRANTED)) {
ldlm_resource_add_lock(res, &res->lr_converting, lock);
} else {
"res: "LPU64"/"LPU64" rrc: %d type: %s ["LPU64"->"LPU64
"] (req "LPU64"->"LPU64") flags: "LPX64" remote: "LPX64
" expref: %d pid: %u timeout %lu\n",
- lock->l_resource->lr_namespace->ns_name, lock,
+ ldlm_lock_to_ns_name(lock), lock,
lock->l_handle.h_cookie, cfs_atomic_read(&lock->l_refc),
lock->l_readers, lock->l_writers,
ldlm_lockname[lock->l_granted_mode],
"res: "LPU64"/"LPU64" rrc: %d type: %s pid: %d "
"["LPU64"->"LPU64"] flags: "LPX64" remote: "LPX64
" expref: %d pid: %u timeout: %lu\n",
- lock->l_resource->lr_namespace->ns_name, lock,
+ ldlm_lock_to_ns_name(lock), lock,
lock->l_handle.h_cookie, cfs_atomic_read(&lock->l_refc),
lock->l_readers, lock->l_writers,
ldlm_lockname[lock->l_granted_mode],
"res: "LPU64"/"LPU64" bits "LPX64" rrc: %d type: %s "
"flags: "LPX64" remote: "LPX64" expref: %d "
"pid: %u timeout: %lu\n",
- lock->l_resource->lr_namespace->ns_name,
+ ldlm_lock_to_ns_name(lock),
lock, lock->l_handle.h_cookie,
cfs_atomic_read (&lock->l_refc),
lock->l_readers, lock->l_writers,
" ns: %s lock: %p/"LPX64" lrc: %d/%d,%d mode: %s/%s "
"res: "LPU64"/"LPU64" rrc: %d type: %s flags: "LPX64" "
"remote: "LPX64" expref: %d pid: %u timeout %lu\n",
- lock->l_resource->lr_namespace->ns_name,
+ ldlm_lock_to_ns_name(lock),
lock, lock->l_handle.h_cookie,
cfs_atomic_read (&lock->l_refc),
lock->l_readers, lock->l_writers,
LDLM_LOCK_RELEASE(lock);
continue;
}
- lock->l_resource->lr_namespace->ns_timeouts++;
+ ldlm_lock_to_ns(lock)->ns_timeouts++;
LDLM_ERROR(lock, "lock callback timer expired after %lds: "
"evicting client at %s ",
cfs_time_current_sec()- lock->l_last_activity,
/* Server-side enqueue wait time estimate, used in
__ldlm_add_waiting_lock to set future enqueue timers */
if (total_enqueue_wait < ldlm_get_enq_timeout(lock))
- at_measured(&lock->l_resource->lr_namespace->ns_at_estimate,
+ at_measured(ldlm_lock_to_ns_at(lock),
total_enqueue_wait);
else
/* bz18618. Don't add lock enqueue time we spend waiting for a
LDLM_DEBUG(lock, "lock completed after %lus; estimate was %ds. "
"It is likely that a previous callback timed out.",
total_enqueue_wait,
- at_get(&lock->l_resource->lr_namespace->ns_at_estimate));
+ at_get(ldlm_lock_to_ns_at(lock)));
ptlrpc_request_set_replen(req);
last_dump = next_dump;
next_dump = cfs_time_shift(300);
ldlm_namespace_dump(D_DLMTRACE,
- lock->l_resource->lr_namespace);
+ ldlm_lock_to_ns(lock));
if (last_dump == 0)
libcfs_debug_dumplog();
}
from a single node. */
int ldlm_get_enq_timeout(struct ldlm_lock *lock)
{
- int timeout = at_get(&lock->l_resource->lr_namespace->ns_at_estimate);
+ int timeout = at_get(ldlm_lock_to_ns_at(lock));
if (AT_OFF)
return obd_timeout / 2;
/* Since these are non-updating timeouts, we should be conservative.
CFS_DURATION_T"s", delay);
/* Update our time estimate */
- at_measured(&lock->l_resource->lr_namespace->ns_at_estimate,
+ at_measured(ldlm_lock_to_ns_at(lock),
delay);
result = 0;
}
cfs_spin_unlock(&imp->imp_lock);
}
- if (ns_is_client(lock->l_resource->lr_namespace) &&
+ if (ns_is_client(ldlm_lock_to_ns(lock)) &&
OBD_FAIL_CHECK_RESET(OBD_FAIL_LDLM_INTR_CP_AST,
OBD_FAIL_LDLM_CP_BL_RACE | OBD_FAIL_ONCE)) {
lock->l_flags |= LDLM_FL_FAIL_LOC;
struct ldlm_resource *res;
int rc;
ENTRY;
- if (ns_is_client(lock->l_resource->lr_namespace)) {
+ if (ns_is_client(ldlm_lock_to_ns(lock))) {
CERROR("Trying to cancel local lock\n");
LBUG();
}
}
ldlm_lock_cancel(lock);
} else {
- if (ns_is_client(lock->l_resource->lr_namespace)) {
+ if (ns_is_client(ldlm_lock_to_ns(lock))) {
LDLM_ERROR(lock, "Trying to cancel local lock");
LBUG();
}
RCL_CLIENT, 0);
LASSERT(avail > 0);
- ns = lock->l_resource->lr_namespace;
+ ns = ldlm_lock_to_ns(lock);
flags = ns_connect_lru_resize(ns) ?
LDLM_CANCEL_LRUR : LDLM_CANCEL_AGED;
count += ldlm_cancel_lru_local(ns, &cancels, 0, avail - 1,
int flags)
{
cfs_list_t *tmp;
- int rc = 0, client = ns_is_client(res->lr_namespace);
+ int rc = 0, client = ns_is_client(ldlm_res_to_ns(res));
int local_only = (flags & LDLM_FL_LOCAL_ONLY);
ENTRY;
void __ldlm_resource_putref_final(struct ldlm_resource *res)
{
- struct ldlm_namespace *ns = res->lr_namespace;
+ struct ldlm_namespace *ns = ldlm_res_to_ns(res);
LASSERT_SPIN_LOCKED(&ns->ns_hash_lock);
int ldlm_resource_putref_internal(struct ldlm_resource *res, int locked)
{
- struct ldlm_namespace *ns = res->lr_namespace;
+ struct ldlm_namespace *ns = ldlm_res_to_ns(res);
ENTRY;
CDEBUG(D_INFO, "putref res: %p count: %d\n", res,
int mdt_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
void *data, int flag)
{
- struct obd_device *obd = lock->l_resource->lr_namespace->ns_obd;
+ struct obd_device *obd = ldlm_lock_to_ns(lock)->ns_obd;
struct mdt_device *mdt = mdt_dev(obd->obd_lu_dev);
int rc;
ENTRY;
* lock, and should not be granted if the lock will be blocked.
*/
- LASSERT(ns == res->lr_namespace);
+ LASSERT(ns == ldlm_res_to_ns(res));
lock_res(res);
rc = policy(lock, &tmpflags, 0, &err, &rpc_list);
check_res_locked(res);
res->lr_lvb_data = lvb;
res->lr_lvb_len = sizeof(*lvb);
- obd = res->lr_namespace->ns_lvbp;
+ obd = ldlm_res_to_ns(res)->ns_lvbp;
LASSERT(obd != NULL);
CDEBUG(D_INODE, "%s: filter_lvbo_init(o_seq="LPU64", o_id="
disk_update:
/* Update the LVB from the disk inode */
- obd = res->lr_namespace->ns_lvbp;
+ obd = ldlm_res_to_ns(res)->ns_lvbp;
LASSERT(obd);
inode = res->lr_lvb_inode;