static LIST_HEAD(expired_lock_list);
static int ldlm_lock_busy(struct ldlm_lock *lock);
-static int ldlm_add_waiting_lock(struct ldlm_lock *lock, time64_t timeout);
-static int __ldlm_add_waiting_lock(struct ldlm_lock *lock, time64_t seconds);
+static int ldlm_add_waiting_lock(struct ldlm_lock *lock, timeout_t timeout);
+static int __ldlm_add_waiting_lock(struct ldlm_lock *lock, timeout_t timeout);
static inline int have_expired_locks(void)
{
/* Check if we need to prolong timeout */
if (!OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_HPREQ_TIMEOUT) &&
- lock->l_callback_timeout != 0 && /* not AST error */
+ lock->l_callback_timestamp != 0 && /* not AST error */
ldlm_lock_busy(lock)) {
LDLM_DEBUG(lock, "prolong the busy lock");
lock_res_and_lock(lock);
while (!list_empty(&waiting_locks_list)) {
lock = list_entry(waiting_locks_list.next, struct ldlm_lock,
l_pending_chain);
- if (lock->l_callback_timeout > ktime_get_seconds() ||
+ if (lock->l_callback_timestamp > ktime_get_seconds() ||
lock->l_req_mode == LCK_GROUP)
break;
*/
if (!list_empty(&waiting_locks_list)) {
time64_t now = ktime_get_seconds();
- time_t delta = 0;
+ timeout_t delta = 0;
lock = list_entry(waiting_locks_list.next, struct ldlm_lock,
l_pending_chain);
- if (lock->l_callback_timeout - now > 0)
- delta = lock->l_callback_timeout - now;
+ if (lock->l_callback_timestamp - now > 0)
+ delta = lock->l_callback_timestamp - now;
mod_timer(&waiting_locks_timer,
jiffies + cfs_time_seconds(delta));
}
*
* Called with the namespace lock held.
*/
-static int __ldlm_add_waiting_lock(struct ldlm_lock *lock, time64_t seconds)
+static int __ldlm_add_waiting_lock(struct ldlm_lock *lock, timeout_t delay)
{
unsigned long timeout_jiffies = jiffies;
time64_t now = ktime_get_seconds();
time64_t deadline;
- time_t timeout;
+ timeout_t timeout;
if (!list_empty(&lock->l_pending_chain))
return 0;
if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_HPREQ_NOTIMEOUT) ||
OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_HPREQ_TIMEOUT))
- seconds = 1;
+ delay = 1;
- deadline = now + seconds;
- if (likely(deadline > lock->l_callback_timeout))
- lock->l_callback_timeout = deadline;
+ deadline = now + delay;
+ if (likely(deadline > lock->l_callback_timestamp))
+ lock->l_callback_timestamp = deadline;
- timeout = clamp_t(time_t, lock->l_callback_timeout - now,
- 0, seconds);
+ timeout = clamp_t(timeout_t, lock->l_callback_timestamp - now,
+ 0, delay);
timeout_jiffies += cfs_time_seconds(timeout);
if (time_before(timeout_jiffies, waiting_locks_timer.expires) ||
obd_stale_export_adjust(lock->l_export);
}
-static int ldlm_add_waiting_lock(struct ldlm_lock *lock, time64_t timeout)
+static int ldlm_add_waiting_lock(struct ldlm_lock *lock, timeout_t timeout)
{
int ret;
if (ret)
ldlm_add_blocked_lock(lock);
- LDLM_DEBUG(lock, "%sadding to wait list(timeout: %lld, AT: %s)",
+ LDLM_DEBUG(lock, "%sadding to wait list(timeout: %d, AT: %s)",
ret == 0 ? "not re-" : "", timeout,
AT_OFF ? "off" : "on");
return ret;
} else {
time64_t now = ktime_get_seconds();
struct ldlm_lock *next;
- time_t delta = 0;
+ timeout_t delta = 0;
next = list_entry(list_next, struct ldlm_lock,
l_pending_chain);
- if (next->l_callback_timeout - now > 0)
- delta = lock->l_callback_timeout - now;
+ /* Re-arm the timer from the NEW head of the waiting list (next);
+ * 'lock' was just unlinked, so its deadline must not be used here. */
+ if (next->l_callback_timestamp - now > 0)
+ delta = next->l_callback_timestamp - now;
mod_timer(&waiting_locks_timer,
jiffies + cfs_time_seconds(delta));
*
* Called with namespace lock held.
*/
-int ldlm_refresh_waiting_lock(struct ldlm_lock *lock, time64_t timeout)
+int ldlm_refresh_waiting_lock(struct ldlm_lock *lock, timeout_t timeout)
{
if (lock->l_export == NULL) {
/* We don't have a "waiting locks list" on clients. */
RETURN(0);
}
-int ldlm_refresh_waiting_lock(struct ldlm_lock *lock, time64_t timeout)
+int ldlm_refresh_waiting_lock(struct ldlm_lock *lock, timeout_t timeout)
{
RETURN(0);
}
*
* \retval timeout in seconds to wait for the client reply
*/
-time64_t ldlm_bl_timeout(struct ldlm_lock *lock)
+timeout_t ldlm_bl_timeout(struct ldlm_lock *lock)
{
- time64_t timeout;
+ timeout_t timeout;
if (AT_OFF)
return obd_timeout / 2;
* lock callbacks too...
*/
timeout = at_get(&lock->l_export->exp_bl_lock_at);
- return max(timeout + (timeout >> 1), (time64_t)ldlm_enqueue_min);
+ return max_t(timeout_t, timeout + (timeout >> 1),
+ (timeout_t)ldlm_enqueue_min);
}
EXPORT_SYMBOL(ldlm_bl_timeout);
* the lock to the expired list
*/
LDLM_LOCK_GET(lock);
- lock->l_callback_timeout = 0; /* differentiate it from expired locks */
+ /* differentiate it from expired locks */
+ lock->l_callback_timestamp = 0;
list_add(&lock->l_pending_chain, &expired_lock_list);
wake_up(&expired_lock_wait_queue);
spin_unlock_bh(&waiting_locks_spinlock);
body = req_capsule_client_get(&req->rq_pill, &RMF_DLM_REQ);
body->lock_handle[0] = lock->l_remote_handle;
+ body->lock_handle[1].cookie = lock->l_handle.h_cookie;
body->lock_desc = *desc;
body->lock_flags |= ldlm_flags_to_wire(lock->l_flags & LDLM_FL_AST_MASK);
body = req_capsule_client_get(&req->rq_pill, &RMF_DLM_REQ);
body->lock_handle[0] = lock->l_remote_handle;
+ body->lock_handle[1].cookie = lock->l_handle.h_cookie;
body->lock_flags = ldlm_flags_to_wire(flags);
ldlm_lock2desc(lock, &body->lock_desc);
if (lvb_len > 0) {
dlm_req->lock_desc.l_resource.lr_type,
&dlm_req->lock_desc.l_policy_data,
&lock->l_policy_data);
- if (dlm_req->lock_desc.l_resource.lr_type == LDLM_EXTENT)
+ if (dlm_req->lock_desc.l_resource.lr_type == LDLM_EXTENT) {
lock->l_req_extent = lock->l_policy_data.l_extent;
- else if (dlm_req->lock_desc.l_resource.lr_type == LDLM_IBITS)
+ } else if (dlm_req->lock_desc.l_resource.lr_type == LDLM_IBITS) {
lock->l_policy_data.l_inodebits.try_bits =
dlm_req->lock_desc.l_policy_data.l_inodebits.try_bits;
+ lock->l_policy_data.l_inodebits.li_gid =
+ dlm_req->lock_desc.l_policy_data.l_inodebits.li_gid;
+ }
existing_lock:
cookie = req;
if ((flags & LATF_STATS) && ldlm_is_ast_sent(lock) &&
lock->l_blast_sent != 0) {
- time64_t delay = ktime_get_real_seconds() -
- lock->l_blast_sent;
+ timeout_t delay = 0;
+
+ if (ktime_get_real_seconds() > lock->l_blast_sent)
+ delay = ktime_get_real_seconds() -
+ lock->l_blast_sent;
LDLM_DEBUG(lock,
- "server cancels blocked lock after %llds",
- (s64)delay);
+ "server cancels blocked lock after %ds",
+ delay);
at_measured(&lock->l_export->exp_bl_lock_at, delay);
}
ldlm_lock_cancel(lock);
ldlm_callback_reply(req, 0);
while (to > 0) {
- schedule_timeout_interruptible(to);
+ to = schedule_timeout_interruptible(to);
if (ldlm_is_granted(lock) ||
ldlm_is_destroyed(lock))
break;
if (lock->l_granted_mode == LCK_PW &&
!lock->l_readers && !lock->l_writers &&
ktime_after(ktime_get(),
- ktime_add(lock->l_last_used,
- ktime_set(ns->ns_dirty_age_limit, 0)))) {
+ ktime_add(lock->l_last_used, ns->ns_dirty_age_limit))) {
unlock_res_and_lock(lock);
/* For MDS glimpse it is always DOM lock, set corresponding
init_completion(&blwi->blwi_comp);
INIT_LIST_HEAD(&blwi->blwi_head);
- if (memory_pressure_get())
+ if (current->flags & PF_MEMALLOC)
blwi->blwi_mem_pressure = 1;
blwi->blwi_ns = ns;
return ldlm_bl_to_thread(ns, ld, NULL, cancels, count, cancel_flags);
}
+/* Queue an async work item for @ns on the blocking (blp) thread with no
+ * lock and no cancel list; the blp thread recognises this NULL-lock item
+ * and runs ldlm_pool_recalc() for the namespace, then clears
+ * ns_rpc_recalc and drops the namespace reference. Caller is expected
+ * to hold a namespace reference that this work item consumes —
+ * TODO(review): confirm against ldlm_pools caller. */
+int ldlm_bl_to_thread_ns(struct ldlm_namespace *ns)
+{
+ return ldlm_bl_to_thread(ns, NULL, NULL, NULL, 0, LCF_ASYNC);
+}
+
int ldlm_bl_thread_wakeup(void)
{
wake_up(&ldlm_state->ldlm_bl_pool->blp_waitq);
ldlm_lock_remove_from_lru(lock);
ldlm_set_bl_ast(lock);
}
+ if (lock->l_remote_handle.cookie == 0)
+ lock->l_remote_handle = dlm_req->lock_handle[1];
unlock_res_and_lock(lock);
/*
static int ldlm_bl_thread_blwi(struct ldlm_bl_pool *blp,
struct ldlm_bl_work_item *blwi)
{
+ /* '1' for consistency with code that checks !mpflag to restore */
+ unsigned int mpflags = 1;
+
ENTRY;
if (blwi->blwi_ns == NULL)
RETURN(LDLM_ITER_STOP);
if (blwi->blwi_mem_pressure)
- memory_pressure_set();
+ mpflags = memalloc_noreclaim_save();
OBD_FAIL_TIMEOUT(OBD_FAIL_LDLM_PAUSE_CANCEL2, 4);
LCF_BL_AST);
ldlm_cli_cancel_list(&blwi->blwi_head, count, NULL,
blwi->blwi_flags);
- } else {
+ } else if (blwi->blwi_lock) {
ldlm_handle_bl_callback(blwi->blwi_ns, &blwi->blwi_ld,
blwi->blwi_lock);
+ } else {
+ ldlm_pool_recalc(&blwi->blwi_ns->ns_pool, true);
+ spin_lock(&blwi->blwi_ns->ns_lock);
+ blwi->blwi_ns->ns_rpc_recalc = 0;
+ spin_unlock(&blwi->blwi_ns->ns_lock);
+ ldlm_namespace_put(blwi->blwi_ns);
}
+
if (blwi->blwi_mem_pressure)
- memory_pressure_clr();
+ memalloc_noreclaim_restore(mpflags);
if (blwi->blwi_flags & LCF_ASYNC)
OBD_FREE(blwi, sizeof(*blwi));
RETURN(0);
}
-void ldlm_resource_init_once(void *p)
-{
- /*
- * It is import to initialise the spinlock only once,
- * as ldlm_lock_change_resource() could try to lock
- * the resource *after* it has been freed and possibly
- * reused. SLAB_TYPESAFE_BY_RCU ensures the memory won't
- * be freed while the lock is being taken, but we need to
- * ensure that it doesn't get reinitialized either.
- */
- struct ldlm_resource *res = p;
-
- memset(res, 0, sizeof(*res));
- mutex_init(&res->lr_lvb_mutex);
- spin_lock_init(&res->lr_lock);
-}
-
int ldlm_init(void)
{
ldlm_resource_slab = kmem_cache_create("ldlm_resources",
sizeof(struct ldlm_resource), 0,
- SLAB_TYPESAFE_BY_RCU |
- SLAB_HWCACHE_ALIGN,
- ldlm_resource_init_once);
+ SLAB_HWCACHE_ALIGN, NULL);
if (ldlm_resource_slab == NULL)
return -ENOMEM;
{
if (ldlm_refcount)
CERROR("ldlm_refcount is %d in ldlm_exit!\n", ldlm_refcount);
- /* These two lines should not be needed, but appear to fix
- * a crash on RHEL7. The slab_cache sometimes gets freed before the
- * last slab is rcu_freed, and that can cause kmem_freepages()
- * to free too many pages and trip a BUG
- */
- kmem_cache_shrink(ldlm_resource_slab);
- synchronize_rcu();
kmem_cache_destroy(ldlm_resource_slab);
/*
* ldlm_lock_put() use RCU to call ldlm_lock_free, so need call