-/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
- * vim:expandtab:shiftwidth=8:tabstop=8:
- *
+/*
* GPL HEADER START
*
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
/*
* Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2012, Whamcloud, Inc.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
*/
ldlm_cli_pool_pop_slv(pl);
- cfs_spin_lock(&ns->ns_unused_lock);
+ cfs_spin_lock(&ns->ns_lock);
unused = ns->ns_nr_unused;
- cfs_spin_unlock(&ns->ns_unused_lock);
+ cfs_spin_unlock(&ns->ns_lock);
if (nr) {
- canceled = ldlm_cancel_lru(ns, nr, LDLM_SYNC,
+ canceled = ldlm_cancel_lru(ns, nr, LDLM_ASYNC,
LDLM_CANCEL_SHRINK);
}
#ifdef __KERNEL__
if (!var_name)
RETURN(-ENOMEM);
- parent_ns_proc = lprocfs_srch(ldlm_ns_proc_dir, ns->ns_name);
+ parent_ns_proc = lprocfs_srch(ldlm_ns_proc_dir,
+ ldlm_ns_name(ns));
if (parent_ns_proc == NULL) {
CERROR("%s: proc entry is not initialized\n",
- ns->ns_name);
+ ldlm_ns_name(ns));
GOTO(out_free_name, rc = -EINVAL);
}
pl->pl_proc_dir = lprocfs_register("pool", parent_ns_proc,
pl->pl_grant_plan = LDLM_POOL_GP(LDLM_POOL_HOST_L);
snprintf(pl->pl_name, sizeof(pl->pl_name), "ldlm-pool-%s-%d",
- ns->ns_name, idx);
+ ldlm_ns_name(ns), idx);
if (client == LDLM_NAMESPACE_SERVER) {
pl->pl_ops = &ldlm_srv_pool_ops;
for (nr_ns = cfs_atomic_read(ldlm_namespace_nr(client));
nr_ns > 0; nr_ns--)
{
- cfs_mutex_down(ldlm_namespace_lock(client));
+ cfs_mutex_lock(ldlm_namespace_lock(client));
if (cfs_list_empty(ldlm_namespace_list(client))) {
- cfs_mutex_up(ldlm_namespace_lock(client));
+ cfs_mutex_unlock(ldlm_namespace_lock(client));
cl_env_reexit(cookie);
return 0;
}
ns = ldlm_namespace_first_locked(client);
ldlm_namespace_get(ns);
ldlm_namespace_move_locked(ns, client);
- cfs_mutex_up(ldlm_namespace_lock(client));
+ cfs_mutex_unlock(ldlm_namespace_lock(client));
total += ldlm_pool_shrink(&ns->ns_pool, 0, gfp_mask);
- ldlm_namespace_put(ns, 1);
+ ldlm_namespace_put(ns);
}
if (nr == 0 || total == 0) {
/*
* Do not call shrink under ldlm_namespace_lock(client)
*/
- cfs_mutex_down(ldlm_namespace_lock(client));
+ cfs_mutex_lock(ldlm_namespace_lock(client));
if (cfs_list_empty(ldlm_namespace_list(client))) {
- cfs_mutex_up(ldlm_namespace_lock(client));
+ cfs_mutex_unlock(ldlm_namespace_lock(client));
/*
* If list is empty, we can't return any @cached > 0,
* that probably would cause needless shrinker
ns = ldlm_namespace_first_locked(client);
ldlm_namespace_get(ns);
ldlm_namespace_move_locked(ns, client);
- cfs_mutex_up(ldlm_namespace_lock(client));
+ cfs_mutex_unlock(ldlm_namespace_lock(client));
nr_locks = ldlm_pool_granted(&ns->ns_pool);
cancel = 1 + nr_locks * nr / total;
ldlm_pool_shrink(&ns->ns_pool, cancel, gfp_mask);
cached += ldlm_pool_granted(&ns->ns_pool);
- ldlm_namespace_put(ns, 1);
+ ldlm_namespace_put(ns);
}
cl_env_reexit(cookie);
- return cached;
+ /* We only decrease the SLV in the server pools' shrinker; return -1 to
+ * the kernel to avoid a needless retry loop. LU-1128 */
+ return (client == LDLM_NAMESPACE_SERVER) ? -1 : cached;
}
-static int ldlm_pools_srv_shrink(int nr, unsigned int gfp_mask)
+static int ldlm_pools_srv_shrink(SHRINKER_ARGS(sc, nr_to_scan, gfp_mask))
{
- return ldlm_pools_shrink(LDLM_NAMESPACE_SERVER, nr, gfp_mask);
+ return ldlm_pools_shrink(LDLM_NAMESPACE_SERVER,
+ shrink_param(sc, nr_to_scan),
+ shrink_param(sc, gfp_mask));
}
-static int ldlm_pools_cli_shrink(int nr, unsigned int gfp_mask)
+static int ldlm_pools_cli_shrink(SHRINKER_ARGS(sc, nr_to_scan, gfp_mask))
{
- return ldlm_pools_shrink(LDLM_NAMESPACE_CLIENT, nr, gfp_mask);
+ return ldlm_pools_shrink(LDLM_NAMESPACE_CLIENT,
+ shrink_param(sc, nr_to_scan),
+ shrink_param(sc, gfp_mask));
}
void ldlm_pools_recalc(ldlm_side_t client)
/*
* Check all modest namespaces first.
*/
- cfs_mutex_down(ldlm_namespace_lock(client));
+ cfs_mutex_lock(ldlm_namespace_lock(client));
cfs_list_for_each_entry(ns, ldlm_namespace_list(client),
ns_list_chain)
{
}
ldlm_pool_setup(&ns->ns_pool, l);
}
- cfs_mutex_up(ldlm_namespace_lock(client));
+ cfs_mutex_unlock(ldlm_namespace_lock(client));
}
/*
* rid of potential deadlock on client nodes when canceling
* locks synchronously.
*/
- cfs_mutex_down(ldlm_namespace_lock(client));
+ cfs_mutex_lock(ldlm_namespace_lock(client));
if (cfs_list_empty(ldlm_namespace_list(client))) {
- cfs_mutex_up(ldlm_namespace_lock(client));
+ cfs_mutex_unlock(ldlm_namespace_lock(client));
break;
}
ns = ldlm_namespace_first_locked(client);
- cfs_spin_lock(&ns->ns_hash_lock);
+ cfs_spin_lock(&ns->ns_lock);
/*
* skip ns which is being freed, and we don't want to increase
- * its refcount again, not even temporarily. bz21519.
+ * its refcount again, not even temporarily. bz21519 & LU-499.
*/
- if (ns->ns_refcount == 0) {
+ if (ns->ns_stopping) {
skip = 1;
} else {
skip = 0;
- ldlm_namespace_get_locked(ns);
+ ldlm_namespace_get(ns);
}
- cfs_spin_unlock(&ns->ns_hash_lock);
+ cfs_spin_unlock(&ns->ns_lock);
ldlm_namespace_move_locked(ns, client);
- cfs_mutex_up(ldlm_namespace_lock(client));
+ cfs_mutex_unlock(ldlm_namespace_lock(client));
/*
* After setup is done - recalc the pool.
*/
if (!skip) {
ldlm_pool_recalc(&ns->ns_pool);
- ldlm_namespace_put(ns, 1);
+ ldlm_namespace_put(ns);
}
}
}
ENTRY;
cfs_daemonize(t_name);
- thread->t_flags = SVC_RUNNING;
+ thread_set_flags(thread, SVC_RUNNING);
cfs_waitq_signal(&thread->t_ctl_waitq);
CDEBUG(D_DLMTRACE, "%s: pool thread starting, process %d\n",
*/
lwi = LWI_TIMEOUT(cfs_time_seconds(LDLM_POOLS_THREAD_PERIOD),
NULL, NULL);
- l_wait_event(thread->t_ctl_waitq, (thread->t_flags &
- (SVC_STOPPING|SVC_EVENT)),
+ l_wait_event(thread->t_ctl_waitq,
+ thread_is_stopping(thread) ||
+ thread_is_event(thread),
&lwi);
- if (thread->t_flags & SVC_STOPPING) {
- thread->t_flags &= ~SVC_STOPPING;
+ if (thread_test_and_clear_flags(thread, SVC_STOPPING))
break;
- } else if (thread->t_flags & SVC_EVENT) {
- thread->t_flags &= ~SVC_EVENT;
- }
+ else
+ thread_test_and_clear_flags(thread, SVC_EVENT);
}
- thread->t_flags = SVC_STOPPED;
+ thread_set_flags(thread, SVC_STOPPED);
cfs_waitq_signal(&thread->t_ctl_waitq);
CDEBUG(D_DLMTRACE, "%s: pool thread exiting, process %d\n",
* CLONE_VM and CLONE_FILES just avoid a needless copy, because we
* just drop the VM and FILES in cfs_daemonize() right away.
*/
- rc = cfs_kernel_thread(ldlm_pools_thread_main, ldlm_pools_thread,
- CLONE_VM | CLONE_FILES);
+ rc = cfs_create_thread(ldlm_pools_thread_main, ldlm_pools_thread,
+ CFS_DAEMON_FLAGS);
if (rc < 0) {
CERROR("Can't start pool thread, error %d\n",
rc);
RETURN(rc);
}
l_wait_event(ldlm_pools_thread->t_ctl_waitq,
- (ldlm_pools_thread->t_flags & SVC_RUNNING), &lwi);
+ thread_is_running(ldlm_pools_thread), &lwi);
RETURN(0);
}
return;
}
- ldlm_pools_thread->t_flags = SVC_STOPPING;
+ thread_set_flags(ldlm_pools_thread, SVC_STOPPING);
cfs_waitq_signal(&ldlm_pools_thread->t_ctl_waitq);
/*