struct ldlm_lock *lock);
void ldlm_resource_unlink_lock(struct ldlm_lock *lock);
void ldlm_res2desc(struct ldlm_resource *res, struct ldlm_resource_desc *desc);
-void ldlm_dump_all_namespaces(int level);
+/* Dump all namespaces on one side (server or client) at debug @level.
+ * Parameter order is (client, level) to match the call sites. */
+void ldlm_dump_all_namespaces(ldlm_side_t client, int level);
void ldlm_namespace_dump(int level, struct ldlm_namespace *);
void ldlm_resource_dump(int level, struct ldlm_resource *);
int ldlm_lock_change_resource(struct ldlm_namespace *, struct ldlm_lock *,
/* ldlm_pool.c */
int ldlm_pools_init(ldlm_side_t client);
+void ldlm_pools_recalc(ldlm_side_t client);
void ldlm_pools_fini(void);
void ldlm_pools_wakeup(void);
-void ldlm_pools_recalc(void);
-int ldlm_pools_shrink(int nr, unsigned int gfp_mask);
int ldlm_pool_init(struct ldlm_pool *pl, struct ldlm_namespace *ns,
int idx, ldlm_side_t client);
#define MAX_STRING_SIZE 128
+extern atomic_t ldlm_srv_namespace_nr;
+extern atomic_t ldlm_cli_namespace_nr;
+extern struct semaphore ldlm_srv_namespace_lock;
+extern struct list_head ldlm_srv_namespace_list;
+extern struct semaphore ldlm_cli_namespace_lock;
+extern struct list_head ldlm_cli_namespace_list;
+
+/* Map an ldlm side (server/client) to its global namespace counter. */
+static inline atomic_t *ldlm_namespace_nr(ldlm_side_t client)
+{
+ return client == LDLM_NAMESPACE_SERVER ?
+ &ldlm_srv_namespace_nr : &ldlm_cli_namespace_nr;
+}
+
+/* Map an ldlm side (server/client) to its global namespace list head. */
+static inline struct list_head *ldlm_namespace_list(ldlm_side_t client)
+{
+ return client == LDLM_NAMESPACE_SERVER ?
+ &ldlm_srv_namespace_list : &ldlm_cli_namespace_list;
+}
+
+/* Map an ldlm side (server/client) to the semaphore guarding its
+ * namespace list. */
+static inline struct semaphore *ldlm_namespace_lock(ldlm_side_t client)
+{
+ return client == LDLM_NAMESPACE_SERVER ?
+ &ldlm_srv_namespace_lock : &ldlm_cli_namespace_lock;
+}
+
/* ldlm_request.c */
typedef enum {
LDLM_ASYNC,
struct list_head *work_list);
int ldlm_run_bl_ast_work(struct list_head *rpc_list);
int ldlm_run_cp_ast_work(struct list_head *rpc_list);
+int ldlm_lock_remove_from_lru(struct ldlm_lock *lock);
int ldlm_lock_remove_from_lru_nolock(struct ldlm_lock *lock);
+void ldlm_lock_add_to_lru_nolock(struct ldlm_lock *lock);
+void ldlm_lock_add_to_lru(struct ldlm_lock *lock);
+void ldlm_lock_touch_in_lru(struct ldlm_lock *lock);
void ldlm_lock_destroy_nolock(struct ldlm_lock *lock);
/* ldlm_lockd.c */
{
int rc = 0;
if (!list_empty(&lock->l_lru)) {
+ struct ldlm_namespace *ns = lock->l_resource->lr_namespace;
LASSERT(lock->l_resource->lr_type != LDLM_FLOCK);
list_del_init(&lock->l_lru);
- lock->l_resource->lr_namespace->ns_nr_unused--;
- LASSERT(lock->l_resource->lr_namespace->ns_nr_unused >= 0);
+ ns->ns_nr_unused--;
+ LASSERT(ns->ns_nr_unused >= 0);
rc = 1;
}
return rc;
int ldlm_lock_remove_from_lru(struct ldlm_lock *lock)
{
+ /* Cache the namespace once instead of chasing the pointer chain
+ * on every lock/unlock line below. */
+ struct ldlm_namespace *ns = lock->l_resource->lr_namespace;
int rc;
ENTRY;
- spin_lock(&lock->l_resource->lr_namespace->ns_unused_lock);
+ spin_lock(&ns->ns_unused_lock);
rc = ldlm_lock_remove_from_lru_nolock(lock);
- spin_unlock(&lock->l_resource->lr_namespace->ns_unused_lock);
+ spin_unlock(&ns->ns_unused_lock);
EXIT;
return rc;
}
+/* Add @lock to the tail of its namespace's unused (LRU) list.
+ * Caller must hold ns_unused_lock; @lock must not already be on the LRU. */
+void ldlm_lock_add_to_lru_nolock(struct ldlm_lock *lock)
+{
+ struct ldlm_namespace *ns = lock->l_resource->lr_namespace;
+ /* Stamp last-used time so LRU aging starts from now. */
+ lock->l_last_used = cfs_time_current();
+ LASSERT(list_empty(&lock->l_lru));
+ list_add_tail(&lock->l_lru, &ns->ns_unused_list);
+ LASSERT(ns->ns_nr_unused >= 0);
+ ns->ns_nr_unused++;
+}
+
+/* Locked wrapper: take ns_unused_lock and add @lock to the namespace LRU. */
+void ldlm_lock_add_to_lru(struct ldlm_lock *lock)
+{
+ struct ldlm_namespace *ns = lock->l_resource->lr_namespace;
+ ENTRY;
+ spin_lock(&ns->ns_unused_lock);
+ ldlm_lock_add_to_lru_nolock(lock);
+ spin_unlock(&ns->ns_unused_lock);
+ EXIT;
+}
+
+/* Refresh @lock's LRU position (move to tail, re-stamp last-used) if it
+ * is currently on the LRU; no-op for locks that are in use. */
+void ldlm_lock_touch_in_lru(struct ldlm_lock *lock)
+{
+ struct ldlm_namespace *ns = lock->l_resource->lr_namespace;
+ ENTRY;
+ spin_lock(&ns->ns_unused_lock);
+ if (!list_empty(&lock->l_lru)) {
+ ldlm_lock_remove_from_lru_nolock(lock);
+ ldlm_lock_add_to_lru_nolock(lock);
+ }
+ spin_unlock(&ns->ns_unused_lock);
+ EXIT;
+}
+
/* This used to have a 'strict' flag, which recovery would use to mark an
* in-use lock as needing-to-die. Lest I am ever tempted to put it back, I
* shall explain why it's gone: with the new hash table scheme, once you call
lock->l_readers++;
if (mode & (LCK_EX | LCK_CW | LCK_PW | LCK_GROUP))
lock->l_writers++;
- lock->l_last_used = cfs_time_current();
LDLM_LOCK_GET(lock);
LDLM_DEBUG(lock, "ldlm_lock_addref(%s)", ldlm_lockname[mode]);
}
!(lock->l_flags & LDLM_FL_NO_LRU)) {
/* If this is a client-side namespace and this was the last
* reference, put it on the LRU. */
- LASSERT(list_empty(&lock->l_lru));
- LASSERT(ns->ns_nr_unused >= 0);
- lock->l_last_used = cfs_time_current();
- spin_lock(&ns->ns_unused_lock);
- list_add_tail(&lock->l_lru, &ns->ns_unused_list);
- ns->ns_nr_unused++;
- spin_unlock(&ns->ns_unused_lock);
+ ldlm_lock_add_to_lru(lock);
unlock_res_and_lock(lock);
- /* Call ldlm_cancel_lru() only if EARLY_CANCEL is not supported
- * by the server, otherwise, it is done on enqueue. */
- if (!exp_connect_cancelset(lock->l_conn_export))
+ /* Call ldlm_cancel_lru() only if EARLY_CANCEL and LRU RESIZE
+ * are not supported by the server, otherwise, it is done on
+ * enqueue. */
+ if (!exp_connect_cancelset(lock->l_conn_export) &&
+ !exp_connect_lru_resize(lock->l_conn_export))
ldlm_cancel_lru(ns, 0, LDLM_ASYNC);
} else {
unlock_res_and_lock(lock);
!(lock->l_flags & LDLM_FL_LOCAL))
continue;
- if (flags & LDLM_FL_TEST_LOCK)
+ if (flags & LDLM_FL_TEST_LOCK) {
LDLM_LOCK_GET(lock);
- else
+ ldlm_lock_touch_in_lru(lock);
+ } else {
ldlm_lock_addref_internal_nolock(lock, mode);
+ }
return lock;
}
extern cfs_mem_cache_t *ldlm_resource_slab;
extern cfs_mem_cache_t *ldlm_lock_slab;
extern struct lustre_lock ldlm_handle_lock;
-extern struct list_head ldlm_namespace_list;
-extern struct semaphore ldlm_namespace_lock;
static struct semaphore ldlm_ref_sem;
static int ldlm_refcount;
if (lock->l_granted_mode == LCK_PW &&
!lock->l_readers && !lock->l_writers &&
cfs_time_after(cfs_time_current(),
- cfs_time_add(lock->l_last_used, cfs_time_seconds(10)))) {
+ cfs_time_add(lock->l_last_used,
+ cfs_time_seconds(10)))) {
unlock_res_and_lock(lock);
if (ldlm_bl_to_thread(ns, NULL, lock, 0))
ldlm_handle_bl_callback(ns, NULL, lock);
#endif
ENTRY;
- if (!list_empty(&ldlm_namespace_list)) {
+ if (!list_empty(ldlm_namespace_list(LDLM_NAMESPACE_SERVER)) ||
+ !list_empty(ldlm_namespace_list(LDLM_NAMESPACE_CLIENT))) {
CERROR("ldlm still has namespaces; clean these up first.\n");
- ldlm_dump_all_namespaces(D_DLMTRACE);
+ ldlm_dump_all_namespaces(LDLM_NAMESPACE_SERVER, D_DLMTRACE);
+ ldlm_dump_all_namespaces(LDLM_NAMESPACE_CLIENT, D_DLMTRACE);
RETURN(-EBUSY);
}
int __init ldlm_init(void)
{
init_mutex(&ldlm_ref_sem);
- init_mutex(&ldlm_namespace_lock);
+ init_mutex(ldlm_namespace_lock(LDLM_NAMESPACE_SERVER));
+ init_mutex(ldlm_namespace_lock(LDLM_NAMESPACE_CLIENT));
ldlm_resource_slab = cfs_mem_cache_create("ldlm_resources",
sizeof(struct ldlm_resource), 0,
SLAB_HWCACHE_ALIGN);
extern cfs_proc_dir_entry_t *ldlm_ns_proc_dir;
#endif
-extern atomic_t ldlm_srv_namespace_nr;
-extern atomic_t ldlm_cli_namespace_nr;
-extern struct list_head ldlm_namespace_list;
-extern struct semaphore ldlm_namespace_lock;
-
#define avg(src, add) \
((src) = ((src) + (add)) / 2)
}
static struct ptlrpc_thread *ldlm_pools_thread;
-static struct shrinker *ldlm_pools_shrinker;
+static struct shrinker *ldlm_pools_srv_shrinker;
+static struct shrinker *ldlm_pools_cli_shrinker;
static struct completion ldlm_pools_comp;
void ldlm_pools_wakeup(void)
/* Cancel @nr locks from all namespaces (if possible). Returns number of
* cached locks after shrink is finished. All namespaces are asked to
* cancel approximately equal amount of locks. */
-int ldlm_pools_shrink(int nr, unsigned int gfp_mask)
+static int ldlm_pools_shrink(ldlm_side_t client, int nr,
+ unsigned int gfp_mask)
{
struct ldlm_namespace *ns;
int total = 0, cached = 0;
if (nr != 0 && !(gfp_mask & __GFP_FS))
return -1;
- CDEBUG(D_DLMTRACE, "request to shrink %d locks from all pools\n",
- nr);
- mutex_down(&ldlm_namespace_lock);
- list_for_each_entry(ns, &ldlm_namespace_list, ns_list_chain)
+ CDEBUG(D_DLMTRACE, "request to shrink %d %s locks from all pools\n",
+ nr, client == LDLM_NAMESPACE_CLIENT ? "client" : "server");
+
+ /* Trylock: a memory-pressure shrinker must not block on namespace
+ * registration; bail out and let the VM retry later. */
+ if (down_trylock(ldlm_namespace_lock(client)))
+ return nr != 0 ? -1 : 0;
+
+ list_for_each_entry(ns, ldlm_namespace_list(client), ns_list_chain)
total += ldlm_pool_granted(&ns->ns_pool);
if (nr == 0) {
- mutex_up(&ldlm_namespace_lock);
+ mutex_up(ldlm_namespace_lock(client));
return total;
}
/* Check all namespaces. */
- list_for_each_entry(ns, &ldlm_namespace_list, ns_list_chain) {
+ list_for_each_entry(ns, ldlm_namespace_list(client), ns_list_chain) {
struct ldlm_pool *pl = &ns->ns_pool;
int cancel, nr_locks;
nr_locks = ldlm_pool_granted(&ns->ns_pool);
cancel = 1 + nr_locks * nr / total;
+ /* Each pool cancels a share of @nr proportional to its size. */
- cancel = ldlm_pool_shrink(pl, cancel, gfp_mask);
+ ldlm_pool_shrink(pl, cancel, gfp_mask);
cached += ldlm_pool_granted(&ns->ns_pool);
}
- mutex_up(&ldlm_namespace_lock);
+ mutex_up(ldlm_namespace_lock(client));
return cached;
}
-EXPORT_SYMBOL(ldlm_pools_shrink);
-void ldlm_pools_recalc(void)
+/* VM shrinker entry point for server-side namespaces. */
+static int ldlm_pools_srv_shrink(int nr, unsigned int gfp_mask)
+{
+ return ldlm_pools_shrink(LDLM_NAMESPACE_SERVER, nr, gfp_mask);
+}
+
+/* VM shrinker entry point for client-side namespaces. */
+static int ldlm_pools_cli_shrink(int nr, unsigned int gfp_mask)
+{
+ return ldlm_pools_shrink(LDLM_NAMESPACE_CLIENT, nr, gfp_mask);
+}
+
+/* Recalculate lock limits for all pools on the @client side. */
+void ldlm_pools_recalc(ldlm_side_t client)
{
__u32 nr_l = 0, nr_p = 0, l;
struct ldlm_namespace *ns;
int rc, equal = 0;
/* Check all modest namespaces. */
- mutex_down(&ldlm_namespace_lock);
- list_for_each_entry(ns, &ldlm_namespace_list, ns_list_chain) {
+ mutex_down(ldlm_namespace_lock(client));
+ list_for_each_entry(ns, ldlm_namespace_list(client), ns_list_chain) {
if (ns->ns_appetite != LDLM_NAMESPACE_MODEST)
continue;
- if (ns->ns_client == LDLM_NAMESPACE_SERVER) {
+ /* Every namespace on this per-side list shares one side, so
+ * checking @client replaces the per-ns ns_client check. */
+ if (client == LDLM_NAMESPACE_SERVER) {
l = ldlm_pool_granted(&ns->ns_pool);
if (l == 0)
l = 1;
}
/* The rest is given to greedy namespaces. */
- list_for_each_entry(ns, &ldlm_namespace_list, ns_list_chain) {
+ list_for_each_entry(ns, ldlm_namespace_list(client), ns_list_chain) {
if (!equal && ns->ns_appetite != LDLM_NAMESPACE_GREEDY)
continue;
- if (ns->ns_client == LDLM_NAMESPACE_SERVER) {
+ if (client == LDLM_NAMESPACE_SERVER) {
if (equal) {
/* In the case 2/3 locks are eaten out by
* modest pools, we re-setup equal limit
* for _all_ pools. */
l = LDLM_POOL_HOST_L /
- atomic_read(&ldlm_srv_namespace_nr);
+ atomic_read(ldlm_namespace_nr(client));
} else {
/* All the rest of greedy pools will have
* all locks in equal parts.*/
l = (LDLM_POOL_HOST_L - nr_l) /
- (atomic_read(&ldlm_srv_namespace_nr) -
+ (atomic_read(ldlm_namespace_nr(client)) -
nr_p);
}
ldlm_pool_setup(&ns->ns_pool, l);
CERROR("%s: pool recalculation error "
"%d\n", ns->ns_pool.pl_name, rc);
}
- mutex_up(&ldlm_namespace_lock);
+ mutex_up(ldlm_namespace_lock(client));
}
EXPORT_SYMBOL(ldlm_pools_recalc);
struct l_wait_info lwi;
/* Recal all pools on this tick. */
- ldlm_pools_recalc();
+ ldlm_pools_recalc(LDLM_NAMESPACE_CLIENT);
+ ldlm_pools_recalc(LDLM_NAMESPACE_SERVER);
/* Wait until the next check time, or until we're
* stopped. */
ENTRY;
rc = ldlm_pools_thread_start(client);
- if (rc == 0)
- ldlm_pools_shrinker = set_shrinker(DEFAULT_SEEKS,
- ldlm_pools_shrink);
+ if (rc == 0) {
+ ldlm_pools_srv_shrinker = set_shrinker(DEFAULT_SEEKS,
+ ldlm_pools_srv_shrink);
+ ldlm_pools_cli_shrinker = set_shrinker(DEFAULT_SEEKS,
+ ldlm_pools_cli_shrink);
+ }
RETURN(rc);
}
EXPORT_SYMBOL(ldlm_pools_init);
void ldlm_pools_fini(void)
{
- if (ldlm_pools_shrinker != NULL) {
- remove_shrinker(ldlm_pools_shrinker);
- ldlm_pools_shrinker = NULL;
+ if (ldlm_pools_srv_shrinker != NULL) {
+ remove_shrinker(ldlm_pools_srv_shrinker);
+ ldlm_pools_srv_shrinker = NULL;
+ }
+ if (ldlm_pools_cli_shrinker != NULL) {
+ remove_shrinker(ldlm_pools_cli_shrinker);
+ ldlm_pools_cli_shrinker = NULL;
}
ldlm_pools_thread_stop();
}
}
EXPORT_SYMBOL(ldlm_pools_wakeup);
-void ldlm_pools_recalc(void)
+/* No-op stub used when pools support is compiled out. */
+void ldlm_pools_recalc(ldlm_side_t client)
{
return;
}
!lock->l_readers && !lock->l_writers &&
!(lock->l_flags & LDLM_FL_LOCAL) &&
!(lock->l_flags & LDLM_FL_CBPENDING)) {
- lock->l_last_used = cfs_time_current();
- spin_lock(&ns->ns_unused_lock);
- LASSERT(ns->ns_nr_unused >= 0);
- list_add_tail(&lock->l_lru, &ns->ns_unused_list);
- ns->ns_nr_unused++;
- spin_unlock(&ns->ns_unused_lock);
+ ldlm_lock_add_to_lru(lock);
lock->l_flags &= ~LDLM_FL_NO_LRU;
LDLM_DEBUG(lock, "join lock to lru");
count++;
atomic_t ldlm_srv_namespace_nr = ATOMIC_INIT(0);
atomic_t ldlm_cli_namespace_nr = ATOMIC_INIT(0);
-struct semaphore ldlm_namespace_lock;
-struct list_head ldlm_namespace_list = CFS_LIST_HEAD_INIT(ldlm_namespace_list);
+
+struct semaphore ldlm_srv_namespace_lock;
+struct list_head ldlm_srv_namespace_list =
+ CFS_LIST_HEAD_INIT(ldlm_srv_namespace_list);
+
+struct semaphore ldlm_cli_namespace_lock;
+struct list_head ldlm_cli_namespace_list =
+ CFS_LIST_HEAD_INIT(ldlm_cli_namespace_list);
+
cfs_proc_dir_entry_t *ldlm_type_proc_dir = NULL;
cfs_proc_dir_entry_t *ldlm_ns_proc_dir = NULL;
cfs_proc_dir_entry_t *ldlm_svc_proc_dir = NULL;
static int ldlm_proc_dump_ns(struct file *file, const char *buffer,
unsigned long count, void *data)
{
- ldlm_dump_all_namespaces(D_DLMTRACE);
+ /* Dump both sides: server namespaces first, then client. */
+ ldlm_dump_all_namespaces(LDLM_NAMESPACE_SERVER, D_DLMTRACE);
+ ldlm_dump_all_namespaces(LDLM_NAMESPACE_CLIENT, D_DLMTRACE);
RETURN(count);
}
#define ldlm_proc_namespace(ns) do {} while (0)
#endif /* LPROCFS */
-static atomic_t *ldlm_namespace_nr(ldlm_side_t client)
-{
- return client == LDLM_NAMESPACE_SERVER ?
- &ldlm_srv_namespace_nr : &ldlm_cli_namespace_nr;
-}
-
struct ldlm_namespace *ldlm_namespace_new(char *name, ldlm_side_t client,
ldlm_appetite_t apt)
{
spin_lock_init(&ns->ns_unused_lock);
ns->ns_connect_flags = 0;
- mutex_down(&ldlm_namespace_lock);
- list_add(&ns->ns_list_chain, &ldlm_namespace_list);
+ mutex_down(ldlm_namespace_lock(client));
+ list_add(&ns->ns_list_chain, ldlm_namespace_list(client));
idx = atomic_read(ldlm_namespace_nr(client));
atomic_inc(ldlm_namespace_nr(client));
- mutex_up(&ldlm_namespace_lock);
+ mutex_up(ldlm_namespace_lock(client));
ldlm_proc_namespace(ns);
RETURN(ns);
out_del:
- mutex_down(&ldlm_namespace_lock);
+ mutex_down(ldlm_namespace_lock(client));
list_del(&ns->ns_list_chain);
atomic_dec(ldlm_namespace_nr(client));
- mutex_up(&ldlm_namespace_lock);
+ mutex_up(ldlm_namespace_lock(client));
out_hash:
POISON(ns->ns_hash, 0x5a, sizeof(*ns->ns_hash) * RES_HASH_SIZE);
OBD_VFREE(ns->ns_hash, sizeof(*ns->ns_hash) * RES_HASH_SIZE);
if (!ns)
RETURN(ELDLM_OK);
- mutex_down(&ldlm_namespace_lock);
+ mutex_down(ldlm_namespace_lock(ns->ns_client));
list_del(&ns->ns_list_chain);
atomic_dec(ldlm_namespace_nr(ns->ns_client));
ldlm_pool_fini(&ns->ns_pool);
- mutex_up(&ldlm_namespace_lock);
+ mutex_up(ldlm_namespace_lock(ns->ns_client));
/* At shutdown time, don't call the cancellation callback */
ldlm_namespace_cleanup(ns, 0);
desc->lr_name = res->lr_name;
}
-void ldlm_dump_all_namespaces(int level)
+/* Dump all namespaces on @client side at debug @level.
+ * Signature is (client, level) to match the call sites, which pass the
+ * side first (e.g. ldlm_dump_all_namespaces(LDLM_NAMESPACE_SERVER,
+ * D_DLMTRACE)). */
+void ldlm_dump_all_namespaces(ldlm_side_t client, int level)
{
struct list_head *tmp;
if (!((libcfs_debug | D_ERROR) & level))
return;
- mutex_down(&ldlm_namespace_lock);
+ mutex_down(ldlm_namespace_lock(client));
- list_for_each(tmp, &ldlm_namespace_list) {
+ list_for_each(tmp, ldlm_namespace_list(client)) {
struct ldlm_namespace *ns;
ns = list_entry(tmp, struct ldlm_namespace, ns_list_chain);
ldlm_namespace_dump(level, ns);
}
- mutex_up(&ldlm_namespace_lock);
+ mutex_up(ldlm_namespace_lock(client));
}
void ldlm_namespace_dump(int level, struct ldlm_namespace *ns)