From 8cf860b866005c5907e8e7a0ded92cc481fdb6c3 Mon Sep 17 00:00:00 2001
From: Niu Yawei
Date: Fri, 3 Jul 2015 05:49:58 -0400
Subject: [PATCH] LU-6775 ldlm: reduce mem footprint of ldlm_resource

- Remove the unused lr_most_restr field, saving 4 bytes;
- Allocate lr_itree only for LDLM_EXTENT resources, saving 120 bytes;
- Move fields around to eliminate 3 alignment holes, saving 4 bytes;
- Combine lr_contention_time and lr_lvb_inode into a union, saving 8 bytes.

In total this reduces the size of struct ldlm_resource by 136 bytes.

Signed-off-by: Niu Yawei
Change-Id: I232a2669d9f5eaee35eb9ff8313d86d4ac297bc1
Reviewed-on: http://review.whamcloud.com/15485
Tested-by: Jenkins
Tested-by: Maloo
Reviewed-by: Yang Sheng
Reviewed-by: Andreas Dilger
Reviewed-by: Oleg Drokin
---
 lustre/include/lustre_dlm.h | 37 +++++++++++++++++++++---------------
 lustre/ldlm/ldlm_internal.h |  1 +
 lustre/ldlm/ldlm_lock.c     |  3 ---
 lustre/ldlm/ldlm_lockd.c    | 34 +++++++++++++++++++++++-----------
 lustre/ldlm/ldlm_resource.c | 37 +++++++++++++++++++++++++++----------
 5 files changed, 72 insertions(+), 40 deletions(-)

diff --git a/lustre/include/lustre_dlm.h b/lustre/include/lustre_dlm.h
index 6a2bc6a..4839506 100644
--- a/lustre/include/lustre_dlm.h
+++ b/lustre/include/lustre_dlm.h
@@ -920,6 +920,9 @@ struct ldlm_resource {
 	 */
 	struct hlist_node	lr_hash;
 
+	/** Reference count for this resource */
+	atomic_t		lr_refcount;
+
 	/** Spinlock to protect locks under this resource. */
 	spinlock_t		lr_lock;
 
@@ -936,39 +939,41 @@ struct ldlm_resource {
 	struct list_head	lr_waiting;
 	/** @} */
 
-	/* XXX No longer needed? Remove ASAP */
-	ldlm_mode_t		lr_most_restr;
-
-	/** Type of locks this resource can hold. Only one type per resource. */
-	ldlm_type_t		lr_type; /* LDLM_{PLAIN,EXTENT,FLOCK,IBITS} */
-
 	/** Resource name */
 	struct ldlm_res_id	lr_name;
-	/** Reference count for this resource */
-	atomic_t		lr_refcount;
 
 	/**
 	 * Interval trees (only for extent locks) for all modes of this resource
 	 */
-	struct ldlm_interval_tree lr_itree[LCK_MODE_NUM];
+	struct ldlm_interval_tree *lr_itree;
+
+	union {
+		/**
+		 * When the resource was considered as contended,
+		 * used only on server side. */
+		cfs_time_t	lr_contention_time;
+		/**
+		 * Associated inode, used only on client side.
+		 */
+		struct inode	*lr_lvb_inode;
+	};
+
+	/** Type of locks this resource can hold. Only one type per resource. */
+	ldlm_type_t		lr_type; /* LDLM_{PLAIN,EXTENT,FLOCK,IBITS} */
 
 	/**
 	 * Server-side-only lock value block elements.
	 * To serialize lvbo_init.
 	 */
-	struct mutex		lr_lvb_mutex;
 	int			lr_lvb_len;
-	/** is lvb initialized ? */
-	bool			lr_lvb_initialized;
+	struct mutex		lr_lvb_mutex;
 	/** protected by lr_lock */
 	void			*lr_lvb_data;
+	/** is lvb initialized ? */
+	bool			lr_lvb_initialized;
 
-	/** When the resource was considered as contended. */
-	cfs_time_t		lr_contention_time;
 	/** List of references to this resource. For debugging. */
 	struct lu_ref		lr_reference;
-
-	struct inode		*lr_lvb_inode;
 };
 
 static inline bool ldlm_has_layout(struct ldlm_lock *lock)
diff --git a/lustre/ldlm/ldlm_internal.h b/lustre/ldlm/ldlm_internal.h
index e0e4e66..cdfbe9c 100644
--- a/lustre/ldlm/ldlm_internal.h
+++ b/lustre/ldlm/ldlm_internal.h
@@ -116,6 +116,7 @@ extern unsigned int ldlm_enqueue_min;
 /* ldlm_resource.c */
 extern struct kmem_cache *ldlm_resource_slab;
 extern struct kmem_cache *ldlm_lock_slab;
+extern struct kmem_cache *ldlm_interval_tree_slab;
 
 int ldlm_resource_putref_locked(struct ldlm_resource *res);
 void ldlm_resource_insert_lock_after(struct ldlm_lock *original,
diff --git a/lustre/ldlm/ldlm_lock.c b/lustre/ldlm/ldlm_lock.c
index 7c47ca6..b3e914c 100644
--- a/lustre/ldlm/ldlm_lock.c
+++ b/lustre/ldlm/ldlm_lock.c
@@ -1122,9 +1122,6 @@ void ldlm_grant_lock(struct ldlm_lock *lock, struct list_head *work_list)
 	else
 		ldlm_resource_add_lock(res, &res->lr_granted, lock);
 
-	if (lock->l_granted_mode < res->lr_most_restr)
-		res->lr_most_restr = lock->l_granted_mode;
-
 	ldlm_pool_add(&ldlm_res_to_ns(res)->ns_pool, lock);
 	EXIT;
 }
diff --git a/lustre/ldlm/ldlm_lockd.c b/lustre/ldlm/ldlm_lockd.c
index ee22e75..cf55d08 100644
--- a/lustre/ldlm/ldlm_lockd.c
+++ b/lustre/ldlm/ldlm_lockd.c
@@ -3025,23 +3025,34 @@ int ldlm_init(void)
 	ldlm_lock_slab = kmem_cache_create("ldlm_locks",
 			      sizeof(struct ldlm_lock), 0,
 			      SLAB_HWCACHE_ALIGN | SLAB_DESTROY_BY_RCU, NULL);
-	if (ldlm_lock_slab == NULL) {
-		kmem_cache_destroy(ldlm_resource_slab);
-		return -ENOMEM;
-	}
+	if (ldlm_lock_slab == NULL)
+		goto out_resource;
 
 	ldlm_interval_slab = kmem_cache_create("interval_node",
 					sizeof(struct ldlm_interval),
 					0, SLAB_HWCACHE_ALIGN, NULL);
-	if (ldlm_interval_slab == NULL) {
-		kmem_cache_destroy(ldlm_resource_slab);
-		kmem_cache_destroy(ldlm_lock_slab);
-		return -ENOMEM;
-	}
+	if (ldlm_interval_slab == NULL)
+		goto out_lock;
+
+	ldlm_interval_tree_slab = kmem_cache_create("interval_tree",
+			sizeof(struct ldlm_interval_tree) * LCK_MODE_NUM,
+			0, SLAB_HWCACHE_ALIGN, NULL);
+	if (ldlm_interval_tree_slab == NULL)
+		goto out_interval;
+
 #if LUSTRE_TRACKS_LOCK_EXP_REFS
-        class_export_dump_hook = ldlm_dump_export_locks;
+	class_export_dump_hook = ldlm_dump_export_locks;
 #endif
-        return 0;
+	return 0;
+
+out_interval:
+	kmem_cache_destroy(ldlm_interval_slab);
+out_lock:
+	kmem_cache_destroy(ldlm_lock_slab);
+out_resource:
+	kmem_cache_destroy(ldlm_resource_slab);
+
+	return -ENOMEM;
 }
 
 void ldlm_exit(void)
@@ -3055,4 +3066,5 @@
 	synchronize_rcu();
 	kmem_cache_destroy(ldlm_lock_slab);
 	kmem_cache_destroy(ldlm_interval_slab);
+	kmem_cache_destroy(ldlm_interval_tree_slab);
 }
diff --git a/lustre/ldlm/ldlm_resource.c b/lustre/ldlm/ldlm_resource.c
index 3dfde84..fd7b65d 100644
--- a/lustre/ldlm/ldlm_resource.c
+++ b/lustre/ldlm/ldlm_resource.c
@@ -46,6 +46,7 @@
 #include "ldlm_internal.h"
 
 struct kmem_cache *ldlm_resource_slab, *ldlm_lock_slab;
+struct kmem_cache *ldlm_interval_tree_slab;
 
 int ldlm_srv_namespace_nr = 0;
 int ldlm_cli_namespace_nr = 0;
@@ -1025,7 +1026,7 @@ struct ldlm_namespace *ldlm_namespace_first_locked(ldlm_side_t client)
 }
 
 /** Create and initialize new resource. */
-static struct ldlm_resource *ldlm_resource_new(void)
+static struct ldlm_resource *ldlm_resource_new(ldlm_type_t type)
 {
 	struct ldlm_resource *res;
 	int idx;
@@ -1034,17 +1035,25 @@
 	if (res == NULL)
 		return NULL;
 
+	if (type == LDLM_EXTENT) {
+		OBD_SLAB_ALLOC(res->lr_itree, ldlm_interval_tree_slab,
+			       sizeof(*res->lr_itree) * LCK_MODE_NUM);
+		if (res->lr_itree == NULL) {
+			OBD_SLAB_FREE_PTR(res, ldlm_resource_slab);
+			return NULL;
+		}
+		/* Initialize interval trees for each lock mode. */
+		for (idx = 0; idx < LCK_MODE_NUM; idx++) {
+			res->lr_itree[idx].lit_size = 0;
+			res->lr_itree[idx].lit_mode = 1 << idx;
+			res->lr_itree[idx].lit_root = NULL;
+		}
+	}
+
 	INIT_LIST_HEAD(&res->lr_granted);
 	INIT_LIST_HEAD(&res->lr_converting);
 	INIT_LIST_HEAD(&res->lr_waiting);
 
-	/* Initialize interval trees for each lock mode. */
-	for (idx = 0; idx < LCK_MODE_NUM; idx++) {
-		res->lr_itree[idx].lit_size = 0;
-		res->lr_itree[idx].lit_mode = 1 << idx;
-		res->lr_itree[idx].lit_root = NULL;
-	}
-
 	atomic_set(&res->lr_refcount, 1);
 	spin_lock_init(&res->lr_lock);
 	lu_ref_init(&res->lr_reference);
@@ -1093,14 +1102,13 @@ ldlm_resource_get(struct ldlm_namespace *ns, struct ldlm_resource *parent,
 	LASSERTF(type >= LDLM_MIN_TYPE && type < LDLM_MAX_TYPE,
 		 "type: %d\n", type);
 
-	res = ldlm_resource_new();
+	res = ldlm_resource_new(type);
 	if (res == NULL)
 		return ERR_PTR(-ENOMEM);
 
 	res->lr_ns_bucket = cfs_hash_bd_extra_get(ns->ns_rs_hash, &bd);
 	res->lr_name = *name;
 	res->lr_type = type;
-	res->lr_most_restr = LCK_NL;
 
 	cfs_hash_bd_lock(ns->ns_rs_hash, &bd, 1);
 	hnode = (version == cfs_hash_bd_version_get(&bd)) ? NULL :
@@ -1111,6 +1119,9 @@ ldlm_resource_get(struct ldlm_namespace *ns, struct ldlm_resource *parent,
 		cfs_hash_bd_unlock(ns->ns_rs_hash, &bd, 1);
 		/* Clean lu_ref for failed resource. */
 		lu_ref_fini(&res->lr_reference);
+		if (res->lr_itree != NULL)
+			OBD_SLAB_FREE(res->lr_itree, ldlm_interval_tree_slab,
+				      sizeof(*res->lr_itree) * LCK_MODE_NUM);
 		OBD_SLAB_FREE(res, ldlm_resource_slab, sizeof *res);
 found:
 	res = hlist_entry(hnode, struct ldlm_resource, lr_hash);
@@ -1192,6 +1203,9 @@ int ldlm_resource_putref(struct ldlm_resource *res)
 		cfs_hash_bd_unlock(ns->ns_rs_hash, &bd, 1);
 		if (ns->ns_lvbo && ns->ns_lvbo->lvbo_free)
 			ns->ns_lvbo->lvbo_free(res);
+		if (res->lr_itree != NULL)
+			OBD_SLAB_FREE(res->lr_itree, ldlm_interval_tree_slab,
+				      sizeof(*res->lr_itree) * LCK_MODE_NUM);
 		OBD_SLAB_FREE(res, ldlm_resource_slab, sizeof *res);
 		return 1;
 	}
@@ -1222,6 +1236,9 @@ int ldlm_resource_putref_locked(struct ldlm_resource *res)
 		 */
 		if (ns->ns_lvbo && ns->ns_lvbo->lvbo_free)
 			ns->ns_lvbo->lvbo_free(res);
+		if (res->lr_itree != NULL)
+			OBD_SLAB_FREE(res->lr_itree, ldlm_interval_tree_slab,
+				      sizeof(*res->lr_itree) * LCK_MODE_NUM);
 		OBD_SLAB_FREE(res, ldlm_resource_slab, sizeof *res);
 
 		cfs_hash_bd_lock(ns->ns_rs_hash, &bd, 1);
-- 
1.8.3.1
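
The patch relies on three standard techniques for shrinking a C structure: reordering members so that small fields fill the padding holes left by the alignment of larger ones, overlapping two members that are never used at the same time (the server-only lr_contention_time and the client-only lr_lvb_inode) in an anonymous union, and turning a rarely needed embedded array into a pointer that is allocated separately (here from the new ldlm_interval_tree_slab, and only for LDLM_EXTENT resources). The standalone sketch below illustrates the same three ideas; res_before and res_after are invented for illustration and do not reproduce the real ldlm_resource layout, and the hole sizes noted in the comments assume a typical LP64 ABI such as x86-64 Linux.

#include <stdio.h>
#include <stdlib.h>

/* Naive layout: an alignment hole after 'refcount', another after
 * 'initialized', two mutually exclusive fields stored side by side,
 * and a per-mode array embedded even when it is never used. */
struct res_before {
	int	refcount;		/* 4-byte hole follows (pointer alignment) */
	void	*lvb_data;
	char	initialized;		/* 7-byte hole follows */
	long	contention_time;	/* server side only */
	void	*lvb_inode;		/* client side only */
	long	itree[8];		/* only "extent" resources ever touch this */
};

/* Reworked layout: large members first so the small ones share the
 * tail, the mutually exclusive members overlap in a union, and the
 * array is reached through a pointer allocated on demand. */
struct res_after {
	void	*lvb_data;
	union {
		long	contention_time;	/* server side only */
		void	*lvb_inode;		/* client side only */
	};
	long	*itree;			/* NULL unless the resource needs it */
	int	refcount;
	char	initialized;
};

int main(void)
{
	struct res_after *r;

	printf("before: %zu bytes\n", sizeof(struct res_before));
	printf("after:  %zu bytes\n", sizeof(struct res_after));

	/* Only resources that need the tree pay for it, mirroring the
	 * LDLM_EXTENT-only allocation from ldlm_interval_tree_slab. */
	r = calloc(1, sizeof(*r));
	if (r == NULL)
		return 1;
	r->itree = calloc(8, sizeof(*r->itree));
	free(r->itree);
	free(r);
	return 0;
}

On an x86-64 build this prints 104 bytes for res_before and 32 bytes for res_after; the absolute numbers are specific to these toy structures, but each contribution (holes closed, the union sharing one slot, the array moved behind a pointer) corresponds to one of the savings itemized in the commit message.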