From: Isaac Huang Date: Wed, 27 Feb 2013 22:15:17 +0000 (-0700) Subject: LU-2884 libcfs: SMP optimizations cleanups X-Git-Tag: 2.3.63~33 X-Git-Url: https://git.whamcloud.com/gitweb?a=commitdiff_plain;h=7903fbc92af96ff5d5780fb89911dc8557f13ac8;p=fs%2Flustre-release.git LU-2884 libcfs: SMP optimizations cleanups Miscellaneous cleanups for the SMP optimizations code: - Fixed typos. - Fixed resource leak in lnet_create_locks(). - Fixed incorrect symbols in EXPORT_SYMBOL(). Signed-off-by: Isaac Huang Change-Id: I3b617367e5ed6b11ae327e477fc2c201c453e347 Reviewed-on: http://review.whamcloud.com/5547 Tested-by: Hudson Reviewed-by: Liang Zhen Tested-by: Maloo Reviewed-by: Doug Oucharek Reviewed-by: Oleg Drokin --- diff --git a/libcfs/include/libcfs/libcfs_cpu.h b/libcfs/include/libcfs/libcfs_cpu.h index f894423..d98a6fc 100644 --- a/libcfs/include/libcfs/libcfs_cpu.h +++ b/libcfs/include/libcfs/libcfs_cpu.h @@ -52,12 +52,12 @@ * cpu_npartitions=1: * core[0, 1, ... 7] = partition[0] * - * . User can also specifiy CPU partitions by string pattern + * . User can also specify CPU partitions by string pattern * * Examples: cpu_partitions="0[0,1], 1[2,3]" * cpu_partitions="N 0[0-3], 1[4-8]" * - * The first charactor "N" means following numbers are numa ID + * The first character "N" means following numbers are numa ID * * . NUMA allocators, CPU affinity threads are built over CPU partitions, * instead of HW CPUs or HW nodes. @@ -67,7 +67,7 @@ * configured by cpu_npartitions of the global cfs_cpt_table * * . 
If cpu_npartitions=1(all CPUs in one pool), lustre should work the - * same way as 2.2 or earlier verison + * same way as 2.2 or earlier versions * * Author: liang@whamcloud.com */ diff --git a/libcfs/libcfs/libcfs_cpu.c b/libcfs/libcfs/libcfs_cpu.c index 1a7ece28..3cb1636 100644 --- a/libcfs/libcfs/libcfs_cpu.c +++ b/libcfs/libcfs/libcfs_cpu.c @@ -40,7 +40,7 @@ #include -/** Global cpu partition table */ +/** Global CPU partition table */ struct cfs_cpt_table *cfs_cpt_table __read_mostly = NULL; EXPORT_SYMBOL(cfs_cpt_table); @@ -89,7 +89,7 @@ cfs_cpt_weight(struct cfs_cpt_table *cptab, int cpt) { return 1; } -EXPORT_SYMBOL(cfs_cpt_number); +EXPORT_SYMBOL(cfs_cpt_weight); int cfs_cpt_online(struct cfs_cpt_table *cptab, int cpt) @@ -175,7 +175,7 @@ cfs_cpt_of_cpu(struct cfs_cpt_table *cptab, int cpu) { return 0; } -EXPORT_SYMBOL(cfs_cpt_from_cpu); +EXPORT_SYMBOL(cfs_cpt_of_cpu); int cfs_cpt_bind(struct cfs_cpt_table *cptab, int cpt) diff --git a/libcfs/libcfs/linux/linux-cpu.c b/libcfs/libcfs/linux/linux-cpu.c index 4cb06d3..6bf0af5 100644 --- a/libcfs/libcfs/linux/linux-cpu.c +++ b/libcfs/libcfs/linux/linux-cpu.c @@ -55,7 +55,7 @@ CFS_MODULE_PARM(cpu_npartitions, "i", int, 0444, "# of CPU partitions"); * i.e: "0[0,1,2,3] 1[4,5,6,7]", number before bracket is CPU partition ID, * number in bracket is processor ID (core or HT) * - * i.e: "N 0[0,1] 1[2,3]" the first character 'n' means numbers in bracket + * i.e: "N 0[0,1] 1[2,3]" the first character 'N' means numbers in bracket * are NUMA node ID, number before bracket is CPU partition ID. 
* * NB: If user specified cpu_pattern, cpu_npartitions will be ignored @@ -665,7 +665,7 @@ cfs_cpt_bind(struct cfs_cpt_table *cptab, int cpt) return rc; } - /* don't need to set affinity baecause all online CPUs are covered */ + /* don't need to set affinity because all online CPUs are covered */ return 0; } EXPORT_SYMBOL(cfs_cpt_bind); diff --git a/lnet/lnet/api-ni.c b/lnet/lnet/api-ni.c index 8721253..803e0903 100644 --- a/lnet/lnet/api-ni.c +++ b/lnet/lnet/api-ni.c @@ -251,6 +251,22 @@ lnet_destroy_remote_nets_table(void) the_lnet.ln_remote_nets_hash = NULL; } +static void +lnet_destroy_locks(void) +{ + if (the_lnet.ln_res_lock != NULL) { + cfs_percpt_lock_free(the_lnet.ln_res_lock); + the_lnet.ln_res_lock = NULL; + } + + if (the_lnet.ln_net_lock != NULL) { + cfs_percpt_lock_free(the_lnet.ln_net_lock); + the_lnet.ln_net_lock = NULL; + } + + lnet_fini_locks(); +} + static int lnet_create_locks(void) { @@ -267,26 +283,10 @@ lnet_create_locks(void) return 0; failed: - lnet_fini_locks(); + lnet_destroy_locks(); return -ENOMEM; } -static void -lnet_destroy_locks(void) -{ - if (the_lnet.ln_res_lock != NULL) { - cfs_percpt_lock_free(the_lnet.ln_res_lock); - the_lnet.ln_res_lock = NULL; - } - - if (the_lnet.ln_net_lock != NULL) { - cfs_percpt_lock_free(the_lnet.ln_net_lock); - the_lnet.ln_net_lock = NULL; - } - - lnet_fini_locks(); -} - void lnet_assert_wire_constants (void) { /* Wire protocol assertions generated by 'wirecheck'