/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2012, Whamcloud, Inc.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * Author: liang@whamcloud.com
 */
#ifndef EXPORT_SYMTAB
# define EXPORT_SYMTAB
#endif
#define DEBUG_SUBSYSTEM S_LNET

#include <libcfs/libcfs.h>

#ifdef __KERNEL__
/** destroy cpu-partition lock, see libcfs_private.h for more detail */
void
cfs_percpt_lock_free(struct cfs_percpt_lock *pcl)
{
	LASSERT(pcl->pcl_locks != NULL);
	LASSERT(!pcl->pcl_locked);

	cfs_percpt_free(pcl->pcl_locks);
	LIBCFS_FREE(pcl, sizeof(*pcl));
}
CFS_EXPORT_SYMBOL(cfs_percpt_lock_free);
/**
 * create cpu-partition lock, see libcfs_private.h for more detail.
 *
 * The cpu-partition lock is designed for large-scale SMP systems, so we
 * need to reduce cacheline conflicts as much as possible; that is why we
 * always allocate cacheline-aligned memory blocks.
 */
struct cfs_percpt_lock *
cfs_percpt_lock_alloc(struct cfs_cpt_table *cptab)
{
	struct cfs_percpt_lock	*pcl;
	spinlock_t		*lock;
	int			i;

	/* NB: cptab can be NULL, pcl will be for HW CPUs in that case */
	LIBCFS_ALLOC(pcl, sizeof(*pcl));
	if (pcl == NULL)
		return NULL;

	pcl->pcl_cptab = cptab;
	pcl->pcl_locks = cfs_percpt_alloc(cptab, sizeof(*lock));
	if (pcl->pcl_locks == NULL) {
		LIBCFS_FREE(pcl, sizeof(*pcl));
		return NULL;
	}
	cfs_percpt_for_each(lock, i, pcl->pcl_locks)
		spin_lock_init(lock);
	return pcl;
}
CFS_EXPORT_SYMBOL(cfs_percpt_lock_alloc);
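
/*
 * Usage sketch (not part of the original file): typical lifecycle of a
 * cfs_percpt_lock.  "foo_plock", "foo_init" and "foo_fini" are hypothetical
 * caller-side names, not libcfs symbols.
 */
#if 0	/* illustrative only */
static struct cfs_percpt_lock *foo_plock;

static int
foo_init(struct cfs_cpt_table *cptab)
{
	/* a NULL cptab is allowed; the lock then covers all HW CPUs */
	foo_plock = cfs_percpt_lock_alloc(cptab);
	return foo_plock == NULL ? -ENOMEM : 0;
}

static void
foo_fini(void)
{
	cfs_percpt_lock_free(foo_plock);
}
#endif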
/**
 * lock a CPU partition
 *
 * \a index != CFS_PERCPT_LOCK_EX
 *	hold private lock indexed by \a index
 *
 * \a index == CFS_PERCPT_LOCK_EX
 *	exclusively lock @pcl and nobody can take private lock
 */
void
cfs_percpt_lock(struct cfs_percpt_lock *pcl, int index)
{
	int	ncpt = cfs_cpt_number(pcl->pcl_cptab);
	int	i;

	LASSERT(index >= CFS_PERCPT_LOCK_EX && index < ncpt);

	if (ncpt == 1) {
		index = 0;
	} else { /* serialize with exclusive lock */
		while (pcl->pcl_locked)
			cpu_relax();
	}

	if (likely(index != CFS_PERCPT_LOCK_EX)) {
		spin_lock(pcl->pcl_locks[index]);
		return;
	}

	/* exclusive lock request */
	for (i = 0; i < ncpt; i++) {
		spin_lock(pcl->pcl_locks[i]);
		if (i == 0) {
			LASSERT(!pcl->pcl_locked);
			/* nobody should take a private lock after this,
			 * so the exclusive locker won't starve for long */
			pcl->pcl_locked = 1;
		}
	}
}
CFS_EXPORT_SYMBOL(cfs_percpt_lock);
/** unlock a CPU partition */
void
cfs_percpt_unlock(struct cfs_percpt_lock *pcl, int index)
{
	int	ncpt = cfs_cpt_number(pcl->pcl_cptab);
	int	i;

	index = ncpt == 1 ? 0 : index;

	if (likely(index != CFS_PERCPT_LOCK_EX)) {
		spin_unlock(pcl->pcl_locks[index]);
		return;
	}

	for (i = ncpt - 1; i >= 0; i--) {
		if (i == 0) {
			LASSERT(pcl->pcl_locked);
			pcl->pcl_locked = 0;
		}
		spin_unlock(pcl->pcl_locks[i]);
	}
}
CFS_EXPORT_SYMBOL(cfs_percpt_unlock);
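
/*
 * Usage sketch (not part of the original file): hot paths take the private
 * lock of their own partition, while rare whole-structure operations take
 * CFS_PERCPT_LOCK_EX.  "foo_plock" is the hypothetical lock from the sketch
 * above.
 */
#if 0	/* illustrative only */
static void
foo_fast_path(int cpt)
{
	cfs_percpt_lock(foo_plock, cpt);   /* no cross-CPU cacheline traffic */
	/* ... touch only data owned by partition @cpt ... */
	cfs_percpt_unlock(foo_plock, cpt);
}

static void
foo_slow_path(void)
{
	cfs_percpt_lock(foo_plock, CFS_PERCPT_LOCK_EX); /* grabs every lock */
	/* ... operations that must see all partitions consistently ... */
	cfs_percpt_unlock(foo_plock, CFS_PERCPT_LOCK_EX);
}
#endif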
#else /* !__KERNEL__ */
# ifdef HAVE_LIBPTHREAD

struct cfs_percpt_lock *
cfs_percpt_lock_alloc(struct cfs_cpt_table *cptab)
{
	struct cfs_percpt_lock *pcl;

	LIBCFS_ALLOC(pcl, sizeof(*pcl));
	if (pcl != NULL)
		pthread_mutex_init(&pcl->pcl_mutex, NULL);
	return pcl;
}

void
cfs_percpt_lock_free(struct cfs_percpt_lock *pcl)
{
	pthread_mutex_destroy(&pcl->pcl_mutex);
	LIBCFS_FREE(pcl, sizeof(*pcl));
}

void
cfs_percpt_lock(struct cfs_percpt_lock *pcl, int lock)
{
	pthread_mutex_lock(&pcl->pcl_mutex);
}

void
cfs_percpt_unlock(struct cfs_percpt_lock *pcl, int lock)
{
	pthread_mutex_unlock(&pcl->pcl_mutex);
}
# else /* !HAVE_LIBPTHREAD */

struct cfs_percpt_lock *
cfs_percpt_lock_alloc(struct cfs_cpt_table *cptab)
{
	return ((struct cfs_percpt_lock *) &CFS_PERCPT_LOCK_MAGIC);
}

void
cfs_percpt_lock_free(struct cfs_percpt_lock *pcl)
{
	LASSERT(pcl == (struct cfs_percpt_lock *) &CFS_PERCPT_LOCK_MAGIC);
}

void
cfs_percpt_lock(struct cfs_percpt_lock *pcl, int index)
{
	LASSERT(pcl == (struct cfs_percpt_lock *) &CFS_PERCPT_LOCK_MAGIC);
}

void
cfs_percpt_unlock(struct cfs_percpt_lock *pcl, int index)
{
	LASSERT(pcl == (struct cfs_percpt_lock *) &CFS_PERCPT_LOCK_MAGIC);
}

# endif /* HAVE_LIBPTHREAD */
#endif /* __KERNEL__ */
/** free cpu-partition refcount */
void
cfs_percpt_atomic_free(cfs_atomic_t **refs)
{
	cfs_percpt_free(refs);
}
CFS_EXPORT_SYMBOL(cfs_percpt_atomic_free);
/** allocate cpu-partition refcount with initial value @init_val */
cfs_atomic_t **
cfs_percpt_atomic_alloc(struct cfs_cpt_table *cptab, int init_val)
{
	cfs_atomic_t	**refs;
	cfs_atomic_t	*ref;
	int		i;

	refs = cfs_percpt_alloc(cptab, sizeof(*ref));
	if (refs == NULL)
		return NULL;

	cfs_percpt_for_each(ref, i, refs)
		cfs_atomic_set(ref, init_val);
	return refs;
}
CFS_EXPORT_SYMBOL(cfs_percpt_atomic_alloc);
/** return sum of cpu-partition refs */
int
cfs_percpt_atomic_summary(cfs_atomic_t **refs)
{
	cfs_atomic_t	*ref;
	int		i;
	int		val = 0;

	cfs_percpt_for_each(ref, i, refs)
		val += cfs_atomic_read(ref);
	return val;
}
CFS_EXPORT_SYMBOL(cfs_percpt_atomic_summary);
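
/*
 * Usage sketch (not part of the original file): a low-contention refcount
 * spread across CPU partitions; each CPU touches only its own counter and
 * cfs_percpt_atomic_summary() folds the partial counts together when the
 * total is needed.  "foo_refs" is a hypothetical name, and the example
 * assumes the libcfs CPT helper cfs_cpt_current() for the caller's index.
 */
#if 0	/* illustrative only */
static cfs_atomic_t **foo_refs;

static int
foo_refs_example(struct cfs_cpt_table *cptab)
{
	foo_refs = cfs_percpt_atomic_alloc(cptab, 0);
	if (foo_refs == NULL)
		return -ENOMEM;

	/* each CPU bumps the counter of its own partition */
	cfs_atomic_inc(foo_refs[cfs_cpt_current(cptab, 1)]);

	LASSERT(cfs_percpt_atomic_summary(foo_refs) == 1);
	cfs_percpt_atomic_free(foo_refs);
	return 0;
}
#endif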