/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * Author: liang@whamcloud.com
 */
#define DEBUG_SUBSYSTEM S_LNET

#include <libcfs/libcfs.h>

#ifdef __KERNEL__
/** destroy cpu-partition lock, see libcfs_private.h for more detail */
void
cfs_percpt_lock_free(struct cfs_percpt_lock *pcl)
{
	LASSERT(pcl->pcl_locks != NULL);
	LASSERT(!pcl->pcl_locked);

	cfs_percpt_free(pcl->pcl_locks);
	LIBCFS_FREE(pcl, sizeof(*pcl));
}
CFS_EXPORT_SYMBOL(cfs_percpt_lock_free);
/**
 * create cpu-partition lock, see libcfs_private.h for more detail.
 *
 * cpu-partition lock is designed for large-scale SMP systems, so we need to
 * reduce cacheline conflicts as much as we can; that is why we always
 * allocate cacheline-aligned memory blocks.
 */
struct cfs_percpt_lock *
cfs_percpt_lock_alloc(struct cfs_cpt_table *cptab)
{
	struct cfs_percpt_lock	*pcl;
	spinlock_t		*lock;
	int			i;

	/* NB: cptab can be NULL, pcl will be for HW CPUs in that case */
	LIBCFS_ALLOC(pcl, sizeof(*pcl));
	if (pcl == NULL)
		return NULL;

	pcl->pcl_cptab = cptab;
	pcl->pcl_locks = cfs_percpt_alloc(cptab, sizeof(*lock));
	if (pcl->pcl_locks == NULL) {
		LIBCFS_FREE(pcl, sizeof(*pcl));
		return NULL;
	}

	cfs_percpt_for_each(lock, i, pcl->pcl_locks)
		spin_lock_init(lock);

	return pcl;
}
CFS_EXPORT_SYMBOL(cfs_percpt_lock_alloc);
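
/*
 * Illustrative sketch, not part of the original file: the typical lifetime
 * of a cpu-partition lock built with the allocator above.  The helper name
 * is hypothetical and "cptab" is assumed to have been set up elsewhere;
 * error handling is kept minimal.
 */
static void
cfs_percpt_lock_example_lifetime(struct cfs_cpt_table *cptab)
{
	struct cfs_percpt_lock *pcl;

	/* one cacheline-aligned spinlock per CPU partition of "cptab";
	 * passing NULL falls back to one lock per HW CPU */
	pcl = cfs_percpt_lock_alloc(cptab);
	if (pcl == NULL)
		return;

	/* ... cfs_percpt_lock()/cfs_percpt_unlock() calls go here ... */

	/* the lock must not be held (pcl_locked clear) when it is freed */
	cfs_percpt_lock_free(pcl);
}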
/**
 * lock a CPU partition
 *
 * \a index != CFS_PERCPT_LOCK_EX
 *	hold private lock indexed by \a index
 *
 * \a index == CFS_PERCPT_LOCK_EX
 *	exclusively lock @pcl and nobody can take private lock
 */
void
cfs_percpt_lock(struct cfs_percpt_lock *pcl, int index)
{
	int	ncpt = cfs_cpt_number(pcl->pcl_cptab);
	int	i;

	LASSERT(index >= CFS_PERCPT_LOCK_EX && index < ncpt);

	if (ncpt == 1) {
		index = 0;
	} else { /* serialize with exclusive lock */
		while (pcl->pcl_locked)
			cpu_relax();
	}

	if (likely(index != CFS_PERCPT_LOCK_EX)) {
		spin_lock(pcl->pcl_locks[index]);
		return;
	}

	/* exclusive lock request */
	for (i = 0; i < ncpt; i++) {
		spin_lock(pcl->pcl_locks[i]);
		if (i == 0) {
			LASSERT(!pcl->pcl_locked);
			/* nobody should take a private lock after this,
			 * so I won't starve for too long */
			pcl->pcl_locked = 1;
		}
	}
}
CFS_EXPORT_SYMBOL(cfs_percpt_lock);
/** unlock a CPU partition */
void
cfs_percpt_unlock(struct cfs_percpt_lock *pcl, int index)
{
	int	ncpt = cfs_cpt_number(pcl->pcl_cptab);
	int	i;

	index = ncpt == 1 ? 0 : index;

	if (likely(index != CFS_PERCPT_LOCK_EX)) {
		spin_unlock(pcl->pcl_locks[index]);
		return;
	}

	for (i = ncpt - 1; i >= 0; i--) {
		if (i == 0) {
			LASSERT(pcl->pcl_locked);
			pcl->pcl_locked = 0;
		}
		spin_unlock(pcl->pcl_locks[i]);
	}
}
CFS_EXPORT_SYMBOL(cfs_percpt_unlock);
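
/*
 * Illustrative sketch, not part of the original file: how a caller is
 * expected to pair cfs_percpt_lock()/cfs_percpt_unlock().  The helper name
 * is hypothetical; "cpt" is assumed to be a valid partition index, e.g.
 * obtained from cfs_cpt_current().
 */
static void
cfs_percpt_lock_example_use(struct cfs_percpt_lock *pcl, int cpt)
{
	/* common path: take only the private lock of partition "cpt", so
	 * unrelated partitions never contend on the same cacheline */
	cfs_percpt_lock(pcl, cpt);
	/* ... touch data owned by partition "cpt" ... */
	cfs_percpt_unlock(pcl, cpt);

	/* rare path: CFS_PERCPT_LOCK_EX takes every private lock, which
	 * excludes all partitions, e.g. for a global state change */
	cfs_percpt_lock(pcl, CFS_PERCPT_LOCK_EX);
	/* ... touch data shared by all partitions ... */
	cfs_percpt_unlock(pcl, CFS_PERCPT_LOCK_EX);
}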
#else /* !__KERNEL__ */
# ifdef HAVE_LIBPTHREAD

struct cfs_percpt_lock *
cfs_percpt_lock_alloc(struct cfs_cpt_table *cptab)
{
	struct cfs_percpt_lock *pcl;

	CFS_ALLOC_PTR(pcl);
	if (pcl != NULL)
		pthread_mutex_init(&pcl->pcl_mutex, NULL);

	return pcl;
}

void
cfs_percpt_lock_free(struct cfs_percpt_lock *pcl)
{
	pthread_mutex_destroy(&pcl->pcl_mutex);
	CFS_FREE_PTR(pcl);
}

void
cfs_percpt_lock(struct cfs_percpt_lock *pcl, int lock)
{
	pthread_mutex_lock(&(pcl)->pcl_mutex);
}

void
cfs_percpt_unlock(struct cfs_percpt_lock *pcl, int lock)
{
	pthread_mutex_unlock(&(pcl)->pcl_mutex);
}
# else /* !HAVE_LIBPTHREAD */

struct cfs_percpt_lock *
cfs_percpt_lock_alloc(struct cfs_cpt_table *cptab)
{
	return ((struct cfs_percpt_lock *) &CFS_PERCPT_LOCK_MAGIC);
}

void
cfs_percpt_lock_free(struct cfs_percpt_lock *pcl)
{
	LASSERT(pcl == (struct cfs_percpt_lock *) &CFS_PERCPT_LOCK_MAGIC);
}

void
cfs_percpt_lock(struct cfs_percpt_lock *pcl, int index)
{
	LASSERT(pcl == (struct cfs_percpt_lock *) &CFS_PERCPT_LOCK_MAGIC);
}

void
cfs_percpt_unlock(struct cfs_percpt_lock *pcl, int index)
{
	LASSERT(pcl == (struct cfs_percpt_lock *) &CFS_PERCPT_LOCK_MAGIC);
}

# endif /* HAVE_LIBPTHREAD */
#endif /* __KERNEL__ */
/** free cpu-partition refcount */
void
cfs_percpt_atomic_free(cfs_atomic_t **refs)
{
	cfs_percpt_free(refs);
}
CFS_EXPORT_SYMBOL(cfs_percpt_atomic_free);

/** allocate cpu-partition refcount with initial value @init_val */
cfs_atomic_t **
cfs_percpt_atomic_alloc(struct cfs_cpt_table *cptab, int init_val)
{
	cfs_atomic_t	**refs;
	cfs_atomic_t	*ref;
	int		i;

	refs = cfs_percpt_alloc(cptab, sizeof(*ref));
	if (refs == NULL)
		return NULL;

	cfs_percpt_for_each(ref, i, refs)
		cfs_atomic_set(ref, init_val);
	return refs;
}
CFS_EXPORT_SYMBOL(cfs_percpt_atomic_alloc);

/** return sum of cpu-partition refs */
int
cfs_percpt_atomic_summary(cfs_atomic_t **refs)
{
	cfs_atomic_t	*ref;
	int		i;
	int		val = 0;

	cfs_percpt_for_each(ref, i, refs)
		val += cfs_atomic_read(ref);

	return val;
}
CFS_EXPORT_SYMBOL(cfs_percpt_atomic_summary);
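
/*
 * Illustrative sketch, not part of the original file: using a cpu-partition
 * refcount so that increments stay on per-partition cachelines and only the
 * summary walks all partitions.  The helper name is hypothetical.
 */
static int
cfs_percpt_atomic_example(struct cfs_cpt_table *cptab)
{
	cfs_atomic_t **refs;

	refs = cfs_percpt_atomic_alloc(cptab, 0);
	if (refs == NULL)
		return -ENOMEM;

	/* a caller running on partition "cpt" bumps only its own counter */
	cfs_atomic_inc(refs[cfs_cpt_current(cptab, 0)]);

	/* slow path: fold every per-partition counter into one total */
	CDEBUG(D_INFO, "total refs: %d\n", cfs_percpt_atomic_summary(refs));

	cfs_percpt_atomic_free(refs);
	return 0;
}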