/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * Copyright (C) 2002 Cluster File Systems, Inc.
 * Author: Liang Zhen <liangzhen@clusterfs.com>
 *         Nikita Danilov <nikita@clusterfs.com>
 *
 * This file is part of Lustre, http://www.lustre.org.
 *
 * Lustre is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * Lustre is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with Lustre; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Darwin porting library
 * Make things easy to port
 */
#define DEBUG_SUBSYSTEM S_LNET

#include <mach/mach_types.h>
#include <string.h>
#include <sys/malloc.h>

#include <libcfs/libcfs.h>
#include <libcfs/kp30.h>
#include "darwin-internal.h"

#if CFS_INDIVIDUAL_ZONE
extern zone_t zinit(vm_size_t, vm_size_t, vm_size_t, const char *);
extern void  *zalloc(zone_t zone);
extern void  *zalloc_noblock(zone_t zone);
extern void   zfree(zone_t zone, void *addr);

struct cfs_zone_nob {
        struct list_head       *z_nob;  /* Pointer to z_link */
        struct list_head        z_link; /* Do NOT access it directly */
};

static struct cfs_zone_nob      cfs_zone_nob;
static spinlock_t               cfs_zone_guard;
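
/*
 * Find a previously created zone by name and object size. Zones survive
 * kext unloading (see mem_cache_destroy() below), so a reload of libcfs
 * must reuse a matching zone instead of creating a duplicate.
 */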
cfs_mem_cache_t *mem_cache_find(const char *name, size_t objsize)
{
        cfs_mem_cache_t *found = NULL;
        cfs_mem_cache_t *walker;

        LASSERT(cfs_zone_nob.z_nob != NULL);

        spin_lock(&cfs_zone_guard);
        list_for_each_entry(walker, cfs_zone_nob.z_nob, mc_link) {
                if (!strcmp(walker->mc_name, name) &&
                    walker->mc_size == objsize) {
                        found = walker;
                        break;
                }
        }
        spin_unlock(&cfs_zone_guard);

        return found;
}

/*
 * Our wrapper around kern/zalloc.c:zinit().
 *
 * Creates a copy of @name and calls zinit() to do the real work. Needed
 * because the zone survives kext unloading, so @name cannot be just a
 * static string embedded into the kext image.
 */
cfs_mem_cache_t *mem_cache_create(vm_size_t objsize, const char *name)
{
        cfs_mem_cache_t *mc = NULL;
        char            *cname;

        MALLOC(mc, cfs_mem_cache_t *, sizeof(cfs_mem_cache_t), M_TEMP,
               M_WAITOK | M_ZERO);
        if (mc == NULL) {
                CERROR("cfs_mem_cache create failed!\n");
                return NULL;
        }

        cname = _MALLOC(strlen(name) + 1, M_TEMP, M_WAITOK);
        LASSERT(cname != NULL);
        mc->mc_cache = zinit(objsize, KMEM_MAX_ZONE * objsize, 0,
                             strcpy(cname, name));
        mc->mc_size = objsize;
        CFS_INIT_LIST_HEAD(&mc->mc_link);
        strncpy(mc->mc_name, name, 1 + strlen(name));
        return mc;
}

void mem_cache_destroy(cfs_mem_cache_t *mc)
{
        /*
         * A zone canNOT be destroyed once created, so just
         * keep it on the list.
         *
         * We will not lose the zone after libcfs is unloaded:
         * it can be found again through the libcfs.zone sysctl.
         */
        return;
}

#define mem_cache_alloc(mc)     zalloc((mc)->mc_cache)
#ifdef __DARWIN8__
/* XXX Liang: Tiger doesn't export zalloc_noblock(), fall back to zalloc() */
# define mem_cache_alloc_nb(mc) zalloc((mc)->mc_cache)
#else
# define mem_cache_alloc_nb(mc) zalloc_noblock((mc)->mc_cache)
#endif
#define mem_cache_free(mc, p)   zfree((mc)->mc_cache, p)

#else  /* !CFS_INDIVIDUAL_ZONE */

cfs_mem_cache_t *
mem_cache_find(const char *name, size_t objsize)
{
        return NULL;
}

cfs_mem_cache_t *mem_cache_create(vm_size_t size, const char *name)
{
        cfs_mem_cache_t *mc = NULL;

        MALLOC(mc, cfs_mem_cache_t *, sizeof(cfs_mem_cache_t), M_TEMP,
               M_WAITOK | M_ZERO);
        if (mc == NULL) {
                CERROR("cfs_mem_cache create failed!\n");
                return NULL;
        }
        mc->mc_cache = OSMalloc_Tagalloc(name, OSMT_DEFAULT);
        mc->mc_size = size;
        return mc;
}

void mem_cache_destroy(cfs_mem_cache_t *mc)
{
        OSMalloc_Tagfree(mc->mc_cache);
        FREE(mc, M_TEMP);
}

#define mem_cache_alloc(mc)     OSMalloc((mc)->mc_size, (mc)->mc_cache)
#define mem_cache_alloc_nb(mc)  OSMalloc_noblock((mc)->mc_size, (mc)->mc_cache)
#define mem_cache_free(mc, p)   OSFree(p, (mc)->mc_size, (mc)->mc_cache)

#endif /* !CFS_INDIVIDUAL_ZONE */

cfs_mem_cache_t *
cfs_mem_cache_create (const char *name,
                      size_t objsize, size_t off, unsigned long arg1)
{
        cfs_mem_cache_t *mc;

        /* Reuse a zone that survived a previous kext load, if any. */
        mc = mem_cache_find(name, objsize);
        if (mc)
                return mc;
        return mem_cache_create(objsize, name);
}

int cfs_mem_cache_destroy (cfs_mem_cache_t *cachep)
{
        mem_cache_destroy(cachep);
        return 0;
}
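
/*
 * Illustrative sketch of the cfs_mem_cache API above (the cache name and
 * "struct my_obj" are hypothetical, not part of libcfs):
 *
 *      cfs_mem_cache_t *cache;
 *      struct my_obj   *obj;
 *
 *      cache = cfs_mem_cache_create("my-obj", sizeof(struct my_obj), 0, 0);
 *      obj = cfs_mem_cache_alloc(cache, CFS_ALLOC_ZERO);
 *      ...
 *      cfs_mem_cache_free(cache, obj);
 *      cfs_mem_cache_destroy(cache);
 */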

void *cfs_mem_cache_alloc (cfs_mem_cache_t *cachep, int flags)
{
        void *result;

        /* zalloc_canblock() is not exported... Emulate it. */
        if (flags & CFS_ALLOC_ATOMIC) {
                result = (void *)mem_cache_alloc_nb(cachep);
        } else {
                LASSERT(get_preemption_level() == 0);
                result = (void *)mem_cache_alloc(cachep);
        }
        if (result != NULL && (flags & CFS_ALLOC_ZERO))
                memset(result, 0, cachep->mc_size);

        return result;
}

void cfs_mem_cache_free (cfs_mem_cache_t *cachep, void *objp)
{
        mem_cache_free(cachep, objp);
}

/* ---------------------------------------------------------------------------
 * Page operations
 * --------------------------------------------------------------------------- */

/*
 * "Raw" pages
 */

static unsigned int              raw_pages = 0;
static cfs_mem_cache_t          *raw_page_cache = NULL;

static struct xnu_page_ops raw_page_ops;
static struct xnu_page_ops *page_ops[XNU_PAGE_NTYPES] = {
        [XNU_PAGE_RAW] = &raw_page_ops
};

#if defined(LIBCFS_DEBUG)
static int page_type_is_valid(cfs_page_t *page)
{
        LASSERT(page != NULL);
        return 0 <= page->type && page->type < XNU_PAGE_NTYPES;
}

static int page_is_raw(cfs_page_t *page)
{
        return page->type == XNU_PAGE_RAW;
}
#endif

/* Convert a generic page header to the raw page that embeds it. */
static struct xnu_raw_page *as_raw(cfs_page_t *page)
{
        LASSERT(page_is_raw(page));
        return list_entry(page, struct xnu_raw_page, header);
}

static void *raw_page_address(cfs_page_t *pg)
{
        return (void *)as_raw(pg)->virtual;
}

static void *raw_page_map(cfs_page_t *pg)
{
        return (void *)as_raw(pg)->virtual;
}

static void raw_page_unmap(cfs_page_t *pg)
{
        /* Raw pages are always mapped; nothing to undo. */
}

static struct xnu_page_ops raw_page_ops = {
        .page_map       = raw_page_map,
        .page_unmap     = raw_page_unmap,
        .page_address   = raw_page_address
};

extern int get_preemption_level(void);

struct list_head        page_death_row;
spinlock_t              page_death_row_phylax;

static void raw_page_finish(struct xnu_raw_page *pg)
{
        --raw_pages;
        if (pg->virtual != NULL)
                cfs_mem_cache_free(raw_page_cache, pg->virtual);
        cfs_free(pg);
}
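
/*
 * Drain pages parked on the death row by free_raw_page() below. Must run
 * in a context that is allowed to block.
 */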
void raw_page_death_row_clean(void)
{
        struct xnu_raw_page *pg;

        spin_lock(&page_death_row_phylax);
        while (!list_empty(&page_death_row)) {
                pg = container_of(page_death_row.next,
                                  struct xnu_raw_page, link);
                list_del(&pg->link);
                spin_unlock(&page_death_row_phylax);
                raw_page_finish(pg);
                spin_lock(&page_death_row_phylax);
        }
        spin_unlock(&page_death_row_phylax);
}

void free_raw_page(struct xnu_raw_page *pg)
{
        if (!atomic_dec_and_test(&pg->count))
                return;
        /*
         * kmem_free()->vm_map_remove()->vm_map_delete()->lock_write() may
         * block. (raw_page_done()->upl_abort() can block too.) On the other
         * hand, cfs_free_page() may be called in a non-blockable context. To
         * work around this, park pages on the global list when we cannot
         * block.
         */
        if (get_preemption_level() > 0) {
                spin_lock(&page_death_row_phylax);
                list_add(&pg->link, &page_death_row);
                spin_unlock(&page_death_row_phylax);
        } else {
                raw_page_finish(pg);
                raw_page_death_row_clean();
        }
}

cfs_page_t *cfs_alloc_page(u_int32_t flags)
{
        struct xnu_raw_page *page;

        /*
         * XXX nikita: do NOT call libcfs_debug_msg() (CDEBUG/ENTRY/EXIT)
         * from here: this will lead to infinite recursion.
         */

        page = cfs_alloc(sizeof *page, flags);
        if (page != NULL) {
                page->virtual = cfs_mem_cache_alloc(raw_page_cache, flags);
                if (page->virtual != NULL) {
                        ++raw_pages;
                        page->header.type = XNU_PAGE_RAW;
                        atomic_set(&page->count, 1);
                } else {
                        cfs_free(page);
                        page = NULL;
                }
        }
        return page != NULL ? &page->header : NULL;
}

void cfs_free_page(cfs_page_t *pages)
{
        free_raw_page(as_raw(pages));
}

void cfs_get_page(cfs_page_t *p)
{
        atomic_inc(&as_raw(p)->count);
}

int cfs_put_page_testzero(cfs_page_t *p)
{
        return atomic_dec_and_test(&as_raw(p)->count);
}

int cfs_page_count(cfs_page_t *p)
{
        return atomic_read(&as_raw(p)->count);
}

/*
 * Generic page operations
 */

void *cfs_page_address(cfs_page_t *pg)
{
        /*
         * XXX nikita: do NOT call libcfs_debug_msg() (CDEBUG/ENTRY/EXIT)
         * from here: this will lead to infinite recursion.
         */
        LASSERT(page_type_is_valid(pg));
        return page_ops[pg->type]->page_address(pg);
}

void *cfs_kmap(cfs_page_t *pg)
{
        LASSERT(page_type_is_valid(pg));
        return page_ops[pg->type]->page_map(pg);
}

void cfs_kunmap(cfs_page_t *pg)
{
        LASSERT(page_type_is_valid(pg));
        page_ops[pg->type]->page_unmap(pg);
}
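
/*
 * Illustrative sketch of the page API (hypothetical call site; error
 * handling omitted):
 *
 *      cfs_page_t *pg = cfs_alloc_page(CFS_ALLOC_ZERO);
 *      void       *va = cfs_kmap(pg);
 *      ...
 *      cfs_kunmap(pg);
 *      cfs_free_page(pg);
 */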

/* Install page operations for a page type; the slot must be empty. */
void xnu_page_ops_register(int type, struct xnu_page_ops *ops)
{
        LASSERT(0 <= type && type < XNU_PAGE_NTYPES);
        LASSERT(ops != NULL);
        LASSERT(page_ops[type] == NULL);

        page_ops[type] = ops;
}

void xnu_page_ops_unregister(int type)
{
        LASSERT(0 <= type && type < XNU_PAGE_NTYPES);
        LASSERT(page_ops[type] != NULL);

        page_ops[type] = NULL;
}

/*
 * Portable memory allocator API
 */
#ifdef HAVE_GET_PREEMPTION_LEVEL
extern int get_preemption_level(void);
#else
#define get_preemption_level() (0)
#endif
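
/*
 * Translate CFS_ALLOC_* flags into the M_* flags understood by _MALLOC():
 * CFS_ALLOC_ATOMIC maps to M_NOWAIT, anything else to M_WAITOK, and
 * CFS_ALLOC_ZERO additionally requests zeroed memory via M_ZERO.
 */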
void *cfs_alloc(size_t nr_bytes, u_int32_t flags)
{
        int mflags;

        mflags = 0;
        if (flags & CFS_ALLOC_ATOMIC) {
                mflags |= M_NOWAIT;
        } else {
                LASSERT(get_preemption_level() == 0);
                mflags |= M_WAITOK;
        }

        if (flags & CFS_ALLOC_ZERO)
                mflags |= M_ZERO;

        return _MALLOC(nr_bytes, M_TEMP, mflags);
}

void cfs_free(void *addr)
{
        _FREE(addr, M_TEMP);
}

void *cfs_alloc_large(size_t nr_bytes)
{
        LASSERT(get_preemption_level() == 0);
        return _MALLOC(nr_bytes, M_TEMP, M_WAITOK);
}

void cfs_free_large(void *addr)
{
        LASSERT(get_preemption_level() == 0);
        _FREE(addr, M_TEMP);
}
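
/*
 * Illustrative sketch (the "struct foo" call site is hypothetical):
 *
 *      struct foo *f = cfs_alloc(sizeof *f, CFS_ALLOC_ZERO);
 *      if (f != NULL) {
 *              ...
 *              cfs_free(f);
 *      }
 */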

/*
 * Look up cfs_zone_nob through the libcfs.zone sysctl. If it cannot be
 * found (first load of libcfs since boot), allocate it and register the
 * libcfs.zone sysctl.
 */
int cfs_mem_init(void)
{
#if CFS_INDIVIDUAL_ZONE
        int     rc;
        size_t  len;

        len = sizeof(struct cfs_zone_nob);
        rc = sysctlbyname("libcfs.zone",
                          (void *)&cfs_zone_nob, &len, NULL, 0);
        if (rc == ENOENT) {
                /* zone_nob is not registered in libcfs_sysctl */
                struct cfs_zone_nob     *nob;
                struct sysctl_oid       *oid;

                assert(cfs_sysctl_isvalid());

                nob = _MALLOC(sizeof(struct cfs_zone_nob),
                              M_TEMP, M_WAITOK | M_ZERO);
                CFS_INIT_LIST_HEAD(&nob->z_link);
                nob->z_nob = &nob->z_link;
                oid = cfs_alloc_sysctl_struct(NULL, OID_AUTO,
                                              CTLFLAG_RD | CTLFLAG_KERN,
                                              "zone", nob,
                                              sizeof(struct cfs_zone_nob));
                if (oid == NULL) {
                        _FREE(nob, M_TEMP);
                        return -ENOMEM;
                }
                sysctl_register_oid(oid);

                cfs_zone_nob.z_nob = nob->z_nob;
        }
        spin_lock_init(&cfs_zone_guard);
#endif
        CFS_INIT_LIST_HEAD(&page_death_row);
        spin_lock_init(&page_death_row_phylax);
        raw_page_cache = cfs_mem_cache_create("raw-page", CFS_PAGE_SIZE, 0, 0);
        return 0;
}

void cfs_mem_fini(void)
{
        raw_page_death_row_clean();
        spin_lock_done(&page_death_row_phylax);
        cfs_mem_cache_destroy(raw_page_cache);

#if CFS_INDIVIDUAL_ZONE
        cfs_zone_nob.z_nob = NULL;
        spin_lock_done(&cfs_zone_guard);
#endif
}