1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
6 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 only,
10 * as published by the Free Software Foundation.
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License version 2 for more details (a copy is included
16 * in the LICENSE file that accompanied this code).
18 * You should have received a copy of the GNU General Public License
19 * version 2 along with this program; If not, see
20 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
22 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
23 * CA 95054 USA or visit www.sun.com if you need additional information or
29 * Copyright 2008 Sun Microsystems, Inc. All rights reserved
30 * Use is subject to license terms.
33 * This file is part of Lustre, http://www.lustre.org/
34 * Lustre is a trademark of Sun Microsystems, Inc.
37 #define DEBUG_SUBSYSTEM S_LNET
39 #include <libcfs/libcfs.h>
/* Lookaside-backed "slab" caches: cfs_page_t_slab hands out cfs_page_t
 * descriptors, cfs_page_p_slab hands out the page-sized data buffers
 * they point at.  Created at module init time (not visible in this
 * chunk); both must be non-NULL before any page allocation below runs. */
42 cfs_mem_cache_t *cfs_page_t_slab = NULL;
43 cfs_mem_cache_t *cfs_page_p_slab = NULL;
/*
 * virt_to_page
 *   Wrap an existing virtual address in a freshly allocated cfs_page_t
 *   descriptor.  The address is rounded down to a PAGE_SIZE boundary,
 *   the refcount starts at 1, and PG_virt is set so cfs_free_page()
 *   knows the data buffer is NOT owned by cfs_page_p_slab and must not
 *   be returned to it.
 *   NOTE(review): this chunk is missing intermediate lines (declaration
 *   of pg, NULL-check after the slab alloc, and the return) — confirm
 *   against the full file.
 */
45 cfs_page_t * virt_to_page(void * addr)
48 pg = cfs_mem_cache_alloc(cfs_page_t_slab, 0);
55 memset(pg, 0, sizeof(cfs_page_t));
/* mask off the low bits to get the page-aligned base of addr */
56 pg->addr = (void *)((__u64)addr & (~((__u64)PAGE_SIZE-1)));
58 cfs_atomic_set(&pg->count, 1);
/* PG_virt marks "address borrowed, not slab-owned" for the free path */
59 cfs_set_bit(PG_virt, &(pg->flags));
66 * To allocate the cfs_page_t and also 1 page of memory
69 * flags: the allocation options
72 * pointer to the cfs_page_t structure in success or
73 * NULL in failure case
79 cfs_atomic_t libcfs_total_pages;
/*
 * cfs_alloc_page
 *   Allocate a cfs_page_t descriptor plus one CFS_PAGE_SIZE data buffer,
 *   both from their respective slab caches.  Refcount starts at 1;
 *   CFS_ALLOC_ZERO in flags zero-fills the data buffer.
 *   NOTE(review): lines between the visible statements (pg declaration,
 *   NULL checks, return paths) are missing from this chunk.
 */
81 cfs_page_t * cfs_alloc_page(int flags)
84 pg = cfs_mem_cache_alloc(cfs_page_t_slab, 0);
91 memset(pg, 0, sizeof(cfs_page_t));
92 pg->addr = cfs_mem_cache_alloc(cfs_page_p_slab, 0);
93 cfs_atomic_set(&pg->count, 1);
96 if (cfs_is_flag_set(flags, CFS_ALLOC_ZERO)) {
97 memset(pg->addr, 0, CFS_PAGE_SIZE);
/* success: account the new page */
99 cfs_atomic_inc(&libcfs_total_pages);
/* failure path (presumably pg->addr == NULL): drop into the debugger
 * and release the descriptor — TODO confirm against the full file */
101 cfs_enter_debugger();
102 cfs_mem_cache_free(cfs_page_t_slab, pg);
111 * To free the cfs_page_t including the page
114 * pg: pointer to the cfs_page_t structure
/*
 * cfs_free_page
 *   Release a page obtained from cfs_alloc_page() or virt_to_page().
 *   The data buffer is returned to cfs_page_p_slab ONLY when PG_virt is
 *   clear (i.e. the buffer was slab-allocated, not borrowed); the
 *   descriptor itself always goes back to cfs_page_t_slab.
 *   NOTE(review): the else-arm bracing around cfs_enter_debugger() is
 *   not visible in this chunk — confirm against the full file.
 */
122 void cfs_free_page(cfs_page_t *pg)
125 ASSERT(pg->addr != NULL);
/* refcount must already be down to (at most) its initial value */
126 ASSERT(cfs_atomic_read(&pg->count) <= 1);
128 if (!cfs_test_bit(PG_virt, &pg->flags)) {
129 cfs_mem_cache_free(cfs_page_p_slab, pg->addr);
130 cfs_atomic_dec(&libcfs_total_pages);
132 cfs_enter_debugger();
/* descriptor always returns to its slab */
134 cfs_mem_cache_free(cfs_page_t_slab, pg);
/*
 * cfs_alloc_pages
 *   Allocate a descriptor plus a contiguous block of 2^order pages; the
 *   data block comes from cfs_alloc() (system pool) rather than the
 *   single-page slab.  CFS_ALLOC_ZERO zero-fills the whole block, and
 *   libcfs_total_pages is bumped by the full page count (1 << order).
 *   NOTE(review): pg declaration, NULL checks and returns are missing
 *   from this chunk.
 */
137 cfs_page_t *cfs_alloc_pages(unsigned int flags, unsigned int order)
140 pg = cfs_mem_cache_alloc(cfs_page_t_slab, 0);
/* presumably the descriptor-alloc failure branch — TODO confirm */
143 cfs_enter_debugger();
147 memset(pg, 0, sizeof(cfs_page_t));
148 pg->addr = cfs_alloc((CFS_PAGE_SIZE << order),0);
149 cfs_atomic_set(&pg->count, 1);
152 if (cfs_is_flag_set(flags, CFS_ALLOC_ZERO)) {
153 memset(pg->addr, 0, CFS_PAGE_SIZE << order);
/* account all 2^order pages, matching the cfs_atomic_sub on free */
155 cfs_atomic_add(1 << order, &libcfs_total_pages);
/* data-block allocation failed: debugger, then release descriptor */
157 cfs_enter_debugger();
158 cfs_mem_cache_free(cfs_page_t_slab, pg);
/*
 * __cfs_free_pages
 *   Release a multi-page block from cfs_alloc_pages(): subtract the full
 *   page count from the accounting counter and return the descriptor to
 *   its slab.  NOTE(review): the line freeing pg->addr itself (the
 *   cfs_alloc()'d data block) is not visible in this chunk — confirm it
 *   exists in the full file, otherwise this leaks the data block.
 */
165 void __cfs_free_pages(cfs_page_t *pg, unsigned int order)
168 ASSERT(pg->addr != NULL);
169 ASSERT(cfs_atomic_read(&pg->count) <= 1);
/* must mirror the cfs_atomic_add(1 << order, ...) in cfs_alloc_pages */
171 cfs_atomic_sub(1 << order, &libcfs_total_pages);
173 cfs_mem_cache_free(cfs_page_t_slab, pg);
/*
 * cfs_mem_is_in_cache
 *   Stub: membership testing of an address against a cache is not
 *   implemented on this platform — it only logs a diagnostic via
 *   KdPrint.  Implementing it would require tracking every allocation
 *   handed out by the cache, as the message says.
 */
176 int cfs_mem_is_in_cache(const void *addr, const cfs_mem_cache_t *kmem)
178 KdPrint(("cfs_mem_is_in_cache: not implemented. (should maintain a"
179 "chain to keep all allocations traced.)\n"));
185 * To allocate memory from system pool
188 * nr_bytes: length in bytes of the requested buffer
189 * flags: allocation flags indication
192 * NULL: if there is not enough memory space in the system
193 * the address of the allocated memory in success.
196 * This operation can be treated as atomic.
/*
 * cfs_alloc
 *   General-purpose allocator backed by the NT NonPagedPool (tag 'Lufs');
 *   CFS_ALLOC_ZERO in flags zero-fills the buffer, all other flag bits
 *   are ignored.  NOTE(review): return type line, ptr declaration and
 *   return statements are missing from this chunk.
 */
200 cfs_alloc(size_t nr_bytes, u_int32_t flags)
204 /* Ignore the flags: always allocate from NonPagedPool */
205 ptr = ExAllocatePoolWithTag(NonPagedPool, nr_bytes, 'Lufs');
206 if (ptr != NULL && (flags & CFS_ALLOC_ZERO)) {
207 memset(ptr, 0, nr_bytes);
/* allocation-failure path: break into the debugger for diagnosis */
211 cfs_enter_debugger();
219 * To free the specified memory to system pool
222 * addr: pointer to the buffer to be freed
228 * This operation can be treated as atomic.
239 * To allocate large block of memory from system pool
242 * nr_bytes: length in bytes of the requested buffer
245 * NULL: if there is not enough memory space in the system
246 * the address of the allocated memory in success.
/*
 * cfs_alloc_large
 *   Large allocations are not special-cased on this platform: simply
 *   delegate to cfs_alloc() with no flags (no zeroing).
 */
253 cfs_alloc_large(size_t nr_bytes)
255 return cfs_alloc(nr_bytes, 0);
260 * To free the specified memory to system pool
263 * addr: pointer to the buffer to be freed
273 cfs_free_large(void *addr)
280 * cfs_mem_cache_create
281 * To create a SLAB cache
284 * name: name string of the SLAB cache to be created
285 * size: size in bytes of SLAB entry buffer
286 * offset: offset in the page
287 * flags: SLAB creation flags
290 * The pointer to the cfs_memory_cache structure in success.
291 * NULL pointer in failure case.
294 * 1, offset won't be used here.
295 * 2, it could be better to introduce a lock to protect the access of the
296 * SLAB structure on SMP if there's not outside lock protection.
297 * 3, parameters C/D are removed.
/*
 * cfs_mem_cache_create
 *   Create a "SLAB" cache implemented as an NT non-paged lookaside list.
 *   Rejects names of 20 characters or more (the name buffer is fixed
 *   size, so the strlen() check below makes the strcpy() safe), then
 *   allocates and zeroes the cache structure and initializes its
 *   lookaside list.  NOTE(review): parameter list, NULL checks, failure
 *   branches and the return are missing from this chunk.
 */
301 cfs_mem_cache_create(
308 cfs_mem_cache_t * kmc = NULL;
310 /* The name of the SLAB must not exceed 20 chars (fixed name buffer) */
312 if (name && strlen(name) >= 20) {
316 /* Allocate and initialize the SLAB structure */
318 kmc = cfs_alloc (sizeof(cfs_mem_cache_t), 0);
324 memset(kmc, 0, sizeof(cfs_mem_cache_t));
/* safe: length was bounds-checked above */
328 strcpy(&kmc->name[0], name);
331 /* Initialize the corresponding LookAside list */
333 ExInitializeNPagedLookasideList(
348 * cfs_mem_cache_destroy
349 * To destroy the unused SLAB cache
352 * kmc: the SLAB cache to be destroyed.
355 * 0: in success case.
356 * 1: in failure case.
/*
 * cfs_mem_cache_destroy
 *   Tear down a cache created by cfs_mem_cache_create() by deleting its
 *   backing lookaside list.  NOTE(review): the lines freeing the kmc
 *   structure itself and the return value are not visible in this chunk.
 */
362 int cfs_mem_cache_destroy (cfs_mem_cache_t * kmc)
366 ExDeleteNPagedLookasideList(&(kmc->npll));
374 * cfs_mem_cache_alloc
375 * To allocate an object (LookAside entry) from the SLAB
378 * kmc: the SLAB cache to be allocated from.
379 * flags: flags for allocation options
382 * object buffer address: in success case.
383 * NULL: in failure case.
/*
 * cfs_mem_cache_alloc
 *   Allocate one object from the cache's lookaside list.  The flags
 *   argument is not used by the visible code.  Returns the buffer
 *   address, or NULL on failure (return statement not visible here).
 */
389 void *cfs_mem_cache_alloc(cfs_mem_cache_t * kmc, int flags)
393 buf = ExAllocateFromNPagedLookasideList(&(kmc->npll));
400 * To free an object (LookAside entry) to the SLAB cache
403 * kmc: the SLAB cache to be freed to.
404 * buf: the pointer to the object to be freed.
/*
 * cfs_mem_cache_free
 *   Return an object previously obtained from cfs_mem_cache_alloc() to
 *   the cache's lookaside list.  buf must belong to this cache.
 */
413 void cfs_mem_cache_free(cfs_mem_cache_t * kmc, void * buf)
415 ExFreeToNPagedLookasideList(&(kmc->npll), buf);
/* Shrinker registry: a spinlock-guarded list of registered shrinkers and
 * the periodic timer that drives their callbacks (see the routines below). */
418 cfs_spinlock_t shrinker_guard = {0};
419 CFS_LIST_HEAD(shrinker_hdr);
420 cfs_timer_t shrinker_timer = {0};
/*
 * cfs_set_shrinker
 *   Register a memory-shrinker callback: allocate a zeroed descriptor
 *   and link it onto shrinker_hdr under shrinker_guard.
 *   NOTE(review): the NULL-check on s, the assignments of seeks/cb into
 *   the descriptor, and the return are missing from this chunk.  Also
 *   cfs_alloc may call cfs_enter_debugger on failure — verify the lock
 *   is only taken when s != NULL in the full file.
 */
422 struct cfs_shrinker * cfs_set_shrinker(int seeks, shrink_callback cb)
424 struct cfs_shrinker * s = (struct cfs_shrinker *)
425 cfs_alloc(sizeof(struct cfs_shrinker), CFS_ALLOC_ZERO);
430 cfs_spin_lock(&shrinker_guard);
431 cfs_list_add(&s->list, &shrinker_hdr);
432 cfs_spin_unlock(&shrinker_guard);
/*
 * cfs_remove_shrinker
 *   Unregister a shrinker: walk the list under shrinker_guard and unlink
 *   the matching entry.  NOTE(review): the comparison inside the loop,
 *   the loop's closing brace, and the context around the second
 *   cfs_list_del(&s->list) are missing from this chunk — two del calls
 *   on the same node would be a double-unlink, so presumably they sit on
 *   mutually exclusive paths; confirm against the full file.  Freeing of
 *   s is also not visible here.
 */
438 void cfs_remove_shrinker(struct cfs_shrinker *s)
440 struct cfs_shrinker *tmp;
441 cfs_spin_lock(&shrinker_guard);
443 cfs_list_for_each_entry_typed(tmp, &shrinker_hdr,
444 struct cfs_shrinker, list) {
446 cfs_list_del(&tmp->list);
451 cfs_list_del(&s->list);
453 cfs_spin_unlock(&shrinker_guard);
457 /* Timer expiry routine: invoke every registered shrinker callback,
 * then re-arm the timer for the next period (300 ticks). */
458 void shrinker_timer_proc(ulong_ptr_t arg)
460 struct cfs_shrinker *s;
461 cfs_spin_lock(&shrinker_guard);
463 cfs_list_for_each_entry_typed(s, &shrinker_hdr,
464 struct cfs_shrinker, list) {
/* NOTE(review): callbacks run with shrinker_guard held — they must not
 * re-enter the shrinker API or sleep; confirm against callback docs */
465 s->cb(s->nr, __GFP_FS);
467 cfs_spin_unlock(&shrinker_guard);
/* self re-arming makes this a periodic timer */
468 cfs_timer_arm(&shrinker_timer, 300);
/*
 * start_shrinker_timer
 *   Initialize the periodic shrinker timer and arm it for its first
 *   firing (300 ticks; the comment below says 5 minutes, i.e. 300s —
 *   the units of cfs_timer_arm are not visible here, TODO confirm).
 *   Return statement not visible in this chunk.
 */
471 int start_shrinker_timer()
473 /* initialize shrinker timer */
474 cfs_timer_init(&shrinker_timer, shrinker_timer_proc, NULL);
476 /* start the timer to trigger in 5 minutes */
477 cfs_timer_arm(&shrinker_timer, 300);
/*
 * stop_shrinker_timer
 *   Disarm and tear down the periodic shrinker timer; counterpart of
 *   start_shrinker_timer().  Safe only once no further re-arm can race
 *   with the disarm — presumably guaranteed by shutdown ordering;
 *   confirm against callers.
 */
482 void stop_shrinker_timer()
484 /* cancel the timer */
485 cfs_timer_disarm(&shrinker_timer);
486 cfs_timer_done(&shrinker_timer);