/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2012, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 */
#define DEBUG_SUBSYSTEM S_LNET

#include <libcfs/libcfs.h>
/* slab for struct page headers */
struct kmem_cache *cfs_page_t_slab;
/* slab for the page-sized data buffers owned by allocated pages */
struct kmem_cache *cfs_page_p_slab;
/*
 * virt_to_page
 *   To wrap an existing buffer into a struct page. The PG_virt flag
 *   marks the page as not owning its buffer, so __free_page() will
 *   release only the wrapper.
 */
struct page *virt_to_page(void *addr)
{
        struct page *pg;

        pg = kmem_cache_alloc(cfs_page_t_slab, 0);
        if (NULL == pg)
                return NULL;

        memset(pg, 0, sizeof(struct page));
        /* round the address down to its page boundary */
        pg->addr = (void *)((__u64)addr & (~((__u64)PAGE_SIZE - 1)));
        cfs_atomic_set(&pg->count, 1);
        set_bit(PG_virt, &(pg->flags));

        return pg;
}
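/*
 * Usage sketch, kept under "#if 0": wrapping a caller-owned,
 * page-aligned buffer. The function name and 'buf' are hypothetical;
 * because PG_virt is set, __free_page() releases only the wrapper,
 * never the caller's buffer.
 */
#if 0
static void example_wrap_buffer(void *buf)
{
        struct page *pg;

        pg = virt_to_page(buf);
        if (pg == NULL)
                return;

        /* pg->addr is 'buf' rounded down to its page boundary */
        __free_page(pg);
}
#endif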
/*
 * alloc_page
 *   To allocate the struct page and also 1 page of memory
 *
 * Arguments:
 *   flags:  the allocation options
 *
 * Return Value:
 *   pointer to the struct page structure on success or
 *   NULL in the failure case
 */
/* number of pages currently allocated via alloc_page() */
cfs_atomic_t libcfs_total_pages;
struct page *alloc_page(int flags)
{
        struct page *pg;

        pg = kmem_cache_alloc(cfs_page_t_slab, 0);
        if (NULL == pg)
                return NULL;

        memset(pg, 0, sizeof(struct page));
        pg->addr = kmem_cache_alloc(cfs_page_p_slab, 0);
        cfs_atomic_set(&pg->count, 1);

        if (pg->addr == NULL) {
                cfs_enter_debugger();
                kmem_cache_free(cfs_page_t_slab, pg);
                return NULL;
        }

        if (cfs_is_flag_set(flags, __GFP_ZERO))
                memset(pg->addr, 0, PAGE_CACHE_SIZE);
        cfs_atomic_inc(&libcfs_total_pages);

        return pg;
}
/*
 * __free_page
 *   To free the struct page including the page memory it owns
 *
 * Arguments:
 *   pg:  pointer to the struct page structure
 */
void __free_page(struct page *pg)
{
        ASSERT(pg->addr != NULL);
        ASSERT(cfs_atomic_read(&pg->count) <= 1);

        /* pages wrapped by virt_to_page() do not own their buffer */
        if (!test_bit(PG_virt, &pg->flags)) {
                kmem_cache_free(cfs_page_p_slab, pg->addr);
                cfs_atomic_dec(&libcfs_total_pages);
        } else {
                cfs_enter_debugger();
        }

        kmem_cache_free(cfs_page_t_slab, pg);
}
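/*
 * Usage sketch, kept under "#if 0": how a caller might pair
 * alloc_page() with __free_page(). The function name is hypothetical.
 */
#if 0
static int example_page_usage(void)
{
        struct page *pg;

        pg = alloc_page(__GFP_ZERO);    /* buffer comes back zero-filled */
        if (pg == NULL)
                return -ENOMEM;

        /* pg->addr points at the page-sized data buffer */
        memset(pg->addr, 0x5a, PAGE_CACHE_SIZE);

        __free_page(pg);                /* returns buffer and wrapper */
        return 0;
}
#endif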
int kmem_is_in_cache(const void *addr, const struct kmem_cache *kmem)
{
        KdPrint(("kmem_is_in_cache: not implemented. (should maintain a "
                 "chain to keep all allocations traced.)\n"));
        return 1;
}
/*
 * kmalloc
 *   To allocate memory from system pool
 *
 * Arguments:
 *   nr_bytes:  length in bytes of the requested buffer
 *   flags:     allocation flags
 *
 * Return Value:
 *   NULL: if there's not enough memory in the system
 *   the address of the allocated memory on success
 *
 * Notes:
 *   This operation can be treated as atomic.
 */
void *
kmalloc(size_t nr_bytes, u_int32_t flags)
{
        void *ptr;

        /* Ignore the flags: always allocate from NonPagedPool */
        ptr = ExAllocatePoolWithTag(NonPagedPool, nr_bytes, 'Lufs');
        if (ptr != NULL && (flags & __GFP_ZERO))
                memset(ptr, 0, nr_bytes);
        if (ptr == NULL)
                cfs_enter_debugger();

        return ptr;
}
/*
 * kfree
 *   To free the specified memory to system pool
 *
 * Arguments:
 *   addr:  pointer to the buffer to be freed
 *
 * Notes:
 *   This operation can be treated as atomic.
 */
void kfree(void *addr)
{
        ExFreePool(addr);
}
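/*
 * Usage sketch, kept under "#if 0": a typical kmalloc()/kfree() pair.
 * The structure and function names are hypothetical.
 */
#if 0
struct example_obj {
        int     eo_id;
        char    eo_name[16];
};

static void example_obj_usage(void)
{
        struct example_obj *obj;

        obj = kmalloc(sizeof(*obj), __GFP_ZERO);
        if (obj == NULL)
                return;

        obj->eo_id = 1;         /* eo_name is already zeroed */
        kfree(obj);
}
#endif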
/*
 * vmalloc
 *   To allocate a large block of memory from system pool
 *
 * Arguments:
 *   nr_bytes:  length in bytes of the requested buffer
 *
 * Return Value:
 *   NULL: if there's not enough memory in the system
 *   the address of the allocated memory on success
 */
void *
vmalloc(size_t nr_bytes)
{
        /* no separate vmap arena on this port: fall back to kmalloc */
        return kmalloc(nr_bytes, 0);
}
/*
 * vfree
 *   To free the specified memory to system pool
 *
 * Arguments:
 *   addr:  pointer to the buffer to be freed
 */
void vfree(void *addr)
{
        kfree(addr);
}
/*
 * kmem_cache_create
 *   To create a SLAB cache
 *
 * Arguments:
 *   name:    name string of the SLAB cache to be created
 *   size:    size in bytes of the SLAB entry buffer
 *   offset:  offset in the page
 *   flags:   SLAB creation flags
 *
 * Return Value:
 *   The pointer to the struct kmem_cache structure on success.
 *   NULL pointer in the failure case.
 *
 * Notes:
 *   1. offset won't be used here.
 *   2. it could be better to introduce a lock to protect the access of the
 *      SLAB structure on SMP if there's no outside lock protection.
 *   3. parameters C/D (the constructor/destructor callbacks) are removed.
 */
struct kmem_cache *kmem_cache_create(const char *name, size_t size,
                                     size_t offset, unsigned long flags,
                                     void *ctor)
{
        struct kmem_cache *kmc = NULL;

        /* The name of the SLAB must not exceed 20 chars */
        if (name && strlen(name) >= 20)
                goto errorout;

        /* Allocate and initialize the SLAB structure */
        kmc = kmalloc(sizeof(struct kmem_cache), 0);
        if (NULL == kmc)
                goto errorout;

        memset(kmc, 0, sizeof(struct kmem_cache));

        if (name)
                strcpy(&kmc->name[0], name);

        /* Initialize the corresponding LookAside list */
        ExInitializeNPagedLookasideList(
                &(kmc->npll),   /* list to initialize */
                NULL,           /* allocate routine: use the default */
                NULL,           /* free routine: use the default */
                0,              /* flags */
                size,           /* size of each entry */
                'pnmk',         /* pool tag */
                0);             /* depth: use the system default */

errorout:
        return kmc;
}
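/*
 * Usage sketch, kept under "#if 0": creating a cache for fixed-size
 * objects. 'example_slab' and the 64-byte entry size are hypothetical;
 * note the name must stay under 20 characters.
 */
#if 0
static struct kmem_cache *example_slab;

static int example_slab_setup(void)
{
        example_slab = kmem_cache_create("example_slab", 64, 0, 0, NULL);
        return (example_slab == NULL) ? -ENOMEM : 0;
}
#endif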
/*
 * kmem_cache_destroy
 *   To destroy the unused SLAB cache
 *
 * Arguments:
 *   kmc:  the SLAB cache to be destroyed.
 *
 * Return Value:
 *   0: in success case.
 *   1: in failure case.
 */
int kmem_cache_destroy(struct kmem_cache *kmc)
{
        ASSERT(kmc != NULL);

        ExDeleteNPagedLookasideList(&(kmc->npll));

        kfree(kmc);

        return 0;
}
/*
 * kmem_cache_alloc
 *   To allocate an object (LookAside entry) from the SLAB
 *
 * Arguments:
 *   kmc:    the SLAB cache to be allocated from.
 *   flags:  flags for allocation options
 *
 * Return Value:
 *   object buffer address: in success case.
 *   NULL: in failure case.
 */
void *kmem_cache_alloc(struct kmem_cache *kmc, int flags)
{
        void *buf;

        buf = ExAllocateFromNPagedLookasideList(&(kmc->npll));

        return buf;
}
/*
 * kmem_cache_free
 *   To free an object (LookAside entry) to the SLAB cache
 *
 * Arguments:
 *   kmc:  the SLAB cache to be freed to.
 *   buf:  the pointer to the object to be freed.
 */
void kmem_cache_free(struct kmem_cache *kmc, void *buf)
{
        ExFreeToNPagedLookasideList(&(kmc->npll), buf);
}
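/*
 * Usage sketch, kept under "#if 0": allocating and releasing one entry
 * from a cache built by kmem_cache_create(). 'example_slab' is the
 * hypothetical cache from the sketch above.
 */
#if 0
static void example_slab_usage(void)
{
        void *obj;

        obj = kmem_cache_alloc(example_slab, 0);
        if (obj == NULL)
                return;

        memset(obj, 0, 64);     /* entry size chosen at create time */
        kmem_cache_free(example_slab, obj);
}
#endif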
/* protects the global list of registered shrinkers */
spinlock_t shrinker_guard = {0};
/* list of registered shrinkers, walked by the shrinker timer */
CFS_LIST_HEAD(shrinker_hdr);
/* periodic timer that drives the registered shrinkers */
cfs_timer_t shrinker_timer = {0};
struct shrinker *set_shrinker(int seeks, shrink_callback cb)
{
        struct shrinker *s = (struct shrinker *)
                kmalloc(sizeof(struct shrinker), __GFP_ZERO);

        if (s) {
                s->cb = cb;
                s->seeks = seeks;

                spin_lock(&shrinker_guard);
                cfs_list_add(&s->list, &shrinker_hdr);
                spin_unlock(&shrinker_guard);
        }

        return s;
}
void remove_shrinker(struct shrinker *s)
{
        struct shrinker *tmp;

        spin_lock(&shrinker_guard);
        cfs_list_for_each_entry_typed(tmp, &shrinker_hdr,
                                      struct shrinker, list) {
                if (tmp == s) {
                        cfs_list_del(&tmp->list);
                        break;
                }
        }
        spin_unlock(&shrinker_guard);

        kfree(s);
}
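/*
 * Usage sketch, kept under "#if 0": registering a shrinker that the
 * timer below will call periodically. 'example_shrink' is hypothetical
 * and its signature is assumed to match shrink_callback; a real
 * callback would release up to 'nr' cached objects and report what
 * remains.
 */
#if 0
static int example_shrink(int nr, unsigned int gfp_mask)
{
        /* drop up to 'nr' cached objects here */
        return 0;               /* nothing left to shrink */
}

static void example_shrinker_usage(void)
{
        struct shrinker *s;

        s = set_shrinker(2, example_shrink);    /* 2: 'seeks' cost hint */
        if (s != NULL)
                remove_shrinker(s);             /* also frees 's' */
}
#endif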
/* timer callback: invoke every registered shrinker, then re-arm */
void shrinker_timer_proc(ulong_ptr_t arg)
{
        struct shrinker *s;

        spin_lock(&shrinker_guard);
        cfs_list_for_each_entry_typed(s, &shrinker_hdr,
                                      struct shrinker, list) {
                s->cb(s->nr, __GFP_FS);
        }
        spin_unlock(&shrinker_guard);

        /* re-arm the timer for the next 5-minute period */
        cfs_timer_arm(&shrinker_timer, 300);
}
int start_shrinker_timer()
{
        /* initialize the shrinker timer */
        cfs_timer_init(&shrinker_timer, shrinker_timer_proc, NULL);

        /* start the timer to trigger in 5 minutes */
        cfs_timer_arm(&shrinker_timer, 300);

        return 0;
}
void stop_shrinker_timer()
{
        /* cancel the timer */
        cfs_timer_disarm(&shrinker_timer);
        cfs_timer_done(&shrinker_timer);
}
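/*
 * Lifecycle sketch, kept under "#if 0": the expected pairing of the
 * timer helpers, e.g. from this port's module init/fini paths. The
 * function names are hypothetical.
 */
#if 0
static int example_module_init(void)
{
        return start_shrinker_timer();  /* first trigger in 5 minutes */
}

static void example_module_fini(void)
{
        stop_shrinker_timer();          /* disarm before unload */
}
#endif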