/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2012, Whamcloud, Inc.
 *
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lustre/obdclass/lu_object.c
 *
 * These are the only exported functions; they provide generic
 * infrastructure for managing object devices.
 *
 * Author: Nikita Danilov <nikita.danilov@sun.com>
 */

#define DEBUG_SUBSYSTEM S_CLASS
# define EXPORT_SYMTAB

#include <libcfs/libcfs.h>

# include <linux/module.h>

#include <libcfs/libcfs_hash.h>
#include <obd_class.h>
#include <obd_support.h>
#include <lustre_disk.h>
#include <lustre_fid.h>
#include <lu_object.h>
#include <libcfs/list.h>
/* lu_time_global_{init,fini}() */

static void lu_object_free(const struct lu_env *env, struct lu_object *o);

/**
 * Decrease reference counter on object. If the last reference is released,
 * return the object to the cache, unless lu_object_is_dying(o) holds. In
 * the latter case, free the object immediately.
 */
void lu_object_put(const struct lu_env *env, struct lu_object *o)
        struct lu_site_bkt_data *bkt;
        struct lu_object_header *top;
        struct lu_object *orig;

        site = o->lo_dev->ld_site;
        cfs_hash_bd_get(site->ls_obj_hash, &top->loh_fid, &bd);
        bkt = cfs_hash_bd_extra_get(site->ls_obj_hash, &bd);

        if (!cfs_hash_bd_dec_and_lock(site->ls_obj_hash, &bd, &top->loh_ref)) {
                if (lu_object_is_dying(top)) {
                        /*
                         * somebody may be waiting for this, currently only
                         * used for cl_object, see cl_object_put_last().
                         */
                        cfs_waitq_broadcast(&bkt->lsb_marche_funebre);

        LASSERT(bkt->lsb_busy > 0);
        /*
         * When the last reference is released, iterate over object
         * layers, and notify them that the object is no longer busy.
         */
        cfs_list_for_each_entry_reverse(o, &top->loh_layers, lo_linkage) {
                if (o->lo_ops->loo_object_release != NULL)
                        o->lo_ops->loo_object_release(env, o);

        if (!lu_object_is_dying(top)) {
                LASSERT(cfs_list_empty(&top->loh_lru));
                cfs_list_add_tail(&top->loh_lru, &bkt->lsb_lru);
                cfs_hash_bd_unlock(site->ls_obj_hash, &bd, 1);

        /*
         * If the object is dying (will not be cached), remove it
         * from the hash table and LRU.
         *
         * This is done with the hash table and LRU lists locked. As the only
         * way to acquire the first reference to a previously unreferenced
         * object is through hash-table lookup (lu_object_find()) or LRU
         * scanning (lu_site_purge()), both of which are done under the
         * hash-table and LRU lock, no race with a concurrent object lookup
         * is possible and we can safely destroy the object below.
         */
        cfs_hash_bd_del_locked(site->ls_obj_hash, &bd, &top->loh_hash);
        cfs_hash_bd_unlock(site->ls_obj_hash, &bd, 1);
        /*
         * Object was already removed from hash and LRU above; it can
         * safely be freed here.
         */
        lu_object_free(env, orig);
EXPORT_SYMBOL(lu_object_put);
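
/**
 * A minimal usage sketch (illustrative only, not part of the original
 * code): lu_object_put() releases the reference obtained from
 * lu_object_find(). The function name sample_lookup() and the NULL conf
 * argument are hypothetical.
 *
 * \code
 * static int sample_lookup(const struct lu_env *env, struct lu_device *dev,
 *                          const struct lu_fid *fid)
 * {
 *         struct lu_object *o;
 *
 *         o = lu_object_find(env, dev, fid, NULL);
 *         if (IS_ERR(o))
 *                 return PTR_ERR(o);
 *         lu_object_put(env, o);
 *         return 0;
 * }
 * \endcode
 */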

/**
 * Allocate new object.
 *
 * This follows the object creation protocol, described in the comment
 * within the struct lu_device_operations definition.
 */
static struct lu_object *lu_object_alloc(const struct lu_env *env,
                                         struct lu_device *dev,
                                         const struct lu_fid *f,
                                         const struct lu_object_conf *conf)
        struct lu_object *scan;
        struct lu_object *top;

        /*
         * Create top-level object slice. This will also create
         * lu_object_header.
         */
        top = dev->ld_ops->ldo_object_alloc(env, NULL, dev);
                RETURN(ERR_PTR(-ENOMEM));
        /*
         * This is the only place where object fid is assigned. It's constant
         * after this point.
         */
        LASSERT(fid_is_igif(f) || fid_ver(f) == 0);
        top->lo_header->loh_fid = *f;
        layers = &top->lo_header->loh_layers;
        /*
         * Call ->loo_object_init() repeatedly, until no more new
         * object slices are created.
         */
        cfs_list_for_each_entry(scan, layers, lo_linkage) {
                if (scan->lo_flags & LU_OBJECT_ALLOCATED)
                scan->lo_header = top->lo_header;
                result = scan->lo_ops->loo_object_init(env, scan, conf);
                        lu_object_free(env, top);
                        RETURN(ERR_PTR(result));
                scan->lo_flags |= LU_OBJECT_ALLOCATED;

        cfs_list_for_each_entry_reverse(scan, layers, lo_linkage) {
                if (scan->lo_ops->loo_object_start != NULL) {
                        result = scan->lo_ops->loo_object_start(env, scan);
                                lu_object_free(env, top);
                                RETURN(ERR_PTR(result));

        lprocfs_counter_incr(dev->ld_site->ls_stats, LU_SS_CREATED);

static void lu_object_free(const struct lu_env *env, struct lu_object *o)
        struct lu_site_bkt_data *bkt;
        struct lu_site *site;
        struct lu_object *scan;

        site = o->lo_dev->ld_site;
        layers = &o->lo_header->loh_layers;
        bkt = lu_site_bkt_from_fid(site, &o->lo_header->loh_fid);
        /*
         * First call ->loo_object_delete() method to release all resources.
         */
        cfs_list_for_each_entry_reverse(scan, layers, lo_linkage) {
                if (scan->lo_ops->loo_object_delete != NULL)
                        scan->lo_ops->loo_object_delete(env, scan);

        /*
         * Then, splice object layers into a stand-alone list, and call
         * ->loo_object_free() on all layers to free memory. Splicing is
         * necessary, because lu_object_header is freed together with the
         * top-level slice.
         */
        CFS_INIT_LIST_HEAD(&splice);
        cfs_list_splice_init(layers, &splice);
        while (!cfs_list_empty(&splice)) {
                /*
                 * Free layers in bottom-to-top order, so that object header
                 * lives as long as possible and ->loo_object_free() methods
                 * can look at its contents.
                 */
                o = container_of0(splice.prev, struct lu_object, lo_linkage);
                cfs_list_del_init(&o->lo_linkage);
                LASSERT(o->lo_ops->loo_object_free != NULL);
                o->lo_ops->loo_object_free(env, o);

        if (cfs_waitq_active(&bkt->lsb_marche_funebre))
                cfs_waitq_broadcast(&bkt->lsb_marche_funebre);

/**
 * Free \a nr objects from the cold end of the site LRU list.
 */
int lu_site_purge(const struct lu_env *env, struct lu_site *s, int nr)
        struct lu_object_header *h;
        struct lu_object_header *temp;
        struct lu_site_bkt_data *bkt;

        CFS_INIT_LIST_HEAD(&dispose);
        /*
         * Under LRU list lock, scan LRU list and move unreferenced objects
         * to the dispose list, removing them from LRU and hash table.
         */
        start = s->ls_purge_start;
        bnr = (nr == ~0) ? -1 : nr / CFS_HASH_NBKT(s->ls_obj_hash) + 1;
        cfs_hash_for_each_bucket(s->ls_obj_hash, &bd, i) {
                cfs_hash_bd_lock(s->ls_obj_hash, &bd, 1);
                bkt = cfs_hash_bd_extra_get(s->ls_obj_hash, &bd);

                cfs_list_for_each_entry_safe(h, temp, &bkt->lsb_lru, loh_lru) {
                        LASSERT(cfs_atomic_read(&h->loh_ref) == 0);

                        cfs_hash_bd_get(s->ls_obj_hash, &h->loh_fid, &bd2);
                        LASSERT(bd.bd_bucket == bd2.bd_bucket);

                        cfs_hash_bd_del_locked(s->ls_obj_hash,
                        cfs_list_move(&h->loh_lru, &dispose);
                        if (nr != ~0 && --nr == 0)
                        if (count > 0 && --count == 0)

                cfs_hash_bd_unlock(s->ls_obj_hash, &bd, 1);
        /*
         * Free everything on the dispose list. This is safe against
         * races for the reasons described in lu_object_put().
         */
        while (!cfs_list_empty(&dispose)) {
                h = container_of0(dispose.next,
                                  struct lu_object_header, loh_lru);
                cfs_list_del_init(&h->loh_lru);
                lu_object_free(env, lu_object_top(h));
                lprocfs_counter_incr(s->ls_stats, LU_SS_LRU_PURGED);

        if (nr != 0 && did_sth && start != 0) {
                start = 0; /* restart from the first bucket */

        /* race on s->ls_purge_start, but nobody cares */
        s->ls_purge_start = i % CFS_HASH_NBKT(s->ls_obj_hash);
EXPORT_SYMBOL(lu_site_purge);
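
/**
 * Usage note (a sketch, not from the original code): passing ~0 as \a nr
 * asks lu_site_purge() to scan every bucket and free all unreferenced
 * cached objects, which is how lu_stack_fini() and the cache shrinker
 * below use it; a finite \a nr frees at most roughly that many cold
 * objects.
 *
 * \code
 * lu_site_purge(env, site, ~0);
 * lu_site_purge(env, site, 128);
 * \endcode
 */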

/*
 * Code below has to jump through certain hoops to output object description
 * into a libcfs_debug_msg-based log. The problem is that lu_object_print()
 * composes the object description from strings that are parts of _lines_ of
 * output (i.e., strings that are not terminated by a newline). This doesn't
 * fit very well into the libcfs_debug_msg() interface, which assumes that
 * each message supplied to it is a self-contained output line.
 *
 * To work around this, strings are collected in a temporary buffer
 * (implemented as a value of the lu_cdebug_key key), until a terminating
 * newline character is detected.
 *
 * XXX overflow is not handled correctly.
 */
struct lu_cdebug_data {
        char lck_area[LU_CDEBUG_LINE];

/* context key constructor/destructor: lu_global_key_init, lu_global_key_fini */
LU_KEY_INIT_FINI(lu_global, struct lu_cdebug_data);

/**
 * Key, holding temporary buffer. This key is registered very early by
 * lu_global_init().
 */
struct lu_context_key lu_global_key = {
        .lct_tags = LCT_MD_THREAD|LCT_DT_THREAD|LCT_CL_THREAD,
        .lct_init = lu_global_key_init,
        .lct_fini = lu_global_key_fini

/**
 * Printer function emitting messages through libcfs_debug_msg().
 */
int lu_cdebug_printer(const struct lu_env *env,
                      void *cookie, const char *format, ...)
        struct libcfs_debug_msg_data *msgdata = cookie;
        struct lu_cdebug_data *key;

        va_start(args, format);

        key = lu_context_key_get(&env->le_ctx, &lu_global_key);
        LASSERT(key != NULL);

        used = strlen(key->lck_area);
        complete = format[strlen(format) - 1] == '\n';
        /*
         * Append new chunk to the buffer.
         */
        vsnprintf(key->lck_area + used,
                  ARRAY_SIZE(key->lck_area) - used, format, args);
        if (cfs_cdebug_show(msgdata->msg_mask, msgdata->msg_subsys))
                libcfs_debug_msg(msgdata, "%s", key->lck_area);
        key->lck_area[0] = 0;
EXPORT_SYMBOL(lu_cdebug_printer);
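
/**
 * A hedged example of driving this printer: because lu_cdebug_printer()
 * buffers chunks until a terminating newline, a multi-call object
 * description built by lu_object_print() comes out as whole log lines.
 * This mirrors the call made in lu_stack_fini() below.
 *
 * \code
 * LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, D_INFO, NULL);
 *
 * lu_object_print(env, &msgdata, lu_cdebug_printer, o);
 * \endcode
 */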

/**
 * Print object header.
 */
void lu_object_header_print(const struct lu_env *env, void *cookie,
                            lu_printer_t printer,
                            const struct lu_object_header *hdr)
        (*printer)(env, cookie, "header@%p[%#lx, %d, "DFID"%s%s%s]",
                   hdr, hdr->loh_flags, cfs_atomic_read(&hdr->loh_ref),
                   cfs_hlist_unhashed(&hdr->loh_hash) ? "" : " hash",
                   cfs_list_empty((cfs_list_t *)&hdr->loh_lru) ?
                   hdr->loh_attr & LOHA_EXISTS ? " exist" : "");
EXPORT_SYMBOL(lu_object_header_print);

/**
 * Print human-readable representation of the \a o to the \a printer.
 */
void lu_object_print(const struct lu_env *env, void *cookie,
                     lu_printer_t printer, const struct lu_object *o)
        static const char ruler[] = "........................................";
        struct lu_object_header *top;

        lu_object_header_print(env, cookie, printer, top);
        (*printer)(env, cookie, "{ \n");
        cfs_list_for_each_entry(o, &top->loh_layers, lo_linkage) {
                depth = o->lo_depth + 4;

                /*
                 * print `.' \a depth times followed by type name and address
                 */
                (*printer)(env, cookie, "%*.*s%s@%p", depth, depth, ruler,
                           o->lo_dev->ld_type->ldt_name, o);
                if (o->lo_ops->loo_object_print != NULL)
                        o->lo_ops->loo_object_print(env, cookie, printer, o);
                (*printer)(env, cookie, "\n");
        (*printer)(env, cookie, "} header@%p\n", top);
EXPORT_SYMBOL(lu_object_print);

/**
 * Check object consistency.
 */
int lu_object_invariant(const struct lu_object *o)
        struct lu_object_header *top;

        cfs_list_for_each_entry(o, &top->loh_layers, lo_linkage) {
                if (o->lo_ops->loo_object_invariant != NULL &&
                    !o->lo_ops->loo_object_invariant(o))
EXPORT_SYMBOL(lu_object_invariant);

static struct lu_object *htable_lookup(struct lu_site *s,
                                       const struct lu_fid *f,
                                       cfs_waitlink_t *waiter,
        struct lu_site_bkt_data *bkt;
        struct lu_object_header *h;
        cfs_hlist_node_t *hnode;
        __u64 ver = cfs_hash_bd_version_get(bd);

        bkt = cfs_hash_bd_extra_get(s->ls_obj_hash, bd);
        /* cfs_hash_bd_lookup_intent is a somewhat "internal" function
         * of cfs_hash, but we don't want a refcount on the object right now */
        hnode = cfs_hash_bd_lookup_locked(s->ls_obj_hash, bd, (void *)f);
                lprocfs_counter_incr(s->ls_stats, LU_SS_CACHE_MISS);

        h = container_of0(hnode, struct lu_object_header, loh_hash);
        if (likely(!lu_object_is_dying(h))) {
                lprocfs_counter_incr(s->ls_stats, LU_SS_CACHE_HIT);
                cfs_list_del_init(&h->loh_lru);
                return lu_object_top(h);

        /*
         * Lookup found an object being destroyed; this object cannot be
         * returned (to ensure that references to dying objects are
         * eventually drained), and moreover, lookup has to wait until
         * the object is freed.
         */
        cfs_atomic_dec(&h->loh_ref);

        cfs_waitlink_init(waiter);
        cfs_waitq_add(&bkt->lsb_marche_funebre, waiter);
        cfs_set_current_state(CFS_TASK_UNINT);
        lprocfs_counter_incr(s->ls_stats, LU_SS_CACHE_DEATH_RACE);
        return ERR_PTR(-EAGAIN);

/**
 * Search cache for an object with the fid \a f. If such an object is found,
 * return it. Otherwise, create a new object, insert it into the cache and
 * return it. In any case, an additional reference is acquired on the
 * returned object.
 */
struct lu_object *lu_object_find(const struct lu_env *env,
                                 struct lu_device *dev, const struct lu_fid *f,
                                 const struct lu_object_conf *conf)
        return lu_object_find_at(env, dev->ld_site->ls_top_dev, f, conf);
EXPORT_SYMBOL(lu_object_find);

static struct lu_object *lu_object_new(const struct lu_env *env,
                                       struct lu_device *dev,
                                       const struct lu_fid *f,
                                       const struct lu_object_conf *conf)
        struct lu_site_bkt_data *bkt;

        o = lu_object_alloc(env, dev, f, conf);
        if (unlikely(IS_ERR(o)))

        hs = dev->ld_site->ls_obj_hash;
        cfs_hash_bd_get_and_lock(hs, (void *)f, &bd, 1);
        bkt = cfs_hash_bd_extra_get(hs, &bd);
        cfs_hash_bd_add_locked(hs, &bd, &o->lo_header->loh_hash);
        cfs_hash_bd_unlock(hs, &bd, 1);

/**
 * Core logic of lu_object_find*() functions.
 */
static struct lu_object *lu_object_find_try(const struct lu_env *env,
                                            struct lu_device *dev,
                                            const struct lu_fid *f,
                                            const struct lu_object_conf *conf,
                                            cfs_waitlink_t *waiter)
        struct lu_object *shadow;

        /*
         * This uses the standard index maintenance protocol:
         *
         *     - search index under lock, and return object if found;
         *     - otherwise, unlock index, allocate new object;
         *     - lock index and search again;
         *     - if nothing is found (usual case), insert newly created
         *       object into index;
         *     - otherwise (race: other thread inserted object), free
         *       object just allocated.
         *
         * For the "LOC_F_NEW" case, we are sure the object is newly
         * created. It is unnecessary to perform lookup-alloc-lookup-insert;
         * instead, just allocate and insert it directly.
         *
         * If a dying object is found during index search, add @waiter to
         * the site wait-queue and return ERR_PTR(-EAGAIN).
         */
        if (conf != NULL && conf->loc_flags & LOC_F_NEW)
                return lu_object_new(env, dev, f, conf);

        cfs_hash_bd_get_and_lock(hs, (void *)f, &bd, 1);
        o = htable_lookup(s, &bd, f, waiter, &version);
        cfs_hash_bd_unlock(hs, &bd, 1);

        /*
         * Allocate new object. This may result in rather complicated
         * operations, including fld queries, inode loading, etc.
         */
        o = lu_object_alloc(env, dev, f, conf);
        if (unlikely(IS_ERR(o)))

        LASSERT(lu_fid_eq(lu_object_fid(o), f));

        cfs_hash_bd_lock(hs, &bd, 1);

        shadow = htable_lookup(s, &bd, f, waiter, &version);
        if (likely(shadow == NULL)) {
                struct lu_site_bkt_data *bkt;

                bkt = cfs_hash_bd_extra_get(hs, &bd);
                cfs_hash_bd_add_locked(hs, &bd, &o->lo_header->loh_hash);
                cfs_hash_bd_unlock(hs, &bd, 1);

        lprocfs_counter_incr(s->ls_stats, LU_SS_CACHE_RACE);
        cfs_hash_bd_unlock(hs, &bd, 1);
        lu_object_free(env, o);

/**
 * Much like lu_object_find(), but the top level device of the object is
 * specifically \a dev rather than the top level device of the site. This
 * interface allows objects of different "stacking" to be created within
 * the same site.
 */
struct lu_object *lu_object_find_at(const struct lu_env *env,
                                    struct lu_device *dev,
                                    const struct lu_fid *f,
                                    const struct lu_object_conf *conf)
        struct lu_site_bkt_data *bkt;
        struct lu_object *obj;

        obj = lu_object_find_try(env, dev, f, conf, &wait);
        if (obj != ERR_PTR(-EAGAIN))
        /*
         * lu_object_find_try() already added waiter into the
         * wait queue.
         */
        cfs_waitq_wait(&wait, CFS_TASK_UNINT);
        bkt = lu_site_bkt_from_fid(dev->ld_site, (void *)f);
        cfs_waitq_del(&bkt->lsb_marche_funebre, &wait);
EXPORT_SYMBOL(lu_object_find_at);

/**
 * Find object with given fid, and return its slice belonging to given device.
 */
struct lu_object *lu_object_find_slice(const struct lu_env *env,
                                       struct lu_device *dev,
                                       const struct lu_fid *f,
                                       const struct lu_object_conf *conf)
        struct lu_object *top;
        struct lu_object *obj;

        top = lu_object_find(env, dev, f, conf);
        obj = lu_object_locate(top->lo_header, dev->ld_type);
        lu_object_put(env, top);
EXPORT_SYMBOL(lu_object_find_slice);
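
/**
 * Illustrative sketch (the device name my_dev is hypothetical, and the
 * assumption that a NULL result means the slice was absent is ours):
 * looking up the slice of a compound object that belongs to one particular
 * device type, then dropping the reference through any slice.
 *
 * \code
 * struct lu_object *slice;
 *
 * slice = lu_object_find_slice(env, my_dev, fid, NULL);
 * if (slice == NULL || IS_ERR(slice))
 *         return slice == NULL ? -ENOENT : PTR_ERR(slice);
 * lu_object_put(env, slice);
 * \endcode
 */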

/**
 * Global list of all device types.
 */
static CFS_LIST_HEAD(lu_device_types);

int lu_device_type_init(struct lu_device_type *ldt)
        CFS_INIT_LIST_HEAD(&ldt->ldt_linkage);
        result = ldt->ldt_ops->ldto_init(ldt);
                cfs_list_add(&ldt->ldt_linkage, &lu_device_types);
EXPORT_SYMBOL(lu_device_type_init);

void lu_device_type_fini(struct lu_device_type *ldt)
        cfs_list_del_init(&ldt->ldt_linkage);
        ldt->ldt_ops->ldto_fini(ldt);
EXPORT_SYMBOL(lu_device_type_fini);

void lu_types_stop(void)
        struct lu_device_type *ldt;

        cfs_list_for_each_entry(ldt, &lu_device_types, ldt_linkage) {
                if (ldt->ldt_device_nr == 0)
                        ldt->ldt_ops->ldto_stop(ldt);
EXPORT_SYMBOL(lu_types_stop);

/**
 * Global list of all sites on this node.
 */
static CFS_LIST_HEAD(lu_sites);
static CFS_DEFINE_MUTEX(lu_sites_guard);

/**
 * Global environment used by site shrinker.
 */
static struct lu_env lu_shrink_env;

struct lu_site_print_arg {
        struct lu_env *lsp_env;
        lu_printer_t lsp_printer;

lu_site_obj_print(cfs_hash_t *hs, cfs_hash_bd_t *bd,
                  cfs_hlist_node_t *hnode, void *data)
        struct lu_site_print_arg *arg = (struct lu_site_print_arg *)data;
        struct lu_object_header *h;

        h = cfs_hlist_entry(hnode, struct lu_object_header, loh_hash);
        if (!cfs_list_empty(&h->loh_layers)) {
                const struct lu_object *o;

                o = lu_object_top(h);
                lu_object_print(arg->lsp_env, arg->lsp_cookie,
                                arg->lsp_printer, o);
                lu_object_header_print(arg->lsp_env, arg->lsp_cookie,
                                       arg->lsp_printer, h);

/**
 * Print all objects in \a s.
 */
void lu_site_print(const struct lu_env *env, struct lu_site *s, void *cookie,
                   lu_printer_t printer)
        struct lu_site_print_arg arg = {
                .lsp_env     = (struct lu_env *)env,
                .lsp_cookie  = cookie,
                .lsp_printer = printer,

        cfs_hash_for_each(s->ls_obj_hash, lu_site_obj_print, &arg);
EXPORT_SYMBOL(lu_site_print);

        LU_CACHE_PERCENT_MAX     = 50,
        LU_CACHE_PERCENT_DEFAULT = 20

static unsigned int lu_cache_percent = LU_CACHE_PERCENT_DEFAULT;
CFS_MODULE_PARM(lu_cache_percent, "i", int, 0644,
                "Percentage of memory to be used as lu_object cache");

/**
 * Return desired hash table order.
 */
static int lu_htable_order(void)
        unsigned long cache_size;

        /*
         * Calculate hash table size, assuming that we want reasonable
         * performance when 20% of total memory is occupied by the cache of
         * lu_objects.
         *
         * Size of lu_object is (arbitrary) taken as 1K (together with inode).
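         *
         * A worked example, as an illustrative assumption only (4KB pages):
         * 1GiB of RAM is 262144 pages, so with the default lu_cache_percent
         * of 20 the computation below gives
         * cache_size = 262144 / 100 * 20 * (4096 / 1024) = 209680 objects,
         * and the sizing loop settles on bits = 18 (2^18 = 262144 >= 209680).
         */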
        cache_size = cfs_num_physpages;

#if BITS_PER_LONG == 32
        /* limit hashtable size for lowmem systems to low RAM */
        if (cache_size > 1 << (30 - CFS_PAGE_SHIFT))
                cache_size = 1 << (30 - CFS_PAGE_SHIFT) * 3 / 4;

        /* clear off unreasonable cache setting. */
        if (lu_cache_percent == 0 || lu_cache_percent > LU_CACHE_PERCENT_MAX) {
                CWARN("obdclass: invalid lu_cache_percent: %u, it must be in"
                      " the range of (0, %u]. Will use default value: %u.\n",
                      lu_cache_percent, LU_CACHE_PERCENT_MAX,
                      LU_CACHE_PERCENT_DEFAULT);

                lu_cache_percent = LU_CACHE_PERCENT_DEFAULT;

        cache_size = cache_size / 100 * lu_cache_percent *
                (CFS_PAGE_SIZE / 1024);

        for (bits = 1; (1 << bits) < cache_size; ++bits) {

static unsigned lu_obj_hop_hash(cfs_hash_t *hs,
                                const void *key, unsigned mask)
        struct lu_fid *fid = (struct lu_fid *)key;

        hash = fid_flatten32(fid);
        hash += (hash >> 4) + (hash << 12); /* mixing oid and seq */
        hash = cfs_hash_long(hash, hs->hs_bkt_bits);

        /* give me another random factor */
        hash -= cfs_hash_long((unsigned long)hs, fid_oid(fid) % 11 + 3);

        hash <<= hs->hs_cur_bits - hs->hs_bkt_bits;
        hash |= (fid_seq(fid) + fid_oid(fid)) & (CFS_HASH_NBKT(hs) - 1);

static void *lu_obj_hop_object(cfs_hlist_node_t *hnode)
        return cfs_hlist_entry(hnode, struct lu_object_header, loh_hash);

static void *lu_obj_hop_key(cfs_hlist_node_t *hnode)
        struct lu_object_header *h;

        h = cfs_hlist_entry(hnode, struct lu_object_header, loh_hash);

static int lu_obj_hop_keycmp(const void *key, cfs_hlist_node_t *hnode)
        struct lu_object_header *h;

        h = cfs_hlist_entry(hnode, struct lu_object_header, loh_hash);
        return lu_fid_eq(&h->loh_fid, (struct lu_fid *)key);

static void lu_obj_hop_get(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
        struct lu_object_header *h;

        h = cfs_hlist_entry(hnode, struct lu_object_header, loh_hash);
        if (cfs_atomic_add_return(1, &h->loh_ref) == 1) {
                struct lu_site_bkt_data *bkt;

                cfs_hash_bd_get(hs, &h->loh_fid, &bd);
                bkt = cfs_hash_bd_extra_get(hs, &bd);

static void lu_obj_hop_put_locked(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
        LBUG(); /* we should never call it */

cfs_hash_ops_t lu_site_hash_ops = {
        .hs_hash       = lu_obj_hop_hash,
        .hs_key        = lu_obj_hop_key,
        .hs_keycmp     = lu_obj_hop_keycmp,
        .hs_object     = lu_obj_hop_object,
        .hs_get        = lu_obj_hop_get,
        .hs_put_locked = lu_obj_hop_put_locked,

/**
 * Initialize site \a s, with \a d as the top level device.
 */
#define LU_SITE_BITS_MIN    12
#define LU_SITE_BITS_MAX    24
/**
 * 256 buckets in total; we don't want too many buckets because:
 * - they consume too much memory
 * - they lead to unbalanced LRU lists
 */
#define LU_SITE_BKT_BITS    8

int lu_site_init(struct lu_site *s, struct lu_device *top)
        struct lu_site_bkt_data *bkt;

        memset(s, 0, sizeof *s);
        bits = lu_htable_order();
        snprintf(name, 16, "lu_site_%s", top->ld_type->ldt_name);
        for (bits = min(max(LU_SITE_BITS_MIN, bits), LU_SITE_BITS_MAX);
             bits >= LU_SITE_BITS_MIN; bits--) {
                s->ls_obj_hash = cfs_hash_create(name, bits, bits,
                                                 bits - LU_SITE_BKT_BITS,
                                                 CFS_HASH_SPIN_BKTLOCK |
                                                 CFS_HASH_NO_ITEMREF |
                                                 CFS_HASH_ASSERT_EMPTY);
                if (s->ls_obj_hash != NULL)

        if (s->ls_obj_hash == NULL) {
                CERROR("failed to create lu_site hash with bits: %d\n", bits);

        cfs_hash_for_each_bucket(s->ls_obj_hash, &bd, i) {
                bkt = cfs_hash_bd_extra_get(s->ls_obj_hash, &bd);
                CFS_INIT_LIST_HEAD(&bkt->lsb_lru);
                cfs_waitq_init(&bkt->lsb_marche_funebre);

        s->ls_stats = lprocfs_alloc_stats(LU_SS_LAST_STAT, 0);
        if (s->ls_stats == NULL) {
                cfs_hash_putref(s->ls_obj_hash);
                s->ls_obj_hash = NULL;

        lprocfs_counter_init(s->ls_stats, LU_SS_CREATED,
                             0, "created", "created");
        lprocfs_counter_init(s->ls_stats, LU_SS_CACHE_HIT,
                             0, "cache_hit", "cache_hit");
        lprocfs_counter_init(s->ls_stats, LU_SS_CACHE_MISS,
                             0, "cache_miss", "cache_miss");
        lprocfs_counter_init(s->ls_stats, LU_SS_CACHE_RACE,
                             0, "cache_race", "cache_race");
        lprocfs_counter_init(s->ls_stats, LU_SS_CACHE_DEATH_RACE,
                             0, "cache_death_race", "cache_death_race");
        lprocfs_counter_init(s->ls_stats, LU_SS_LRU_PURGED,
                             0, "lru_purged", "lru_purged");

        CFS_INIT_LIST_HEAD(&s->ls_linkage);
        lu_ref_add(&top->ld_reference, "site-top", s);

        CFS_INIT_LIST_HEAD(&s->ls_ld_linkage);
        cfs_spin_lock_init(&s->ls_ld_lock);

        cfs_spin_lock(&s->ls_ld_lock);
        cfs_list_add(&top->ld_linkage, &s->ls_ld_linkage);
        cfs_spin_unlock(&s->ls_ld_lock);
EXPORT_SYMBOL(lu_site_init);
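
/**
 * A minimal lifecycle sketch (my_site and my_top_dev are hypothetical),
 * assuming the top-level device has already been set up: initialize the
 * site, publish it to the shrinker via lu_site_init_finish(), and tear it
 * down with lu_site_fini().
 *
 * \code
 * int rc;
 *
 * rc = lu_site_init(&my_site, my_top_dev);
 * if (rc == 0) {
 *         rc = lu_site_init_finish(&my_site);
 *         if (rc != 0)
 *                 lu_site_fini(&my_site);
 * }
 * \endcode
 */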

/**
 * Finalize \a s and release its resources.
 */
void lu_site_fini(struct lu_site *s)
        cfs_mutex_lock(&lu_sites_guard);
        cfs_list_del_init(&s->ls_linkage);
        cfs_mutex_unlock(&lu_sites_guard);

        if (s->ls_obj_hash != NULL) {
                cfs_hash_putref(s->ls_obj_hash);
                s->ls_obj_hash = NULL;

        if (s->ls_top_dev != NULL) {
                s->ls_top_dev->ld_site = NULL;
                lu_ref_del(&s->ls_top_dev->ld_reference, "site-top", s);
                lu_device_put(s->ls_top_dev);
                s->ls_top_dev = NULL;

        if (s->ls_stats != NULL)
                lprocfs_free_stats(&s->ls_stats);
EXPORT_SYMBOL(lu_site_fini);

/**
 * Called when initialization of stack for this site is completed.
 */
int lu_site_init_finish(struct lu_site *s)
        cfs_mutex_lock(&lu_sites_guard);
        result = lu_context_refill(&lu_shrink_env.le_ctx);
                cfs_list_add(&s->ls_linkage, &lu_sites);
        cfs_mutex_unlock(&lu_sites_guard);
EXPORT_SYMBOL(lu_site_init_finish);

/**
 * Acquire additional reference on device \a d.
 */
void lu_device_get(struct lu_device *d)
        cfs_atomic_inc(&d->ld_ref);
EXPORT_SYMBOL(lu_device_get);

/**
 * Release reference on device \a d.
 */
void lu_device_put(struct lu_device *d)
        LASSERT(cfs_atomic_read(&d->ld_ref) > 0);
        cfs_atomic_dec(&d->ld_ref);
EXPORT_SYMBOL(lu_device_put);

/**
 * Initialize device \a d of type \a t.
 */
int lu_device_init(struct lu_device *d, struct lu_device_type *t)
        if (t->ldt_device_nr++ == 0 && t->ldt_ops->ldto_start != NULL)
                t->ldt_ops->ldto_start(t);
        memset(d, 0, sizeof *d);
        cfs_atomic_set(&d->ld_ref, 0);
        lu_ref_init(&d->ld_reference);
        CFS_INIT_LIST_HEAD(&d->ld_linkage);
EXPORT_SYMBOL(lu_device_init);

/**
 * Finalize device \a d.
 */
void lu_device_fini(struct lu_device *d)
        struct lu_device_type *t;

        if (d->ld_obd != NULL) {
                d->ld_obd->obd_lu_dev = NULL;

        lu_ref_fini(&d->ld_reference);
        LASSERTF(cfs_atomic_read(&d->ld_ref) == 0,
                 "Refcount is %u\n", cfs_atomic_read(&d->ld_ref));
        LASSERT(t->ldt_device_nr > 0);
        if (--t->ldt_device_nr == 0 && t->ldt_ops->ldto_stop != NULL)
                t->ldt_ops->ldto_stop(t);
EXPORT_SYMBOL(lu_device_fini);

/**
 * Initialize object \a o that is part of compound object \a h and was
 * created by device \a d.
 */
int lu_object_init(struct lu_object *o,
                   struct lu_object_header *h, struct lu_device *d)
        memset(o, 0, sizeof *o);
        o->lo_dev_ref = lu_ref_add(&d->ld_reference, "lu_object", o);
        CFS_INIT_LIST_HEAD(&o->lo_linkage);
EXPORT_SYMBOL(lu_object_init);

/**
 * Finalize object and release its resources.
 */
void lu_object_fini(struct lu_object *o)
        struct lu_device *dev = o->lo_dev;

        LASSERT(cfs_list_empty(&o->lo_linkage));
        lu_ref_del_at(&dev->ld_reference,
                      o->lo_dev_ref, "lu_object", o);
EXPORT_SYMBOL(lu_object_fini);

/**
 * Add object \a o as first layer of compound object \a h.
 *
 * This is typically called by the ->ldo_object_alloc() method of the
 * top-level device.
 */
void lu_object_add_top(struct lu_object_header *h, struct lu_object *o)
        cfs_list_move(&o->lo_linkage, &h->loh_layers);
EXPORT_SYMBOL(lu_object_add_top);

/**
 * Add object \a o as a layer of compound object, going after \a before.
 *
 * This is typically called by the ->ldo_object_alloc() method of \a
 * before->lo_dev.
 */
void lu_object_add(struct lu_object *before, struct lu_object *o)
        cfs_list_move(&o->lo_linkage, &before->lo_linkage);
EXPORT_SYMBOL(lu_object_add);

/**
 * Initialize compound object.
 */
int lu_object_header_init(struct lu_object_header *h)
        memset(h, 0, sizeof *h);
        cfs_atomic_set(&h->loh_ref, 1);
        CFS_INIT_HLIST_NODE(&h->loh_hash);
        CFS_INIT_LIST_HEAD(&h->loh_lru);
        CFS_INIT_LIST_HEAD(&h->loh_layers);
        lu_ref_init(&h->loh_reference);
EXPORT_SYMBOL(lu_object_header_init);

/**
 * Finalize compound object.
 */
void lu_object_header_fini(struct lu_object_header *h)
        LASSERT(cfs_list_empty(&h->loh_layers));
        LASSERT(cfs_list_empty(&h->loh_lru));
        LASSERT(cfs_hlist_unhashed(&h->loh_hash));
        lu_ref_fini(&h->loh_reference);
EXPORT_SYMBOL(lu_object_header_fini);

/**
 * Given a compound object, find its slice corresponding to the device type
 * \a dtype.
 */
struct lu_object *lu_object_locate(struct lu_object_header *h,
                                   const struct lu_device_type *dtype)
        struct lu_object *o;

        cfs_list_for_each_entry(o, &h->loh_layers, lo_linkage) {
                if (o->lo_dev->ld_type == dtype)
EXPORT_SYMBOL(lu_object_locate);

/**
 * Finalize and free devices in the device stack.
 *
 * Finalize device stack by purging object cache, and calling
 * lu_device_type_operations::ldto_device_fini() and
 * lu_device_type_operations::ldto_device_free() on all devices in the stack.
 */
void lu_stack_fini(const struct lu_env *env, struct lu_device *top)
        struct lu_site *site = top->ld_site;
        struct lu_device *scan;
        struct lu_device *next;

        lu_site_purge(env, site, ~0);
        for (scan = top; scan != NULL; scan = next) {
                next = scan->ld_type->ldt_ops->ldto_device_fini(env, scan);
                lu_ref_del(&scan->ld_reference, "lu-stack", &lu_site_init);
                lu_device_put(scan);

        lu_site_purge(env, site, ~0);

        if (!cfs_hash_is_empty(site->ls_obj_hash)) {
                /*
                 * Uh-oh, objects still exist.
                 */
                LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, D_ERROR, NULL);

                lu_site_print(env, site, &msgdata, lu_cdebug_printer);

        for (scan = top; scan != NULL; scan = next) {
                const struct lu_device_type *ldt = scan->ld_type;
                struct obd_type *type;

                next = ldt->ldt_ops->ldto_device_free(env, scan);
                type = ldt->ldt_obd_type;
                class_put_type(type);
EXPORT_SYMBOL(lu_stack_fini);

        /**
         * Maximal number of tld slots.
         */
        LU_CONTEXT_KEY_NR = 32

static struct lu_context_key *lu_keys[LU_CONTEXT_KEY_NR] = { NULL, };

static cfs_spinlock_t lu_keys_guard = CFS_SPIN_LOCK_UNLOCKED;

/**
 * Global counter incremented whenever key is registered, unregistered,
 * revived or quiesced. This is used to avoid unnecessary calls to
 * lu_context_refill(). No locking is provided, as initialization and
 * shutdown are supposed to be externally serialized.
 */
static unsigned key_set_version = 0;

int lu_context_key_register(struct lu_context_key *key)
        LASSERT(key->lct_init != NULL);
        LASSERT(key->lct_fini != NULL);
        LASSERT(key->lct_tags != 0);
        LASSERT(key->lct_owner != NULL);

        cfs_spin_lock(&lu_keys_guard);
        for (i = 0; i < ARRAY_SIZE(lu_keys); ++i) {
                if (lu_keys[i] == NULL) {
                        cfs_atomic_set(&key->lct_used, 1);
                        lu_ref_init(&key->lct_reference);
        cfs_spin_unlock(&lu_keys_guard);
EXPORT_SYMBOL(lu_context_key_register);

static void key_fini(struct lu_context *ctx, int index)
        if (ctx->lc_value != NULL && ctx->lc_value[index] != NULL) {
                struct lu_context_key *key;

                key = lu_keys[index];
                LASSERT(key != NULL);
                LASSERT(key->lct_fini != NULL);
                LASSERT(cfs_atomic_read(&key->lct_used) > 1);

                key->lct_fini(ctx, key, ctx->lc_value[index]);
                lu_ref_del(&key->lct_reference, "ctx", ctx);
                cfs_atomic_dec(&key->lct_used);
                LASSERT(key->lct_owner != NULL);
                if (!(ctx->lc_tags & LCT_NOREF)) {
                        LASSERT(cfs_module_refcount(key->lct_owner) > 0);
                        cfs_module_put(key->lct_owner);
                ctx->lc_value[index] = NULL;

void lu_context_key_degister(struct lu_context_key *key)
        LASSERT(cfs_atomic_read(&key->lct_used) >= 1);
        LINVRNT(0 <= key->lct_index && key->lct_index < ARRAY_SIZE(lu_keys));

        lu_context_key_quiesce(key);

        cfs_spin_lock(&lu_keys_guard);
        key_fini(&lu_shrink_env.le_ctx, key->lct_index);
        if (lu_keys[key->lct_index]) {
                lu_keys[key->lct_index] = NULL;
                lu_ref_fini(&key->lct_reference);
        cfs_spin_unlock(&lu_keys_guard);

        LASSERTF(cfs_atomic_read(&key->lct_used) == 1,
                 "key has instances: %d\n",
                 cfs_atomic_read(&key->lct_used));
EXPORT_SYMBOL(lu_context_key_degister);

/**
 * Register a number of keys. This has to be called after all keys have been
 * initialized by a call to LU_CONTEXT_KEY_INIT().
 */
int lu_context_key_register_many(struct lu_context_key *k, ...)
        struct lu_context_key *key = k;

        result = lu_context_key_register(key);
                key = va_arg(args, struct lu_context_key *);
        } while (key != NULL);

                lu_context_key_degister(k);
                k = va_arg(args, struct lu_context_key *);
EXPORT_SYMBOL(lu_context_key_register_many);
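
/**
 * A hedged registration sketch: LU_CONTEXT_KEY_INIT() (used on
 * lu_global_key in lu_global_init() below) fills in ->lct_owner, and the
 * vararg list must be NULL-terminated. The key name sample_thread_key is
 * hypothetical.
 *
 * \code
 * int rc;
 *
 * LU_CONTEXT_KEY_INIT(&sample_thread_key);
 * rc = lu_context_key_register_many(&sample_thread_key, NULL);
 * \endcode
 *
 * The per-context value is later fetched with
 * lu_context_key_get(&env->le_ctx, &sample_thread_key) from within an
 * entered context.
 */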

/**
 * De-register a number of keys. This is a dual to
 * lu_context_key_register_many().
 */
void lu_context_key_degister_many(struct lu_context_key *k, ...)
                lu_context_key_degister(k);
                k = va_arg(args, struct lu_context_key *);
        } while (k != NULL);
EXPORT_SYMBOL(lu_context_key_degister_many);

/**
 * Revive a number of keys.
 */
void lu_context_key_revive_many(struct lu_context_key *k, ...)
                lu_context_key_revive(k);
                k = va_arg(args, struct lu_context_key *);
        } while (k != NULL);
EXPORT_SYMBOL(lu_context_key_revive_many);

/**
 * Quiesce a number of keys.
 */
void lu_context_key_quiesce_many(struct lu_context_key *k, ...)
                lu_context_key_quiesce(k);
                k = va_arg(args, struct lu_context_key *);
        } while (k != NULL);
EXPORT_SYMBOL(lu_context_key_quiesce_many);

/**
 * Return value associated with key \a key in context \a ctx.
 */
void *lu_context_key_get(const struct lu_context *ctx,
                         const struct lu_context_key *key)
        LINVRNT(ctx->lc_state == LCS_ENTERED);
        LINVRNT(0 <= key->lct_index && key->lct_index < ARRAY_SIZE(lu_keys));
        LASSERT(lu_keys[key->lct_index] == key);
        return ctx->lc_value[key->lct_index];
EXPORT_SYMBOL(lu_context_key_get);

/**
 * List of remembered contexts. XXX document me.
 */
static CFS_LIST_HEAD(lu_context_remembered);

/**
 * Destroy \a key in all remembered contexts. This is used to destroy key
 * values in "shared" contexts (like service threads), when a module owning
 * the key is about to be unloaded.
 */
void lu_context_key_quiesce(struct lu_context_key *key)
        struct lu_context *ctx;
        extern unsigned cl_env_cache_purge(unsigned nr);

        if (!(key->lct_tags & LCT_QUIESCENT)) {
                /*
                 * XXX layering violation.
                 */
                cl_env_cache_purge(~0);
                key->lct_tags |= LCT_QUIESCENT;
                /*
                 * XXX memory barrier has to go here.
                 */
                cfs_spin_lock(&lu_keys_guard);
                cfs_list_for_each_entry(ctx, &lu_context_remembered,
                        key_fini(ctx, key->lct_index);
                cfs_spin_unlock(&lu_keys_guard);
EXPORT_SYMBOL(lu_context_key_quiesce);

void lu_context_key_revive(struct lu_context_key *key)
        key->lct_tags &= ~LCT_QUIESCENT;
EXPORT_SYMBOL(lu_context_key_revive);

static void keys_fini(struct lu_context *ctx)
        cfs_spin_lock(&lu_keys_guard);
        if (ctx->lc_value != NULL) {
                for (i = 0; i < ARRAY_SIZE(lu_keys); ++i)
                OBD_FREE(ctx->lc_value,
                         ARRAY_SIZE(lu_keys) * sizeof ctx->lc_value[0]);
                ctx->lc_value = NULL;
        cfs_spin_unlock(&lu_keys_guard);

static int keys_fill(struct lu_context *ctx)
        for (i = 0; i < ARRAY_SIZE(lu_keys); ++i) {
                struct lu_context_key *key;

                if (ctx->lc_value[i] == NULL && key != NULL &&
                    (key->lct_tags & ctx->lc_tags) &&
                    /*
                     * Don't create values for a LCT_QUIESCENT key, as this
                     * will pin the module owning the key.
                     */
                    !(key->lct_tags & LCT_QUIESCENT)) {

                        LINVRNT(key->lct_init != NULL);
                        LINVRNT(key->lct_index == i);

                        value = key->lct_init(ctx, key);
                        if (unlikely(IS_ERR(value)))
                                return PTR_ERR(value);

                        LASSERT(key->lct_owner != NULL);
                        if (!(ctx->lc_tags & LCT_NOREF))
                                cfs_try_module_get(key->lct_owner);
                        lu_ref_add_atomic(&key->lct_reference, "ctx", ctx);
                        cfs_atomic_inc(&key->lct_used);
                        /*
                         * This is the only place in the code where an
                         * element of the ctx->lc_value[] array is set to a
                         * non-NULL value.
                         */
                        ctx->lc_value[i] = value;
                        if (key->lct_exit != NULL)
                                ctx->lc_tags |= LCT_HAS_EXIT;
        ctx->lc_version = key_set_version;

static int keys_init(struct lu_context *ctx)
        OBD_ALLOC(ctx->lc_value, ARRAY_SIZE(lu_keys) * sizeof ctx->lc_value[0]);
        if (likely(ctx->lc_value != NULL))
                result = keys_fill(ctx);

/**
 * Initialize context data-structure. Create values for all keys.
 */
int lu_context_init(struct lu_context *ctx, __u32 tags)
        memset(ctx, 0, sizeof *ctx);
        ctx->lc_state = LCS_INITIALIZED;
        ctx->lc_tags = tags;
        if (tags & LCT_REMEMBER) {
                cfs_spin_lock(&lu_keys_guard);
                cfs_list_add(&ctx->lc_remember, &lu_context_remembered);
                cfs_spin_unlock(&lu_keys_guard);
                CFS_INIT_LIST_HEAD(&ctx->lc_remember);
        return keys_init(ctx);
EXPORT_SYMBOL(lu_context_init);

/**
 * Finalize context data-structure. Destroy key values.
 */
void lu_context_fini(struct lu_context *ctx)
        LINVRNT(ctx->lc_state == LCS_INITIALIZED || ctx->lc_state == LCS_LEFT);
        ctx->lc_state = LCS_FINALIZED;

        cfs_spin_lock(&lu_keys_guard);
        cfs_list_del_init(&ctx->lc_remember);
        cfs_spin_unlock(&lu_keys_guard);
EXPORT_SYMBOL(lu_context_fini);

/**
 * Called before entering context.
 */
void lu_context_enter(struct lu_context *ctx)
        LINVRNT(ctx->lc_state == LCS_INITIALIZED || ctx->lc_state == LCS_LEFT);
        ctx->lc_state = LCS_ENTERED;
EXPORT_SYMBOL(lu_context_enter);

/**
 * Called after exiting from \a ctx.
 */
void lu_context_exit(struct lu_context *ctx)
        LINVRNT(ctx->lc_state == LCS_ENTERED);
        ctx->lc_state = LCS_LEFT;
        if (ctx->lc_tags & LCT_HAS_EXIT && ctx->lc_value != NULL) {
                for (i = 0; i < ARRAY_SIZE(lu_keys); ++i) {
                        if (ctx->lc_value[i] != NULL) {
                                struct lu_context_key *key;

                                LASSERT(key != NULL);
                                if (key->lct_exit != NULL)
                                        key, ctx->lc_value[i]);
EXPORT_SYMBOL(lu_context_exit);
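
/**
 * The lu_context state machine enforced by the LINVRNT() checks above,
 * as a usage sketch:
 *
 * \code
 * struct lu_context ctx;
 * int rc;
 *
 * rc = lu_context_init(&ctx, LCT_MD_THREAD);
 * if (rc == 0) {
 *         lu_context_enter(&ctx);
 *         lu_context_exit(&ctx);
 *         lu_context_fini(&ctx);
 * }
 * \endcode
 *
 * Key values may only be accessed with lu_context_key_get() between
 * enter and exit.
 */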

/**
 * Allocate for context all missing keys that were registered after the
 * context was created.
 */
int lu_context_refill(struct lu_context *ctx)
        LINVRNT(ctx->lc_value != NULL);
        return ctx->lc_version == key_set_version ? 0 : keys_fill(ctx);
EXPORT_SYMBOL(lu_context_refill);

/**
 * lu_ctx_tags/lu_ses_tags will be updated if new types of obd are added.
 * Currently, this is only used on the client side, specifically for the
 * echo device client. For other stacks (like ptlrpc threads), contexts
 * are predefined when the lu_device type is registered, during module
 * probe.
 */
__u32 lu_context_tags_default = 0;
__u32 lu_session_tags_default = 0;

void lu_context_tags_update(__u32 tags)
        cfs_spin_lock(&lu_keys_guard);
        lu_context_tags_default |= tags;
        cfs_spin_unlock(&lu_keys_guard);
EXPORT_SYMBOL(lu_context_tags_update);

void lu_context_tags_clear(__u32 tags)
        cfs_spin_lock(&lu_keys_guard);
        lu_context_tags_default &= ~tags;
        cfs_spin_unlock(&lu_keys_guard);
EXPORT_SYMBOL(lu_context_tags_clear);

void lu_session_tags_update(__u32 tags)
        cfs_spin_lock(&lu_keys_guard);
        lu_session_tags_default |= tags;
        cfs_spin_unlock(&lu_keys_guard);
EXPORT_SYMBOL(lu_session_tags_update);

void lu_session_tags_clear(__u32 tags)
        cfs_spin_lock(&lu_keys_guard);
        lu_session_tags_default &= ~tags;
        cfs_spin_unlock(&lu_keys_guard);
EXPORT_SYMBOL(lu_session_tags_clear);

int lu_env_init(struct lu_env *env, __u32 tags)
        result = lu_context_init(&env->le_ctx, tags);
        if (likely(result == 0))
                lu_context_enter(&env->le_ctx);
EXPORT_SYMBOL(lu_env_init);

void lu_env_fini(struct lu_env *env)
        lu_context_exit(&env->le_ctx);
        lu_context_fini(&env->le_ctx);
EXPORT_SYMBOL(lu_env_fini);
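
/**
 * A minimal lu_env round trip (sketch): lu_env_init() initializes and
 * enters \a env->le_ctx, and lu_env_fini() exits and finalizes it.
 *
 * \code
 * struct lu_env env;
 * int rc;
 *
 * rc = lu_env_init(&env, LCT_DT_THREAD);
 * if (rc == 0)
 *         lu_env_fini(&env);
 * \endcode
 */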

int lu_env_refill(struct lu_env *env)
        result = lu_context_refill(&env->le_ctx);
        if (result == 0 && env->le_ses != NULL)
                result = lu_context_refill(env->le_ses);
EXPORT_SYMBOL(lu_env_refill);

/**
 * Currently, this API is only used by the echo client, because the echo
 * client and the normal Lustre client share the same cl_env cache. The
 * echo client therefore needs to refresh the env context after it gets
 * one from the cache, especially when a normal client and an echo client
 * co-exist on the same node.
 */
int lu_env_refill_by_tags(struct lu_env *env, __u32 ctags,
        if ((env->le_ctx.lc_tags & ctags) != ctags) {
                env->le_ctx.lc_version = 0;
                env->le_ctx.lc_tags |= ctags;

        if (env->le_ses && (env->le_ses->lc_tags & stags) != stags) {
                env->le_ses->lc_version = 0;
                env->le_ses->lc_tags |= stags;

        result = lu_env_refill(env);
EXPORT_SYMBOL(lu_env_refill_by_tags);

static struct cfs_shrinker *lu_site_shrinker = NULL;

typedef struct lu_site_stats {
        unsigned lss_populated;
        unsigned lss_max_search;

static void lu_site_stats_get(cfs_hash_t *hs,
                              lu_site_stats_t *stats, int populated)
        cfs_hash_for_each_bucket(hs, &bd, i) {
                struct lu_site_bkt_data *bkt = cfs_hash_bd_extra_get(hs, &bd);
                cfs_hlist_head_t *hhead;

                cfs_hash_bd_lock(hs, &bd, 1);
                stats->lss_busy += bkt->lsb_busy;
                stats->lss_total += cfs_hash_bd_count_get(&bd);
                stats->lss_max_search = max((int)stats->lss_max_search,
                                            cfs_hash_bd_depmax_get(&bd));
                cfs_hash_bd_unlock(hs, &bd, 1);

                cfs_hash_bd_for_each_hlist(hs, &bd, hhead) {
                        if (!cfs_hlist_empty(hhead))
                                stats->lss_populated++;
                cfs_hash_bd_unlock(hs, &bd, 1);

static int lu_cache_shrink(SHRINKER_ARGS(sc, nr_to_scan, gfp_mask))
        lu_site_stats_t stats;
        struct lu_site *tmp;
        int remain = shrink_param(sc, nr_to_scan);
        CFS_LIST_HEAD(splice);

        if (!(shrink_param(sc, gfp_mask) & __GFP_FS))
        CDEBUG(D_INODE, "Shrink %d objects\n", remain);

        cfs_mutex_lock(&lu_sites_guard);
        cfs_list_for_each_entry_safe(s, tmp, &lu_sites, ls_linkage) {
                if (shrink_param(sc, nr_to_scan) != 0) {
                        remain = lu_site_purge(&lu_shrink_env, s, remain);
                        /*
                         * Move just shrunk site to the tail of site list to
                         * ensure shrinking fairness.
                         */
                        cfs_list_move_tail(&s->ls_linkage, &splice);
                memset(&stats, 0, sizeof(stats));
                lu_site_stats_get(s->ls_obj_hash, &stats, 0);
                cached += stats.lss_total - stats.lss_busy;
                if (shrink_param(sc, nr_to_scan) && remain <= 0)
        cfs_list_splice(&splice, lu_sites.prev);
        cfs_mutex_unlock(&lu_sites_guard);

        cached = (cached / 100) * sysctl_vfs_cache_pressure;
        if (shrink_param(sc, nr_to_scan) == 0)
                CDEBUG(D_INODE, "%d objects cached\n", cached);

/**
 * Environment to be used in debugger; contains all tags.
 */
struct lu_env lu_debugging_env;

/**
 * Debugging printer function using printk().
 */
int lu_printk_printer(const struct lu_env *env,
                      void *unused, const char *format, ...)
        va_start(args, format);
        vprintk(format, args);

void lu_debugging_setup(void)
        lu_env_init(&lu_debugging_env, ~0);

void lu_context_keys_dump(void)
        for (i = 0; i < ARRAY_SIZE(lu_keys); ++i) {
                struct lu_context_key *key;

                CERROR("[%d]: %p %x (%p,%p,%p) %d %d \"%s\"@%p\n",
                       i, key, key->lct_tags,
                       key->lct_init, key->lct_fini, key->lct_exit,
                       key->lct_index, cfs_atomic_read(&key->lct_used),
                       key->lct_owner ? key->lct_owner->name : "",
                lu_ref_print(&key->lct_reference);
EXPORT_SYMBOL(lu_context_keys_dump);
#else /* !__KERNEL__ */
static int lu_cache_shrink(int nr, unsigned int gfp_mask)
#endif /* __KERNEL__ */

int cl_global_init(void);
void cl_global_fini(void);
int lu_ref_global_init(void);
void lu_ref_global_fini(void);

int dt_global_init(void);
void dt_global_fini(void);

int llo_global_init(void);
void llo_global_fini(void);

/**
 * Initialization of global lu_* data.
 */
int lu_global_init(void)
        CDEBUG(D_INFO, "Lustre LU module (%p).\n", &lu_keys);

        result = lu_ref_global_init();

        LU_CONTEXT_KEY_INIT(&lu_global_key);
        result = lu_context_key_register(&lu_global_key);
        /*
         * At this level, we don't know what tags are needed, so allocate
         * them conservatively. This should not be too bad, because this
         * environment is global.
         */
        cfs_mutex_lock(&lu_sites_guard);
        result = lu_env_init(&lu_shrink_env, LCT_SHRINKER);
        cfs_mutex_unlock(&lu_sites_guard);

        /*
         * seeks estimation: 3 seeks to read a record from oi, one to read
         * inode, one for ea. Unfortunately setting this high value results
         * in lu_object/inode cache consuming all the memory.
         */
        lu_site_shrinker = cfs_set_shrinker(CFS_DEFAULT_SEEKS, lu_cache_shrink);
        if (lu_site_shrinker == NULL)

        result = lu_time_global_init();
        result = dt_global_init();
        result = llo_global_init();
        result = cl_global_init();

/**
 * Dual to lu_global_init().
 */
void lu_global_fini(void)
        lu_time_global_fini();
        if (lu_site_shrinker != NULL) {
                cfs_remove_shrinker(lu_site_shrinker);
                lu_site_shrinker = NULL;

        lu_context_key_degister(&lu_global_key);

        /*
         * Tear shrinker environment down _after_ de-registering
         * lu_global_key, because the latter has a value in the former.
         */
        cfs_mutex_lock(&lu_sites_guard);
        lu_env_fini(&lu_shrink_env);
        cfs_mutex_unlock(&lu_sites_guard);

        lu_ref_global_fini();

struct lu_buf LU_BUF_NULL = {
EXPORT_SYMBOL(LU_BUF_NULL);

static __u32 ls_stats_read(struct lprocfs_stats *stats, int idx)
        struct lprocfs_counter ret;

        lprocfs_stats_collect(stats, idx, &ret);
        return (__u32)ret.lc_count;

/**
 * Output site statistical counters into a buffer. Suitable for
 * lprocfs_rd_*()-style functions.
 */
int lu_site_stats_print(const struct lu_site *s, char *page, int count)
        lu_site_stats_t stats;

        memset(&stats, 0, sizeof(stats));
        lu_site_stats_get(s->ls_obj_hash, &stats, 1);

        return snprintf(page, count, "%d/%d %d/%d %d %d %d %d %d %d %d\n",
                        stats.lss_populated,
                        CFS_HASH_NHLIST(s->ls_obj_hash),
                        stats.lss_max_search,
                        ls_stats_read(s->ls_stats, LU_SS_CREATED),
                        ls_stats_read(s->ls_stats, LU_SS_CACHE_HIT),
                        ls_stats_read(s->ls_stats, LU_SS_CACHE_MISS),
                        ls_stats_read(s->ls_stats, LU_SS_CACHE_RACE),
                        ls_stats_read(s->ls_stats, LU_SS_CACHE_DEATH_RACE),
                        ls_stats_read(s->ls_stats, LU_SS_LRU_PURGED));
EXPORT_SYMBOL(lu_site_stats_print);

const char *lu_time_names[LU_TIME_NR] = {
        [LU_TIME_FIND_LOOKUP] = "find_lookup",
        [LU_TIME_FIND_ALLOC]  = "find_alloc",
        [LU_TIME_FIND_INSERT] = "find_insert"
EXPORT_SYMBOL(lu_time_names);

/**
 * Helper function to initialize a number of kmem slab caches at once.
 */
int lu_kmem_init(struct lu_kmem_descr *caches)
        struct lu_kmem_descr *iter = caches;

        for (result = 0; iter->ckd_cache != NULL; ++iter) {
                *iter->ckd_cache = cfs_mem_cache_create(iter->ckd_name,
                if (*iter->ckd_cache == NULL) {
                        /* free all previously allocated caches */
                        lu_kmem_fini(caches);
EXPORT_SYMBOL(lu_kmem_init);

/**
 * Helper function to finalize a number of kmem slab caches at once. Dual to
 * lu_kmem_init().
 */
void lu_kmem_fini(struct lu_kmem_descr *caches)
        for (; caches->ckd_cache != NULL; ++caches) {
                if (*caches->ckd_cache != NULL) {
                        rc = cfs_mem_cache_destroy(*caches->ckd_cache);
                        LASSERTF(rc == 0, "couldn't destroy %s slab\n",
                        *caches->ckd_cache = NULL;
EXPORT_SYMBOL(lu_kmem_fini);
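
/**
 * A hedged sketch of the expected caller pattern for the two helpers
 * above: a NULL-terminated array of lu_kmem_descr entries, passed to
 * lu_kmem_init() at module load and lu_kmem_fini() at unload. The cache
 * name, the element size and the wrapper function are hypothetical.
 *
 * \code
 * static cfs_mem_cache_t *sample_kmem;
 *
 * static struct lu_kmem_descr sample_caches[] = {
 *         {
 *                 .ckd_cache = &sample_kmem,
 *                 .ckd_name  = "sample_kmem",
 *                 .ckd_size  = sizeof(struct lu_object_header)
 *         },
 *         {
 *                 .ckd_cache = NULL
 *         }
 * };
 *
 * static int sample_module_init(void)
 * {
 *         return lu_kmem_init(sample_caches);
 * }
 * \endcode
 */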