/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2012, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lustre/obdclass/lu_object.c
 *
 * These are the only exported functions, they provide some generic
 * infrastructure for managing object devices
 *
 * Author: Nikita Danilov <nikita.danilov@sun.com>
 */
#define DEBUG_SUBSYSTEM S_CLASS

#include <libcfs/libcfs.h>

#ifdef __KERNEL__
# include <linux/module.h>
#endif

#include <libcfs/libcfs_hash.h>
#include <obd_class.h>
#include <obd_support.h>
#include <lustre_disk.h>
#include <lustre_fid.h>
#include <lu_object.h>
#include <libcfs/list.h>

static void lu_object_free(const struct lu_env *env, struct lu_object *o);
/**
 * Decrease reference counter on object. If last reference is freed, return
 * object to the cache, unless lu_object_is_dying(o) holds. In the latter
 * case, free object immediately.
 */
void lu_object_put(const struct lu_env *env, struct lu_object *o)
{
        struct lu_site_bkt_data *bkt;
        struct lu_object_header *top;
        struct lu_site          *site;
        struct lu_object        *orig;
        cfs_hash_bd_t            bd;
        const struct lu_fid     *fid;

        top  = o->lo_header;
        site = o->lo_dev->ld_site;
        orig = o;

        /*
         * Till we have fids-on-OST fully implemented, anonymous objects
         * are possible in OSP. Such an object isn't listed in the site,
         * so we should not remove it from the site.
         */
        fid = lu_object_fid(o);
        if (fid_is_zero(fid)) {
                LASSERT(top->loh_hash.next == NULL
                        && top->loh_hash.pprev == NULL);
                LASSERT(cfs_list_empty(&top->loh_lru));
                if (!cfs_atomic_dec_and_test(&top->loh_ref))
                        return;
                cfs_list_for_each_entry_reverse(o, &top->loh_layers,
                                                lo_linkage) {
                        if (o->lo_ops->loo_object_release != NULL)
                                o->lo_ops->loo_object_release(env, o);
                }
                lu_object_free(env, orig);
                return;
        }

        cfs_hash_bd_get(site->ls_obj_hash, &top->loh_fid, &bd);
        bkt = cfs_hash_bd_extra_get(site->ls_obj_hash, &bd);

        if (!cfs_hash_bd_dec_and_lock(site->ls_obj_hash, &bd, &top->loh_ref)) {
                if (lu_object_is_dying(top)) {
                        /*
                         * somebody may be waiting for this, currently only
                         * used for cl_object, see cl_object_put_last().
                         */
                        cfs_waitq_broadcast(&bkt->lsb_marche_funebre);
                }
                return;
        }

        LASSERT(bkt->lsb_busy > 0);
        bkt->lsb_busy--;
        /*
         * When last reference is released, iterate over object
         * layers, and notify them that object is no longer busy.
         */
        cfs_list_for_each_entry_reverse(o, &top->loh_layers, lo_linkage) {
                if (o->lo_ops->loo_object_release != NULL)
                        o->lo_ops->loo_object_release(env, o);
        }

        if (!lu_object_is_dying(top)) {
                LASSERT(cfs_list_empty(&top->loh_lru));
                cfs_list_add_tail(&top->loh_lru, &bkt->lsb_lru);
                cfs_hash_bd_unlock(site->ls_obj_hash, &bd, 1);
                return;
        }

        /*
         * If object is dying (will not be cached), remove it
         * from hash table and LRU.
         *
         * This is done with hash table and LRU lists locked. As the only
         * way to acquire first reference to previously unreferenced
         * object is through hash-table lookup (lu_object_find()),
         * or LRU scanning (lu_site_purge()), that are done under hash-table
         * and LRU lock, no race with concurrent object lookup is possible
         * and we can safely destroy object below.
         */
        if (!test_and_set_bit(LU_OBJECT_UNHASHED, &top->loh_flags))
                cfs_hash_bd_del_locked(site->ls_obj_hash, &bd, &top->loh_hash);
        cfs_hash_bd_unlock(site->ls_obj_hash, &bd, 1);
        /*
         * Object was already removed from hash and lru above, can
         * kill it.
         */
        lu_object_free(env, orig);
}
EXPORT_SYMBOL(lu_object_put);
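
/*
 * Illustrative sketch (not part of the original file): the typical pairing
 * of lu_object_find() with lu_object_put() described above. The env, device
 * and fid are assumed to be set up by the caller; the function name is
 * hypothetical and error handling is abbreviated.
 */
#if 0
static int lu_object_put_example(const struct lu_env *env,
                                 struct lu_device *dev,
                                 const struct lu_fid *f)
{
        struct lu_object *o;

        /* takes a reference: either a cache hit or a freshly created object */
        o = lu_object_find(env, dev, f, NULL);
        if (IS_ERR(o))
                return PTR_ERR(o);

        /* ... use the object ... */

        /* drops the reference; the object goes back to the LRU unless dying */
        lu_object_put(env, o);
        return 0;
}
#endif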
/**
 * Put object and don't keep it in cache. This is a temporary solution for
 * multi-site objects when their layering is not constant.
 */
void lu_object_put_nocache(const struct lu_env *env, struct lu_object *o)
{
        set_bit(LU_OBJECT_HEARD_BANSHEE, &o->lo_header->loh_flags);
        return lu_object_put(env, o);
}
EXPORT_SYMBOL(lu_object_put_nocache);
/**
 * Kill the object and take it out of LRU cache.
 * Currently used by client code for layout change.
 */
void lu_object_unhash(const struct lu_env *env, struct lu_object *o)
{
        struct lu_object_header *top;

        top = o->lo_header;
        set_bit(LU_OBJECT_HEARD_BANSHEE, &top->loh_flags);
        if (!test_and_set_bit(LU_OBJECT_UNHASHED, &top->loh_flags)) {
                cfs_hash_t *obj_hash = o->lo_dev->ld_site->ls_obj_hash;
                cfs_hash_bd_t bd;

                cfs_hash_bd_get_and_lock(obj_hash, &top->loh_fid, &bd, 1);
                cfs_list_del_init(&top->loh_lru);
                cfs_hash_bd_del_locked(obj_hash, &bd, &top->loh_hash);
                cfs_hash_bd_unlock(obj_hash, &bd, 1);
        }
}
EXPORT_SYMBOL(lu_object_unhash);
/**
 * Allocate new object.
 *
 * This follows object creation protocol, described in the comment within
 * struct lu_device_operations definition.
 */
static struct lu_object *lu_object_alloc(const struct lu_env *env,
                                         struct lu_device *dev,
                                         const struct lu_fid *f,
                                         const struct lu_object_conf *conf)
{
        struct lu_object *scan;
        struct lu_object *top;
        cfs_list_t *layers;
        int clean;
        int result;
        ENTRY;

        /*
         * Create top-level object slice. This will also create
         * lu_object_header.
         */
        top = dev->ld_ops->ldo_object_alloc(env, NULL, dev);
        if (top == NULL)
                RETURN(ERR_PTR(-ENOMEM));
        /*
         * This is the only place where object fid is assigned. It's constant
         * after this point.
         */
        top->lo_header->loh_fid = *f;
        layers = &top->lo_header->loh_layers;
        do {
                /*
                 * Call ->loo_object_init() repeatedly, until no more new
                 * object slices are created.
                 */
                clean = 1;
                cfs_list_for_each_entry(scan, layers, lo_linkage) {
                        if (scan->lo_flags & LU_OBJECT_ALLOCATED)
                                continue;
                        clean = 0;
                        scan->lo_header = top->lo_header;
                        result = scan->lo_ops->loo_object_init(env, scan, conf);
                        if (result != 0) {
                                lu_object_free(env, top);
                                RETURN(ERR_PTR(result));
                        }
                        scan->lo_flags |= LU_OBJECT_ALLOCATED;
                }
        } while (!clean);

        cfs_list_for_each_entry_reverse(scan, layers, lo_linkage) {
                if (scan->lo_ops->loo_object_start != NULL) {
                        result = scan->lo_ops->loo_object_start(env, scan);
                        if (result != 0) {
                                lu_object_free(env, top);
                                RETURN(ERR_PTR(result));
                        }
                }
        }

        lprocfs_counter_incr(dev->ld_site->ls_stats, LU_SS_CREATED);
        RETURN(top);
}
static void lu_object_free(const struct lu_env *env, struct lu_object *o)
{
        struct lu_site_bkt_data *bkt;
        struct lu_site *site;
        struct lu_object *scan;
        cfs_list_t *layers;
        cfs_list_t splice;

        site = o->lo_dev->ld_site;
        layers = &o->lo_header->loh_layers;
        bkt = lu_site_bkt_from_fid(site, &o->lo_header->loh_fid);
        /*
         * First call ->loo_object_delete() method to release all resources.
         */
        cfs_list_for_each_entry_reverse(scan, layers, lo_linkage) {
                if (scan->lo_ops->loo_object_delete != NULL)
                        scan->lo_ops->loo_object_delete(env, scan);
        }

        /*
         * Then, splice object layers into stand-alone list, and call
         * ->loo_object_free() on all layers to free memory. Splice is
         * necessary, because lu_object_header is freed together with the
         * top-level slice.
         */
        CFS_INIT_LIST_HEAD(&splice);
        cfs_list_splice_init(layers, &splice);
        while (!cfs_list_empty(&splice)) {
                /*
                 * Free layers in bottom-to-top order, so that object header
                 * lives as long as possible and ->loo_object_free() methods
                 * can look at its contents.
                 */
                o = container_of0(splice.prev, struct lu_object, lo_linkage);
                cfs_list_del_init(&o->lo_linkage);
                LASSERT(o->lo_ops->loo_object_free != NULL);
                o->lo_ops->loo_object_free(env, o);
        }

        if (cfs_waitq_active(&bkt->lsb_marche_funebre))
                cfs_waitq_broadcast(&bkt->lsb_marche_funebre);
}
/**
 * Free \a nr objects from the cold end of the site LRU list.
 */
int lu_site_purge(const struct lu_env *env, struct lu_site *s, int nr)
{
        struct lu_object_header *h;
        struct lu_object_header *temp;
        struct lu_site_bkt_data *bkt;
        cfs_hash_bd_t bd;
        cfs_hash_bd_t bd2;
        cfs_list_t dispose;
        int did_sth;
        int start;
        int count;
        int bnr;
        int i;

        CFS_INIT_LIST_HEAD(&dispose);
        /*
         * Under LRU list lock, scan LRU list and move unreferenced objects to
         * the dispose list, removing them from LRU and hash table.
         */
        start = s->ls_purge_start;
        bnr = (nr == ~0) ? -1 : nr / CFS_HASH_NBKT(s->ls_obj_hash) + 1;
 again:
        did_sth = 0;
        cfs_hash_for_each_bucket(s->ls_obj_hash, &bd, i) {
                if (i < start)
                        continue;
                count = bnr;
                cfs_hash_bd_lock(s->ls_obj_hash, &bd, 1);
                bkt = cfs_hash_bd_extra_get(s->ls_obj_hash, &bd);

                cfs_list_for_each_entry_safe(h, temp, &bkt->lsb_lru, loh_lru) {
                        LASSERT(cfs_atomic_read(&h->loh_ref) == 0);

                        cfs_hash_bd_get(s->ls_obj_hash, &h->loh_fid, &bd2);
                        LASSERT(bd.bd_bucket == bd2.bd_bucket);

                        cfs_hash_bd_del_locked(s->ls_obj_hash,
                                               &bd2, &h->loh_hash);
                        cfs_list_move(&h->loh_lru, &dispose);
                        did_sth = 1;
                        if (nr != ~0 && --nr == 0)
                                break;

                        if (count > 0 && --count == 0)
                                break;
                }
                cfs_hash_bd_unlock(s->ls_obj_hash, &bd, 1);
                cfs_cond_resched();
                /*
                 * Free everything on the dispose list. This is safe against
                 * races due to the reasons described in lu_object_put().
                 */
                while (!cfs_list_empty(&dispose)) {
                        h = container_of0(dispose.next,
                                          struct lu_object_header, loh_lru);
                        cfs_list_del_init(&h->loh_lru);
                        lu_object_free(env, lu_object_top(h));
                        lprocfs_counter_incr(s->ls_stats, LU_SS_LRU_PURGED);
                }

                if (nr == 0)
                        break;
        }

        if (nr != 0 && did_sth && start != 0) {
                start = 0; /* restart from the first bucket */
                goto again;
        }
        /* race on s->ls_purge_start, but nobody cares */
        s->ls_purge_start = i % CFS_HASH_NBKT(s->ls_obj_hash);
        return nr;
}
EXPORT_SYMBOL(lu_site_purge);
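
/*
 * Illustrative sketch (not part of the original file): bounded and full
 * purges. A shrinker-style caller frees a limited number of cold objects,
 * while shutdown paths (see lu_stack_fini() below) pass ~0 to empty the
 * cache entirely. The function name is hypothetical.
 */
#if 0
static void lu_site_purge_example(const struct lu_env *env, struct lu_site *s)
{
        lu_site_purge(env, s, 128);     /* free up to 128 cold objects */
        lu_site_purge(env, s, ~0);      /* free every unreferenced object */
}
#endif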
/*
 * Code below has to jump through certain loops to output object description
 * into libcfs_debug_msg-based log. The problem is that lu_object_print()
 * composes object description from strings that are parts of _lines_ of
 * output (i.e., strings that are not terminated by newline). This doesn't fit
 * very well into libcfs_debug_msg() interface that assumes that each message
 * supplied to it is a self-contained output line.
 *
 * To work around this, strings are collected in a temporary buffer
 * (implemented as a value of lu_cdebug_key key), until terminating newline
 * character is detected.
 *
 * XXX overflow is not handled correctly.
 */
enum {
        /**
         * Maximal line size.
         */
        LU_CDEBUG_LINE = 512
};

struct lu_cdebug_data {
        /**
         * Temporary buffer.
         */
        char lck_area[LU_CDEBUG_LINE];
};

/* context key constructor/destructor: lu_global_key_init, lu_global_key_fini */
LU_KEY_INIT_FINI(lu_global, struct lu_cdebug_data);

/**
 * Key, holding temporary buffer. This key is registered very early by
 * lu_global_init().
 */
struct lu_context_key lu_global_key = {
        .lct_tags = LCT_MD_THREAD | LCT_DT_THREAD |
                    LCT_MG_THREAD | LCT_CL_THREAD,
        .lct_init = lu_global_key_init,
        .lct_fini = lu_global_key_fini
};
/**
 * Printer function emitting messages through libcfs_debug_msg().
 */
int lu_cdebug_printer(const struct lu_env *env,
                      void *cookie, const char *format, ...)
{
        struct libcfs_debug_msg_data *msgdata = cookie;
        struct lu_cdebug_data *key;
        int used;
        int complete;
        va_list args;

        va_start(args, format);

        key = lu_context_key_get(&env->le_ctx, &lu_global_key);
        LASSERT(key != NULL);

        used = strlen(key->lck_area);
        complete = format[strlen(format) - 1] == '\n';
        /*
         * Append new chunk to the buffer.
         */
        vsnprintf(key->lck_area + used,
                  ARRAY_SIZE(key->lck_area) - used, format, args);
        if (complete) {
                if (cfs_cdebug_show(msgdata->msg_mask, msgdata->msg_subsys))
                        libcfs_debug_msg(msgdata, "%s", key->lck_area);
                key->lck_area[0] = 0;
        }
        va_end(args);
        return 0;
}
EXPORT_SYMBOL(lu_cdebug_printer);
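
/*
 * Illustrative sketch (not part of the original file): lu_cdebug_printer()
 * accumulates chunks in the per-thread lu_cdebug_data buffer and only emits
 * a debug message once a chunk ends with '\n'. The msgdata declaration via
 * LIBCFS_DEBUG_MSG_DATA_DECL() follows the usual libcfs convention; the
 * function name is hypothetical.
 */
#if 0
static void lu_cdebug_printer_example(const struct lu_env *env,
                                      const struct lu_object *o)
{
        LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, D_OTHER, NULL);

        /* three chunks, one output line: flushed on the trailing '\n' */
        lu_cdebug_printer(env, &msgdata, "object ");
        lu_cdebug_printer(env, &msgdata, DFID, PFID(lu_object_fid(o)));
        lu_cdebug_printer(env, &msgdata, " state checked\n");
}
#endif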
/**
 * Print object header.
 */
void lu_object_header_print(const struct lu_env *env, void *cookie,
                            lu_printer_t printer,
                            const struct lu_object_header *hdr)
{
        (*printer)(env, cookie, "header@%p[%#lx, %d, "DFID"%s%s%s]",
                   hdr, hdr->loh_flags, cfs_atomic_read(&hdr->loh_ref),
                   PFID(&hdr->loh_fid),
                   cfs_hlist_unhashed(&hdr->loh_hash) ? "" : " hash",
                   cfs_list_empty((cfs_list_t *)&hdr->loh_lru) ? "" : " lru",
                   hdr->loh_attr & LOHA_EXISTS ? " exist" : "");
}
EXPORT_SYMBOL(lu_object_header_print);
/**
 * Print human readable representation of the \a o to the \a printer.
 */
void lu_object_print(const struct lu_env *env, void *cookie,
                     lu_printer_t printer, const struct lu_object *o)
{
        static const char ruler[] = "........................................";
        struct lu_object_header *top;
        int depth;

        top = o->lo_header;
        lu_object_header_print(env, cookie, printer, top);
        (*printer)(env, cookie, "{ \n");
        cfs_list_for_each_entry(o, &top->loh_layers, lo_linkage) {
                depth = o->lo_depth + 4;

                /*
                 * print `.' \a depth times followed by type name and address
                 */
                (*printer)(env, cookie, "%*.*s%s@%p", depth, depth, ruler,
                           o->lo_dev->ld_type->ldt_name, o);
                if (o->lo_ops->loo_object_print != NULL)
                        o->lo_ops->loo_object_print(env, cookie, printer, o);
                (*printer)(env, cookie, "\n");
        }
        (*printer)(env, cookie, "} header@%p\n", top);
}
EXPORT_SYMBOL(lu_object_print);
/**
 * Check object consistency.
 */
int lu_object_invariant(const struct lu_object *o)
{
        struct lu_object_header *top;

        top = o->lo_header;
        cfs_list_for_each_entry(o, &top->loh_layers, lo_linkage) {
                if (o->lo_ops->loo_object_invariant != NULL &&
                    !o->lo_ops->loo_object_invariant(o))
                        return 0;
        }
        return 1;
}
EXPORT_SYMBOL(lu_object_invariant);
static struct lu_object *htable_lookup(struct lu_site *s,
                                       cfs_hash_bd_t *bd,
                                       const struct lu_fid *f,
                                       cfs_waitlink_t *waiter,
                                       __u64 *version)
{
        struct lu_site_bkt_data *bkt;
        struct lu_object_header *h;
        cfs_hlist_node_t *hnode;
        __u64 ver = cfs_hash_bd_version_get(bd);

        if (*version == ver)
                return NULL;

        *version = ver;
        bkt = cfs_hash_bd_extra_get(s->ls_obj_hash, bd);
        /* cfs_hash_bd_peek_locked is somewhat an "internal" function
         * of cfs_hash; it doesn't add a refcount on the object. */
        hnode = cfs_hash_bd_peek_locked(s->ls_obj_hash, bd, (void *)f);
        if (hnode == NULL) {
                lprocfs_counter_incr(s->ls_stats, LU_SS_CACHE_MISS);
                return NULL;
        }

        h = container_of0(hnode, struct lu_object_header, loh_hash);
        if (likely(!lu_object_is_dying(h))) {
                cfs_hash_get(s->ls_obj_hash, hnode);
                lprocfs_counter_incr(s->ls_stats, LU_SS_CACHE_HIT);
                cfs_list_del_init(&h->loh_lru);
                return lu_object_top(h);
        }

        /*
         * Lookup found an object being destroyed; this object cannot be
         * returned (to assure that references to dying objects are eventually
         * drained), and moreover, lookup has to wait until object is freed.
         */
        cfs_waitlink_init(waiter);
        cfs_waitq_add(&bkt->lsb_marche_funebre, waiter);
        cfs_set_current_state(CFS_TASK_UNINT);
        lprocfs_counter_incr(s->ls_stats, LU_SS_CACHE_DEATH_RACE);
        return ERR_PTR(-EAGAIN);
}
/**
 * Search cache for an object with the fid \a f. If such object is found,
 * return it. Otherwise, create new object, insert it into cache and return
 * it. In any case, additional reference is acquired on the returned object.
 */
struct lu_object *lu_object_find(const struct lu_env *env,
                                 struct lu_device *dev, const struct lu_fid *f,
                                 const struct lu_object_conf *conf)
{
        return lu_object_find_at(env, dev->ld_site->ls_top_dev, f, conf);
}
EXPORT_SYMBOL(lu_object_find);
static struct lu_object *lu_object_new(const struct lu_env *env,
                                       struct lu_device *dev,
                                       const struct lu_fid *f,
                                       const struct lu_object_conf *conf)
{
        struct lu_object *o;
        cfs_hash_t *hs;
        cfs_hash_bd_t bd;
        struct lu_site_bkt_data *bkt;

        o = lu_object_alloc(env, dev, f, conf);
        if (unlikely(IS_ERR(o)))
                return o;

        hs = dev->ld_site->ls_obj_hash;
        cfs_hash_bd_get_and_lock(hs, (void *)f, &bd, 1);
        bkt = cfs_hash_bd_extra_get(hs, &bd);
        cfs_hash_bd_add_locked(hs, &bd, &o->lo_header->loh_hash);
        bkt->lsb_busy++;
        cfs_hash_bd_unlock(hs, &bd, 1);
        return o;
}
/**
 * Core logic of lu_object_find*() functions.
 */
static struct lu_object *lu_object_find_try(const struct lu_env *env,
                                            struct lu_device *dev,
                                            const struct lu_fid *f,
                                            const struct lu_object_conf *conf,
                                            cfs_waitlink_t *waiter)
{
        struct lu_object *o;
        struct lu_object *shadow;
        struct lu_site *s;
        cfs_hash_t *hs;
        cfs_hash_bd_t bd;
        __u64 version = 0;

        /*
         * This uses standard index maintenance protocol:
         *
         *     - search index under lock, and return object if found;
         *     - otherwise, unlock index, allocate new object;
         *     - lock index and search again;
         *     - if nothing is found (usual case), insert newly created
         *       object into index;
         *     - otherwise (race: other thread inserted object), free
         *       object just allocated.
         *
         * For the "LOC_F_NEW" case, we are sure the object is newly created.
         * It is unnecessary to perform lookup-alloc-lookup-insert; instead,
         * just alloc and insert directly.
         *
         * If dying object is found during index search, add @waiter to the
         * site wait-queue and return ERR_PTR(-EAGAIN).
         */
        if (conf != NULL && conf->loc_flags & LOC_F_NEW)
                return lu_object_new(env, dev, f, conf);

        s  = dev->ld_site;
        hs = s->ls_obj_hash;
        cfs_hash_bd_get_and_lock(hs, (void *)f, &bd, 1);
        o = htable_lookup(s, &bd, f, waiter, &version);
        cfs_hash_bd_unlock(hs, &bd, 1);
        if (o != NULL)
                return o;

        /*
         * Allocate new object. This may result in rather complicated
         * operations, including fld queries, inode loading, etc.
         */
        o = lu_object_alloc(env, dev, f, conf);
        if (unlikely(IS_ERR(o)))
                return o;

        LASSERT(lu_fid_eq(lu_object_fid(o), f));

        cfs_hash_bd_lock(hs, &bd, 1);

        shadow = htable_lookup(s, &bd, f, waiter, &version);
        if (likely(shadow == NULL)) {
                struct lu_site_bkt_data *bkt;

                bkt = cfs_hash_bd_extra_get(hs, &bd);
                cfs_hash_bd_add_locked(hs, &bd, &o->lo_header->loh_hash);
                bkt->lsb_busy++;
                cfs_hash_bd_unlock(hs, &bd, 1);
                return o;
        }

        lprocfs_counter_incr(s->ls_stats, LU_SS_CACHE_RACE);
        cfs_hash_bd_unlock(hs, &bd, 1);
        lu_object_free(env, o);
        return shadow;
}
/**
 * Much like lu_object_find(), but top level device of object is specifically
 * \a dev rather than top level device of the site. This interface allows
 * objects of different "stacking" to be created within the same site.
 */
struct lu_object *lu_object_find_at(const struct lu_env *env,
                                    struct lu_device *dev,
                                    const struct lu_fid *f,
                                    const struct lu_object_conf *conf)
{
        struct lu_site_bkt_data *bkt;
        struct lu_object *obj;
        cfs_waitlink_t wait;

        while (1) {
                obj = lu_object_find_try(env, dev, f, conf, &wait);
                if (obj != ERR_PTR(-EAGAIN))
                        return obj;
                /*
                 * lu_object_find_try() already added waiter into the
                 * wait queue.
                 */
                cfs_waitq_wait(&wait, CFS_TASK_UNINT);
                bkt = lu_site_bkt_from_fid(dev->ld_site, (void *)f);
                cfs_waitq_del(&bkt->lsb_marche_funebre, &wait);
        }
}
EXPORT_SYMBOL(lu_object_find_at);
/**
 * Find object with given fid, and return its slice belonging to given device.
 */
struct lu_object *lu_object_find_slice(const struct lu_env *env,
                                       struct lu_device *dev,
                                       const struct lu_fid *f,
                                       const struct lu_object_conf *conf)
{
        struct lu_object *top;
        struct lu_object *obj;

        top = lu_object_find(env, dev, f, conf);
        if (!IS_ERR(top)) {
                obj = lu_object_locate(top->lo_header, dev->ld_type);
                if (obj == NULL) {
                        lu_object_put(env, top);
                        obj = ERR_PTR(-ENOENT);
                }
        } else {
                obj = top;
        }
        return obj;
}
EXPORT_SYMBOL(lu_object_find_slice);
/**
 * Global list of all device types.
 */
static CFS_LIST_HEAD(lu_device_types);

int lu_device_type_init(struct lu_device_type *ldt)
{
        int result = 0;

        CFS_INIT_LIST_HEAD(&ldt->ldt_linkage);
        if (ldt->ldt_ops->ldto_init)
                result = ldt->ldt_ops->ldto_init(ldt);
        if (result == 0)
                cfs_list_add(&ldt->ldt_linkage, &lu_device_types);
        return result;
}
EXPORT_SYMBOL(lu_device_type_init);

void lu_device_type_fini(struct lu_device_type *ldt)
{
        cfs_list_del_init(&ldt->ldt_linkage);
        if (ldt->ldt_ops->ldto_fini)
                ldt->ldt_ops->ldto_fini(ldt);
}
EXPORT_SYMBOL(lu_device_type_fini);

void lu_types_stop(void)
{
        struct lu_device_type *ldt;

        cfs_list_for_each_entry(ldt, &lu_device_types, ldt_linkage) {
                if (ldt->ldt_device_nr == 0 && ldt->ldt_ops->ldto_stop)
                        ldt->ldt_ops->ldto_stop(ldt);
        }
}
EXPORT_SYMBOL(lu_types_stop);
/**
 * Global list of all sites on this node
 */
static CFS_LIST_HEAD(lu_sites);
static DEFINE_MUTEX(lu_sites_guard);

/**
 * Global environment used by site shrinker.
 */
static struct lu_env lu_shrink_env;

struct lu_site_print_arg {
        struct lu_env *lsp_env;
        void *lsp_cookie;
        lu_printer_t lsp_printer;
};

static int
lu_site_obj_print(cfs_hash_t *hs, cfs_hash_bd_t *bd,
                  cfs_hlist_node_t *hnode, void *data)
{
        struct lu_site_print_arg *arg = (struct lu_site_print_arg *)data;
        struct lu_object_header *h;

        h = cfs_hlist_entry(hnode, struct lu_object_header, loh_hash);
        if (!cfs_list_empty(&h->loh_layers)) {
                const struct lu_object *o;

                o = lu_object_top(h);
                lu_object_print(arg->lsp_env, arg->lsp_cookie,
                                arg->lsp_printer, o);
        } else {
                lu_object_header_print(arg->lsp_env, arg->lsp_cookie,
                                       arg->lsp_printer, h);
        }
        return 0;
}

/**
 * Print all objects in \a s.
 */
void lu_site_print(const struct lu_env *env, struct lu_site *s, void *cookie,
                   lu_printer_t printer)
{
        struct lu_site_print_arg arg = {
                .lsp_env     = (struct lu_env *)env,
                .lsp_cookie  = cookie,
                .lsp_printer = printer,
        };

        cfs_hash_for_each(s->ls_obj_hash, lu_site_obj_print, &arg);
}
EXPORT_SYMBOL(lu_site_print);

enum {
        LU_CACHE_PERCENT_MAX     = 50,
        LU_CACHE_PERCENT_DEFAULT = 20
};

static unsigned int lu_cache_percent = LU_CACHE_PERCENT_DEFAULT;
CFS_MODULE_PARM(lu_cache_percent, "i", int, 0644,
                "Percentage of memory to be used as lu_object cache");
/**
 * Return desired hash table order.
 */
static int lu_htable_order(void)
{
        unsigned long cache_size;
        int bits;

        /*
         * Calculate hash table size, assuming that we want reasonable
         * performance when 20% of total memory is occupied by cache of
         * lu_objects.
         *
         * Size of lu_object is (arbitrary) taken as 1K (together with inode).
         */
        cache_size = cfs_num_physpages;

#if BITS_PER_LONG == 32
        /* limit hashtable size for lowmem systems to low RAM */
        if (cache_size > 1 << (30 - CFS_PAGE_SHIFT))
                cache_size = 1 << (30 - CFS_PAGE_SHIFT) * 3 / 4;
#endif

        /* clear off unreasonable cache setting. */
        if (lu_cache_percent == 0 || lu_cache_percent > LU_CACHE_PERCENT_MAX) {
                CWARN("obdclass: invalid lu_cache_percent: %u, it must be in"
                      " the range of (0, %u]. Will use default value: %u.\n",
                      lu_cache_percent, LU_CACHE_PERCENT_MAX,
                      LU_CACHE_PERCENT_DEFAULT);

                lu_cache_percent = LU_CACHE_PERCENT_DEFAULT;
        }
        cache_size = cache_size / 100 * lu_cache_percent *
                (CFS_PAGE_SIZE / 1024);

        for (bits = 1; (1 << bits) < cache_size; ++bits) {
                ;
        }
        return bits;
}
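
/*
 * Worked example (illustrative, assuming 4 KiB pages): a node with 4 GiB of
 * RAM has cfs_num_physpages = 2^20. With the default lu_cache_percent of 20,
 * cache_size = 2^20 / 100 * 20 * (4096 / 1024) ~= 838,861 objects, and the
 * smallest order satisfying (1 << bits) >= cache_size is bits = 20.
 */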
static unsigned lu_obj_hop_hash(cfs_hash_t *hs,
                                const void *key, unsigned mask)
{
        struct lu_fid *fid = (struct lu_fid *)key;
        __u32 hash;

        hash = fid_flatten32(fid);
        hash += (hash >> 4) + (hash << 12); /* mixing oid and seq */
        hash = cfs_hash_long(hash, hs->hs_bkt_bits);

        /* give me another random factor */
        hash -= cfs_hash_long((unsigned long)hs, fid_oid(fid) % 11 + 3);

        hash <<= hs->hs_cur_bits - hs->hs_bkt_bits;
        hash |= (fid_seq(fid) + fid_oid(fid)) & (CFS_HASH_NBKT(hs) - 1);

        return hash & mask;
}

static void *lu_obj_hop_object(cfs_hlist_node_t *hnode)
{
        return cfs_hlist_entry(hnode, struct lu_object_header, loh_hash);
}

static void *lu_obj_hop_key(cfs_hlist_node_t *hnode)
{
        struct lu_object_header *h;

        h = cfs_hlist_entry(hnode, struct lu_object_header, loh_hash);
        return &h->loh_fid;
}

static int lu_obj_hop_keycmp(const void *key, cfs_hlist_node_t *hnode)
{
        struct lu_object_header *h;

        h = cfs_hlist_entry(hnode, struct lu_object_header, loh_hash);
        return lu_fid_eq(&h->loh_fid, (struct lu_fid *)key);
}

static void lu_obj_hop_get(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
{
        struct lu_object_header *h;

        h = cfs_hlist_entry(hnode, struct lu_object_header, loh_hash);
        if (cfs_atomic_add_return(1, &h->loh_ref) == 1) {
                struct lu_site_bkt_data *bkt;
                cfs_hash_bd_t bd;

                cfs_hash_bd_get(hs, &h->loh_fid, &bd);
                bkt = cfs_hash_bd_extra_get(hs, &bd);
                bkt->lsb_busy++;
        }
}

static void lu_obj_hop_put_locked(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
{
        LBUG(); /* we should never call it */
}

cfs_hash_ops_t lu_site_hash_ops = {
        .hs_hash       = lu_obj_hop_hash,
        .hs_key        = lu_obj_hop_key,
        .hs_keycmp     = lu_obj_hop_keycmp,
        .hs_object     = lu_obj_hop_object,
        .hs_get        = lu_obj_hop_get,
        .hs_put_locked = lu_obj_hop_put_locked,
};
void lu_dev_add_linkage(struct lu_site *s, struct lu_device *d)
{
        spin_lock(&s->ls_ld_lock);
        if (cfs_list_empty(&d->ld_linkage))
                cfs_list_add(&d->ld_linkage, &s->ls_ld_linkage);
        spin_unlock(&s->ls_ld_lock);
}
EXPORT_SYMBOL(lu_dev_add_linkage);

void lu_dev_del_linkage(struct lu_site *s, struct lu_device *d)
{
        spin_lock(&s->ls_ld_lock);
        cfs_list_del_init(&d->ld_linkage);
        spin_unlock(&s->ls_ld_lock);
}
EXPORT_SYMBOL(lu_dev_del_linkage);
/**
 * Initialize site \a s, with \a d as the top level device.
 */
#define LU_SITE_BITS_MIN         12
#define LU_SITE_BITS_MAX         24
/**
 * 256 buckets in total; we don't want too many buckets because they:
 * - consume too much memory;
 * - lead to unbalanced LRU lists.
 */
#define LU_SITE_BKT_BITS         8

int lu_site_init(struct lu_site *s, struct lu_device *top)
{
        struct lu_site_bkt_data *bkt;
        cfs_hash_bd_t bd;
        char name[16];
        int bits;
        int i;
        ENTRY;

        memset(s, 0, sizeof *s);
        bits = lu_htable_order();
        snprintf(name, 16, "lu_site_%s", top->ld_type->ldt_name);
        for (bits = min(max(LU_SITE_BITS_MIN, bits), LU_SITE_BITS_MAX);
             bits >= LU_SITE_BITS_MIN; bits--) {
                s->ls_obj_hash = cfs_hash_create(name, bits, bits,
                                                 bits - LU_SITE_BKT_BITS,
                                                 sizeof(*bkt), 0, 0,
                                                 &lu_site_hash_ops,
                                                 CFS_HASH_SPIN_BKTLOCK |
                                                 CFS_HASH_NO_ITEMREF |
                                                 CFS_HASH_DEPTH |
                                                 CFS_HASH_ASSERT_EMPTY);
                if (s->ls_obj_hash != NULL)
                        break;
        }

        if (s->ls_obj_hash == NULL) {
                CERROR("failed to create lu_site hash with bits: %d\n", bits);
                return -ENOMEM;
        }

        cfs_hash_for_each_bucket(s->ls_obj_hash, &bd, i) {
                bkt = cfs_hash_bd_extra_get(s->ls_obj_hash, &bd);
                CFS_INIT_LIST_HEAD(&bkt->lsb_lru);
                cfs_waitq_init(&bkt->lsb_marche_funebre);
        }

        s->ls_stats = lprocfs_alloc_stats(LU_SS_LAST_STAT, 0);
        if (s->ls_stats == NULL) {
                cfs_hash_putref(s->ls_obj_hash);
                s->ls_obj_hash = NULL;
                return -ENOMEM;
        }

        lprocfs_counter_init(s->ls_stats, LU_SS_CREATED,
                             0, "created", "created");
        lprocfs_counter_init(s->ls_stats, LU_SS_CACHE_HIT,
                             0, "cache_hit", "cache_hit");
        lprocfs_counter_init(s->ls_stats, LU_SS_CACHE_MISS,
                             0, "cache_miss", "cache_miss");
        lprocfs_counter_init(s->ls_stats, LU_SS_CACHE_RACE,
                             0, "cache_race", "cache_race");
        lprocfs_counter_init(s->ls_stats, LU_SS_CACHE_DEATH_RACE,
                             0, "cache_death_race", "cache_death_race");
        lprocfs_counter_init(s->ls_stats, LU_SS_LRU_PURGED,
                             0, "lru_purged", "lru_purged");

        CFS_INIT_LIST_HEAD(&s->ls_linkage);
        s->ls_top_dev = top;
        top->ld_site = s;
        lu_device_get(top);
        lu_ref_add(&top->ld_reference, "site-top", s);

        CFS_INIT_LIST_HEAD(&s->ls_ld_linkage);
        spin_lock_init(&s->ls_ld_lock);

        lu_dev_add_linkage(s, top);

        RETURN(0);
}
EXPORT_SYMBOL(lu_site_init);
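
/*
 * Illustrative sketch (not part of the original file): the site life cycle
 * as seen by a top-level device. lu_site_init() builds the hash table and
 * stats, lu_site_init_finish() publishes the site on the global list (and
 * thereby to the shrinker), and lu_site_fini() tears everything down. The
 * function name is hypothetical.
 */
#if 0
static int lu_site_lifecycle_example(struct lu_site *s, struct lu_device *top)
{
        int rc;

        rc = lu_site_init(s, top);
        if (rc != 0)
                return rc;
        rc = lu_site_init_finish(s);
        if (rc != 0) {
                lu_site_fini(s);
                return rc;
        }
        /* ... site is live; objects can be found and created ... */
        lu_site_fini(s);
        return 0;
}
#endif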
/**
 * Finalize \a s and release its resources.
 */
void lu_site_fini(struct lu_site *s)
{
        mutex_lock(&lu_sites_guard);
        cfs_list_del_init(&s->ls_linkage);
        mutex_unlock(&lu_sites_guard);

        if (s->ls_obj_hash != NULL) {
                cfs_hash_putref(s->ls_obj_hash);
                s->ls_obj_hash = NULL;
        }

        if (s->ls_top_dev != NULL) {
                s->ls_top_dev->ld_site = NULL;
                lu_ref_del(&s->ls_top_dev->ld_reference, "site-top", s);
                lu_device_put(s->ls_top_dev);
                s->ls_top_dev = NULL;
        }

        if (s->ls_stats != NULL)
                lprocfs_free_stats(&s->ls_stats);
}
EXPORT_SYMBOL(lu_site_fini);

/**
 * Called when initialization of stack for this site is completed.
 */
int lu_site_init_finish(struct lu_site *s)
{
        int result;

        mutex_lock(&lu_sites_guard);
        result = lu_context_refill(&lu_shrink_env.le_ctx);
        if (result == 0)
                cfs_list_add(&s->ls_linkage, &lu_sites);
        mutex_unlock(&lu_sites_guard);
        return result;
}
EXPORT_SYMBOL(lu_site_init_finish);
/**
 * Acquire additional reference on device \a d
 */
void lu_device_get(struct lu_device *d)
{
        cfs_atomic_inc(&d->ld_ref);
}
EXPORT_SYMBOL(lu_device_get);

/**
 * Release reference on device \a d.
 */
void lu_device_put(struct lu_device *d)
{
        LASSERT(cfs_atomic_read(&d->ld_ref) > 0);
        cfs_atomic_dec(&d->ld_ref);
}
EXPORT_SYMBOL(lu_device_put);

/**
 * Initialize device \a d of type \a t.
 */
int lu_device_init(struct lu_device *d, struct lu_device_type *t)
{
        if (t->ldt_device_nr++ == 0 && t->ldt_ops->ldto_start != NULL)
                t->ldt_ops->ldto_start(t);
        memset(d, 0, sizeof *d);
        cfs_atomic_set(&d->ld_ref, 0);
        d->ld_type = t;
        lu_ref_init(&d->ld_reference);
        CFS_INIT_LIST_HEAD(&d->ld_linkage);
        return 0;
}
EXPORT_SYMBOL(lu_device_init);

/**
 * Finalize device \a d.
 */
void lu_device_fini(struct lu_device *d)
{
        struct lu_device_type *t;

        t = d->ld_type;
        if (d->ld_obd != NULL) {
                d->ld_obd->obd_lu_dev = NULL;
                d->ld_obd = NULL;
        }

        lu_ref_fini(&d->ld_reference);
        LASSERTF(cfs_atomic_read(&d->ld_ref) == 0,
                 "Refcount is %u\n", cfs_atomic_read(&d->ld_ref));
        LASSERT(t->ldt_device_nr > 0);
        if (--t->ldt_device_nr == 0 && t->ldt_ops->ldto_stop != NULL)
                t->ldt_ops->ldto_stop(t);
}
EXPORT_SYMBOL(lu_device_fini);
/**
 * Initialize object \a o that is part of compound object \a h and was created
 * by device \a d.
 */
int lu_object_init(struct lu_object *o,
                   struct lu_object_header *h, struct lu_device *d)
{
        memset(o, 0, sizeof *o);
        o->lo_header = h;
        o->lo_dev = d;
        lu_device_get(d);
        o->lo_dev_ref = lu_ref_add(&d->ld_reference, "lu_object", o);
        CFS_INIT_LIST_HEAD(&o->lo_linkage);
        return 0;
}
EXPORT_SYMBOL(lu_object_init);

/**
 * Finalize object and release its resources.
 */
void lu_object_fini(struct lu_object *o)
{
        struct lu_device *dev = o->lo_dev;

        LASSERT(cfs_list_empty(&o->lo_linkage));

        if (dev != NULL) {
                lu_ref_del_at(&dev->ld_reference,
                              o->lo_dev_ref, "lu_object", o);
                lu_device_put(dev);
                o->lo_dev = NULL;
        }
}
EXPORT_SYMBOL(lu_object_fini);

/**
 * Add object \a o as first layer of compound object \a h
 *
 * This is typically called by the ->ldo_object_alloc() method of top-level
 * device.
 */
void lu_object_add_top(struct lu_object_header *h, struct lu_object *o)
{
        cfs_list_move(&o->lo_linkage, &h->loh_layers);
}
EXPORT_SYMBOL(lu_object_add_top);

/**
 * Add object \a o as a layer of compound object, going after \a before.
 *
 * This is typically called by the ->ldo_object_alloc() method of \a
 * before->lo_dev.
 */
void lu_object_add(struct lu_object *before, struct lu_object *o)
{
        cfs_list_move(&o->lo_linkage, &before->lo_linkage);
}
EXPORT_SYMBOL(lu_object_add);

/**
 * Initialize compound object.
 */
int lu_object_header_init(struct lu_object_header *h)
{
        memset(h, 0, sizeof *h);
        cfs_atomic_set(&h->loh_ref, 1);
        CFS_INIT_HLIST_NODE(&h->loh_hash);
        CFS_INIT_LIST_HEAD(&h->loh_lru);
        CFS_INIT_LIST_HEAD(&h->loh_layers);
        lu_ref_init(&h->loh_reference);
        return 0;
}
EXPORT_SYMBOL(lu_object_header_init);

/**
 * Finalize compound object.
 */
void lu_object_header_fini(struct lu_object_header *h)
{
        LASSERT(cfs_list_empty(&h->loh_layers));
        LASSERT(cfs_list_empty(&h->loh_lru));
        LASSERT(cfs_hlist_unhashed(&h->loh_hash));
        lu_ref_fini(&h->loh_reference);
}
EXPORT_SYMBOL(lu_object_header_fini);
/**
 * Given a compound object, find its slice, corresponding to the device type
 * \a dtype.
 */
struct lu_object *lu_object_locate(struct lu_object_header *h,
                                   const struct lu_device_type *dtype)
{
        struct lu_object *o;

        cfs_list_for_each_entry(o, &h->loh_layers, lo_linkage) {
                if (o->lo_dev->ld_type == dtype)
                        return o;
        }
        return NULL;
}
EXPORT_SYMBOL(lu_object_locate);
/**
 * Finalize and free devices in the device stack.
 *
 * Finalize device stack by purging object cache, and calling
 * lu_device_type_operations::ldto_device_fini() and
 * lu_device_type_operations::ldto_device_free() on all devices in the stack.
 */
void lu_stack_fini(const struct lu_env *env, struct lu_device *top)
{
        struct lu_site *site = top->ld_site;
        struct lu_device *scan;
        struct lu_device *next;

        lu_site_purge(env, site, ~0);
        for (scan = top; scan != NULL; scan = next) {
                next = scan->ld_type->ldt_ops->ldto_device_fini(env, scan);
                lu_ref_del(&scan->ld_reference, "lu-stack", &lu_site_init);
                lu_device_put(scan);
        }

        /* purge again. */
        lu_site_purge(env, site, ~0);

        for (scan = top; scan != NULL; scan = next) {
                const struct lu_device_type *ldt = scan->ld_type;
                struct obd_type *type;

                next = ldt->ldt_ops->ldto_device_free(env, scan);
                type = ldt->ldt_obd_type;
                if (type != NULL) {
                        type->typ_refcnt--;
                        class_put_type(type);
                }
        }
}
EXPORT_SYMBOL(lu_stack_fini);
enum {
        /**
         * Maximal number of tld slots.
         */
        LU_CONTEXT_KEY_NR = 40
};

static struct lu_context_key *lu_keys[LU_CONTEXT_KEY_NR] = { NULL, };

static DEFINE_SPINLOCK(lu_keys_guard);

/**
 * Global counter incremented whenever key is registered, unregistered,
 * revived or quiesced. This is used to avoid unnecessary calls to
 * lu_context_refill(). No locking is provided, as initialization and shutdown
 * are supposed to be externally serialized.
 */
static unsigned key_set_version = 0;
/**
 * Register new key.
 */
int lu_context_key_register(struct lu_context_key *key)
{
        int result;
        int i;

        LASSERT(key->lct_init != NULL);
        LASSERT(key->lct_fini != NULL);
        LASSERT(key->lct_tags != 0);
        LASSERT(key->lct_owner != NULL);

        result = -ENFILE;
        spin_lock(&lu_keys_guard);
        for (i = 0; i < ARRAY_SIZE(lu_keys); ++i) {
                if (lu_keys[i] == NULL) {
                        key->lct_index = i;
                        cfs_atomic_set(&key->lct_used, 1);
                        lu_keys[i] = key;
                        lu_ref_init(&key->lct_reference);
                        result = 0;
                        ++key_set_version;
                        break;
                }
        }
        spin_unlock(&lu_keys_guard);
        return result;
}
EXPORT_SYMBOL(lu_context_key_register);
static void key_fini(struct lu_context *ctx, int index)
{
        if (ctx->lc_value != NULL && ctx->lc_value[index] != NULL) {
                struct lu_context_key *key;

                key = lu_keys[index];
                LASSERT(key != NULL);
                LASSERT(key->lct_fini != NULL);
                LASSERT(cfs_atomic_read(&key->lct_used) > 1);

                key->lct_fini(ctx, key, ctx->lc_value[index]);
                lu_ref_del(&key->lct_reference, "ctx", ctx);
                cfs_atomic_dec(&key->lct_used);

                LASSERT(key->lct_owner != NULL);
                if ((ctx->lc_tags & LCT_NOREF) == 0) {
                        LINVRNT(cfs_module_refcount(key->lct_owner) > 0);
                        cfs_module_put(key->lct_owner);
                }
                ctx->lc_value[index] = NULL;
        }
}
/**
 * Deregister key.
 */
void lu_context_key_degister(struct lu_context_key *key)
{
        LASSERT(cfs_atomic_read(&key->lct_used) >= 1);
        LINVRNT(0 <= key->lct_index && key->lct_index < ARRAY_SIZE(lu_keys));

        lu_context_key_quiesce(key);

        ++key_set_version;
        spin_lock(&lu_keys_guard);
        key_fini(&lu_shrink_env.le_ctx, key->lct_index);
        if (lu_keys[key->lct_index]) {
                lu_keys[key->lct_index] = NULL;
                lu_ref_fini(&key->lct_reference);
        }
        spin_unlock(&lu_keys_guard);

        LASSERTF(cfs_atomic_read(&key->lct_used) == 1,
                 "key has instances: %d\n",
                 cfs_atomic_read(&key->lct_used));
}
EXPORT_SYMBOL(lu_context_key_degister);
/**
 * Register a number of keys. This has to be called after all keys have been
 * initialized by a call to LU_CONTEXT_KEY_INIT().
 */
int lu_context_key_register_many(struct lu_context_key *k, ...)
{
        struct lu_context_key *key = k;
        va_list args;
        int result;

        va_start(args, k);
        do {
                result = lu_context_key_register(key);
                if (result)
                        break;
                key = va_arg(args, struct lu_context_key *);
        } while (key != NULL);
        va_end(args);
        if (result != 0) {
                va_start(args, k);
                while (k != key) {
                        lu_context_key_degister(k);
                        k = va_arg(args, struct lu_context_key *);
                }
                va_end(args);
        }
        return result;
}
EXPORT_SYMBOL(lu_context_key_register_many);
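
/*
 * Illustrative sketch (not part of the original file): registering a
 * hypothetical key. LU_KEY_INIT_FINI() generates foo_key_init()/foo_key_fini()
 * (cf. lu_global_key above), LU_CONTEXT_KEY_INIT() fills in the key's module
 * owner, and the vararg list must be NULL-terminated. All "foo" names are
 * made up for the example.
 */
#if 0
struct foo_thread_info {
        int fti_example;
};

LU_KEY_INIT_FINI(foo, struct foo_thread_info);

static struct lu_context_key foo_thread_key = {
        .lct_tags = LCT_MD_THREAD,
        .lct_init = foo_key_init,
        .lct_fini = foo_key_fini
};

static int foo_register_keys(void)
{
        LU_CONTEXT_KEY_INIT(&foo_thread_key);
        return lu_context_key_register_many(&foo_thread_key, NULL);
}
#endif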
/**
 * De-register a number of keys. This is a dual to
 * lu_context_key_register_many().
 */
void lu_context_key_degister_many(struct lu_context_key *k, ...)
{
        va_list args;

        va_start(args, k);
        do {
                lu_context_key_degister(k);
                k = va_arg(args, struct lu_context_key *);
        } while (k != NULL);
        va_end(args);
}
EXPORT_SYMBOL(lu_context_key_degister_many);

/**
 * Revive a number of keys.
 */
void lu_context_key_revive_many(struct lu_context_key *k, ...)
{
        va_list args;

        va_start(args, k);
        do {
                lu_context_key_revive(k);
                k = va_arg(args, struct lu_context_key *);
        } while (k != NULL);
        va_end(args);
}
EXPORT_SYMBOL(lu_context_key_revive_many);

/**
 * Quiesce a number of keys.
 */
void lu_context_key_quiesce_many(struct lu_context_key *k, ...)
{
        va_list args;

        va_start(args, k);
        do {
                lu_context_key_quiesce(k);
                k = va_arg(args, struct lu_context_key *);
        } while (k != NULL);
        va_end(args);
}
EXPORT_SYMBOL(lu_context_key_quiesce_many);
/**
 * Return value associated with key \a key in context \a ctx.
 */
void *lu_context_key_get(const struct lu_context *ctx,
                         const struct lu_context_key *key)
{
        LINVRNT(ctx->lc_state == LCS_ENTERED);
        LINVRNT(0 <= key->lct_index && key->lct_index < ARRAY_SIZE(lu_keys));
        LASSERT(lu_keys[key->lct_index] == key);
        return ctx->lc_value[key->lct_index];
}
EXPORT_SYMBOL(lu_context_key_get);

/**
 * List of remembered contexts. XXX document me.
 */
static CFS_LIST_HEAD(lu_context_remembered);

/**
 * Destroy \a key in all remembered contexts. This is used to destroy key
 * values in "shared" contexts (like service threads), when a module owning
 * the key is about to be unloaded.
 */
void lu_context_key_quiesce(struct lu_context_key *key)
{
        struct lu_context *ctx;
        extern unsigned cl_env_cache_purge(unsigned nr);

        if (!(key->lct_tags & LCT_QUIESCENT)) {
                /*
                 * XXX layering violation.
                 */
                cl_env_cache_purge(~0);
                key->lct_tags |= LCT_QUIESCENT;
                /*
                 * XXX memory barrier has to go here.
                 */
                spin_lock(&lu_keys_guard);
                cfs_list_for_each_entry(ctx, &lu_context_remembered,
                                        lc_remember)
                        key_fini(ctx, key->lct_index);
                spin_unlock(&lu_keys_guard);
                ++key_set_version;
        }
}
EXPORT_SYMBOL(lu_context_key_quiesce);

void lu_context_key_revive(struct lu_context_key *key)
{
        key->lct_tags &= ~LCT_QUIESCENT;
        ++key_set_version;
}
EXPORT_SYMBOL(lu_context_key_revive);
static void keys_fini(struct lu_context *ctx)
{
        int i;

        if (ctx->lc_value == NULL)
                return;

        for (i = 0; i < ARRAY_SIZE(lu_keys); ++i)
                key_fini(ctx, i);

        OBD_FREE(ctx->lc_value, ARRAY_SIZE(lu_keys) * sizeof ctx->lc_value[0]);
        ctx->lc_value = NULL;
}

static int keys_fill(struct lu_context *ctx)
{
        int i;

        LINVRNT(ctx->lc_value != NULL);
        for (i = 0; i < ARRAY_SIZE(lu_keys); ++i) {
                struct lu_context_key *key;

                key = lu_keys[i];
                if (ctx->lc_value[i] == NULL && key != NULL &&
                    (key->lct_tags & ctx->lc_tags) &&
                    /*
                     * Don't create values for a LCT_QUIESCENT key, as this
                     * will pin module owning a key.
                     */
                    !(key->lct_tags & LCT_QUIESCENT)) {
                        void *value;

                        LINVRNT(key->lct_init != NULL);
                        LINVRNT(key->lct_index == i);

                        value = key->lct_init(ctx, key);
                        if (unlikely(IS_ERR(value)))
                                return PTR_ERR(value);

                        LASSERT(key->lct_owner != NULL);
                        if (!(ctx->lc_tags & LCT_NOREF))
                                cfs_try_module_get(key->lct_owner);
                        lu_ref_add_atomic(&key->lct_reference, "ctx", ctx);
                        cfs_atomic_inc(&key->lct_used);
                        /*
                         * This is the only place in the code, where an
                         * element of ctx->lc_value[] array is set to non-NULL
                         * value.
                         */
                        ctx->lc_value[i] = value;
                        if (key->lct_exit != NULL)
                                ctx->lc_tags |= LCT_HAS_EXIT;
                }
        }
        ctx->lc_version = key_set_version;
        return 0;
}

static int keys_init(struct lu_context *ctx)
{
        OBD_ALLOC(ctx->lc_value, ARRAY_SIZE(lu_keys) * sizeof ctx->lc_value[0]);
        if (likely(ctx->lc_value != NULL))
                return keys_fill(ctx);

        return -ENOMEM;
}
/**
 * Initialize context data-structure. Create values for all keys.
 */
int lu_context_init(struct lu_context *ctx, __u32 tags)
{
        int rc;

        memset(ctx, 0, sizeof *ctx);
        ctx->lc_state = LCS_INITIALIZED;
        ctx->lc_tags = tags;
        if (tags & LCT_REMEMBER) {
                spin_lock(&lu_keys_guard);
                cfs_list_add(&ctx->lc_remember, &lu_context_remembered);
                spin_unlock(&lu_keys_guard);
        } else {
                CFS_INIT_LIST_HEAD(&ctx->lc_remember);
        }

        rc = keys_init(ctx);
        if (rc != 0)
                lu_context_fini(ctx);

        return rc;
}
EXPORT_SYMBOL(lu_context_init);

/**
 * Finalize context data-structure. Destroy key values.
 */
void lu_context_fini(struct lu_context *ctx)
{
        LINVRNT(ctx->lc_state == LCS_INITIALIZED || ctx->lc_state == LCS_LEFT);
        ctx->lc_state = LCS_FINALIZED;

        if ((ctx->lc_tags & LCT_REMEMBER) == 0) {
                LASSERT(cfs_list_empty(&ctx->lc_remember));
                keys_fini(ctx);
        } else { /* could race with key degister */
                spin_lock(&lu_keys_guard);
                keys_fini(ctx);
                cfs_list_del_init(&ctx->lc_remember);
                spin_unlock(&lu_keys_guard);
        }
}
EXPORT_SYMBOL(lu_context_fini);
/**
 * Called before entering context.
 */
void lu_context_enter(struct lu_context *ctx)
{
        LINVRNT(ctx->lc_state == LCS_INITIALIZED || ctx->lc_state == LCS_LEFT);
        ctx->lc_state = LCS_ENTERED;
}
EXPORT_SYMBOL(lu_context_enter);

/**
 * Called after exiting from \a ctx
 */
void lu_context_exit(struct lu_context *ctx)
{
        int i;

        LINVRNT(ctx->lc_state == LCS_ENTERED);
        ctx->lc_state = LCS_LEFT;
        if (ctx->lc_tags & LCT_HAS_EXIT && ctx->lc_value != NULL) {
                for (i = 0; i < ARRAY_SIZE(lu_keys); ++i) {
                        if (ctx->lc_value[i] != NULL) {
                                struct lu_context_key *key;

                                key = lu_keys[i];
                                LASSERT(key != NULL);
                                if (key->lct_exit != NULL)
                                        key->lct_exit(ctx,
                                                      key, ctx->lc_value[i]);
                        }
                }
        }
}
EXPORT_SYMBOL(lu_context_exit);
/**
 * Allocate for context all missing keys that were registered after context
 * creation. key_set_version is only changed in rare cases when modules
 * are loaded and removed.
 */
int lu_context_refill(struct lu_context *ctx)
{
        return likely(ctx->lc_version == key_set_version) ? 0 : keys_fill(ctx);
}
EXPORT_SYMBOL(lu_context_refill);
/**
 * lu_ctx_tags/lu_ses_tags will be updated if there are new types of
 * obd being added. Currently, this is only used on the client side,
 * specifically for the echo device client; for other stacks (like ptlrpc
 * threads), the context is predefined when the lu_device type is
 * registered, during the module probe phase.
 */
__u32 lu_context_tags_default = 0;
__u32 lu_session_tags_default = 0;

void lu_context_tags_update(__u32 tags)
{
        spin_lock(&lu_keys_guard);
        lu_context_tags_default |= tags;
        key_set_version++;
        spin_unlock(&lu_keys_guard);
}
EXPORT_SYMBOL(lu_context_tags_update);

void lu_context_tags_clear(__u32 tags)
{
        spin_lock(&lu_keys_guard);
        lu_context_tags_default &= ~tags;
        key_set_version++;
        spin_unlock(&lu_keys_guard);
}
EXPORT_SYMBOL(lu_context_tags_clear);

void lu_session_tags_update(__u32 tags)
{
        spin_lock(&lu_keys_guard);
        lu_session_tags_default |= tags;
        key_set_version++;
        spin_unlock(&lu_keys_guard);
}
EXPORT_SYMBOL(lu_session_tags_update);

void lu_session_tags_clear(__u32 tags)
{
        spin_lock(&lu_keys_guard);
        lu_session_tags_default &= ~tags;
        key_set_version++;
        spin_unlock(&lu_keys_guard);
}
EXPORT_SYMBOL(lu_session_tags_clear);
int lu_env_init(struct lu_env *env, __u32 tags)
{
        int result;

        env->le_ses = NULL;
        result = lu_context_init(&env->le_ctx, tags);
        if (likely(result == 0))
                lu_context_enter(&env->le_ctx);
        return result;
}
EXPORT_SYMBOL(lu_env_init);

void lu_env_fini(struct lu_env *env)
{
        lu_context_exit(&env->le_ctx);
        lu_context_fini(&env->le_ctx);
        env->le_ses = NULL;
}
EXPORT_SYMBOL(lu_env_fini);
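
/*
 * Illustrative sketch (not part of the original file): the common
 * lu_env_init()/lu_env_fini() bracket. lu_env_init() also enters the
 * context, so key values (such as the lu_global_key buffer used by
 * lu_cdebug_printer()) are available in between. The function name is
 * hypothetical.
 */
#if 0
static int lu_env_example(void)
{
        struct lu_env env;
        int rc;

        rc = lu_env_init(&env, LCT_MD_THREAD);
        if (rc != 0)
                return rc;
        /* ... use &env with lu_object_find()/lu_object_put() etc. ... */
        lu_env_fini(&env);
        return 0;
}
#endif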
int lu_env_refill(struct lu_env *env)
{
        int result;

        result = lu_context_refill(&env->le_ctx);
        if (result == 0 && env->le_ses != NULL)
                result = lu_context_refill(env->le_ses);
        return result;
}
EXPORT_SYMBOL(lu_env_refill);
/**
 * Currently, this API will only be used by echo client.
 * Because the echo client and the normal Lustre client share the same
 * cl_env cache, the echo client needs to refresh the env context after
 * it gets one from the cache, especially when a normal client and an
 * echo client co-exist on the same node.
 */
int lu_env_refill_by_tags(struct lu_env *env, __u32 ctags,
                          __u32 stags)
{
        int result;

        if ((env->le_ctx.lc_tags & ctags) != ctags) {
                env->le_ctx.lc_version = 0;
                env->le_ctx.lc_tags |= ctags;
        }

        if (env->le_ses && (env->le_ses->lc_tags & stags) != stags) {
                env->le_ses->lc_version = 0;
                env->le_ses->lc_tags |= stags;
        }

        result = lu_env_refill(env);
        return result;
}
EXPORT_SYMBOL(lu_env_refill_by_tags);
static struct cfs_shrinker *lu_site_shrinker = NULL;

typedef struct lu_site_stats {
        unsigned lss_populated;
        unsigned lss_max_search;
        unsigned lss_total;
        unsigned lss_busy;
} lu_site_stats_t;

static void lu_site_stats_get(cfs_hash_t *hs,
                              lu_site_stats_t *stats, int populated)
{
        cfs_hash_bd_t bd;
        int i;

        cfs_hash_for_each_bucket(hs, &bd, i) {
                struct lu_site_bkt_data *bkt = cfs_hash_bd_extra_get(hs, &bd);
                cfs_hlist_head_t *hhead;

                cfs_hash_bd_lock(hs, &bd, 1);
                stats->lss_busy += bkt->lsb_busy;
                stats->lss_total += cfs_hash_bd_count_get(&bd);
                stats->lss_max_search = max((int)stats->lss_max_search,
                                            cfs_hash_bd_depmax_get(&bd));
                if (!populated) {
                        cfs_hash_bd_unlock(hs, &bd, 1);
                        continue;
                }

                cfs_hash_bd_for_each_hlist(hs, &bd, hhead) {
                        if (!cfs_hlist_empty(hhead))
                                stats->lss_populated++;
                }
                cfs_hash_bd_unlock(hs, &bd, 1);
        }
}
#ifdef __KERNEL__

/*
 * There exists a potential lock inversion deadlock scenario when using
 * Lustre on top of ZFS. This occurs between one of ZFS's
 * buf_hash_table.ht_lock's, and Lustre's lu_sites_guard lock. Essentially,
 * thread A will take the lu_sites_guard lock and sleep on the ht_lock,
 * while thread B will take the ht_lock and sleep on the lu_sites_guard
 * lock. Obviously neither thread will wake and drop their respective hold
 * on their lock.
 *
 * To prevent this from happening we must ensure the lu_sites_guard lock is
 * not taken while down this code path. ZFS reliably does not set the
 * __GFP_FS bit in its code paths, so this can be used to determine if it
 * is safe to take the lu_sites_guard lock.
 *
 * Ideally we should accurately return the remaining number of cached
 * objects without taking the lu_sites_guard lock, but this is not
 * possible in the current implementation.
 */
static int lu_cache_shrink(SHRINKER_ARGS(sc, nr_to_scan, gfp_mask))
{
        lu_site_stats_t stats;
        struct lu_site *s;
        struct lu_site *tmp;
        int cached = 0;
        int remain = shrink_param(sc, nr_to_scan);
        CFS_LIST_HEAD(splice);

        if (!(shrink_param(sc, gfp_mask) & __GFP_FS)) {
                if (remain != 0)
                        return -1;
                else
                        /* We must not take the lu_sites_guard lock when
                         * __GFP_FS is *not* set because of the deadlock
                         * possibility detailed above. Additionally,
                         * since we cannot determine the number of
                         * objects in the cache without taking this
                         * lock, we're in a particularly tough spot. As
                         * a result, we'll just lie and say our cache is
                         * empty. This _should_ be ok, as we can't
                         * reclaim objects when __GFP_FS is *not* set
                         * anyway.
                         */
                        return 0;
        }

        CDEBUG(D_INODE, "Shrink %d objects\n", remain);

        mutex_lock(&lu_sites_guard);
        cfs_list_for_each_entry_safe(s, tmp, &lu_sites, ls_linkage) {
                if (shrink_param(sc, nr_to_scan) != 0) {
                        remain = lu_site_purge(&lu_shrink_env, s, remain);
                        /*
                         * Move just shrunk site to the tail of site list to
                         * assure shrinking fairness.
                         */
                        cfs_list_move_tail(&s->ls_linkage, &splice);
                }
                memset(&stats, 0, sizeof(stats));
                lu_site_stats_get(s->ls_obj_hash, &stats, 0);
                cached += stats.lss_total - stats.lss_busy;
                if (shrink_param(sc, nr_to_scan) && remain <= 0)
                        break;
        }
        cfs_list_splice(&splice, lu_sites.prev);
        mutex_unlock(&lu_sites_guard);

        cached = (cached / 100) * sysctl_vfs_cache_pressure;
        if (shrink_param(sc, nr_to_scan) == 0)
                CDEBUG(D_INODE, "%d objects cached\n", cached);
        return cached;
}
/**
 * Environment to be used in debugger, contains all tags.
 */
struct lu_env lu_debugging_env;

/**
 * Debugging printer function using printk().
 */
int lu_printk_printer(const struct lu_env *env,
                      void *unused, const char *format, ...)
{
        va_list args;

        va_start(args, format);
        vprintk(format, args);
        va_end(args);
        return 0;
}

int lu_debugging_setup(void)
{
        return lu_env_init(&lu_debugging_env, ~0);
}

void lu_context_keys_dump(void)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(lu_keys); ++i) {
                struct lu_context_key *key;

                key = lu_keys[i];
                if (key != NULL) {
                        CERROR("[%d]: %p %x (%p,%p,%p) %d %d \"%s\"@%p\n",
                               i, key, key->lct_tags,
                               key->lct_init, key->lct_fini, key->lct_exit,
                               key->lct_index, cfs_atomic_read(&key->lct_used),
                               key->lct_owner ? key->lct_owner->name : "",
                               key->lct_owner);
                        lu_ref_print(&key->lct_reference);
                }
        }
}
EXPORT_SYMBOL(lu_context_keys_dump);
#else  /* !__KERNEL__ */
static int lu_cache_shrink(int nr, unsigned int gfp_mask)
{
        return 0;
}
#endif /* __KERNEL__ */
int cl_global_init(void);
void cl_global_fini(void);
int lu_ref_global_init(void);
void lu_ref_global_fini(void);

int dt_global_init(void);
void dt_global_fini(void);

int llo_global_init(void);
void llo_global_fini(void);

/* context key constructor/destructor: lu_ucred_key_init, lu_ucred_key_fini */
LU_KEY_INIT_FINI(lu_ucred, struct lu_ucred);

static struct lu_context_key lu_ucred_key = {
        .lct_tags = LCT_SESSION,
        .lct_init = lu_ucred_key_init,
        .lct_fini = lu_ucred_key_fini
};
/**
 * Get ucred key if session exists and ucred key is allocated on it.
 * Return NULL otherwise.
 */
struct lu_ucred *lu_ucred(const struct lu_env *env)
{
        if (!env->le_ses)
                return NULL;
        return lu_context_key_get(env->le_ses, &lu_ucred_key);
}
EXPORT_SYMBOL(lu_ucred);

/**
 * Get ucred key and check if it is properly initialized.
 * Return NULL otherwise.
 */
struct lu_ucred *lu_ucred_check(const struct lu_env *env)
{
        struct lu_ucred *uc = lu_ucred(env);
        if (uc && uc->uc_valid != UCRED_OLD && uc->uc_valid != UCRED_NEW)
                return NULL;
        return uc;
}
EXPORT_SYMBOL(lu_ucred_check);

/**
 * Get ucred key, which must exist and must be properly initialized.
 * Assert otherwise.
 */
struct lu_ucred *lu_ucred_assert(const struct lu_env *env)
{
        struct lu_ucred *uc = lu_ucred_check(env);
        LASSERT(uc != NULL);
        return uc;
}
EXPORT_SYMBOL(lu_ucred_assert);
/**
 * Initialization of global lu_* data.
 */
int lu_global_init(void)
{
        int result;

        CDEBUG(D_INFO, "Lustre LU module (%p).\n", &lu_keys);

        result = lu_ref_global_init();
        if (result != 0)
                return result;

        LU_CONTEXT_KEY_INIT(&lu_global_key);
        result = lu_context_key_register(&lu_global_key);
        if (result != 0)
                return result;

        LU_CONTEXT_KEY_INIT(&lu_ucred_key);
        result = lu_context_key_register(&lu_ucred_key);
        if (result != 0)
                return result;

        /*
         * At this level, we don't know what tags are needed, so allocate them
         * conservatively. This should not be too bad, because this
         * environment is global.
         */
        mutex_lock(&lu_sites_guard);
        result = lu_env_init(&lu_shrink_env, LCT_SHRINKER);
        mutex_unlock(&lu_sites_guard);
        if (result != 0)
                return result;

        /*
         * seeks estimation: 3 seeks to read a record from oi, one to read
         * inode, one for ea. Unfortunately setting this high value results in
         * lu_object/inode cache consuming all the memory.
         */
        lu_site_shrinker = cfs_set_shrinker(CFS_DEFAULT_SEEKS, lu_cache_shrink);
        if (lu_site_shrinker == NULL)
                return -ENOMEM;

        result = dt_global_init();
        if (result != 0)
                return result;

        result = llo_global_init();
        if (result != 0)
                return result;

        result = cl_global_init();

        return result;
}
/**
 * Dual to lu_global_init().
 */
void lu_global_fini(void)
{
        cl_global_fini();
        llo_global_fini();
        dt_global_fini();
        if (lu_site_shrinker != NULL) {
                cfs_remove_shrinker(lu_site_shrinker);
                lu_site_shrinker = NULL;
        }

        lu_context_key_degister(&lu_global_key);
        lu_context_key_degister(&lu_ucred_key);

        /*
         * Tear shrinker environment down _after_ de-registering
         * lu_global_key, because the latter has a value in the former.
         */
        mutex_lock(&lu_sites_guard);
        lu_env_fini(&lu_shrink_env);
        mutex_unlock(&lu_sites_guard);

        lu_ref_global_fini();
}
struct lu_buf LU_BUF_NULL = {
        .lb_buf = NULL,
        .lb_len = 0
};
EXPORT_SYMBOL(LU_BUF_NULL);
static __u32 ls_stats_read(struct lprocfs_stats *stats, int idx)
{
        struct lprocfs_counter ret;

        lprocfs_stats_collect(stats, idx, &ret);
        return (__u32)ret.lc_count;
}

/**
 * Output site statistical counters into a buffer. Suitable for
 * lprocfs_rd_*()-style functions.
 */
int lu_site_stats_print(const struct lu_site *s, char *page, int count)
{
        lu_site_stats_t stats;

        memset(&stats, 0, sizeof(stats));
        lu_site_stats_get(s->ls_obj_hash, &stats, 1);

        return snprintf(page, count, "%d/%d %d/%d %d %d %d %d %d %d %d\n",
                        stats.lss_busy,
                        stats.lss_total,
                        stats.lss_populated,
                        CFS_HASH_NHLIST(s->ls_obj_hash),
                        stats.lss_max_search,
                        ls_stats_read(s->ls_stats, LU_SS_CREATED),
                        ls_stats_read(s->ls_stats, LU_SS_CACHE_HIT),
                        ls_stats_read(s->ls_stats, LU_SS_CACHE_MISS),
                        ls_stats_read(s->ls_stats, LU_SS_CACHE_RACE),
                        ls_stats_read(s->ls_stats, LU_SS_CACHE_DEATH_RACE),
                        ls_stats_read(s->ls_stats, LU_SS_LRU_PURGED));
}
EXPORT_SYMBOL(lu_site_stats_print);
/**
 * Helper function to initialize a number of kmem slab caches at once.
 */
int lu_kmem_init(struct lu_kmem_descr *caches)
{
        int result;
        struct lu_kmem_descr *iter = caches;

        for (result = 0; iter->ckd_cache != NULL; ++iter) {
                *iter->ckd_cache = cfs_mem_cache_create(iter->ckd_name,
                                                        iter->ckd_size,
                                                        0, 0);
                if (*iter->ckd_cache == NULL) {
                        result = -ENOMEM;
                        /* free all previously allocated caches */
                        lu_kmem_fini(caches);
                        break;
                }
        }
        return result;
}
EXPORT_SYMBOL(lu_kmem_init);

/**
 * Helper function to finalize a number of kmem slab caches at once. Dual to
 * lu_kmem_init().
 */
void lu_kmem_fini(struct lu_kmem_descr *caches)
{
        int rc;

        for (; caches->ckd_cache != NULL; ++caches) {
                if (*caches->ckd_cache != NULL) {
                        rc = cfs_mem_cache_destroy(*caches->ckd_cache);
                        LASSERTF(rc == 0, "couldn't destroy %s slab\n",
                                 caches->ckd_name);
                        *caches->ckd_cache = NULL;
                }
        }
}
EXPORT_SYMBOL(lu_kmem_fini);
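
/*
 * Illustrative sketch (not part of the original file): the NULL-terminated
 * lu_kmem_descr array consumed by lu_kmem_init() and lu_kmem_fini(). The
 * "foo" cache variable and struct names are hypothetical.
 */
#if 0
struct foo_object {
        struct lu_object foo_obj;
};

static cfs_mem_cache_t *foo_object_kmem;

static struct lu_kmem_descr foo_caches[] = {
        {
                .ckd_cache = &foo_object_kmem,
                .ckd_name  = "foo_object_kmem",
                .ckd_size  = sizeof(struct foo_object)
        },
        {
                .ckd_cache = NULL
        }
};

/* module init: result = lu_kmem_init(foo_caches);
 * module exit: lu_kmem_fini(foo_caches); */
#endif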
/**
 * Temporary solution to be able to assign fid in ->do_create()
 * till we have fully-functional OST fids
 */
void lu_object_assign_fid(const struct lu_env *env, struct lu_object *o,
                          const struct lu_fid *fid)
{
        struct lu_site *s = o->lo_dev->ld_site;
        struct lu_fid *old = &o->lo_header->loh_fid;
        struct lu_site_bkt_data *bkt;
        struct lu_object *shadow;
        cfs_waitlink_t waiter;
        cfs_hash_t *hs;
        cfs_hash_bd_t bd;
        __u64 version = 0;

        LASSERT(fid_is_zero(old));

        hs = s->ls_obj_hash;
        cfs_hash_bd_get_and_lock(hs, (void *)fid, &bd, 1);
        shadow = htable_lookup(s, &bd, fid, &waiter, &version);
        /* supposed to be unique */
        LASSERT(shadow == NULL);
        *old = *fid;
        bkt = cfs_hash_bd_extra_get(hs, &bd);
        cfs_hash_bd_add_locked(hs, &bd, &o->lo_header->loh_hash);
        bkt->lsb_busy++;
        cfs_hash_bd_unlock(hs, &bd, 1);
}
EXPORT_SYMBOL(lu_object_assign_fid);
/**
 * allocates object with 0 (non-assigned) fid
 * XXX: temporary solution to be able to assign fid in ->do_create()
 *      till we have fully-functional OST fids
 */
struct lu_object *lu_object_anon(const struct lu_env *env,
                                 struct lu_device *dev,
                                 const struct lu_object_conf *conf)
{
        struct lu_fid fid;
        struct lu_object *o;

        fid_zero(&fid);
        o = lu_object_alloc(env, dev, &fid, conf);

        return o;
}
EXPORT_SYMBOL(lu_object_anon);
void lu_buf_free(struct lu_buf *buf)
{
        LASSERT(buf);
        if (buf->lb_buf) {
                LASSERT(buf->lb_len > 0);
                OBD_FREE_LARGE(buf->lb_buf, buf->lb_len);
                buf->lb_buf = NULL;
                buf->lb_len = 0;
        }
}
EXPORT_SYMBOL(lu_buf_free);

void lu_buf_alloc(struct lu_buf *buf, int size)
{
        LASSERT(buf);
        LASSERT(buf->lb_buf == NULL);
        LASSERT(buf->lb_len == 0);
        OBD_ALLOC_LARGE(buf->lb_buf, size);
        if (likely(buf->lb_buf))
                buf->lb_len = size;
}
EXPORT_SYMBOL(lu_buf_alloc);

void lu_buf_realloc(struct lu_buf *buf, int size)
{
        lu_buf_free(buf);
        lu_buf_alloc(buf, size);
}
EXPORT_SYMBOL(lu_buf_realloc);
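
/*
 * Illustrative sketch (not part of the original file): a lu_buf life cycle.
 * The buffer must start out zeroed (see the LASSERTs in lu_buf_alloc());
 * lu_buf_realloc() frees the old buffer and allocates a new one, so existing
 * contents are not preserved. The function name is hypothetical.
 */
#if 0
static int lu_buf_example(void)
{
        struct lu_buf buf = { .lb_buf = NULL, .lb_len = 0 };

        lu_buf_alloc(&buf, 4096);
        if (buf.lb_buf == NULL)
                return -ENOMEM;
        /* ... fill buf.lb_buf ... */
        lu_buf_realloc(&buf, 8192);     /* old contents are discarded */
        if (buf.lb_buf == NULL)
                return -ENOMEM;
        lu_buf_free(&buf);
        return 0;
}
#endif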