4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
20 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
21 * CA 95054 USA or visit www.sun.com if you need additional information or
27 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
28 * Use is subject to license terms.
30 * Copyright (c) 2011, 2012, Intel Corporation.
33 * This file is part of Lustre, http://www.lustre.org/
34 * Lustre is a trademark of Sun Microsystems, Inc.
36 * lustre/obdclass/lu_object.c
39 * These are the only exported functions; they provide generic
40 * infrastructure for managing object devices
42 * Author: Nikita Danilov <nikita.danilov@sun.com>
45 #define DEBUG_SUBSYSTEM S_CLASS
47 #include <libcfs/libcfs.h>
50 # include <linux/module.h>
54 #include <libcfs/libcfs_hash.h>
55 #include <obd_class.h>
56 #include <obd_support.h>
57 #include <lustre_disk.h>
58 #include <lustre_fid.h>
59 #include <lu_object.h>
60 #include <libcfs/list.h>
62 static void lu_object_free(const struct lu_env *env, struct lu_object *o);
65 * Decrease the reference counter on an object. If the last reference is
66 * released, return the object to the cache, unless lu_object_is_dying(o)
67 * holds. In the latter case, free the object immediately.
69 void lu_object_put(const struct lu_env *env, struct lu_object *o)
71 struct lu_site_bkt_data *bkt;
72 struct lu_object_header *top;
74 struct lu_object *orig;
76 const struct lu_fid *fid;
79 site = o->lo_dev->ld_site;
83 * Till fids-on-OST is fully implemented, anonymous objects
84 * are possible in OSP. Such an object is not listed in the site,
85 * so we should not remove it from the site.
87 fid = lu_object_fid(o);
88 if (fid_is_zero(fid)) {
89 LASSERT(top->loh_hash.next == NULL
90 && top->loh_hash.pprev == NULL);
91 LASSERT(cfs_list_empty(&top->loh_lru));
92 if (!cfs_atomic_dec_and_test(&top->loh_ref))
94 cfs_list_for_each_entry_reverse(o, &top->loh_layers, lo_linkage) {
95 if (o->lo_ops->loo_object_release != NULL)
96 o->lo_ops->loo_object_release(env, o);
98 lu_object_free(env, orig);
102 cfs_hash_bd_get(site->ls_obj_hash, &top->loh_fid, &bd);
103 bkt = cfs_hash_bd_extra_get(site->ls_obj_hash, &bd);
105 if (!cfs_hash_bd_dec_and_lock(site->ls_obj_hash, &bd, &top->loh_ref)) {
106 if (lu_object_is_dying(top)) {
109 * Somebody may be waiting for this; currently this is
110 * only used for cl_object, see cl_object_put_last().
112 cfs_waitq_broadcast(&bkt->lsb_marche_funebre);
117 LASSERT(bkt->lsb_busy > 0);
120 * When the last reference is released, iterate over object
121 * layers, and notify them that the object is no longer busy.
123 cfs_list_for_each_entry_reverse(o, &top->loh_layers, lo_linkage) {
124 if (o->lo_ops->loo_object_release != NULL)
125 o->lo_ops->loo_object_release(env, o);
128 if (!lu_object_is_dying(top)) {
129 LASSERT(cfs_list_empty(&top->loh_lru));
130 cfs_list_add_tail(&top->loh_lru, &bkt->lsb_lru);
131 cfs_hash_bd_unlock(site->ls_obj_hash, &bd, 1);
136 * If the object is dying (i.e., will not be cached), remove it
137 * from the hash table and LRU.
139 * This is done with the hash table and LRU lists locked. As the only
140 * way to acquire the first reference to a previously unreferenced
141 * object is through hash-table lookup (lu_object_find())
142 * or LRU scanning (lu_site_purge()), both of which are done under the
143 * hash-table and LRU locks, no race with a concurrent object lookup is
144 * possible and we can safely destroy the object below.
146 if (!test_and_set_bit(LU_OBJECT_UNHASHED, &top->loh_flags))
147 cfs_hash_bd_del_locked(site->ls_obj_hash, &bd, &top->loh_hash);
148 cfs_hash_bd_unlock(site->ls_obj_hash, &bd, 1);
150 * Object was already removed from hash and lru above, can
153 lu_object_free(env, orig);
155 EXPORT_SYMBOL(lu_object_put);
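/*
 * A minimal usage sketch (not part of the original code; "my_dev",
 * "my_fid" and the body are hypothetical): the common pairing of
 * lu_object_find() with lu_object_put(), which drops the reference
 * acquired by the lookup.
 */
#if 0 /* illustration only */
static int my_lookup_and_use(const struct lu_env *env,
                             struct lu_device *my_dev,
                             const struct lu_fid *my_fid)
{
        struct lu_object *o;

        o = lu_object_find(env, my_dev, my_fid, NULL);
        if (IS_ERR(o))
                return PTR_ERR(o);
        /* ... use the object here ... */
        lu_object_put(env, o);  /* returns it to cache, or frees it if dying */
        return 0;
}
#endif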
158 * Put object and don't keep it in cache. This is a temporary solution
159 * for multi-site objects whose layering is not constant.
161 void lu_object_put_nocache(const struct lu_env *env, struct lu_object *o)
163 set_bit(LU_OBJECT_HEARD_BANSHEE, &o->lo_header->loh_flags);
164 return lu_object_put(env, o);
166 EXPORT_SYMBOL(lu_object_put_nocache);
169 * Kill the object and take it out of LRU cache.
170 * Currently used by client code for layout change.
172 void lu_object_unhash(const struct lu_env *env, struct lu_object *o)
174 struct lu_object_header *top;
177 set_bit(LU_OBJECT_HEARD_BANSHEE, &top->loh_flags);
178 if (!test_and_set_bit(LU_OBJECT_UNHASHED, &top->loh_flags)) {
179 cfs_hash_t *obj_hash = o->lo_dev->ld_site->ls_obj_hash;
182 cfs_hash_bd_get_and_lock(obj_hash, &top->loh_fid, &bd, 1);
183 cfs_list_del_init(&top->loh_lru);
184 cfs_hash_bd_del_locked(obj_hash, &bd, &top->loh_hash);
185 cfs_hash_bd_unlock(obj_hash, &bd, 1);
188 EXPORT_SYMBOL(lu_object_unhash);
191 * Allocate a new object.
193 * This follows the object creation protocol described in the comment
194 * within the struct lu_device_operations definition.
196 static struct lu_object *lu_object_alloc(const struct lu_env *env,
197 struct lu_device *dev,
198 const struct lu_fid *f,
199 const struct lu_object_conf *conf)
201 struct lu_object *scan;
202 struct lu_object *top;
209 * Create top-level object slice. This will also create
212 top = dev->ld_ops->ldo_object_alloc(env, NULL, dev);
214 RETURN(ERR_PTR(-ENOMEM));
218 * This is the only place where object fid is assigned. It's constant
221 top->lo_header->loh_fid = *f;
222 layers = &top->lo_header->loh_layers;
225 * Call ->loo_object_init() repeatedly, until no more new
226 * object slices are created.
229 cfs_list_for_each_entry(scan, layers, lo_linkage) {
230 if (scan->lo_flags & LU_OBJECT_ALLOCATED)
233 scan->lo_header = top->lo_header;
234 result = scan->lo_ops->loo_object_init(env, scan, conf);
236 lu_object_free(env, top);
237 RETURN(ERR_PTR(result));
239 scan->lo_flags |= LU_OBJECT_ALLOCATED;
243 cfs_list_for_each_entry_reverse(scan, layers, lo_linkage) {
244 if (scan->lo_ops->loo_object_start != NULL) {
245 result = scan->lo_ops->loo_object_start(env, scan);
247 lu_object_free(env, top);
248 RETURN(ERR_PTR(result));
253 lprocfs_counter_incr(dev->ld_site->ls_stats, LU_SS_CREATED);
260 static void lu_object_free(const struct lu_env *env, struct lu_object *o)
262 struct lu_site_bkt_data *bkt;
263 struct lu_site *site;
264 struct lu_object *scan;
268 site = o->lo_dev->ld_site;
269 layers = &o->lo_header->loh_layers;
270 bkt = lu_site_bkt_from_fid(site, &o->lo_header->loh_fid);
272 * First call ->loo_object_delete() method to release all resources.
274 cfs_list_for_each_entry_reverse(scan, layers, lo_linkage) {
275 if (scan->lo_ops->loo_object_delete != NULL)
276 scan->lo_ops->loo_object_delete(env, scan);
280 * Then, splice the object layers into a stand-alone list, and call
281 * ->loo_object_free() on all layers to free memory. Splicing is
282 * necessary, because lu_object_header is freed together with the
285 CFS_INIT_LIST_HEAD(&splice);
286 cfs_list_splice_init(layers, &splice);
287 while (!cfs_list_empty(&splice)) {
289 * Free layers in bottom-to-top order, so that object header
290 * lives as long as possible and ->loo_object_free() methods
291 * can look at its contents.
293 o = container_of0(splice.prev, struct lu_object, lo_linkage);
294 cfs_list_del_init(&o->lo_linkage);
295 LASSERT(o->lo_ops->loo_object_free != NULL);
296 o->lo_ops->loo_object_free(env, o);
299 if (cfs_waitq_active(&bkt->lsb_marche_funebre))
300 cfs_waitq_broadcast(&bkt->lsb_marche_funebre);
304 * Free \a nr objects from the cold end of the site LRU list.
306 int lu_site_purge(const struct lu_env *env, struct lu_site *s, int nr)
308 struct lu_object_header *h;
309 struct lu_object_header *temp;
310 struct lu_site_bkt_data *bkt;
320 if (OBD_FAIL_CHECK(OBD_FAIL_OBD_NO_LRU))
323 CFS_INIT_LIST_HEAD(&dispose);
325 * Under the LRU list lock, scan the LRU list and move unreferenced objects
326 * to the dispose list, removing them from the LRU and the hash table.
328 start = s->ls_purge_start;
329 bnr = (nr == ~0) ? -1 : nr / CFS_HASH_NBKT(s->ls_obj_hash) + 1;
332 cfs_hash_for_each_bucket(s->ls_obj_hash, &bd, i) {
336 cfs_hash_bd_lock(s->ls_obj_hash, &bd, 1);
337 bkt = cfs_hash_bd_extra_get(s->ls_obj_hash, &bd);
339 cfs_list_for_each_entry_safe(h, temp, &bkt->lsb_lru, loh_lru) {
340 LASSERT(cfs_atomic_read(&h->loh_ref) == 0);
342 cfs_hash_bd_get(s->ls_obj_hash, &h->loh_fid, &bd2);
343 LASSERT(bd.bd_bucket == bd2.bd_bucket);
345 cfs_hash_bd_del_locked(s->ls_obj_hash,
347 cfs_list_move(&h->loh_lru, &dispose);
351 if (nr != ~0 && --nr == 0)
354 if (count > 0 && --count == 0)
358 cfs_hash_bd_unlock(s->ls_obj_hash, &bd, 1);
361 * Free everything on the dispose list. This is safe against
362 * races for the reasons described in lu_object_put().
364 while (!cfs_list_empty(&dispose)) {
365 h = container_of0(dispose.next,
366 struct lu_object_header, loh_lru);
367 cfs_list_del_init(&h->loh_lru);
368 lu_object_free(env, lu_object_top(h));
369 lprocfs_counter_incr(s->ls_stats, LU_SS_LRU_PURGED);
376 if (nr != 0 && did_sth && start != 0) {
377 start = 0; /* restart from the first bucket */
380 /* race on s->ls_purge_start, but nobody cares */
381 s->ls_purge_start = i % CFS_HASH_NBKT(s->ls_obj_hash);
385 EXPORT_SYMBOL(lu_site_purge);
390 * The code below has to jump through certain hoops to output an object
391 * description into a libcfs_debug_msg-based log. The problem is that
392 * lu_object_print() composes the object description from strings that are
393 * parts of _lines_ of output (i.e., strings that are not terminated by a
394 * newline). This does not fit well into the libcfs_debug_msg() interface,
395 * which assumes that each message supplied to it is a self-contained output line.
397 * To work around this, strings are collected in a temporary buffer
398 * (implemented as a value of the lu_cdebug_key key), until a terminating
399 * newline character is detected.
407 * XXX overflow is not handled correctly.
412 struct lu_cdebug_data {
416 char lck_area[LU_CDEBUG_LINE];
419 /* context key constructor/destructor: lu_global_key_init, lu_global_key_fini */
420 LU_KEY_INIT_FINI(lu_global, struct lu_cdebug_data);
423 * Key, holding temporary buffer. This key is registered very early by
426 struct lu_context_key lu_global_key = {
427 .lct_tags = LCT_MD_THREAD | LCT_DT_THREAD |
428 LCT_MG_THREAD | LCT_CL_THREAD,
429 .lct_init = lu_global_key_init,
430 .lct_fini = lu_global_key_fini
434 * Printer function emitting messages through libcfs_debug_msg().
436 int lu_cdebug_printer(const struct lu_env *env,
437 void *cookie, const char *format, ...)
439 struct libcfs_debug_msg_data *msgdata = cookie;
440 struct lu_cdebug_data *key;
445 va_start(args, format);
447 key = lu_context_key_get(&env->le_ctx, &lu_global_key);
448 LASSERT(key != NULL);
450 used = strlen(key->lck_area);
451 complete = format[strlen(format) - 1] == '\n';
453 * Append new chunk to the buffer.
455 vsnprintf(key->lck_area + used,
456 ARRAY_SIZE(key->lck_area) - used, format, args);
458 if (cfs_cdebug_show(msgdata->msg_mask, msgdata->msg_subsys))
459 libcfs_debug_msg(msgdata, "%s", key->lck_area);
460 key->lck_area[0] = 0;
465 EXPORT_SYMBOL(lu_cdebug_printer);
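/*
 * Illustration (assuming the standard LIBCFS_DEBUG_MSG_DATA_DECL()
 * macro, as used by other lu_cdebug_printer() callers): the msgdata is
 * passed as the opaque cookie, and partial chunks are buffered in
 * lck_area until a terminating newline arrives.
 */
#if 0 /* illustration only */
static void my_dump_object(const struct lu_env *env,
                           const struct lu_object *o)
{
        LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, D_INFO, NULL);

        lu_object_print(env, &msgdata, lu_cdebug_printer, o);
}
#endif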
468 * Print object header.
470 void lu_object_header_print(const struct lu_env *env, void *cookie,
471 lu_printer_t printer,
472 const struct lu_object_header *hdr)
474 (*printer)(env, cookie, "header@%p[%#lx, %d, "DFID"%s%s%s]",
475 hdr, hdr->loh_flags, cfs_atomic_read(&hdr->loh_ref),
477 cfs_hlist_unhashed(&hdr->loh_hash) ? "" : " hash",
478 cfs_list_empty((cfs_list_t *)&hdr->loh_lru) ?
480 hdr->loh_attr & LOHA_EXISTS ? " exist":"");
482 EXPORT_SYMBOL(lu_object_header_print);
485 * Print human readable representation of the \a o to the \a printer.
487 void lu_object_print(const struct lu_env *env, void *cookie,
488 lu_printer_t printer, const struct lu_object *o)
490 static const char ruler[] = "........................................";
491 struct lu_object_header *top;
495 lu_object_header_print(env, cookie, printer, top);
496 (*printer)(env, cookie, "{ \n");
497 cfs_list_for_each_entry(o, &top->loh_layers, lo_linkage) {
498 depth = o->lo_depth + 4;
501 * print `.' \a depth times followed by type name and address
503 (*printer)(env, cookie, "%*.*s%s@%p", depth, depth, ruler,
504 o->lo_dev->ld_type->ldt_name, o);
505 if (o->lo_ops->loo_object_print != NULL)
506 o->lo_ops->loo_object_print(env, cookie, printer, o);
507 (*printer)(env, cookie, "\n");
509 (*printer)(env, cookie, "} header@%p\n", top);
511 EXPORT_SYMBOL(lu_object_print);
514 * Check object consistency.
516 int lu_object_invariant(const struct lu_object *o)
518 struct lu_object_header *top;
521 cfs_list_for_each_entry(o, &top->loh_layers, lo_linkage) {
522 if (o->lo_ops->loo_object_invariant != NULL &&
523 !o->lo_ops->loo_object_invariant(o))
528 EXPORT_SYMBOL(lu_object_invariant);
530 static struct lu_object *htable_lookup(struct lu_site *s,
532 const struct lu_fid *f,
533 cfs_waitlink_t *waiter,
536 struct lu_site_bkt_data *bkt;
537 struct lu_object_header *h;
538 cfs_hlist_node_t *hnode;
539 __u64 ver = cfs_hash_bd_version_get(bd);
545 bkt = cfs_hash_bd_extra_get(s->ls_obj_hash, bd);
546 /* cfs_hash_bd_peek_locked() is somewhat of an "internal" function
547 * of cfs_hash; it does not take a refcount on the object. */
548 hnode = cfs_hash_bd_peek_locked(s->ls_obj_hash, bd, (void *)f);
550 lprocfs_counter_incr(s->ls_stats, LU_SS_CACHE_MISS);
554 h = container_of0(hnode, struct lu_object_header, loh_hash);
555 if (likely(!lu_object_is_dying(h))) {
556 cfs_hash_get(s->ls_obj_hash, hnode);
557 lprocfs_counter_incr(s->ls_stats, LU_SS_CACHE_HIT);
558 cfs_list_del_init(&h->loh_lru);
559 return lu_object_top(h);
563 * Lookup found an object being destroyed; this object cannot be
564 * returned (to ensure that references to dying objects are eventually
565 * drained), and moreover, lookup has to wait until the object is freed.
568 cfs_waitlink_init(waiter);
569 cfs_waitq_add(&bkt->lsb_marche_funebre, waiter);
570 cfs_set_current_state(CFS_TASK_UNINT);
571 lprocfs_counter_incr(s->ls_stats, LU_SS_CACHE_DEATH_RACE);
572 return ERR_PTR(-EAGAIN);
576 * Search the cache for an object with the fid \a f. If such an object is found,
577 * return it. Otherwise, create a new object, insert it into the cache and return
578 * it. In any case, an additional reference is acquired on the returned object.
580 struct lu_object *lu_object_find(const struct lu_env *env,
581 struct lu_device *dev, const struct lu_fid *f,
582 const struct lu_object_conf *conf)
584 return lu_object_find_at(env, dev->ld_site->ls_top_dev, f, conf);
586 EXPORT_SYMBOL(lu_object_find);
588 static struct lu_object *lu_object_new(const struct lu_env *env,
589 struct lu_device *dev,
590 const struct lu_fid *f,
591 const struct lu_object_conf *conf)
596 struct lu_site_bkt_data *bkt;
598 o = lu_object_alloc(env, dev, f, conf);
599 if (unlikely(IS_ERR(o)))
602 hs = dev->ld_site->ls_obj_hash;
603 cfs_hash_bd_get_and_lock(hs, (void *)f, &bd, 1);
604 bkt = cfs_hash_bd_extra_get(hs, &bd);
605 cfs_hash_bd_add_locked(hs, &bd, &o->lo_header->loh_hash);
607 cfs_hash_bd_unlock(hs, &bd, 1);
612 * Core logic of lu_object_find*() functions.
614 static struct lu_object *lu_object_find_try(const struct lu_env *env,
615 struct lu_device *dev,
616 const struct lu_fid *f,
617 const struct lu_object_conf *conf,
618 cfs_waitlink_t *waiter)
621 struct lu_object *shadow;
628 * This uses the standard index-maintenance protocol:
630 * - search index under lock, and return object if found;
631 * - otherwise, unlock index, allocate new object;
632 * - lock index and search again;
633 * - if nothing is found (usual case), insert newly created
635 * - otherwise (race: other thread inserted object), free
636 * object just allocated.
640 * For the "LOC_F_NEW" case, we are sure the object is newly created.
641 * It is unnecessary to perform lookup-alloc-lookup-insert; instead,
642 * just allocate and insert directly.
644 * If a dying object is found during the index search, add @waiter to the
645 * site wait-queue and return ERR_PTR(-EAGAIN).
647 if (conf != NULL && conf->loc_flags & LOC_F_NEW)
648 return lu_object_new(env, dev, f, conf);
652 cfs_hash_bd_get_and_lock(hs, (void *)f, &bd, 1);
653 o = htable_lookup(s, &bd, f, waiter, &version);
654 cfs_hash_bd_unlock(hs, &bd, 1);
659 * Allocate new object. This may result in rather complicated
660 * operations, including fld queries, inode loading, etc.
662 o = lu_object_alloc(env, dev, f, conf);
663 if (unlikely(IS_ERR(o)))
666 LASSERT(lu_fid_eq(lu_object_fid(o), f));
668 cfs_hash_bd_lock(hs, &bd, 1);
670 shadow = htable_lookup(s, &bd, f, waiter, &version);
671 if (likely(shadow == NULL)) {
672 struct lu_site_bkt_data *bkt;
674 bkt = cfs_hash_bd_extra_get(hs, &bd);
675 cfs_hash_bd_add_locked(hs, &bd, &o->lo_header->loh_hash);
677 cfs_hash_bd_unlock(hs, &bd, 1);
681 lprocfs_counter_incr(s->ls_stats, LU_SS_CACHE_RACE);
682 cfs_hash_bd_unlock(hs, &bd, 1);
683 lu_object_free(env, o);
688 * Much like lu_object_find(), but the top-level device of the object is
689 * specifically \a dev rather than the top-level device of the site. This interface
690 * allows objects of different "stacking" to be created within the same site.
692 struct lu_object *lu_object_find_at(const struct lu_env *env,
693 struct lu_device *dev,
694 const struct lu_fid *f,
695 const struct lu_object_conf *conf)
697 struct lu_site_bkt_data *bkt;
698 struct lu_object *obj;
702 obj = lu_object_find_try(env, dev, f, conf, &wait);
703 if (obj != ERR_PTR(-EAGAIN))
706 * lu_object_find_try() already added waiter into the
709 cfs_waitq_wait(&wait, CFS_TASK_UNINT);
710 bkt = lu_site_bkt_from_fid(dev->ld_site, (void *)f);
711 cfs_waitq_del(&bkt->lsb_marche_funebre, &wait);
714 EXPORT_SYMBOL(lu_object_find_at);
717 * Find object with given fid, and return its slice belonging to given device.
719 struct lu_object *lu_object_find_slice(const struct lu_env *env,
720 struct lu_device *dev,
721 const struct lu_fid *f,
722 const struct lu_object_conf *conf)
724 struct lu_object *top;
725 struct lu_object *obj;
727 top = lu_object_find(env, dev, f, conf);
729 obj = lu_object_locate(top->lo_header, dev->ld_type);
731 lu_object_put(env, top);
736 EXPORT_SYMBOL(lu_object_find_slice);
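/*
 * Usage sketch (hypothetical "my_dev"/"my_fid"): the returned slice pins
 * the whole compound object, so lu_object_put() on the slice releases
 * the reference taken by the lookup.
 */
#if 0 /* illustration only */
static int my_use_slice(const struct lu_env *env,
                        struct lu_device *my_dev,
                        const struct lu_fid *my_fid)
{
        struct lu_object *obj;

        obj = lu_object_find_slice(env, my_dev, my_fid, NULL);
        if (IS_ERR(obj))
                return PTR_ERR(obj);
        /* ... use the slice belonging to my_dev's type ... */
        lu_object_put(env, obj);
        return 0;
}
#endif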
739 * Global list of all device types.
741 static CFS_LIST_HEAD(lu_device_types);
743 int lu_device_type_init(struct lu_device_type *ldt)
747 CFS_INIT_LIST_HEAD(&ldt->ldt_linkage);
748 if (ldt->ldt_ops->ldto_init)
749 result = ldt->ldt_ops->ldto_init(ldt);
751 cfs_list_add(&ldt->ldt_linkage, &lu_device_types);
754 EXPORT_SYMBOL(lu_device_type_init);
756 void lu_device_type_fini(struct lu_device_type *ldt)
758 cfs_list_del_init(&ldt->ldt_linkage);
759 if (ldt->ldt_ops->ldto_fini)
760 ldt->ldt_ops->ldto_fini(ldt);
762 EXPORT_SYMBOL(lu_device_type_fini);
764 void lu_types_stop(void)
766 struct lu_device_type *ldt;
768 cfs_list_for_each_entry(ldt, &lu_device_types, ldt_linkage) {
769 if (ldt->ldt_device_nr == 0 && ldt->ldt_ops->ldto_stop)
770 ldt->ldt_ops->ldto_stop(ldt);
773 EXPORT_SYMBOL(lu_types_stop);
776 * Global list of all sites on this node
778 static CFS_LIST_HEAD(lu_sites);
779 static DEFINE_MUTEX(lu_sites_guard);
782 * Global environment used by site shrinker.
784 static struct lu_env lu_shrink_env;
786 struct lu_site_print_arg {
787 struct lu_env *lsp_env;
789 lu_printer_t lsp_printer;
793 lu_site_obj_print(cfs_hash_t *hs, cfs_hash_bd_t *bd,
794 cfs_hlist_node_t *hnode, void *data)
796 struct lu_site_print_arg *arg = (struct lu_site_print_arg *)data;
797 struct lu_object_header *h;
799 h = cfs_hlist_entry(hnode, struct lu_object_header, loh_hash);
800 if (!cfs_list_empty(&h->loh_layers)) {
801 const struct lu_object *o;
803 o = lu_object_top(h);
804 lu_object_print(arg->lsp_env, arg->lsp_cookie,
805 arg->lsp_printer, o);
807 lu_object_header_print(arg->lsp_env, arg->lsp_cookie,
808 arg->lsp_printer, h);
814 * Print all objects in \a s.
816 void lu_site_print(const struct lu_env *env, struct lu_site *s, void *cookie,
817 lu_printer_t printer)
819 struct lu_site_print_arg arg = {
820 .lsp_env = (struct lu_env *)env,
821 .lsp_cookie = cookie,
822 .lsp_printer = printer,
825 cfs_hash_for_each(s->ls_obj_hash, lu_site_obj_print, &arg);
827 EXPORT_SYMBOL(lu_site_print);
830 LU_CACHE_PERCENT_MAX = 50,
831 LU_CACHE_PERCENT_DEFAULT = 20
834 static unsigned int lu_cache_percent = LU_CACHE_PERCENT_DEFAULT;
835 CFS_MODULE_PARM(lu_cache_percent, "i", int, 0644,
836 "Percentage of memory to be used as lu_object cache");
839 * Return desired hash table order.
841 static int lu_htable_order(void)
843 unsigned long cache_size;
847 * Calculate hash table size, assuming that we want reasonable
848 * performance when 20% of total memory is occupied by cache of
851 * Size of lu_object is (arbitrarily) taken as 1K (together with inode).
853 cache_size = cfs_num_physpages;
855 #if BITS_PER_LONG == 32
856 /* limit hashtable size for lowmem systems to low RAM */
857 if (cache_size > 1 << (30 - CFS_PAGE_SHIFT))
858 cache_size = (1 << (30 - CFS_PAGE_SHIFT)) * 3 / 4;
861 /* reject unreasonable cache settings */
862 if (lu_cache_percent == 0 || lu_cache_percent > LU_CACHE_PERCENT_MAX) {
863 CWARN("obdclass: invalid lu_cache_percent: %u, it must be in"
864 " the range of (0, %u]. Will use default value: %u.\n",
865 lu_cache_percent, LU_CACHE_PERCENT_MAX,
866 LU_CACHE_PERCENT_DEFAULT);
868 lu_cache_percent = LU_CACHE_PERCENT_DEFAULT;
870 cache_size = cache_size / 100 * lu_cache_percent *
871 (CFS_PAGE_SIZE / 1024);
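/*
 * Worked example (assumed values: 4KB pages and 8GB of RAM, i.e.
 * cfs_num_physpages = 2097152, with the default lu_cache_percent of
 * 20): cache_size = 2097152 / 100 * 20 * 4 = 1677680, so the loop
 * below picks the smallest bits with (1 << bits) >= cache_size,
 * i.e. bits = 21.
 */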
873 for (bits = 1; (1 << bits) < cache_size; ++bits) {
879 static unsigned lu_obj_hop_hash(cfs_hash_t *hs,
880 const void *key, unsigned mask)
882 struct lu_fid *fid = (struct lu_fid *)key;
885 hash = fid_flatten32(fid);
886 hash += (hash >> 4) + (hash << 12); /* mixing oid and seq */
887 hash = cfs_hash_long(hash, hs->hs_bkt_bits);
889 /* give me another random factor */
890 hash -= cfs_hash_long((unsigned long)hs, fid_oid(fid) % 11 + 3);
892 hash <<= hs->hs_cur_bits - hs->hs_bkt_bits;
893 hash |= (fid_seq(fid) + fid_oid(fid)) & (CFS_HASH_NBKT(hs) - 1);
898 static void *lu_obj_hop_object(cfs_hlist_node_t *hnode)
900 return cfs_hlist_entry(hnode, struct lu_object_header, loh_hash);
903 static void *lu_obj_hop_key(cfs_hlist_node_t *hnode)
905 struct lu_object_header *h;
907 h = cfs_hlist_entry(hnode, struct lu_object_header, loh_hash);
911 static int lu_obj_hop_keycmp(const void *key, cfs_hlist_node_t *hnode)
913 struct lu_object_header *h;
915 h = cfs_hlist_entry(hnode, struct lu_object_header, loh_hash);
916 return lu_fid_eq(&h->loh_fid, (struct lu_fid *)key);
919 static void lu_obj_hop_get(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
921 struct lu_object_header *h;
923 h = cfs_hlist_entry(hnode, struct lu_object_header, loh_hash);
924 if (cfs_atomic_add_return(1, &h->loh_ref) == 1) {
925 struct lu_site_bkt_data *bkt;
928 cfs_hash_bd_get(hs, &h->loh_fid, &bd);
929 bkt = cfs_hash_bd_extra_get(hs, &bd);
934 static void lu_obj_hop_put_locked(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
936 LBUG(); /* we should never call it */
939 cfs_hash_ops_t lu_site_hash_ops = {
940 .hs_hash = lu_obj_hop_hash,
941 .hs_key = lu_obj_hop_key,
942 .hs_keycmp = lu_obj_hop_keycmp,
943 .hs_object = lu_obj_hop_object,
944 .hs_get = lu_obj_hop_get,
945 .hs_put_locked = lu_obj_hop_put_locked,
948 void lu_dev_add_linkage(struct lu_site *s, struct lu_device *d)
950 spin_lock(&s->ls_ld_lock);
951 if (cfs_list_empty(&d->ld_linkage))
952 cfs_list_add(&d->ld_linkage, &s->ls_ld_linkage);
953 spin_unlock(&s->ls_ld_lock);
955 EXPORT_SYMBOL(lu_dev_add_linkage);
957 void lu_dev_del_linkage(struct lu_site *s, struct lu_device *d)
959 spin_lock(&s->ls_ld_lock);
960 cfs_list_del_init(&d->ld_linkage);
961 spin_unlock(&s->ls_ld_lock);
963 EXPORT_SYMBOL(lu_dev_del_linkage);
966 * Initialize site \a s, with \a d as the top level device.
968 #define LU_SITE_BITS_MIN 12
969 #define LU_SITE_BITS_MAX 24
971 * 256 buckets in total; we don't want too many buckets because they:
972 * - consume too much memory
973 * - lead to unbalanced LRU lists
975 #define LU_SITE_BKT_BITS 8
977 int lu_site_init(struct lu_site *s, struct lu_device *top)
979 struct lu_site_bkt_data *bkt;
986 memset(s, 0, sizeof *s);
987 bits = lu_htable_order();
988 snprintf(name, 16, "lu_site_%s", top->ld_type->ldt_name);
989 for (bits = min(max(LU_SITE_BITS_MIN, bits), LU_SITE_BITS_MAX);
990 bits >= LU_SITE_BITS_MIN; bits--) {
991 s->ls_obj_hash = cfs_hash_create(name, bits, bits,
992 bits - LU_SITE_BKT_BITS,
995 CFS_HASH_SPIN_BKTLOCK |
996 CFS_HASH_NO_ITEMREF |
998 CFS_HASH_ASSERT_EMPTY);
999 if (s->ls_obj_hash != NULL)
1003 if (s->ls_obj_hash == NULL) {
1004 CERROR("failed to create lu_site hash with bits: %d\n", bits);
1008 cfs_hash_for_each_bucket(s->ls_obj_hash, &bd, i) {
1009 bkt = cfs_hash_bd_extra_get(s->ls_obj_hash, &bd);
1010 CFS_INIT_LIST_HEAD(&bkt->lsb_lru);
1011 cfs_waitq_init(&bkt->lsb_marche_funebre);
1014 s->ls_stats = lprocfs_alloc_stats(LU_SS_LAST_STAT, 0);
1015 if (s->ls_stats == NULL) {
1016 cfs_hash_putref(s->ls_obj_hash);
1017 s->ls_obj_hash = NULL;
1021 lprocfs_counter_init(s->ls_stats, LU_SS_CREATED,
1022 0, "created", "created");
1023 lprocfs_counter_init(s->ls_stats, LU_SS_CACHE_HIT,
1024 0, "cache_hit", "cache_hit");
1025 lprocfs_counter_init(s->ls_stats, LU_SS_CACHE_MISS,
1026 0, "cache_miss", "cache_miss");
1027 lprocfs_counter_init(s->ls_stats, LU_SS_CACHE_RACE,
1028 0, "cache_race", "cache_race");
1029 lprocfs_counter_init(s->ls_stats, LU_SS_CACHE_DEATH_RACE,
1030 0, "cache_death_race", "cache_death_race");
1031 lprocfs_counter_init(s->ls_stats, LU_SS_LRU_PURGED,
1032 0, "lru_purged", "lru_purged");
1034 CFS_INIT_LIST_HEAD(&s->ls_linkage);
1035 s->ls_top_dev = top;
1038 lu_ref_add(&top->ld_reference, "site-top", s);
1040 CFS_INIT_LIST_HEAD(&s->ls_ld_linkage);
1041 spin_lock_init(&s->ls_ld_lock);
1043 lu_dev_add_linkage(s, top);
1047 EXPORT_SYMBOL(lu_site_init);
1050 * Finalize \a s and release its resources.
1052 void lu_site_fini(struct lu_site *s)
1054 mutex_lock(&lu_sites_guard);
1055 cfs_list_del_init(&s->ls_linkage);
1056 mutex_unlock(&lu_sites_guard);
1058 if (s->ls_obj_hash != NULL) {
1059 cfs_hash_putref(s->ls_obj_hash);
1060 s->ls_obj_hash = NULL;
1063 if (s->ls_top_dev != NULL) {
1064 s->ls_top_dev->ld_site = NULL;
1065 lu_ref_del(&s->ls_top_dev->ld_reference, "site-top", s);
1066 lu_device_put(s->ls_top_dev);
1067 s->ls_top_dev = NULL;
1070 if (s->ls_stats != NULL)
1071 lprocfs_free_stats(&s->ls_stats);
1073 EXPORT_SYMBOL(lu_site_fini);
1076 * Called when initialization of stack for this site is completed.
1078 int lu_site_init_finish(struct lu_site *s)
1081 mutex_lock(&lu_sites_guard);
1082 result = lu_context_refill(&lu_shrink_env.le_ctx);
1084 cfs_list_add(&s->ls_linkage, &lu_sites);
1085 mutex_unlock(&lu_sites_guard);
1088 EXPORT_SYMBOL(lu_site_init_finish);
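/*
 * Setup sketch (hypothetical caller; "my_top" is an already-initialized
 * top-level device): lu_site_init() builds the hash table and stats,
 * lu_site_init_finish() links the site into the global list once the
 * device stack is complete.
 */
#if 0 /* illustration only */
static int my_site_setup(struct lu_site *s, struct lu_device *my_top)
{
        int rc;

        rc = lu_site_init(s, my_top);
        if (rc != 0)
                return rc;
        rc = lu_site_init_finish(s);
        if (rc != 0)
                lu_site_fini(s);
        return rc;
}
#endif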
1091 * Acquire additional reference on device \a d
1093 void lu_device_get(struct lu_device *d)
1095 cfs_atomic_inc(&d->ld_ref);
1097 EXPORT_SYMBOL(lu_device_get);
1100 * Release reference on device \a d.
1102 void lu_device_put(struct lu_device *d)
1104 LASSERT(cfs_atomic_read(&d->ld_ref) > 0);
1105 cfs_atomic_dec(&d->ld_ref);
1107 EXPORT_SYMBOL(lu_device_put);
1110 * Initialize device \a d of type \a t.
1112 int lu_device_init(struct lu_device *d, struct lu_device_type *t)
1114 if (t->ldt_device_nr++ == 0 && t->ldt_ops->ldto_start != NULL)
1115 t->ldt_ops->ldto_start(t);
1116 memset(d, 0, sizeof *d);
1117 cfs_atomic_set(&d->ld_ref, 0);
1119 lu_ref_init(&d->ld_reference);
1120 CFS_INIT_LIST_HEAD(&d->ld_linkage);
1123 EXPORT_SYMBOL(lu_device_init);
1126 * Finalize device \a d.
1128 void lu_device_fini(struct lu_device *d)
1130 struct lu_device_type *t;
1133 if (d->ld_obd != NULL) {
1134 d->ld_obd->obd_lu_dev = NULL;
1138 lu_ref_fini(&d->ld_reference);
1139 LASSERTF(cfs_atomic_read(&d->ld_ref) == 0,
1140 "Refcount is %u\n", cfs_atomic_read(&d->ld_ref));
1141 LASSERT(t->ldt_device_nr > 0);
1142 if (--t->ldt_device_nr == 0 && t->ldt_ops->ldto_stop != NULL)
1143 t->ldt_ops->ldto_stop(t);
1145 EXPORT_SYMBOL(lu_device_fini);
1148 * Initialize object \a o that is part of compound object \a h and was created
1151 int lu_object_init(struct lu_object *o,
1152 struct lu_object_header *h, struct lu_device *d)
1154 memset(o, 0, sizeof *o);
1158 o->lo_dev_ref = lu_ref_add(&d->ld_reference, "lu_object", o);
1159 CFS_INIT_LIST_HEAD(&o->lo_linkage);
1162 EXPORT_SYMBOL(lu_object_init);
1165 * Finalize object and release its resources.
1167 void lu_object_fini(struct lu_object *o)
1169 struct lu_device *dev = o->lo_dev;
1171 LASSERT(cfs_list_empty(&o->lo_linkage));
1174 lu_ref_del_at(&dev->ld_reference,
1175 o->lo_dev_ref , "lu_object", o);
1180 EXPORT_SYMBOL(lu_object_fini);
1183 * Add object \a o as first layer of compound object \a h
1185 * This is typically called by the ->ldo_object_alloc() method of top-level
1188 void lu_object_add_top(struct lu_object_header *h, struct lu_object *o)
1190 cfs_list_move(&o->lo_linkage, &h->loh_layers);
1192 EXPORT_SYMBOL(lu_object_add_top);
1195 * Add object \a o as a layer of compound object, going after \a before.
1197 * This is typically called by the ->ldo_object_alloc() method of \a
1200 void lu_object_add(struct lu_object *before, struct lu_object *o)
1202 cfs_list_move(&o->lo_linkage, &before->lo_linkage);
1204 EXPORT_SYMBOL(lu_object_add);
1207 * Initialize compound object.
1209 int lu_object_header_init(struct lu_object_header *h)
1211 memset(h, 0, sizeof *h);
1212 cfs_atomic_set(&h->loh_ref, 1);
1213 CFS_INIT_HLIST_NODE(&h->loh_hash);
1214 CFS_INIT_LIST_HEAD(&h->loh_lru);
1215 CFS_INIT_LIST_HEAD(&h->loh_layers);
1216 lu_ref_init(&h->loh_reference);
1219 EXPORT_SYMBOL(lu_object_header_init);
1222 * Finalize compound object.
1224 void lu_object_header_fini(struct lu_object_header *h)
1226 LASSERT(cfs_list_empty(&h->loh_layers));
1227 LASSERT(cfs_list_empty(&h->loh_lru));
1228 LASSERT(cfs_hlist_unhashed(&h->loh_hash));
1229 lu_ref_fini(&h->loh_reference);
1231 EXPORT_SYMBOL(lu_object_header_fini);
1234 * Given a compound object, find its slice, corresponding to the device type
1237 struct lu_object *lu_object_locate(struct lu_object_header *h,
1238 const struct lu_device_type *dtype)
1240 struct lu_object *o;
1242 cfs_list_for_each_entry(o, &h->loh_layers, lo_linkage) {
1243 if (o->lo_dev->ld_type == dtype)
1248 EXPORT_SYMBOL(lu_object_locate);
1253 * Finalize and free devices in the device stack.
1255 * Finalize device stack by purging object cache, and calling
1256 * lu_device_type_operations::ldto_device_fini() and
1257 * lu_device_type_operations::ldto_device_free() on all devices in the stack.
1259 void lu_stack_fini(const struct lu_env *env, struct lu_device *top)
1261 struct lu_site *site = top->ld_site;
1262 struct lu_device *scan;
1263 struct lu_device *next;
1265 lu_site_purge(env, site, ~0);
1266 for (scan = top; scan != NULL; scan = next) {
1267 next = scan->ld_type->ldt_ops->ldto_device_fini(env, scan);
1268 lu_ref_del(&scan->ld_reference, "lu-stack", &lu_site_init);
1269 lu_device_put(scan);
1273 lu_site_purge(env, site, ~0);
1275 for (scan = top; scan != NULL; scan = next) {
1276 const struct lu_device_type *ldt = scan->ld_type;
1277 struct obd_type *type;
1279 next = ldt->ldt_ops->ldto_device_free(env, scan);
1280 type = ldt->ldt_obd_type;
1283 class_put_type(type);
1287 EXPORT_SYMBOL(lu_stack_fini);
1291 * Maximal number of tld slots.
1293 LU_CONTEXT_KEY_NR = 40
1296 static struct lu_context_key *lu_keys[LU_CONTEXT_KEY_NR] = { NULL, };
1298 static DEFINE_SPINLOCK(lu_keys_guard);
1301 * Global counter incremented whenever key is registered, unregistered,
1302 * revived or quiesced. This is used to avoid unnecessary calls to
1303 * lu_context_refill(). No locking is provided, as initialization and shutdown
1304 * are supposed to be externally serialized.
1306 static unsigned key_set_version = 0;
1311 int lu_context_key_register(struct lu_context_key *key)
1316 LASSERT(key->lct_init != NULL);
1317 LASSERT(key->lct_fini != NULL);
1318 LASSERT(key->lct_tags != 0);
1319 LASSERT(key->lct_owner != NULL);
1322 spin_lock(&lu_keys_guard);
1323 for (i = 0; i < ARRAY_SIZE(lu_keys); ++i) {
1324 if (lu_keys[i] == NULL) {
1326 cfs_atomic_set(&key->lct_used, 1);
1328 lu_ref_init(&key->lct_reference);
1334 spin_unlock(&lu_keys_guard);
1337 EXPORT_SYMBOL(lu_context_key_register);
1339 static void key_fini(struct lu_context *ctx, int index)
1341 if (ctx->lc_value != NULL && ctx->lc_value[index] != NULL) {
1342 struct lu_context_key *key;
1344 key = lu_keys[index];
1345 LASSERT(key != NULL);
1346 LASSERT(key->lct_fini != NULL);
1347 LASSERT(cfs_atomic_read(&key->lct_used) > 1);
1349 key->lct_fini(ctx, key, ctx->lc_value[index]);
1350 lu_ref_del(&key->lct_reference, "ctx", ctx);
1351 cfs_atomic_dec(&key->lct_used);
1353 LASSERT(key->lct_owner != NULL);
1354 if ((ctx->lc_tags & LCT_NOREF) == 0) {
1355 LINVRNT(cfs_module_refcount(key->lct_owner) > 0);
1356 cfs_module_put(key->lct_owner);
1358 ctx->lc_value[index] = NULL;
1365 void lu_context_key_degister(struct lu_context_key *key)
1367 LASSERT(cfs_atomic_read(&key->lct_used) >= 1);
1368 LINVRNT(0 <= key->lct_index && key->lct_index < ARRAY_SIZE(lu_keys));
1370 lu_context_key_quiesce(key);
1373 spin_lock(&lu_keys_guard);
1374 key_fini(&lu_shrink_env.le_ctx, key->lct_index);
1375 if (lu_keys[key->lct_index]) {
1376 lu_keys[key->lct_index] = NULL;
1377 lu_ref_fini(&key->lct_reference);
1379 spin_unlock(&lu_keys_guard);
1381 LASSERTF(cfs_atomic_read(&key->lct_used) == 1,
1382 "key has instances: %d\n",
1383 cfs_atomic_read(&key->lct_used));
1385 EXPORT_SYMBOL(lu_context_key_degister);
1388 * Register a number of keys. This has to be called after all keys have been
1389 * initialized by a call to LU_CONTEXT_KEY_INIT().
1391 int lu_context_key_register_many(struct lu_context_key *k, ...)
1393 struct lu_context_key *key = k;
1399 result = lu_context_key_register(key);
1402 key = va_arg(args, struct lu_context_key *);
1403 } while (key != NULL);
1409 lu_context_key_degister(k);
1410 k = va_arg(args, struct lu_context_key *);
1417 EXPORT_SYMBOL(lu_context_key_register_many);
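/*
 * Usage sketch (hypothetical keys prepared with LU_CONTEXT_KEY_INIT()):
 * the vararg list must be NULL-terminated, matching the do/while loop
 * above; de-registration takes the same list.
 */
#if 0 /* illustration only */
        int rc;

        rc = lu_context_key_register_many(&my_key_a, &my_key_b, NULL);
        /* ... on module unload ... */
        lu_context_key_degister_many(&my_key_a, &my_key_b, NULL);
#endif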
1420 * De-register a number of keys. This is a dual to
1421 * lu_context_key_register_many().
1423 void lu_context_key_degister_many(struct lu_context_key *k, ...)
1429 lu_context_key_degister(k);
1430 k = va_arg(args, struct lu_context_key*);
1431 } while (k != NULL);
1434 EXPORT_SYMBOL(lu_context_key_degister_many);
1437 * Revive a number of keys.
1439 void lu_context_key_revive_many(struct lu_context_key *k, ...)
1445 lu_context_key_revive(k);
1446 k = va_arg(args, struct lu_context_key*);
1447 } while (k != NULL);
1450 EXPORT_SYMBOL(lu_context_key_revive_many);
1453 * Quiesce a number of keys.
1455 void lu_context_key_quiesce_many(struct lu_context_key *k, ...)
1461 lu_context_key_quiesce(k);
1462 k = va_arg(args, struct lu_context_key*);
1463 } while (k != NULL);
1466 EXPORT_SYMBOL(lu_context_key_quiesce_many);
1469 * Return value associated with key \a key in context \a ctx.
1471 void *lu_context_key_get(const struct lu_context *ctx,
1472 const struct lu_context_key *key)
1474 LINVRNT(ctx->lc_state == LCS_ENTERED);
1475 LINVRNT(0 <= key->lct_index && key->lct_index < ARRAY_SIZE(lu_keys));
1476 LASSERT(lu_keys[key->lct_index] == key);
1477 return ctx->lc_value[key->lct_index];
1479 EXPORT_SYMBOL(lu_context_key_get);
1482 * List of remembered contexts. XXX document me.
1484 static CFS_LIST_HEAD(lu_context_remembered);
1487 * Destroy \a key in all remembered contexts. This is used to destroy key
1488 * values in "shared" contexts (like service threads), when a module owning
1489 * the key is about to be unloaded.
1491 void lu_context_key_quiesce(struct lu_context_key *key)
1493 struct lu_context *ctx;
1495 if (!(key->lct_tags & LCT_QUIESCENT)) {
1497 * XXX layering violation.
1499 key->lct_tags |= LCT_QUIESCENT;
1501 * XXX memory barrier has to go here.
1503 spin_lock(&lu_keys_guard);
1504 cfs_list_for_each_entry(ctx, &lu_context_remembered,
1506 key_fini(ctx, key->lct_index);
1507 spin_unlock(&lu_keys_guard);
1511 EXPORT_SYMBOL(lu_context_key_quiesce);
1513 void lu_context_key_revive(struct lu_context_key *key)
1515 key->lct_tags &= ~LCT_QUIESCENT;
1518 EXPORT_SYMBOL(lu_context_key_revive);
1520 static void keys_fini(struct lu_context *ctx)
1524 if (ctx->lc_value == NULL)
1527 for (i = 0; i < ARRAY_SIZE(lu_keys); ++i)
1530 OBD_FREE(ctx->lc_value, ARRAY_SIZE(lu_keys) * sizeof ctx->lc_value[0]);
1531 ctx->lc_value = NULL;
1534 static int keys_fill(struct lu_context *ctx)
1538 LINVRNT(ctx->lc_value != NULL);
1539 for (i = 0; i < ARRAY_SIZE(lu_keys); ++i) {
1540 struct lu_context_key *key;
1543 if (ctx->lc_value[i] == NULL && key != NULL &&
1544 (key->lct_tags & ctx->lc_tags) &&
1546 * Don't create values for a LCT_QUIESCENT key, as this
1547 * will pin module owning a key.
1549 !(key->lct_tags & LCT_QUIESCENT)) {
1552 LINVRNT(key->lct_init != NULL);
1553 LINVRNT(key->lct_index == i);
1555 value = key->lct_init(ctx, key);
1556 if (unlikely(IS_ERR(value)))
1557 return PTR_ERR(value);
1559 LASSERT(key->lct_owner != NULL);
1560 if (!(ctx->lc_tags & LCT_NOREF))
1561 cfs_try_module_get(key->lct_owner);
1562 lu_ref_add_atomic(&key->lct_reference, "ctx", ctx);
1563 cfs_atomic_inc(&key->lct_used);
1565 * This is the only place in the code, where an
1566 * element of ctx->lc_value[] array is set to non-NULL
1569 ctx->lc_value[i] = value;
1570 if (key->lct_exit != NULL)
1571 ctx->lc_tags |= LCT_HAS_EXIT;
1573 ctx->lc_version = key_set_version;
1578 static int keys_init(struct lu_context *ctx)
1580 OBD_ALLOC(ctx->lc_value, ARRAY_SIZE(lu_keys) * sizeof ctx->lc_value[0]);
1581 if (likely(ctx->lc_value != NULL))
1582 return keys_fill(ctx);
1588 * Initialize context data-structure. Create values for all keys.
1590 int lu_context_init(struct lu_context *ctx, __u32 tags)
1594 memset(ctx, 0, sizeof *ctx);
1595 ctx->lc_state = LCS_INITIALIZED;
1596 ctx->lc_tags = tags;
1597 if (tags & LCT_REMEMBER) {
1598 spin_lock(&lu_keys_guard);
1599 cfs_list_add(&ctx->lc_remember, &lu_context_remembered);
1600 spin_unlock(&lu_keys_guard);
1602 CFS_INIT_LIST_HEAD(&ctx->lc_remember);
1605 rc = keys_init(ctx);
1607 lu_context_fini(ctx);
1611 EXPORT_SYMBOL(lu_context_init);
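/*
 * Life-cycle sketch: a context must be entered before
 * lu_context_key_get() may be used on it, and exited before it is
 * finalized (see the LCS_* state assertions in this file).
 */
#if 0 /* illustration only */
static void my_context_cycle(void)
{
        struct lu_context ctx;

        if (lu_context_init(&ctx, LCT_MD_THREAD) == 0) {
                lu_context_enter(&ctx);
                /* key values are now reachable via lu_context_key_get() */
                lu_context_exit(&ctx);
                lu_context_fini(&ctx);
        }
}
#endif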
1614 * Finalize context data-structure. Destroy key values.
1616 void lu_context_fini(struct lu_context *ctx)
1618 LINVRNT(ctx->lc_state == LCS_INITIALIZED || ctx->lc_state == LCS_LEFT);
1619 ctx->lc_state = LCS_FINALIZED;
1621 if ((ctx->lc_tags & LCT_REMEMBER) == 0) {
1622 LASSERT(cfs_list_empty(&ctx->lc_remember));
1625 } else { /* could race with key degister */
1626 spin_lock(&lu_keys_guard);
1628 cfs_list_del_init(&ctx->lc_remember);
1629 spin_unlock(&lu_keys_guard);
1632 EXPORT_SYMBOL(lu_context_fini);
1635 * Called before entering context.
1637 void lu_context_enter(struct lu_context *ctx)
1639 LINVRNT(ctx->lc_state == LCS_INITIALIZED || ctx->lc_state == LCS_LEFT);
1640 ctx->lc_state = LCS_ENTERED;
1642 EXPORT_SYMBOL(lu_context_enter);
1645 * Called after exiting from \a ctx
1647 void lu_context_exit(struct lu_context *ctx)
1651 LINVRNT(ctx->lc_state == LCS_ENTERED);
1652 ctx->lc_state = LCS_LEFT;
1653 if (ctx->lc_tags & LCT_HAS_EXIT && ctx->lc_value != NULL) {
1654 for (i = 0; i < ARRAY_SIZE(lu_keys); ++i) {
1655 if (ctx->lc_value[i] != NULL) {
1656 struct lu_context_key *key;
1659 LASSERT(key != NULL);
1660 if (key->lct_exit != NULL)
1662 key, ctx->lc_value[i]);
1667 EXPORT_SYMBOL(lu_context_exit);
1670 * Allocate for the context all missing keys that were registered after the
1671 * context was created. key_set_version is only changed in rare cases when
1672 * modules are loaded or removed.
1674 int lu_context_refill(struct lu_context *ctx)
1676 return likely(ctx->lc_version == key_set_version) ? 0 : keys_fill(ctx);
1678 EXPORT_SYMBOL(lu_context_refill);
1681 * lu_ctx_tags/lu_ses_tags will be updated if new types of obd
1682 * are added. Currently, this is only used on the client side, specifically
1683 * for the echo device client; for other stacks (like ptlrpc threads), contexts
1684 * are predefined when the lu_device type is registered, during module probe
1687 __u32 lu_context_tags_default = 0;
1688 __u32 lu_session_tags_default = 0;
1690 void lu_context_tags_update(__u32 tags)
1692 spin_lock(&lu_keys_guard);
1693 lu_context_tags_default |= tags;
1695 spin_unlock(&lu_keys_guard);
1697 EXPORT_SYMBOL(lu_context_tags_update);
1699 void lu_context_tags_clear(__u32 tags)
1701 spin_lock(&lu_keys_guard);
1702 lu_context_tags_default &= ~tags;
1704 spin_unlock(&lu_keys_guard);
1706 EXPORT_SYMBOL(lu_context_tags_clear);
1708 void lu_session_tags_update(__u32 tags)
1710 spin_lock(&lu_keys_guard);
1711 lu_session_tags_default |= tags;
1713 spin_unlock(&lu_keys_guard);
1715 EXPORT_SYMBOL(lu_session_tags_update);
1717 void lu_session_tags_clear(__u32 tags)
1719 spin_lock(&lu_keys_guard);
1720 lu_session_tags_default &= ~tags;
1722 spin_unlock(&lu_keys_guard);
1724 EXPORT_SYMBOL(lu_session_tags_clear);
1726 int lu_env_init(struct lu_env *env, __u32 tags)
1731 result = lu_context_init(&env->le_ctx, tags);
1732 if (likely(result == 0))
1733 lu_context_enter(&env->le_ctx);
1736 EXPORT_SYMBOL(lu_env_init);
1738 void lu_env_fini(struct lu_env *env)
1740 lu_context_exit(&env->le_ctx);
1741 lu_context_fini(&env->le_ctx);
1744 EXPORT_SYMBOL(lu_env_fini);
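/*
 * Usage sketch: lu_env_init()/lu_env_fini() wrap the context calls
 * above, entering ->le_ctx on success (LCT_LOCAL is assumed to be an
 * appropriate tag for a local helper thread).
 */
#if 0 /* illustration only */
static int my_env_demo(void)
{
        struct lu_env env;
        int rc;

        rc = lu_env_init(&env, LCT_LOCAL);
        if (rc != 0)
                return rc;
        /* ... operate with &env ... */
        lu_env_fini(&env);
        return 0;
}
#endif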
1746 int lu_env_refill(struct lu_env *env)
1750 result = lu_context_refill(&env->le_ctx);
1751 if (result == 0 && env->le_ses != NULL)
1752 result = lu_context_refill(env->le_ses);
1755 EXPORT_SYMBOL(lu_env_refill);
1758 * Currently, this API is only used by the echo client, because
1759 * the echo client and the normal Lustre client share the
1760 * same cl_env cache, so the echo client needs to refresh
1761 * the env context after it gets one from the cache, especially
1762 * when a normal client and an echo client co-exist on the same node.
1764 int lu_env_refill_by_tags(struct lu_env *env, __u32 ctags,
1769 if ((env->le_ctx.lc_tags & ctags) != ctags) {
1770 env->le_ctx.lc_version = 0;
1771 env->le_ctx.lc_tags |= ctags;
1774 if (env->le_ses && (env->le_ses->lc_tags & stags) != stags) {
1775 env->le_ses->lc_version = 0;
1776 env->le_ses->lc_tags |= stags;
1779 result = lu_env_refill(env);
1783 EXPORT_SYMBOL(lu_env_refill_by_tags);
1785 static struct cfs_shrinker *lu_site_shrinker = NULL;
1787 typedef struct lu_site_stats {
1788 unsigned lss_populated;
1789 unsigned lss_max_search;
1794 static void lu_site_stats_get(cfs_hash_t *hs,
1795 lu_site_stats_t *stats, int populated)
1800 cfs_hash_for_each_bucket(hs, &bd, i) {
1801 struct lu_site_bkt_data *bkt = cfs_hash_bd_extra_get(hs, &bd);
1802 cfs_hlist_head_t *hhead;
1804 cfs_hash_bd_lock(hs, &bd, 1);
1805 stats->lss_busy += bkt->lsb_busy;
1806 stats->lss_total += cfs_hash_bd_count_get(&bd);
1807 stats->lss_max_search = max((int)stats->lss_max_search,
1808 cfs_hash_bd_depmax_get(&bd));
1810 cfs_hash_bd_unlock(hs, &bd, 1);
1814 cfs_hash_bd_for_each_hlist(hs, &bd, hhead) {
1815 if (!cfs_hlist_empty(hhead))
1816 stats->lss_populated++;
1818 cfs_hash_bd_unlock(hs, &bd, 1);
1825 * There exists a potential lock inversion deadlock scenario when using
1826 * Lustre on top of ZFS. This occurs between one of ZFS's
1827 * buf_hash_table.ht_lock's, and Lustre's lu_sites_guard lock. Essentially,
1828 * thread A will take the lu_sites_guard lock and sleep on the ht_lock,
1829 * while thread B will take the ht_lock and sleep on the lu_sites_guard
1830 * lock. Obviously neither thread will wake and drop their respective hold
1833 * To prevent this from happening we must ensure the lu_sites_guard lock is
1834 * not taken while down this code path. ZFS reliably does not set the
1835 * __GFP_FS bit in its code paths, so this can be used to determine if it
1836 * is safe to take the lu_sites_guard lock.
1838 * Ideally we should accurately return the remaining number of cached
1839 * objects without taking the lu_sites_guard lock, but this is not
1840 * possible in the current implementation.
1842 static int lu_cache_shrink(SHRINKER_ARGS(sc, nr_to_scan, gfp_mask))
1844 lu_site_stats_t stats;
1846 struct lu_site *tmp;
1848 int remain = shrink_param(sc, nr_to_scan);
1849 CFS_LIST_HEAD(splice);
1851 if (!(shrink_param(sc, gfp_mask) & __GFP_FS)) {
1855 /* We must not take the lu_sites_guard lock when
1856 * __GFP_FS is *not* set because of the deadlock
1857 * possibility detailed above. Additionally,
1858 * since we cannot determine the number of
1859 * objects in the cache without taking this
1860 * lock, we're in a particularly tough spot. As
1861 * a result, we'll just lie and say our cache is
1862 * empty. This _should_ be ok, as we can't
1863 * reclaim objects when __GFP_FS is *not* set
1869 CDEBUG(D_INODE, "Shrink %d objects\n", remain);
1871 mutex_lock(&lu_sites_guard);
1872 cfs_list_for_each_entry_safe(s, tmp, &lu_sites, ls_linkage) {
1873 if (shrink_param(sc, nr_to_scan) != 0) {
1874 remain = lu_site_purge(&lu_shrink_env, s, remain);
1876 * Move the just-shrunk site to the tail of the site list to
1877 * ensure shrinking fairness.
1879 cfs_list_move_tail(&s->ls_linkage, &splice);
1882 memset(&stats, 0, sizeof(stats));
1883 lu_site_stats_get(s->ls_obj_hash, &stats, 0);
1884 cached += stats.lss_total - stats.lss_busy;
1885 if (shrink_param(sc, nr_to_scan) && remain <= 0)
1888 cfs_list_splice(&splice, lu_sites.prev);
1889 mutex_unlock(&lu_sites_guard);
1891 cached = (cached / 100) * sysctl_vfs_cache_pressure;
1892 if (shrink_param(sc, nr_to_scan) == 0)
1893 CDEBUG(D_INODE, "%d objects cached\n", cached);
1902 * Environment to be used in debugger, contains all tags.
1904 struct lu_env lu_debugging_env;
1907 * Debugging printer function using printk().
1909 int lu_printk_printer(const struct lu_env *env,
1910 void *unused, const char *format, ...)
1914 va_start(args, format);
1915 vprintk(format, args);
1920 int lu_debugging_setup(void)
1922 return lu_env_init(&lu_debugging_env, ~0);
1925 void lu_context_keys_dump(void)
1929 for (i = 0; i < ARRAY_SIZE(lu_keys); ++i) {
1930 struct lu_context_key *key;
1934 CERROR("[%d]: %p %x (%p,%p,%p) %d %d \"%s\"@%p\n",
1935 i, key, key->lct_tags,
1936 key->lct_init, key->lct_fini, key->lct_exit,
1937 key->lct_index, cfs_atomic_read(&key->lct_used),
1938 key->lct_owner ? key->lct_owner->name : "",
1940 lu_ref_print(&key->lct_reference);
1944 EXPORT_SYMBOL(lu_context_keys_dump);
1945 #else /* !__KERNEL__ */
1946 static int lu_cache_shrink(int nr, unsigned int gfp_mask)
1950 #endif /* __KERNEL__ */
1952 int cl_global_init(void);
1953 void cl_global_fini(void);
1954 int lu_ref_global_init(void);
1955 void lu_ref_global_fini(void);
1957 int dt_global_init(void);
1958 void dt_global_fini(void);
1960 int llo_global_init(void);
1961 void llo_global_fini(void);
1963 /* context key constructor/destructor: lu_ucred_key_init, lu_ucred_key_fini */
1964 LU_KEY_INIT_FINI(lu_ucred, struct lu_ucred);
1966 static struct lu_context_key lu_ucred_key = {
1967 .lct_tags = LCT_SESSION,
1968 .lct_init = lu_ucred_key_init,
1969 .lct_fini = lu_ucred_key_fini
1973 * Get ucred key if session exists and ucred key is allocated on it.
1974 * Return NULL otherwise.
1976 struct lu_ucred *lu_ucred(const struct lu_env *env)
1980 return lu_context_key_get(env->le_ses, &lu_ucred_key);
1982 EXPORT_SYMBOL(lu_ucred);
1985 * Get ucred key and check if it is properly initialized.
1986 * Return NULL otherwise.
1988 struct lu_ucred *lu_ucred_check(const struct lu_env *env)
1990 struct lu_ucred *uc = lu_ucred(env);
1991 if (uc && uc->uc_valid != UCRED_OLD && uc->uc_valid != UCRED_NEW)
1995 EXPORT_SYMBOL(lu_ucred_check);
1998 * Get ucred key, which must exist and must be properly initialized.
2001 struct lu_ucred *lu_ucred_assert(const struct lu_env *env)
2003 struct lu_ucred *uc = lu_ucred_check(env);
2004 LASSERT(uc != NULL);
2007 EXPORT_SYMBOL(lu_ucred_assert);
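/*
 * The three accessors above differ only in strictness; a sketch:
 */
#if 0 /* illustration only */
        struct lu_ucred *uc;

        uc = lu_ucred(env);        /* may be NULL                         */
        uc = lu_ucred_check(env);  /* NULL unless properly initialized    */
        uc = lu_ucred_assert(env); /* asserts that a valid ucred exists   */
#endif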
2010 * Initialization of global lu_* data.
2012 int lu_global_init(void)
2016 CDEBUG(D_INFO, "Lustre LU module (%p).\n", &lu_keys);
2018 result = lu_ref_global_init();
2022 LU_CONTEXT_KEY_INIT(&lu_global_key);
2023 result = lu_context_key_register(&lu_global_key);
2027 LU_CONTEXT_KEY_INIT(&lu_ucred_key);
2028 result = lu_context_key_register(&lu_ucred_key);
2033 * At this level, we don't know what tags are needed, so allocate them
2034 * conservatively. This should not be too bad, because this
2035 * environment is global.
2037 mutex_lock(&lu_sites_guard);
2038 result = lu_env_init(&lu_shrink_env, LCT_SHRINKER);
2039 mutex_unlock(&lu_sites_guard);
2044 * seeks estimation: 3 seeks to read a record from the OI, one to read the
2045 * inode, one for the EA. Unfortunately, setting such a high value results in
2046 * the lu_object/inode cache consuming all the memory.
2048 lu_site_shrinker = cfs_set_shrinker(CFS_DEFAULT_SEEKS, lu_cache_shrink);
2049 if (lu_site_shrinker == NULL)
2053 result = dt_global_init();
2057 result = llo_global_init();
2061 result = cl_global_init();
2067 * Dual to lu_global_init().
2069 void lu_global_fini(void)
2076 if (lu_site_shrinker != NULL) {
2077 cfs_remove_shrinker(lu_site_shrinker);
2078 lu_site_shrinker = NULL;
2081 lu_context_key_degister(&lu_global_key);
2082 lu_context_key_degister(&lu_ucred_key);
2085 * Tear shrinker environment down _after_ de-registering
2086 * lu_global_key, because the latter has a value in the former.
2088 mutex_lock(&lu_sites_guard);
2089 lu_env_fini(&lu_shrink_env);
2090 mutex_unlock(&lu_sites_guard);
2092 lu_ref_global_fini();
2095 static __u32 ls_stats_read(struct lprocfs_stats *stats, int idx)
2098 struct lprocfs_counter ret;
2100 lprocfs_stats_collect(stats, idx, &ret);
2101 return (__u32)ret.lc_count;
2108 * Output site statistical counters into a buffer. Suitable for
2109 * lprocfs_rd_*()-style functions.
2111 int lu_site_stats_print(const struct lu_site *s, char *page, int count)
2113 lu_site_stats_t stats;
2115 memset(&stats, 0, sizeof(stats));
2116 lu_site_stats_get(s->ls_obj_hash, &stats, 1);
2118 return snprintf(page, count, "%d/%d %d/%d %d %d %d %d %d %d %d\n",
2121 stats.lss_populated,
2122 CFS_HASH_NHLIST(s->ls_obj_hash),
2123 stats.lss_max_search,
2124 ls_stats_read(s->ls_stats, LU_SS_CREATED),
2125 ls_stats_read(s->ls_stats, LU_SS_CACHE_HIT),
2126 ls_stats_read(s->ls_stats, LU_SS_CACHE_MISS),
2127 ls_stats_read(s->ls_stats, LU_SS_CACHE_RACE),
2128 ls_stats_read(s->ls_stats, LU_SS_CACHE_DEATH_RACE),
2129 ls_stats_read(s->ls_stats, LU_SS_LRU_PURGED));
2131 EXPORT_SYMBOL(lu_site_stats_print);
2134 * Helper function to initialize a number of kmem slab caches at once.
2136 int lu_kmem_init(struct lu_kmem_descr *caches)
2139 struct lu_kmem_descr *iter = caches;
2141 for (result = 0; iter->ckd_cache != NULL; ++iter) {
2142 *iter->ckd_cache = cfs_mem_cache_create(iter->ckd_name,
2145 if (*iter->ckd_cache == NULL) {
2147 /* free all previously allocated caches */
2148 lu_kmem_fini(caches);
2154 EXPORT_SYMBOL(lu_kmem_init);
2157 * Helper function to finalize a number of kmem slab cached at once. Dual to
2160 void lu_kmem_fini(struct lu_kmem_descr *caches)
2164 for (; caches->ckd_cache != NULL; ++caches) {
2165 if (*caches->ckd_cache != NULL) {
2166 rc = cfs_mem_cache_destroy(*caches->ckd_cache);
2167 LASSERTF(rc == 0, "couldn't destroy %s slab\n",
2169 *caches->ckd_cache = NULL;
2173 EXPORT_SYMBOL(lu_kmem_fini);
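/*
 * Usage sketch (hypothetical cache): lu_kmem_init()/lu_kmem_fini()
 * consume an array terminated by a descriptor whose ->ckd_cache is
 * NULL, matching the loop conditions above.
 */
#if 0 /* illustration only */
static cfs_mem_cache_t *my_object_kmem;

static struct lu_kmem_descr my_caches[] = {
        {
                .ckd_cache = &my_object_kmem,
                .ckd_name  = "my_object_kmem",
                .ckd_size  = sizeof(struct lu_object_header)
        },
        {
                .ckd_cache = NULL
        }
};

/* rc = lu_kmem_init(my_caches); ... lu_kmem_fini(my_caches); */
#endif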
2176 * Temporary solution to be able to assign the fid in ->do_create(),
2177 * until we have fully functional OST fids
2179 void lu_object_assign_fid(const struct lu_env *env, struct lu_object *o,
2180 const struct lu_fid *fid)
2182 struct lu_site *s = o->lo_dev->ld_site;
2183 struct lu_fid *old = &o->lo_header->loh_fid;
2184 struct lu_site_bkt_data *bkt;
2185 struct lu_object *shadow;
2186 cfs_waitlink_t waiter;
2191 LASSERT(fid_is_zero(old));
2193 hs = s->ls_obj_hash;
2194 cfs_hash_bd_get_and_lock(hs, (void *)fid, &bd, 1);
2195 shadow = htable_lookup(s, &bd, fid, &waiter, &version);
2196 /* supposed to be unique */
2197 LASSERT(shadow == NULL);
2199 bkt = cfs_hash_bd_extra_get(hs, &bd);
2200 cfs_hash_bd_add_locked(hs, &bd, &o->lo_header->loh_hash);
2202 cfs_hash_bd_unlock(hs, &bd, 1);
2204 EXPORT_SYMBOL(lu_object_assign_fid);
2207 * Allocates an object with a zero (non-assigned) fid.
2208 * XXX: temporary solution to be able to assign the fid in ->do_create(),
2209 * until we have fully functional OST fids
2211 struct lu_object *lu_object_anon(const struct lu_env *env,
2212 struct lu_device *dev,
2213 const struct lu_object_conf *conf)
2216 struct lu_object *o;
2219 o = lu_object_alloc(env, dev, &fid, conf);
2223 EXPORT_SYMBOL(lu_object_anon);
2225 struct lu_buf LU_BUF_NULL = {
2229 EXPORT_SYMBOL(LU_BUF_NULL);
2231 void lu_buf_free(struct lu_buf *buf)
2235 LASSERT(buf->lb_len > 0);
2236 OBD_FREE_LARGE(buf->lb_buf, buf->lb_len);
2241 EXPORT_SYMBOL(lu_buf_free);
2243 void lu_buf_alloc(struct lu_buf *buf, int size)
2246 LASSERT(buf->lb_buf == NULL);
2247 LASSERT(buf->lb_len == 0);
2248 OBD_ALLOC_LARGE(buf->lb_buf, size);
2249 if (likely(buf->lb_buf))
2252 EXPORT_SYMBOL(lu_buf_alloc);
2254 void lu_buf_realloc(struct lu_buf *buf, int size)
2257 lu_buf_alloc(buf, size);
2259 EXPORT_SYMBOL(lu_buf_realloc);
2261 struct lu_buf *lu_buf_check_and_alloc(struct lu_buf *buf, int len)
2263 if (buf->lb_buf == NULL && buf->lb_len == 0)
2264 lu_buf_alloc(buf, len);
2266 if ((len > buf->lb_len) && (buf->lb_buf != NULL))
2267 lu_buf_realloc(buf, len);
2271 EXPORT_SYMBOL(lu_buf_check_and_alloc);
2274 * Increase the size of \a buf.
2275 * Preserves old data in the buffer;
2276 * the old buffer remains unchanged on error.
2277 * \retval 0 or -ENOMEM
2279 int lu_buf_check_and_grow(struct lu_buf *buf, int len)
2283 if (len <= buf->lb_len)
2286 OBD_ALLOC_LARGE(ptr, len);
2290 /* Free the old buf */
2291 if (buf->lb_buf != NULL) {
2292 memcpy(ptr, buf->lb_buf, buf->lb_len);
2293 OBD_FREE_LARGE(buf->lb_buf, buf->lb_len);
2300 EXPORT_SYMBOL(lu_buf_check_and_grow);
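/*
 * Usage sketch: a grow-on-demand buffer; lu_buf_check_and_grow()
 * preserves existing contents (see above) and lu_buf_free() releases
 * the storage.
 */
#if 0 /* illustration only */
static int my_buf_demo(void)
{
        struct lu_buf buf = { .lb_buf = NULL, .lb_len = 0 };

        lu_buf_alloc(&buf, 1024);
        if (buf.lb_buf == NULL)
                return -ENOMEM;
        if (lu_buf_check_and_grow(&buf, 4096) != 0) {
                lu_buf_free(&buf);
                return -ENOMEM;
        }
        lu_buf_free(&buf);
        return 0;
}
#endif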