/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2013, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lustre/obdclass/lu_object.c
 *
 * These are the only exported functions; they provide some generic
 * infrastructure for managing object devices.
 *
 * Author: Nikita Danilov <nikita.danilov@sun.com>
 */
#define DEBUG_SUBSYSTEM S_CLASS

#include <libcfs/libcfs.h>

#ifdef __KERNEL__
# include <linux/module.h>
#endif

#include <libcfs/libcfs_hash.h>
#include <obd_class.h>
#include <obd_support.h>
#include <lustre_disk.h>
#include <lustre_fid.h>
#include <lu_object.h>
#include <libcfs/list.h>

extern spinlock_t obd_types_lock;

static void lu_object_free(const struct lu_env *env, struct lu_object *o);
/**
 * Decrease reference counter on object. If the last reference is released,
 * return the object to the cache, unless lu_object_is_dying(o) holds. In the
 * latter case, free the object immediately.
 */
void lu_object_put(const struct lu_env *env, struct lu_object *o)
{
	struct lu_site_bkt_data *bkt;
	struct lu_object_header *top;
	struct lu_site *site;
	struct lu_object *orig;
	cfs_hash_bd_t bd;
	const struct lu_fid *fid;

	top = o->lo_header;
	site = o->lo_dev->ld_site;
	orig = o;

	/*
	 * Until fids-on-OST is fully implemented, anonymous objects are
	 * possible in OSP. Such an object is not listed in the site, so
	 * we should not remove it from the site.
	 */
	fid = lu_object_fid(o);
	if (fid_is_zero(fid)) {
		LASSERT(top->loh_hash.next == NULL &&
			top->loh_hash.pprev == NULL);
		LASSERT(cfs_list_empty(&top->loh_lru));
		if (!atomic_dec_and_test(&top->loh_ref))
			return;
		cfs_list_for_each_entry_reverse(o, &top->loh_layers,
						lo_linkage) {
			if (o->lo_ops->loo_object_release != NULL)
				o->lo_ops->loo_object_release(env, o);
		}
		lu_object_free(env, orig);
		return;
	}
	cfs_hash_bd_get(site->ls_obj_hash, &top->loh_fid, &bd);
	bkt = cfs_hash_bd_extra_get(site->ls_obj_hash, &bd);

	if (!cfs_hash_bd_dec_and_lock(site->ls_obj_hash, &bd, &top->loh_ref)) {
		if (lu_object_is_dying(top)) {
			/*
			 * Somebody may be waiting for this, currently only
			 * used for cl_object, see cl_object_put_last().
			 */
			wake_up_all(&bkt->lsb_marche_funebre);
		}
		return;
	}

	LASSERT(bkt->lsb_busy > 0);
	bkt->lsb_busy--;
	/*
	 * When the last reference is released, iterate over object
	 * layers, and notify them that the object is no longer busy.
	 */
	cfs_list_for_each_entry_reverse(o, &top->loh_layers, lo_linkage) {
		if (o->lo_ops->loo_object_release != NULL)
			o->lo_ops->loo_object_release(env, o);
	}

	if (!lu_object_is_dying(top)) {
		LASSERT(cfs_list_empty(&top->loh_lru));
		cfs_list_add_tail(&top->loh_lru, &bkt->lsb_lru);
		cfs_hash_bd_unlock(site->ls_obj_hash, &bd, 1);
		return;
	}

	/*
	 * If the object is dying (will not be cached), remove it
	 * from the hash table and LRU.
	 *
	 * This is done with hash table and LRU lists locked. As the only
	 * way to acquire a first reference to a previously unreferenced
	 * object is through hash-table lookup (lu_object_find()) or LRU
	 * scanning (lu_site_purge()), both done under the hash-table and
	 * LRU locks, no race with a concurrent object lookup is possible
	 * and we can safely destroy the object below.
	 */
	if (!test_and_set_bit(LU_OBJECT_UNHASHED, &top->loh_flags))
		cfs_hash_bd_del_locked(site->ls_obj_hash, &bd, &top->loh_hash);
	cfs_hash_bd_unlock(site->ls_obj_hash, &bd, 1);

	/*
	 * Object was already removed from hash and LRU above; we can
	 * free it now.
	 */
	lu_object_free(env, orig);
}
EXPORT_SYMBOL(lu_object_put);
/**
 * Put object and don't keep it in cache. This is a temporary solution for
 * multi-site objects whose layering is not constant.
 */
void lu_object_put_nocache(const struct lu_env *env, struct lu_object *o)
{
	set_bit(LU_OBJECT_HEARD_BANSHEE, &o->lo_header->loh_flags);
	lu_object_put(env, o);
}
EXPORT_SYMBOL(lu_object_put_nocache);
/**
 * Kill the object and take it out of the LRU cache.
 * Currently used by client code for layout change.
 */
void lu_object_unhash(const struct lu_env *env, struct lu_object *o)
{
	struct lu_object_header *top;

	top = o->lo_header;
	set_bit(LU_OBJECT_HEARD_BANSHEE, &top->loh_flags);
	if (!test_and_set_bit(LU_OBJECT_UNHASHED, &top->loh_flags)) {
		cfs_hash_t *obj_hash = o->lo_dev->ld_site->ls_obj_hash;
		cfs_hash_bd_t bd;

		cfs_hash_bd_get_and_lock(obj_hash, &top->loh_fid, &bd, 1);
		cfs_list_del_init(&top->loh_lru);
		cfs_hash_bd_del_locked(obj_hash, &bd, &top->loh_hash);
		cfs_hash_bd_unlock(obj_hash, &bd, 1);
	}
}
EXPORT_SYMBOL(lu_object_unhash);
/**
 * Allocate new object.
 *
 * This follows object creation protocol, described in the comment within
 * struct lu_device_operations definition.
 */
static struct lu_object *lu_object_alloc(const struct lu_env *env,
					 struct lu_device *dev,
					 const struct lu_fid *f,
					 const struct lu_object_conf *conf)
{
	struct lu_object *scan;
	struct lu_object *top;
	cfs_list_t *layers;
	unsigned int init_mask = 0;
	unsigned int init_flag;
	int clean;
	int result;
	ENTRY;

	/*
	 * Create top-level object slice. This will also create
	 * lu_object_header.
	 */
	top = dev->ld_ops->ldo_object_alloc(env, NULL, dev);
	if (top == NULL)
		RETURN(ERR_PTR(-ENOMEM));
	/*
	 * This is the only place where object fid is assigned. It's constant
	 * after this point.
	 */
	top->lo_header->loh_fid = *f;
	layers = &top->lo_header->loh_layers;
	do {
		/*
		 * Call ->loo_object_init() repeatedly, until no more new
		 * object slices are created.
		 */
		clean = 1;
		init_flag = 1;
		cfs_list_for_each_entry(scan, layers, lo_linkage) {
			if (init_mask & init_flag)
				goto next;
			clean = 0;
			scan->lo_header = top->lo_header;
			result = scan->lo_ops->loo_object_init(env, scan, conf);
			if (result != 0) {
				lu_object_free(env, top);
				RETURN(ERR_PTR(result));
			}
			init_mask |= init_flag;
next:
			init_flag <<= 1;
		}
	} while (!clean);

	cfs_list_for_each_entry_reverse(scan, layers, lo_linkage) {
		if (scan->lo_ops->loo_object_start != NULL) {
			result = scan->lo_ops->loo_object_start(env, scan);
			if (result != 0) {
				lu_object_free(env, top);
				RETURN(ERR_PTR(result));
			}
		}
	}

	lprocfs_counter_incr(dev->ld_site->ls_stats, LU_SS_CREATED);
	RETURN(top);
}
/**
 * Free an object.
 */
static void lu_object_free(const struct lu_env *env, struct lu_object *o)
{
	struct lu_site_bkt_data *bkt;
	struct lu_site *site;
	struct lu_object *scan;
	cfs_list_t *layers;
	cfs_list_t splice;

	site = o->lo_dev->ld_site;
	layers = &o->lo_header->loh_layers;
	bkt = lu_site_bkt_from_fid(site, &o->lo_header->loh_fid);
	/*
	 * First call ->loo_object_delete() method to release all resources.
	 */
	cfs_list_for_each_entry_reverse(scan, layers, lo_linkage) {
		if (scan->lo_ops->loo_object_delete != NULL)
			scan->lo_ops->loo_object_delete(env, scan);
	}

	/*
	 * Then, splice object layers into a stand-alone list, and call
	 * ->loo_object_free() on all layers to free memory. Splice is
	 * necessary, because lu_object_header is freed together with the
	 * top-level slice.
	 */
	CFS_INIT_LIST_HEAD(&splice);
	cfs_list_splice_init(layers, &splice);
	while (!cfs_list_empty(&splice)) {
		/*
		 * Free layers in bottom-to-top order, so that object header
		 * lives as long as possible and ->loo_object_free() methods
		 * can look at its contents.
		 */
		o = container_of0(splice.prev, struct lu_object, lo_linkage);
		cfs_list_del_init(&o->lo_linkage);
		LASSERT(o->lo_ops->loo_object_free != NULL);
		o->lo_ops->loo_object_free(env, o);
	}

	if (waitqueue_active(&bkt->lsb_marche_funebre))
		wake_up_all(&bkt->lsb_marche_funebre);
}
/**
 * Free \a nr objects from the cold end of the site LRU list.
 */
int lu_site_purge(const struct lu_env *env, struct lu_site *s, int nr)
{
	struct lu_object_header *h;
	struct lu_object_header *temp;
	struct lu_site_bkt_data *bkt;
	cfs_hash_bd_t bd;
	cfs_hash_bd_t bd2;
	cfs_list_t dispose;
	int did_sth;
	int start;
	int count;
	int bnr;
	int i;

	if (OBD_FAIL_CHECK(OBD_FAIL_OBD_NO_LRU))
		RETURN(0);

	CFS_INIT_LIST_HEAD(&dispose);
	/*
	 * Under LRU list lock, scan LRU list and move unreferenced objects to
	 * the dispose list, removing them from LRU and hash table.
	 */
	start = s->ls_purge_start;
	bnr = (nr == ~0) ? -1 : nr / CFS_HASH_NBKT(s->ls_obj_hash) + 1;
again:
	did_sth = 0;
	cfs_hash_for_each_bucket(s->ls_obj_hash, &bd, i) {
		if (i < start)
			continue;
		count = bnr;
		cfs_hash_bd_lock(s->ls_obj_hash, &bd, 1);
		bkt = cfs_hash_bd_extra_get(s->ls_obj_hash, &bd);

		cfs_list_for_each_entry_safe(h, temp, &bkt->lsb_lru, loh_lru) {
			LASSERT(atomic_read(&h->loh_ref) == 0);

			cfs_hash_bd_get(s->ls_obj_hash, &h->loh_fid, &bd2);
			LASSERT(bd.bd_bucket == bd2.bd_bucket);

			cfs_hash_bd_del_locked(s->ls_obj_hash,
					       &bd2, &h->loh_hash);
			cfs_list_move(&h->loh_lru, &dispose);
			did_sth = 1;
			if (nr != ~0 && --nr == 0)
				break;
			if (count > 0 && --count == 0)
				break;
		}
		cfs_hash_bd_unlock(s->ls_obj_hash, &bd, 1);
		/*
		 * Free everything on the dispose list. This is safe against
		 * races due to the reasons described in lu_object_put().
		 */
		while (!cfs_list_empty(&dispose)) {
			h = container_of0(dispose.next,
					  struct lu_object_header, loh_lru);
			cfs_list_del_init(&h->loh_lru);
			lu_object_free(env, lu_object_top(h));
			lprocfs_counter_incr(s->ls_stats, LU_SS_LRU_PURGED);
		}

		if (nr == 0)
			break;
	}

	if (nr != 0 && did_sth && start != 0) {
		start = 0; /* restart from the first bucket */
		goto again;
	}
	/* race on s->ls_purge_start, but nobody cares */
	s->ls_purge_start = i % CFS_HASH_NBKT(s->ls_obj_hash);
	return nr;
}
EXPORT_SYMBOL(lu_site_purge);
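/*
 * Usage sketch (illustration only, not from the original code): draining the
 * entire site cache with nr == ~0, the way lu_stack_fini() below does it, or
 * trimming a bounded number of cold objects. Assumes an initialized lu_env
 * and lu_site; the function name is hypothetical.
 */
#if 0
static void example_trim_site(const struct lu_env *env, struct lu_site *s)
{
	/* Free up to 128 unreferenced objects from the cold end of the LRU. */
	lu_site_purge(env, s, 128);

	/* Passing ~0 purges every unreferenced cached object. */
	lu_site_purge(env, s, ~0);
}
#endif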
/*
 * Code below has to jump through certain hoops to output object description
 * into the libcfs_debug_msg-based log. The problem is that lu_object_print()
 * composes object description from strings that are parts of _lines_ of
 * output (i.e., strings that are not terminated by newline). This does not
 * fit well with the libcfs_debug_msg() interface, which assumes that each
 * message supplied to it is a self-contained output line.
 *
 * To work around this, strings are collected in a temporary buffer
 * (implemented as a value of lu_cdebug_key key), until a terminating newline
 * character is detected.
 *
 * XXX overflow is not handled correctly.
 */
enum {
	/**
	 * Maximal line size.
	 */
	LU_CDEBUG_LINE = 512
};

struct lu_cdebug_data {
	/**
	 * Temporary buffer.
	 */
	char lck_area[LU_CDEBUG_LINE];
};

/* context key constructor/destructor: lu_global_key_init, lu_global_key_fini */
LU_KEY_INIT_FINI(lu_global, struct lu_cdebug_data);
/**
 * Key, holding temporary buffer. This key is registered very early by
 * lu_global_init().
 */
struct lu_context_key lu_global_key = {
	.lct_tags = LCT_MD_THREAD | LCT_DT_THREAD |
		    LCT_MG_THREAD | LCT_CL_THREAD | LCT_LOCAL,
	.lct_init = lu_global_key_init,
	.lct_fini = lu_global_key_fini
};

/**
 * Printer function emitting messages through libcfs_debug_msg().
 */
int lu_cdebug_printer(const struct lu_env *env,
		      void *cookie, const char *format, ...)
{
	struct libcfs_debug_msg_data *msgdata = cookie;
	struct lu_cdebug_data *key;
	int used;
	int complete;
	va_list args;

	va_start(args, format);

	key = lu_context_key_get(&env->le_ctx, &lu_global_key);
	LASSERT(key != NULL);

	used = strlen(key->lck_area);
	complete = format[strlen(format) - 1] == '\n';
	/*
	 * Append new chunk to the buffer.
	 */
	vsnprintf(key->lck_area + used,
		  ARRAY_SIZE(key->lck_area) - used, format, args);
	if (complete) {
		if (cfs_cdebug_show(msgdata->msg_mask, msgdata->msg_subsys))
			libcfs_debug_msg(msgdata, "%s", key->lck_area);
		key->lck_area[0] = 0;
	}
	va_end(args);
	return 0;
}
EXPORT_SYMBOL(lu_cdebug_printer);
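/*
 * Usage sketch (illustration only, not from the original code):
 * lu_cdebug_printer() accumulates chunks in the per-context buffer until a
 * chunk ends with '\n', at which point the assembled line is emitted as one
 * debug message. Assumes msgdata was prepared by the caller (e.g. via the
 * usual LIBCFS_DEBUG_MSG_DATA_DECL() macro); the function name below is
 * hypothetical.
 */
#if 0
static void example_print_line(const struct lu_env *env,
			       struct libcfs_debug_msg_data *msgdata)
{
	/* Buffered: no newline yet, nothing is logged. */
	lu_cdebug_printer(env, msgdata, "object state:");
	lu_cdebug_printer(env, msgdata, " flags=%#lx", 0UL);
	/* Terminating newline flushes the assembled line to the log. */
	lu_cdebug_printer(env, msgdata, "\n");
}
#endif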
/**
 * Print object header.
 */
void lu_object_header_print(const struct lu_env *env, void *cookie,
			    lu_printer_t printer,
			    const struct lu_object_header *hdr)
{
	(*printer)(env, cookie, "header@%p[%#lx, %d, "DFID"%s%s%s]",
		   hdr, hdr->loh_flags, atomic_read(&hdr->loh_ref),
		   PFID(&hdr->loh_fid),
		   cfs_hlist_unhashed(&hdr->loh_hash) ? "" : " hash",
		   cfs_list_empty((cfs_list_t *)&hdr->loh_lru) ? "" : " lru",
		   hdr->loh_attr & LOHA_EXISTS ? " exist" : "");
}
EXPORT_SYMBOL(lu_object_header_print);
/**
 * Print human readable representation of the \a o to the \a printer.
 */
void lu_object_print(const struct lu_env *env, void *cookie,
		     lu_printer_t printer, const struct lu_object *o)
{
	static const char ruler[] = "........................................";
	struct lu_object_header *top;
	int depth = 4;

	top = o->lo_header;
	lu_object_header_print(env, cookie, printer, top);
	(*printer)(env, cookie, "{\n");

	cfs_list_for_each_entry(o, &top->loh_layers, lo_linkage) {
		/*
		 * print `.' \a depth times followed by type name and address
		 */
		(*printer)(env, cookie, "%*.*s%s@%p", depth, depth, ruler,
			   o->lo_dev->ld_type->ldt_name, o);

		if (o->lo_ops->loo_object_print != NULL)
			(*o->lo_ops->loo_object_print)(env, cookie, printer, o);

		(*printer)(env, cookie, "\n");
	}

	(*printer)(env, cookie, "} header@%p\n", top);
}
EXPORT_SYMBOL(lu_object_print);
/**
 * Check object consistency.
 */
int lu_object_invariant(const struct lu_object *o)
{
	struct lu_object_header *top;

	top = o->lo_header;
	cfs_list_for_each_entry(o, &top->loh_layers, lo_linkage) {
		if (o->lo_ops->loo_object_invariant != NULL &&
		    !o->lo_ops->loo_object_invariant(o))
			return 0;
	}
	return 1;
}
EXPORT_SYMBOL(lu_object_invariant);
static struct lu_object *htable_lookup(struct lu_site *s,
				       cfs_hash_bd_t *bd,
				       const struct lu_fid *f,
				       wait_queue_t *waiter,
				       __u64 *version)
{
	struct lu_site_bkt_data *bkt;
	struct lu_object_header *h;
	cfs_hlist_node_t *hnode;
	__u64 ver = cfs_hash_bd_version_get(bd);

	if (*version == ver)
		return ERR_PTR(-ENOENT);

	*version = ver;
	bkt = cfs_hash_bd_extra_get(s->ls_obj_hash, bd);
	/* cfs_hash_bd_peek_locked is a somewhat "internal" function
	 * of cfs_hash; it does not take a reference on the object. */
	hnode = cfs_hash_bd_peek_locked(s->ls_obj_hash, bd, (void *)f);
	if (hnode == NULL) {
		lprocfs_counter_incr(s->ls_stats, LU_SS_CACHE_MISS);
		return ERR_PTR(-ENOENT);
	}

	h = container_of0(hnode, struct lu_object_header, loh_hash);
	if (likely(!lu_object_is_dying(h))) {
		cfs_hash_get(s->ls_obj_hash, hnode);
		lprocfs_counter_incr(s->ls_stats, LU_SS_CACHE_HIT);
		cfs_list_del_init(&h->loh_lru);
		return lu_object_top(h);
	}

	/*
	 * Lookup found an object being destroyed; this object cannot be
	 * returned (to assure that references to dying objects are eventually
	 * drained), and moreover, lookup has to wait until the object is
	 * freed.
	 */
	init_waitqueue_entry_current(waiter);
	add_wait_queue(&bkt->lsb_marche_funebre, waiter);
	set_current_state(TASK_UNINTERRUPTIBLE);
	lprocfs_counter_incr(s->ls_stats, LU_SS_CACHE_DEATH_RACE);
	return ERR_PTR(-EAGAIN);
}
static struct lu_object *htable_lookup_nowait(struct lu_site *s,
					      cfs_hash_bd_t *bd,
					      const struct lu_fid *f)
{
	cfs_hlist_node_t *hnode;
	struct lu_object_header *h;

	/* cfs_hash_bd_peek_locked is a somewhat "internal" function
	 * of cfs_hash; it does not take a reference on the object. */
	hnode = cfs_hash_bd_peek_locked(s->ls_obj_hash, bd, (void *)f);
	if (hnode == NULL) {
		lprocfs_counter_incr(s->ls_stats, LU_SS_CACHE_MISS);
		return ERR_PTR(-ENOENT);
	}

	h = container_of0(hnode, struct lu_object_header, loh_hash);
	if (unlikely(lu_object_is_dying(h)))
		return ERR_PTR(-ENOENT);

	cfs_hash_get(s->ls_obj_hash, hnode);
	lprocfs_counter_incr(s->ls_stats, LU_SS_CACHE_HIT);
	cfs_list_del_init(&h->loh_lru);
	return lu_object_top(h);
}
/**
 * Search cache for an object with the fid \a f. If such an object is found,
 * return it. Otherwise, create a new object, insert it into the cache and
 * return it. In any case, an additional reference is acquired on the
 * returned object.
 */
struct lu_object *lu_object_find(const struct lu_env *env,
				 struct lu_device *dev, const struct lu_fid *f,
				 const struct lu_object_conf *conf)
{
	return lu_object_find_at(env, dev->ld_site->ls_top_dev, f, conf);
}
EXPORT_SYMBOL(lu_object_find);
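/*
 * Usage sketch (illustration only, not from the original code): the find/put
 * pairing. lu_object_find() returns a referenced top-level object (or an
 * ERR_PTR), and every successful lookup must eventually be balanced by
 * lu_object_put(). The function name below is hypothetical.
 */
#if 0
static int example_lookup(const struct lu_env *env, struct lu_device *dev,
			  const struct lu_fid *fid)
{
	struct lu_object *o;

	o = lu_object_find(env, dev, fid, NULL);
	if (IS_ERR(o))
		return PTR_ERR(o);

	/* ... use the object layers here ... */

	lu_object_put(env, o);	/* release the reference taken by find */
	return 0;
}
#endif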
static struct lu_object *lu_object_new(const struct lu_env *env,
				       struct lu_device *dev,
				       const struct lu_fid *f,
				       const struct lu_object_conf *conf)
{
	struct lu_object *o;
	cfs_hash_t *hs;
	cfs_hash_bd_t bd;
	struct lu_site_bkt_data *bkt;

	o = lu_object_alloc(env, dev, f, conf);
	if (unlikely(IS_ERR(o)))
		return o;

	hs = dev->ld_site->ls_obj_hash;
	cfs_hash_bd_get_and_lock(hs, (void *)f, &bd, 1);
	bkt = cfs_hash_bd_extra_get(hs, &bd);
	cfs_hash_bd_add_locked(hs, &bd, &o->lo_header->loh_hash);
	bkt->lsb_busy++;
	cfs_hash_bd_unlock(hs, &bd, 1);
	return o;
}
/**
 * Core logic of lu_object_find*() functions.
 */
static struct lu_object *lu_object_find_try(const struct lu_env *env,
					    struct lu_device *dev,
					    const struct lu_fid *f,
					    const struct lu_object_conf *conf,
					    wait_queue_t *waiter)
{
	struct lu_object *o;
	struct lu_object *shadow;
	struct lu_site *s;
	cfs_hash_t *hs;
	cfs_hash_bd_t bd;
	__u64 version = 0;

	/*
	 * This uses standard index maintenance protocol:
	 *
	 *     - search index under lock, and return object if found;
	 *     - otherwise, unlock index, allocate new object;
	 *     - lock index and search again;
	 *     - if nothing is found (usual case), insert newly created
	 *       object into index;
	 *     - otherwise (race: other thread inserted object), free
	 *       object just allocated.
	 *
	 * For the "LOC_F_NEW" case, we are sure the object being created is
	 * new, so it is unnecessary to perform lookup-alloc-lookup-insert;
	 * just allocate and insert directly.
	 *
	 * If a dying object is found during index search, add @waiter to the
	 * site wait-queue and return ERR_PTR(-EAGAIN).
	 */
	if (conf != NULL && conf->loc_flags & LOC_F_NEW)
		return lu_object_new(env, dev, f, conf);

	s  = dev->ld_site;
	hs = s->ls_obj_hash;
	cfs_hash_bd_get_and_lock(hs, (void *)f, &bd, 1);
	o = htable_lookup(s, &bd, f, waiter, &version);
	cfs_hash_bd_unlock(hs, &bd, 1);
	if (!IS_ERR(o) || PTR_ERR(o) != -ENOENT)
		return o;

	/*
	 * Allocate new object. This may result in rather complicated
	 * operations, including fld queries, inode loading, etc.
	 */
	o = lu_object_alloc(env, dev, f, conf);
	if (unlikely(IS_ERR(o)))
		return o;

	LASSERT(lu_fid_eq(lu_object_fid(o), f));

	cfs_hash_bd_lock(hs, &bd, 1);

	shadow = htable_lookup(s, &bd, f, waiter, &version);
	if (likely(IS_ERR(shadow) && PTR_ERR(shadow) == -ENOENT)) {
		struct lu_site_bkt_data *bkt;

		bkt = cfs_hash_bd_extra_get(hs, &bd);
		cfs_hash_bd_add_locked(hs, &bd, &o->lo_header->loh_hash);
		bkt->lsb_busy++;
		cfs_hash_bd_unlock(hs, &bd, 1);
		return o;
	}

	lprocfs_counter_incr(s->ls_stats, LU_SS_CACHE_RACE);
	cfs_hash_bd_unlock(hs, &bd, 1);
	lu_object_free(env, o);
	return shadow;
}
/**
 * Much like lu_object_find(), but the top level device of the object is
 * specifically \a dev rather than the top level device of the site. This
 * interface allows objects of different "stacking" to be created within the
 * same site.
 */
struct lu_object *lu_object_find_at(const struct lu_env *env,
				    struct lu_device *dev,
				    const struct lu_fid *f,
				    const struct lu_object_conf *conf)
{
	struct lu_site_bkt_data *bkt;
	struct lu_object *obj;
	wait_queue_t wait;

	while (1) {
		obj = lu_object_find_try(env, dev, f, conf, &wait);
		if (obj != ERR_PTR(-EAGAIN))
			return obj;
		/*
		 * lu_object_find_try() already added waiter into the
		 * wait queue.
		 */
		waitq_wait(&wait, TASK_UNINTERRUPTIBLE);
		bkt = lu_site_bkt_from_fid(dev->ld_site, (void *)f);
		remove_wait_queue(&bkt->lsb_marche_funebre, &wait);
	}
}
EXPORT_SYMBOL(lu_object_find_at);
/**
 * Try to find the object in cache without waiting for the dead object
 * to be released nor allocating an object if no cached one was found.
 *
 * The found object will be marked LU_OBJECT_HEARD_BANSHEE for purging.
 */
void lu_object_purge(const struct lu_env *env, struct lu_device *dev,
		     const struct lu_fid *f)
{
	struct lu_site *s = dev->ld_site;
	cfs_hash_t *hs = s->ls_obj_hash;
	cfs_hash_bd_t bd;
	struct lu_object *o;

	cfs_hash_bd_get_and_lock(hs, f, &bd, 1);
	o = htable_lookup_nowait(s, &bd, f);
	cfs_hash_bd_unlock(hs, &bd, 1);
	if (!IS_ERR(o)) {
		set_bit(LU_OBJECT_HEARD_BANSHEE, &o->lo_header->loh_flags);
		lu_object_put(env, o);
	}
}
EXPORT_SYMBOL(lu_object_purge);
/**
 * Find object with given fid, and return its slice belonging to given device.
 */
struct lu_object *lu_object_find_slice(const struct lu_env *env,
				       struct lu_device *dev,
				       const struct lu_fid *f,
				       const struct lu_object_conf *conf)
{
	struct lu_object *top;
	struct lu_object *obj;

	top = lu_object_find(env, dev, f, conf);
	if (!IS_ERR(top)) {
		obj = lu_object_locate(top->lo_header, dev->ld_type);
		if (obj == NULL)
			lu_object_put(env, top);
	} else {
		obj = top;
	}
	return obj;
}
EXPORT_SYMBOL(lu_object_find_slice);
/**
 * Global list of all device types.
 */
static CFS_LIST_HEAD(lu_device_types);

int lu_device_type_init(struct lu_device_type *ldt)
{
	int result = 0;

	atomic_set(&ldt->ldt_device_nr, 0);
	INIT_LIST_HEAD(&ldt->ldt_linkage);
	if (ldt->ldt_ops->ldto_init)
		result = ldt->ldt_ops->ldto_init(ldt);

	if (result == 0) {
		spin_lock(&obd_types_lock);
		list_add(&ldt->ldt_linkage, &lu_device_types);
		spin_unlock(&obd_types_lock);
	}

	return result;
}
EXPORT_SYMBOL(lu_device_type_init);

void lu_device_type_fini(struct lu_device_type *ldt)
{
	spin_lock(&obd_types_lock);
	list_del_init(&ldt->ldt_linkage);
	spin_unlock(&obd_types_lock);
	if (ldt->ldt_ops->ldto_fini)
		ldt->ldt_ops->ldto_fini(ldt);
}
EXPORT_SYMBOL(lu_device_type_fini);
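/*
 * Usage sketch (illustration only, not from the original code): a device
 * type is typically initialized once at module load and finalized at unload.
 * The function names are hypothetical; ldt is assumed to have its ldt_ops
 * set up by the module.
 */
#if 0
static int example_module_init(struct lu_device_type *ldt)
{
	int rc;

	rc = lu_device_type_init(ldt);	/* links onto lu_device_types */
	if (rc != 0)
		return rc;
	/* ... register the corresponding obd type, etc. ... */
	return 0;
}

static void example_module_exit(struct lu_device_type *ldt)
{
	lu_device_type_fini(ldt);	/* unlink and call ->ldto_fini() */
}
#endif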
/**
 * Global list of all sites on this node
 */
static CFS_LIST_HEAD(lu_sites);
static DEFINE_MUTEX(lu_sites_guard);

/**
 * Global environment used by site shrinker.
 */
static struct lu_env lu_shrink_env;

struct lu_site_print_arg {
	struct lu_env *lsp_env;
	void *lsp_cookie;
	lu_printer_t lsp_printer;
};

static int
lu_site_obj_print(cfs_hash_t *hs, cfs_hash_bd_t *bd,
		  cfs_hlist_node_t *hnode, void *data)
{
	struct lu_site_print_arg *arg = (struct lu_site_print_arg *)data;
	struct lu_object_header *h;

	h = cfs_hlist_entry(hnode, struct lu_object_header, loh_hash);
	if (!cfs_list_empty(&h->loh_layers)) {
		const struct lu_object *o;

		o = lu_object_top(h);
		lu_object_print(arg->lsp_env, arg->lsp_cookie,
				arg->lsp_printer, o);
	} else {
		lu_object_header_print(arg->lsp_env, arg->lsp_cookie,
				       arg->lsp_printer, h);
	}
	return 0;
}

/**
 * Print all objects in \a s.
 */
void lu_site_print(const struct lu_env *env, struct lu_site *s, void *cookie,
		   lu_printer_t printer)
{
	struct lu_site_print_arg arg = {
		.lsp_env     = (struct lu_env *)env,
		.lsp_cookie  = cookie,
		.lsp_printer = printer,
	};

	cfs_hash_for_each(s->ls_obj_hash, lu_site_obj_print, &arg);
}
EXPORT_SYMBOL(lu_site_print);
enum {
	LU_CACHE_PERCENT_MAX     = 50,
	LU_CACHE_PERCENT_DEFAULT = 20
};

static unsigned int lu_cache_percent = LU_CACHE_PERCENT_DEFAULT;
CFS_MODULE_PARM(lu_cache_percent, "i", int, 0644,
		"Percentage of memory to be used as lu_object cache");

/**
 * Return desired hash table order.
 */
static int lu_htable_order(void)
{
	unsigned long cache_size;
	int bits;

	/*
	 * Calculate hash table size, assuming that we want reasonable
	 * performance when 20% of total memory is occupied by the cache of
	 * lu_objects.
	 *
	 * Size of lu_object is (arbitrarily) taken as 1K (together with
	 * inode).
	 */
	cache_size = totalram_pages;

#if BITS_PER_LONG == 32
	/* limit hashtable size for lowmem systems to low RAM */
	if (cache_size > 1 << (30 - PAGE_CACHE_SHIFT))
		cache_size = 1 << (30 - PAGE_CACHE_SHIFT) * 3 / 4;
#endif

	/* clear off unreasonable cache setting. */
	if (lu_cache_percent == 0 || lu_cache_percent > LU_CACHE_PERCENT_MAX) {
		CWARN("obdclass: invalid lu_cache_percent: %u, it must be in"
		      " the range of (0, %u]. Will use default value: %u.\n",
		      lu_cache_percent, LU_CACHE_PERCENT_MAX,
		      LU_CACHE_PERCENT_DEFAULT);

		lu_cache_percent = LU_CACHE_PERCENT_DEFAULT;
	}
	cache_size = cache_size / 100 * lu_cache_percent *
		     (PAGE_CACHE_SIZE / 1024);

	for (bits = 1; (1 << bits) < cache_size; ++bits) {
		;
	}
	return bits;
}
static unsigned lu_obj_hop_hash(cfs_hash_t *hs,
				const void *key, unsigned mask)
{
	struct lu_fid *fid = (struct lu_fid *)key;
	__u32 hash;

	hash = fid_flatten32(fid);
	hash += (hash >> 4) + (hash << 12); /* mixing oid and seq */
	hash = hash_long(hash, hs->hs_bkt_bits);

	/* give me another random factor */
	hash -= hash_long((unsigned long)hs, fid_oid(fid) % 11 + 3);

	hash <<= hs->hs_cur_bits - hs->hs_bkt_bits;
	hash |= (fid_seq(fid) + fid_oid(fid)) & (CFS_HASH_NBKT(hs) - 1);

	return hash & mask;
}

static void *lu_obj_hop_object(cfs_hlist_node_t *hnode)
{
	return cfs_hlist_entry(hnode, struct lu_object_header, loh_hash);
}

static void *lu_obj_hop_key(cfs_hlist_node_t *hnode)
{
	struct lu_object_header *h;

	h = cfs_hlist_entry(hnode, struct lu_object_header, loh_hash);
	return &h->loh_fid;
}

static int lu_obj_hop_keycmp(const void *key, cfs_hlist_node_t *hnode)
{
	struct lu_object_header *h;

	h = cfs_hlist_entry(hnode, struct lu_object_header, loh_hash);
	return lu_fid_eq(&h->loh_fid, (struct lu_fid *)key);
}

static void lu_obj_hop_get(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
{
	struct lu_object_header *h;

	h = cfs_hlist_entry(hnode, struct lu_object_header, loh_hash);
	if (atomic_add_return(1, &h->loh_ref) == 1) {
		struct lu_site_bkt_data *bkt;
		cfs_hash_bd_t bd;

		cfs_hash_bd_get(hs, &h->loh_fid, &bd);
		bkt = cfs_hash_bd_extra_get(hs, &bd);
		bkt->lsb_busy++;
	}
}

static void lu_obj_hop_put_locked(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
{
	LBUG(); /* we should never be called here */
}

cfs_hash_ops_t lu_site_hash_ops = {
	.hs_hash       = lu_obj_hop_hash,
	.hs_key        = lu_obj_hop_key,
	.hs_keycmp     = lu_obj_hop_keycmp,
	.hs_object     = lu_obj_hop_object,
	.hs_get        = lu_obj_hop_get,
	.hs_put_locked = lu_obj_hop_put_locked,
};
void lu_dev_add_linkage(struct lu_site *s, struct lu_device *d)
{
	spin_lock(&s->ls_ld_lock);
	if (cfs_list_empty(&d->ld_linkage))
		cfs_list_add(&d->ld_linkage, &s->ls_ld_linkage);
	spin_unlock(&s->ls_ld_lock);
}
EXPORT_SYMBOL(lu_dev_add_linkage);

void lu_dev_del_linkage(struct lu_site *s, struct lu_device *d)
{
	spin_lock(&s->ls_ld_lock);
	cfs_list_del_init(&d->ld_linkage);
	spin_unlock(&s->ls_ld_lock);
}
EXPORT_SYMBOL(lu_dev_del_linkage);

/**
 * Initialize site \a s, with \a top as the top level device.
 */
#define LU_SITE_BITS_MIN	12
#define LU_SITE_BITS_MAX	24
/**
 * 256 buckets in total; we do not want too many buckets because they:
 * - consume too much memory;
 * - lead to unbalanced LRU lists.
 */
#define LU_SITE_BKT_BITS	8
int lu_site_init(struct lu_site *s, struct lu_device *top)
{
	struct lu_site_bkt_data *bkt;
	cfs_hash_bd_t bd;
	char name[16];
	int bits;
	int i;
	ENTRY;

	memset(s, 0, sizeof *s);
	bits = lu_htable_order();
	snprintf(name, 16, "lu_site_%s", top->ld_type->ldt_name);
	for (bits = min(max(LU_SITE_BITS_MIN, bits), LU_SITE_BITS_MAX);
	     bits >= LU_SITE_BITS_MIN; bits--) {
		s->ls_obj_hash = cfs_hash_create(name, bits, bits,
						 bits - LU_SITE_BKT_BITS,
						 sizeof(*bkt), 0, 0,
						 &lu_site_hash_ops,
						 CFS_HASH_SPIN_BKTLOCK |
						 CFS_HASH_NO_ITEMREF |
						 CFS_HASH_DEPTH |
						 CFS_HASH_ASSERT_EMPTY);
		if (s->ls_obj_hash != NULL)
			break;
	}

	if (s->ls_obj_hash == NULL) {
		CERROR("failed to create lu_site hash with bits: %d\n", bits);
		return -ENOMEM;
	}

	cfs_hash_for_each_bucket(s->ls_obj_hash, &bd, i) {
		bkt = cfs_hash_bd_extra_get(s->ls_obj_hash, &bd);
		CFS_INIT_LIST_HEAD(&bkt->lsb_lru);
		init_waitqueue_head(&bkt->lsb_marche_funebre);
	}

	s->ls_stats = lprocfs_alloc_stats(LU_SS_LAST_STAT, 0);
	if (s->ls_stats == NULL) {
		cfs_hash_putref(s->ls_obj_hash);
		s->ls_obj_hash = NULL;
		return -ENOMEM;
	}

	lprocfs_counter_init(s->ls_stats, LU_SS_CREATED,
			     0, "created", "created");
	lprocfs_counter_init(s->ls_stats, LU_SS_CACHE_HIT,
			     0, "cache_hit", "cache_hit");
	lprocfs_counter_init(s->ls_stats, LU_SS_CACHE_MISS,
			     0, "cache_miss", "cache_miss");
	lprocfs_counter_init(s->ls_stats, LU_SS_CACHE_RACE,
			     0, "cache_race", "cache_race");
	lprocfs_counter_init(s->ls_stats, LU_SS_CACHE_DEATH_RACE,
			     0, "cache_death_race", "cache_death_race");
	lprocfs_counter_init(s->ls_stats, LU_SS_LRU_PURGED,
			     0, "lru_purged", "lru_purged");

	CFS_INIT_LIST_HEAD(&s->ls_linkage);
	s->ls_top_dev = top;
	top->ld_site = s;
	lu_device_get(top);
	lu_ref_add(&top->ld_reference, "site-top", s);

	CFS_INIT_LIST_HEAD(&s->ls_ld_linkage);
	spin_lock_init(&s->ls_ld_lock);

	lu_dev_add_linkage(s, top);

	RETURN(0);
}
EXPORT_SYMBOL(lu_site_init);
/**
 * Finalize \a s and release its resources.
 */
void lu_site_fini(struct lu_site *s)
{
	mutex_lock(&lu_sites_guard);
	cfs_list_del_init(&s->ls_linkage);
	mutex_unlock(&lu_sites_guard);

	if (s->ls_obj_hash != NULL) {
		cfs_hash_putref(s->ls_obj_hash);
		s->ls_obj_hash = NULL;
	}

	if (s->ls_top_dev != NULL) {
		s->ls_top_dev->ld_site = NULL;
		lu_ref_del(&s->ls_top_dev->ld_reference, "site-top", s);
		lu_device_put(s->ls_top_dev);
		s->ls_top_dev = NULL;
	}

	if (s->ls_stats != NULL)
		lprocfs_free_stats(&s->ls_stats);
}
EXPORT_SYMBOL(lu_site_fini);

/**
 * Called when initialization of stack for this site is completed.
 */
int lu_site_init_finish(struct lu_site *s)
{
	int result;

	mutex_lock(&lu_sites_guard);
	result = lu_context_refill(&lu_shrink_env.le_ctx);
	if (result == 0)
		cfs_list_add(&s->ls_linkage, &lu_sites);
	mutex_unlock(&lu_sites_guard);
	return result;
}
EXPORT_SYMBOL(lu_site_init_finish);
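/*
 * Lifecycle sketch (illustration only, not from the original code): a site
 * is initialized against its top-level device, announced to the shrinker
 * once the whole device stack is ready, and torn down in reverse order. The
 * function name is hypothetical.
 */
#if 0
static int example_site_setup(struct lu_site *s, struct lu_device *top)
{
	int rc;

	rc = lu_site_init(s, top);		/* hash, LRU, stats */
	if (rc != 0)
		return rc;

	/* ... finish building the device stack ... */

	rc = lu_site_init_finish(s);		/* visible to lu_cache_shrink */
	if (rc != 0)
		lu_site_fini(s);
	return rc;
}
#endif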
/**
 * Acquire additional reference on device \a d
 */
void lu_device_get(struct lu_device *d)
{
	atomic_inc(&d->ld_ref);
}
EXPORT_SYMBOL(lu_device_get);

/**
 * Release reference on device \a d.
 */
void lu_device_put(struct lu_device *d)
{
	LASSERT(atomic_read(&d->ld_ref) > 0);
	atomic_dec(&d->ld_ref);
}
EXPORT_SYMBOL(lu_device_put);

/**
 * Initialize device \a d of type \a t.
 */
int lu_device_init(struct lu_device *d, struct lu_device_type *t)
{
	if (atomic_inc_return(&t->ldt_device_nr) == 1 &&
	    t->ldt_ops->ldto_start != NULL)
		t->ldt_ops->ldto_start(t);

	memset(d, 0, sizeof *d);
	atomic_set(&d->ld_ref, 0);
	d->ld_type = t;
	lu_ref_init(&d->ld_reference);
	INIT_LIST_HEAD(&d->ld_linkage);
	return 0;
}
EXPORT_SYMBOL(lu_device_init);

/**
 * Finalize device \a d.
 */
void lu_device_fini(struct lu_device *d)
{
	struct lu_device_type *t = d->ld_type;

	if (d->ld_obd != NULL) {
		d->ld_obd->obd_lu_dev = NULL;
		d->ld_obd = NULL;
	}

	lu_ref_fini(&d->ld_reference);
	LASSERTF(atomic_read(&d->ld_ref) == 0,
		 "Refcount is %u\n", atomic_read(&d->ld_ref));
	LASSERT(atomic_read(&t->ldt_device_nr) > 0);

	if (atomic_dec_and_test(&t->ldt_device_nr) &&
	    t->ldt_ops->ldto_stop != NULL)
		t->ldt_ops->ldto_stop(t);
}
EXPORT_SYMBOL(lu_device_fini);
/**
 * Initialize object \a o that is part of compound object \a h and was created
 * by device \a d.
 */
int lu_object_init(struct lu_object *o, struct lu_object_header *h,
		   struct lu_device *d)
{
	memset(o, 0, sizeof(*o));
	o->lo_header = h;
	o->lo_dev = d;
	lu_device_get(d);
	lu_ref_add_at(&d->ld_reference, &o->lo_dev_ref, "lu_object", o);
	CFS_INIT_LIST_HEAD(&o->lo_linkage);
	return 0;
}
EXPORT_SYMBOL(lu_object_init);

/**
 * Finalize object and release its resources.
 */
void lu_object_fini(struct lu_object *o)
{
	struct lu_device *dev = o->lo_dev;

	LASSERT(cfs_list_empty(&o->lo_linkage));

	if (dev != NULL) {
		lu_ref_del_at(&dev->ld_reference, &o->lo_dev_ref,
			      "lu_object", o);
		lu_device_put(dev);
		o->lo_dev = NULL;
	}
}
EXPORT_SYMBOL(lu_object_fini);

/**
 * Add object \a o as first layer of compound object \a h
 *
 * This is typically called by the ->ldo_object_alloc() method of the
 * top-level device.
 */
void lu_object_add_top(struct lu_object_header *h, struct lu_object *o)
{
	cfs_list_move(&o->lo_linkage, &h->loh_layers);
}
EXPORT_SYMBOL(lu_object_add_top);

/**
 * Add object \a o as a layer of compound object, going after \a before.
 *
 * This is typically called by the ->ldo_object_alloc() method of the
 * device that \a before belongs to.
 */
void lu_object_add(struct lu_object *before, struct lu_object *o)
{
	cfs_list_move(&o->lo_linkage, &before->lo_linkage);
}
EXPORT_SYMBOL(lu_object_add);

/**
 * Initialize compound object.
 */
int lu_object_header_init(struct lu_object_header *h)
{
	memset(h, 0, sizeof *h);
	atomic_set(&h->loh_ref, 1);
	CFS_INIT_HLIST_NODE(&h->loh_hash);
	CFS_INIT_LIST_HEAD(&h->loh_lru);
	CFS_INIT_LIST_HEAD(&h->loh_layers);
	lu_ref_init(&h->loh_reference);
	return 0;
}
EXPORT_SYMBOL(lu_object_header_init);

/**
 * Finalize compound object.
 */
void lu_object_header_fini(struct lu_object_header *h)
{
	LASSERT(cfs_list_empty(&h->loh_layers));
	LASSERT(cfs_list_empty(&h->loh_lru));
	LASSERT(cfs_hlist_unhashed(&h->loh_hash));
	lu_ref_fini(&h->loh_reference);
}
EXPORT_SYMBOL(lu_object_header_fini);
/**
 * Given a compound object, find its slice corresponding to the device type
 * \a dtype.
 */
struct lu_object *lu_object_locate(struct lu_object_header *h,
				   const struct lu_device_type *dtype)
{
	struct lu_object *o;

	cfs_list_for_each_entry(o, &h->loh_layers, lo_linkage) {
		if (o->lo_dev->ld_type == dtype)
			return o;
	}
	return NULL;
}
EXPORT_SYMBOL(lu_object_locate);

/**
 * Finalize and free devices in the device stack.
 *
 * Finalize device stack by purging object cache, and calling
 * lu_device_type_operations::ldto_device_fini() and
 * lu_device_type_operations::ldto_device_free() on all devices in the stack.
 */
void lu_stack_fini(const struct lu_env *env, struct lu_device *top)
{
	struct lu_site *site = top->ld_site;
	struct lu_device *scan;
	struct lu_device *next;

	lu_site_purge(env, site, ~0);
	for (scan = top; scan != NULL; scan = next) {
		next = scan->ld_type->ldt_ops->ldto_device_fini(env, scan);
		lu_ref_del(&scan->ld_reference, "lu-stack", &lu_site_init);
		lu_device_put(scan);
	}

	/* purge again. */
	lu_site_purge(env, site, ~0);

	for (scan = top; scan != NULL; scan = next) {
		const struct lu_device_type *ldt = scan->ld_type;
		struct obd_type *type;

		next = ldt->ldt_ops->ldto_device_free(env, scan);
		type = ldt->ldt_obd_type;
		if (type != NULL) {
			type->typ_refcnt--;
			class_put_type(type);
		}
	}
}
EXPORT_SYMBOL(lu_stack_fini);
enum {
	/**
	 * Maximal number of tld slots.
	 */
	LU_CONTEXT_KEY_NR = 40
};

static struct lu_context_key *lu_keys[LU_CONTEXT_KEY_NR] = { NULL, };

static DEFINE_SPINLOCK(lu_keys_guard);

/**
 * Global counter incremented whenever a key is registered, unregistered,
 * revived or quiesced. This is used to avoid unnecessary calls to
 * lu_context_refill(). No locking is provided, as initialization and shutdown
 * are supposed to be externally serialized.
 */
static unsigned key_set_version = 0;

/**
 * Register new key.
 */
int lu_context_key_register(struct lu_context_key *key)
{
	int result;
	int i;

	LASSERT(key->lct_init != NULL);
	LASSERT(key->lct_fini != NULL);
	LASSERT(key->lct_tags != 0);
	LASSERT(key->lct_owner != NULL);

	result = -ENFILE;
	spin_lock(&lu_keys_guard);
	for (i = 0; i < ARRAY_SIZE(lu_keys); ++i) {
		if (lu_keys[i] == NULL) {
			key->lct_index = i;
			atomic_set(&key->lct_used, 1);
			lu_keys[i] = key;
			lu_ref_init(&key->lct_reference);
			result = 0;
			++key_set_version;
			break;
		}
	}
	spin_unlock(&lu_keys_guard);
	return result;
}
EXPORT_SYMBOL(lu_context_key_register);
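/*
 * Usage sketch (illustration only, not from the original code): defining and
 * registering a context key. LU_KEY_INIT_FINI() (used above for
 * lu_global_key) generates the example_key_init/example_key_fini
 * constructor/destructor pair. All "example_*" names are hypothetical.
 */
#if 0
struct example_thread_info {
	int eti_counter;
};

LU_KEY_INIT_FINI(example, struct example_thread_info);

static struct lu_context_key example_thread_key = {
	.lct_tags = LCT_MD_THREAD,
	.lct_init = example_key_init,
	.lct_fini = example_key_fini
};

/* at module init:
 *	LU_CONTEXT_KEY_INIT(&example_thread_key);
 *	rc = lu_context_key_register(&example_thread_key);
 */
#endif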
static void key_fini(struct lu_context *ctx, int index)
{
	if (ctx->lc_value != NULL && ctx->lc_value[index] != NULL) {
		struct lu_context_key *key;

		key = lu_keys[index];
		LASSERT(key != NULL);
		LASSERT(key->lct_fini != NULL);
		LASSERT(atomic_read(&key->lct_used) > 1);

		key->lct_fini(ctx, key, ctx->lc_value[index]);
		lu_ref_del(&key->lct_reference, "ctx", ctx);
		atomic_dec(&key->lct_used);

		LASSERT(key->lct_owner != NULL);
		if ((ctx->lc_tags & LCT_NOREF) == 0) {
			LINVRNT(module_refcount(key->lct_owner) > 0);
			module_put(key->lct_owner);
		}
		ctx->lc_value[index] = NULL;
	}
}
/**
 * Deregister key.
 */
void lu_context_key_degister(struct lu_context_key *key)
{
	LASSERT(atomic_read(&key->lct_used) >= 1);
	LINVRNT(0 <= key->lct_index && key->lct_index < ARRAY_SIZE(lu_keys));

	lu_context_key_quiesce(key);

	++key_set_version;
	spin_lock(&lu_keys_guard);
	key_fini(&lu_shrink_env.le_ctx, key->lct_index);
	if (lu_keys[key->lct_index]) {
		lu_keys[key->lct_index] = NULL;
		lu_ref_fini(&key->lct_reference);
	}
	spin_unlock(&lu_keys_guard);

	LASSERTF(atomic_read(&key->lct_used) == 1,
		 "key has instances: %d\n",
		 atomic_read(&key->lct_used));
}
EXPORT_SYMBOL(lu_context_key_degister);
/**
 * Register a number of keys. This has to be called after all keys have been
 * initialized by a call to LU_CONTEXT_KEY_INIT().
 */
int lu_context_key_register_many(struct lu_context_key *k, ...)
{
	struct lu_context_key *key = k;
	va_list args;
	int result;

	va_start(args, k);
	do {
		result = lu_context_key_register(key);
		if (result)
			break;
		key = va_arg(args, struct lu_context_key *);
	} while (key != NULL);
	va_end(args);

	if (result != 0) {
		/* undo registrations done so far */
		va_start(args, k);
		while (k != key) {
			lu_context_key_degister(k);
			k = va_arg(args, struct lu_context_key *);
		}
		va_end(args);
	}

	return result;
}
EXPORT_SYMBOL(lu_context_key_register_many);
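/*
 * Usage sketch (illustration only, not from the original code): the varargs
 * list must be NULL-terminated; on failure everything registered so far is
 * rolled back. Both key names are hypothetical; another_thread_key is
 * assumed to be defined like example_thread_key above.
 */
#if 0
static struct lu_context_key another_thread_key;	/* hypothetical */

static int example_register_keys(void)
{
	/* NULL terminates the argument list. */
	return lu_context_key_register_many(&example_thread_key,
					    &another_thread_key, NULL);
}
#endif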
/**
 * De-register a number of keys. This is a dual to
 * lu_context_key_register_many().
 */
void lu_context_key_degister_many(struct lu_context_key *k, ...)
{
	va_list args;

	va_start(args, k);
	do {
		lu_context_key_degister(k);
		k = va_arg(args, struct lu_context_key *);
	} while (k != NULL);
	va_end(args);
}
EXPORT_SYMBOL(lu_context_key_degister_many);

/**
 * Revive a number of keys.
 */
void lu_context_key_revive_many(struct lu_context_key *k, ...)
{
	va_list args;

	va_start(args, k);
	do {
		lu_context_key_revive(k);
		k = va_arg(args, struct lu_context_key *);
	} while (k != NULL);
	va_end(args);
}
EXPORT_SYMBOL(lu_context_key_revive_many);

/**
 * Quiesce a number of keys.
 */
void lu_context_key_quiesce_many(struct lu_context_key *k, ...)
{
	va_list args;

	va_start(args, k);
	do {
		lu_context_key_quiesce(k);
		k = va_arg(args, struct lu_context_key *);
	} while (k != NULL);
	va_end(args);
}
EXPORT_SYMBOL(lu_context_key_quiesce_many);
/**
 * Return value associated with key \a key in context \a ctx.
 */
void *lu_context_key_get(const struct lu_context *ctx,
			 const struct lu_context_key *key)
{
	LINVRNT(ctx->lc_state == LCS_ENTERED);
	LINVRNT(0 <= key->lct_index && key->lct_index < ARRAY_SIZE(lu_keys));
	LASSERT(lu_keys[key->lct_index] == key);
	return ctx->lc_value[key->lct_index];
}
EXPORT_SYMBOL(lu_context_key_get);

/**
 * List of remembered contexts. XXX document me.
 */
static CFS_LIST_HEAD(lu_context_remembered);
/**
 * Destroy \a key in all remembered contexts. This is used to destroy key
 * values in "shared" contexts (like service threads), when a module owning
 * the key is about to be unloaded.
 */
void lu_context_key_quiesce(struct lu_context_key *key)
{
	struct lu_context *ctx;
	extern unsigned cl_env_cache_purge(unsigned nr);

	if (!(key->lct_tags & LCT_QUIESCENT)) {
		/*
		 * XXX layering violation.
		 */
		cl_env_cache_purge(~0);
		key->lct_tags |= LCT_QUIESCENT;
		/*
		 * XXX memory barrier has to go here.
		 */
		spin_lock(&lu_keys_guard);
		cfs_list_for_each_entry(ctx, &lu_context_remembered,
					lc_remember)
			key_fini(ctx, key->lct_index);
		spin_unlock(&lu_keys_guard);
		++key_set_version;
	}
}
EXPORT_SYMBOL(lu_context_key_quiesce);

void lu_context_key_revive(struct lu_context_key *key)
{
	key->lct_tags &= ~LCT_QUIESCENT;
	++key_set_version;
}
EXPORT_SYMBOL(lu_context_key_revive);
static void keys_fini(struct lu_context *ctx)
{
	int i;

	if (ctx->lc_value == NULL)
		return;

	for (i = 0; i < ARRAY_SIZE(lu_keys); ++i)
		key_fini(ctx, i);

	OBD_FREE(ctx->lc_value, ARRAY_SIZE(lu_keys) * sizeof ctx->lc_value[0]);
	ctx->lc_value = NULL;
}

static int keys_fill(struct lu_context *ctx)
{
	int i;

	LINVRNT(ctx->lc_value != NULL);
	for (i = 0; i < ARRAY_SIZE(lu_keys); ++i) {
		struct lu_context_key *key;

		key = lu_keys[i];
		if (ctx->lc_value[i] == NULL && key != NULL &&
		    (key->lct_tags & ctx->lc_tags) &&
		    /*
		     * Don't create values for a LCT_QUIESCENT key, as this
		     * will pin the module owning the key.
		     */
		    !(key->lct_tags & LCT_QUIESCENT)) {
			void *value;

			LINVRNT(key->lct_init != NULL);
			LINVRNT(key->lct_index == i);

			value = key->lct_init(ctx, key);
			if (unlikely(IS_ERR(value)))
				return PTR_ERR(value);

			LASSERT(key->lct_owner != NULL);
			if (!(ctx->lc_tags & LCT_NOREF))
				try_module_get(key->lct_owner);
			lu_ref_add_atomic(&key->lct_reference, "ctx", ctx);
			atomic_inc(&key->lct_used);
			/*
			 * This is the only place in the code where an
			 * element of the ctx->lc_value[] array is set to a
			 * non-NULL value.
			 */
			ctx->lc_value[i] = value;
			if (key->lct_exit != NULL)
				ctx->lc_tags |= LCT_HAS_EXIT;
		}
		ctx->lc_version = key_set_version;
	}
	return 0;
}

static int keys_init(struct lu_context *ctx)
{
	OBD_ALLOC(ctx->lc_value, ARRAY_SIZE(lu_keys) * sizeof ctx->lc_value[0]);
	if (likely(ctx->lc_value != NULL))
		return keys_fill(ctx);

	return -ENOMEM;
}
/**
 * Initialize context data-structure. Create values for all keys.
 */
int lu_context_init(struct lu_context *ctx, __u32 tags)
{
	int rc;

	memset(ctx, 0, sizeof *ctx);
	ctx->lc_state = LCS_INITIALIZED;
	ctx->lc_tags = tags;
	if (tags & LCT_REMEMBER) {
		spin_lock(&lu_keys_guard);
		cfs_list_add(&ctx->lc_remember, &lu_context_remembered);
		spin_unlock(&lu_keys_guard);
	} else {
		CFS_INIT_LIST_HEAD(&ctx->lc_remember);
	}

	rc = keys_init(ctx);
	if (rc != 0)
		lu_context_fini(ctx);

	return rc;
}
EXPORT_SYMBOL(lu_context_init);

/**
 * Finalize context data-structure. Destroy key values.
 */
void lu_context_fini(struct lu_context *ctx)
{
	LINVRNT(ctx->lc_state == LCS_INITIALIZED || ctx->lc_state == LCS_LEFT);
	ctx->lc_state = LCS_FINALIZED;

	if ((ctx->lc_tags & LCT_REMEMBER) == 0) {
		LASSERT(cfs_list_empty(&ctx->lc_remember));
		keys_fini(ctx);
	} else { /* could race with key degister */
		spin_lock(&lu_keys_guard);
		keys_fini(ctx);
		cfs_list_del_init(&ctx->lc_remember);
		spin_unlock(&lu_keys_guard);
	}
}
EXPORT_SYMBOL(lu_context_fini);

/**
 * Called before entering context.
 */
void lu_context_enter(struct lu_context *ctx)
{
	LINVRNT(ctx->lc_state == LCS_INITIALIZED || ctx->lc_state == LCS_LEFT);
	ctx->lc_state = LCS_ENTERED;
}
EXPORT_SYMBOL(lu_context_enter);

/**
 * Called after exiting from \a ctx
 */
void lu_context_exit(struct lu_context *ctx)
{
	int i;

	LINVRNT(ctx->lc_state == LCS_ENTERED);
	ctx->lc_state = LCS_LEFT;
	if (ctx->lc_tags & LCT_HAS_EXIT && ctx->lc_value != NULL) {
		for (i = 0; i < ARRAY_SIZE(lu_keys); ++i) {
			if (ctx->lc_value[i] != NULL) {
				struct lu_context_key *key;

				key = lu_keys[i];
				LASSERT(key != NULL);
				if (key->lct_exit != NULL)
					key->lct_exit(ctx,
						      key, ctx->lc_value[i]);
			}
		}
	}
}
EXPORT_SYMBOL(lu_context_exit);
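/*
 * Lifecycle sketch (illustration only, not from the original code): the
 * expected state sequence is lu_context_init() -> lu_context_enter() ->
 * ... lu_context_key_get() ... -> lu_context_exit() -> lu_context_fini();
 * enter/exit pairs may repeat. The function name is hypothetical.
 */
#if 0
static int example_context_cycle(void)
{
	struct lu_context ctx;
	int rc;

	rc = lu_context_init(&ctx, LCT_MD_THREAD);
	if (rc != 0)
		return rc;
	lu_context_enter(&ctx);
	/* ... lu_context_key_get(&ctx, &example_thread_key) ... */
	lu_context_exit(&ctx);
	lu_context_fini(&ctx);
	return 0;
}
#endif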
/**
 * Allocate for context all missing keys that were registered after context
 * creation. key_set_version is only changed in rare cases when modules
 * are loaded and removed.
 */
int lu_context_refill(struct lu_context *ctx)
{
	return likely(ctx->lc_version == key_set_version) ? 0 : keys_fill(ctx);
}
EXPORT_SYMBOL(lu_context_refill);
/**
 * lu_ctx_tags/lu_ses_tags will be updated if there are new types of
 * obd being added. Currently, this is only used on the client side,
 * specifically for the echo device client. For other stacks (like ptlrpc
 * threads), contexts are predefined when the lu_device type is registered,
 * during the module probe phase.
 */
__u32 lu_context_tags_default = 0;
__u32 lu_session_tags_default = 0;

void lu_context_tags_update(__u32 tags)
{
	spin_lock(&lu_keys_guard);
	lu_context_tags_default |= tags;
	key_set_version++;
	spin_unlock(&lu_keys_guard);
}
EXPORT_SYMBOL(lu_context_tags_update);

void lu_context_tags_clear(__u32 tags)
{
	spin_lock(&lu_keys_guard);
	lu_context_tags_default &= ~tags;
	key_set_version++;
	spin_unlock(&lu_keys_guard);
}
EXPORT_SYMBOL(lu_context_tags_clear);

void lu_session_tags_update(__u32 tags)
{
	spin_lock(&lu_keys_guard);
	lu_session_tags_default |= tags;
	key_set_version++;
	spin_unlock(&lu_keys_guard);
}
EXPORT_SYMBOL(lu_session_tags_update);

void lu_session_tags_clear(__u32 tags)
{
	spin_lock(&lu_keys_guard);
	lu_session_tags_default &= ~tags;
	key_set_version++;
	spin_unlock(&lu_keys_guard);
}
EXPORT_SYMBOL(lu_session_tags_clear);
int lu_env_init(struct lu_env *env, __u32 tags)
{
	int result;

	env->le_ses = NULL;
	result = lu_context_init(&env->le_ctx, tags);
	if (likely(result == 0))
		lu_context_enter(&env->le_ctx);
	return result;
}
EXPORT_SYMBOL(lu_env_init);

void lu_env_fini(struct lu_env *env)
{
	lu_context_exit(&env->le_ctx);
	lu_context_fini(&env->le_ctx);
	env->le_ses = NULL;
}
EXPORT_SYMBOL(lu_env_fini);

int lu_env_refill(struct lu_env *env)
{
	int result;

	result = lu_context_refill(&env->le_ctx);
	if (result == 0 && env->le_ses != NULL)
		result = lu_context_refill(env->le_ses);
	return result;
}
EXPORT_SYMBOL(lu_env_refill);
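/*
 * Usage sketch (illustration only, not from the original code): an env wraps
 * a context; after new keys are registered (key_set_version bumped), a cached
 * env can be brought up to date with lu_env_refill() instead of being
 * re-created. The function name is hypothetical.
 */
#if 0
static int example_env_cycle(void)
{
	struct lu_env env;
	int rc;

	rc = lu_env_init(&env, LCT_LOCAL);
	if (rc != 0)
		return rc;
	rc = lu_env_refill(&env);	/* no-op unless keys changed */
	/* ... use env ... */
	lu_env_fini(&env);
	return rc;
}
#endif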
/**
 * Currently, this API is only used by the echo client, because the echo
 * client and the normal Lustre client share the same cl_env cache. The echo
 * client therefore needs to refresh the env context after getting one from
 * the cache, especially when the normal client and the echo client coexist
 * on the same node.
 */
int lu_env_refill_by_tags(struct lu_env *env, __u32 ctags,
			  __u32 stags)
{
	int result;

	if ((env->le_ctx.lc_tags & ctags) != ctags) {
		env->le_ctx.lc_version = 0;
		env->le_ctx.lc_tags |= ctags;
	}

	if (env->le_ses && (env->le_ses->lc_tags & stags) != stags) {
		env->le_ses->lc_version = 0;
		env->le_ses->lc_tags |= stags;
	}

	result = lu_env_refill(env);
	return result;
}
EXPORT_SYMBOL(lu_env_refill_by_tags);
static struct shrinker *lu_site_shrinker;

typedef struct lu_site_stats {
	unsigned lss_populated;
	unsigned lss_max_search;
	unsigned lss_total;
	unsigned lss_busy;
} lu_site_stats_t;

static void lu_site_stats_get(cfs_hash_t *hs,
			      lu_site_stats_t *stats, int populated)
{
	cfs_hash_bd_t bd;
	int i;

	cfs_hash_for_each_bucket(hs, &bd, i) {
		struct lu_site_bkt_data *bkt = cfs_hash_bd_extra_get(hs, &bd);
		cfs_hlist_head_t *hhead;

		cfs_hash_bd_lock(hs, &bd, 1);
		stats->lss_busy += bkt->lsb_busy;
		stats->lss_total += cfs_hash_bd_count_get(&bd);
		stats->lss_max_search = max((int)stats->lss_max_search,
					    cfs_hash_bd_depmax_get(&bd));
		if (!populated) {
			cfs_hash_bd_unlock(hs, &bd, 1);
			continue;
		}

		cfs_hash_bd_for_each_hlist(hs, &bd, hhead) {
			if (!cfs_hlist_empty(hhead))
				stats->lss_populated++;
		}
		cfs_hash_bd_unlock(hs, &bd, 1);
	}
}
#ifdef __KERNEL__

/*
 * There exists a potential lock inversion deadlock scenario when using
 * Lustre on top of ZFS. This occurs between one of ZFS's
 * buf_hash_table.ht_lock's, and Lustre's lu_sites_guard lock. Essentially,
 * thread A will take the lu_sites_guard lock and sleep on the ht_lock,
 * while thread B will take the ht_lock and sleep on the lu_sites_guard
 * lock. Obviously neither thread will wake up and drop its respective hold
 * on its lock.
 *
 * To prevent this from happening, we must ensure the lu_sites_guard lock is
 * not taken anywhere down this code path. ZFS reliably does not set the
 * __GFP_FS bit in its code paths, so this can be used to determine if it
 * is safe to take the lu_sites_guard lock.
 *
 * Ideally we should accurately return the remaining number of cached
 * objects without taking the lu_sites_guard lock, but this is not
 * possible in the current implementation.
 */
static int lu_cache_shrink(SHRINKER_ARGS(sc, nr_to_scan, gfp_mask))
{
	lu_site_stats_t stats;
	struct lu_site *s;
	struct lu_site *tmp;
	int cached = 0;
	int remain = shrink_param(sc, nr_to_scan);
	CFS_LIST_HEAD(splice);

	if (!(shrink_param(sc, gfp_mask) & __GFP_FS)) {
		if (remain != 0)
			return -1;
		else
			/* We must not take the lu_sites_guard lock when
			 * __GFP_FS is *not* set because of the deadlock
			 * possibility detailed above. Additionally,
			 * since we cannot determine the number of
			 * objects in the cache without taking this
			 * lock, we're in a particularly tough spot. As
			 * a result, we'll just lie and say our cache is
			 * empty. This _should_ be ok, as we can't
			 * reclaim objects when __GFP_FS is *not* set
			 * anyway.
			 */
			return 0;
	}

	CDEBUG(D_INODE, "Shrink %d objects\n", remain);

	mutex_lock(&lu_sites_guard);
	cfs_list_for_each_entry_safe(s, tmp, &lu_sites, ls_linkage) {
		if (shrink_param(sc, nr_to_scan) != 0) {
			remain = lu_site_purge(&lu_shrink_env, s, remain);
			/*
			 * Move just-shrunk site to the tail of the site list
			 * to assure shrinking fairness.
			 */
			cfs_list_move_tail(&s->ls_linkage, &splice);
		}

		memset(&stats, 0, sizeof(stats));
		lu_site_stats_get(s->ls_obj_hash, &stats, 0);
		cached += stats.lss_total - stats.lss_busy;
		if (shrink_param(sc, nr_to_scan) && remain <= 0)
			break;
	}
	cfs_list_splice(&splice, lu_sites.prev);
	mutex_unlock(&lu_sites_guard);

	cached = (cached / 100) * sysctl_vfs_cache_pressure;
	if (shrink_param(sc, nr_to_scan) == 0)
		CDEBUG(D_INODE, "%d objects cached\n", cached);
	return cached;
}
/**
 * Environment to be used in debugger, contains all tags.
 */
struct lu_env lu_debugging_env;

/**
 * Debugging printer function using printk().
 */
int lu_printk_printer(const struct lu_env *env,
		      void *unused, const char *format, ...)
{
	va_list args;

	va_start(args, format);
	vprintk(format, args);
	va_end(args);
	return 0;
}

int lu_debugging_setup(void)
{
	return lu_env_init(&lu_debugging_env, ~0);
}

void lu_context_keys_dump(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(lu_keys); ++i) {
		struct lu_context_key *key;

		key = lu_keys[i];
		if (key != NULL) {
			CERROR("[%d]: %p %x (%p,%p,%p) %d %d \"%s\"@%p\n",
			       i, key, key->lct_tags,
			       key->lct_init, key->lct_fini, key->lct_exit,
			       key->lct_index, atomic_read(&key->lct_used),
			       key->lct_owner ? key->lct_owner->name : "",
			       key->lct_owner);
			lu_ref_print(&key->lct_reference);
		}
	}
}
EXPORT_SYMBOL(lu_context_keys_dump);
#else  /* !__KERNEL__ */
static int lu_cache_shrink(int nr, unsigned int gfp_mask)
{
	return 0;
}
#endif /* __KERNEL__ */
/**
 * Initialization of global lu_* data.
 */
int lu_global_init(void)
{
	int result;

	CDEBUG(D_INFO, "Lustre LU module (%p).\n", &lu_keys);

	result = lu_ref_global_init();
	if (result != 0)
		return result;

	LU_CONTEXT_KEY_INIT(&lu_global_key);
	result = lu_context_key_register(&lu_global_key);
	if (result != 0)
		return result;

	/*
	 * At this level, we don't know what tags are needed, so allocate them
	 * conservatively. This should not be too bad, because this
	 * environment is global.
	 */
	mutex_lock(&lu_sites_guard);
	result = lu_env_init(&lu_shrink_env, LCT_SHRINKER);
	mutex_unlock(&lu_sites_guard);
	if (result != 0)
		return result;

	/*
	 * seeks estimation: 3 seeks to read a record from oi, one to read
	 * inode, one for ea. Unfortunately setting this high value results in
	 * lu_object/inode cache consuming all the memory.
	 */
	lu_site_shrinker = set_shrinker(DEFAULT_SEEKS, lu_cache_shrink);
	if (lu_site_shrinker == NULL)
		return -ENOMEM;

	return result;
}

/**
 * Dual to lu_global_init().
 */
void lu_global_fini(void)
{
	if (lu_site_shrinker != NULL) {
		remove_shrinker(lu_site_shrinker);
		lu_site_shrinker = NULL;
	}

	lu_context_key_degister(&lu_global_key);

	/*
	 * Tear shrinker environment down _after_ de-registering
	 * lu_global_key, because the latter has a value in the former.
	 */
	mutex_lock(&lu_sites_guard);
	lu_env_fini(&lu_shrink_env);
	mutex_unlock(&lu_sites_guard);

	lu_ref_global_fini();
}
static __u32 ls_stats_read(struct lprocfs_stats *stats, int idx)
{
	struct lprocfs_counter ret;

	lprocfs_stats_collect(stats, idx, &ret);
	return (__u32)ret.lc_count;
}
/**
 * Output site statistical counters into a buffer. Suitable for
 * lprocfs_rd_*()-style functions.
 */
int lu_site_stats_seq_print(const struct lu_site *s, struct seq_file *m)
{
	lu_site_stats_t stats;

	memset(&stats, 0, sizeof(stats));
	lu_site_stats_get(s->ls_obj_hash, &stats, 1);

	return seq_printf(m, "%d/%d %d/%d %d %d %d %d %d %d %d\n",
			  stats.lss_busy,
			  stats.lss_total,
			  stats.lss_populated,
			  CFS_HASH_NHLIST(s->ls_obj_hash),
			  stats.lss_max_search,
			  ls_stats_read(s->ls_stats, LU_SS_CREATED),
			  ls_stats_read(s->ls_stats, LU_SS_CACHE_HIT),
			  ls_stats_read(s->ls_stats, LU_SS_CACHE_MISS),
			  ls_stats_read(s->ls_stats, LU_SS_CACHE_RACE),
			  ls_stats_read(s->ls_stats, LU_SS_CACHE_DEATH_RACE),
			  ls_stats_read(s->ls_stats, LU_SS_LRU_PURGED));
}
EXPORT_SYMBOL(lu_site_stats_seq_print);

int lu_site_stats_print(const struct lu_site *s, char *page, int count)
{
	lu_site_stats_t stats;

	memset(&stats, 0, sizeof(stats));
	lu_site_stats_get(s->ls_obj_hash, &stats, 1);

	return snprintf(page, count, "%d/%d %d/%d %d %d %d %d %d %d %d\n",
			stats.lss_busy,
			stats.lss_total,
			stats.lss_populated,
			CFS_HASH_NHLIST(s->ls_obj_hash),
			stats.lss_max_search,
			ls_stats_read(s->ls_stats, LU_SS_CREATED),
			ls_stats_read(s->ls_stats, LU_SS_CACHE_HIT),
			ls_stats_read(s->ls_stats, LU_SS_CACHE_MISS),
			ls_stats_read(s->ls_stats, LU_SS_CACHE_RACE),
			ls_stats_read(s->ls_stats, LU_SS_CACHE_DEATH_RACE),
			ls_stats_read(s->ls_stats, LU_SS_LRU_PURGED));
}
EXPORT_SYMBOL(lu_site_stats_print);
/**
 * Helper function to initialize a number of kmem slab caches at once.
 */
int lu_kmem_init(struct lu_kmem_descr *caches)
{
	int result;
	struct lu_kmem_descr *iter = caches;

	for (result = 0; iter->ckd_cache != NULL; ++iter) {
		*iter->ckd_cache = kmem_cache_create(iter->ckd_name,
						     iter->ckd_size,
						     0, 0, NULL);
		if (*iter->ckd_cache == NULL) {
			result = -ENOMEM;
			/* free all previously allocated caches */
			lu_kmem_fini(caches);
			break;
		}
	}
	return result;
}
EXPORT_SYMBOL(lu_kmem_init);

/**
 * Helper function to finalize a number of kmem slab caches at once. Dual to
 * lu_kmem_init().
 */
void lu_kmem_fini(struct lu_kmem_descr *caches)
{
	for (; caches->ckd_cache != NULL; ++caches) {
		if (*caches->ckd_cache != NULL) {
			kmem_cache_destroy(*caches->ckd_cache);
			*caches->ckd_cache = NULL;
		}
	}
}
EXPORT_SYMBOL(lu_kmem_fini);
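/*
 * Usage sketch (illustration only, not from the original code):
 * lu_kmem_init()/lu_kmem_fini() walk a descriptor array terminated by a NULL
 * ckd_cache pointer. The cache below is hypothetical, and ckd_size is
 * assumed to be the object-size field of struct lu_kmem_descr.
 */
#if 0
static struct kmem_cache *example_object_kmem;

static struct lu_kmem_descr example_caches[] = {
	{
		.ckd_cache = &example_object_kmem,
		.ckd_name  = "example_object_kmem",
		.ckd_size  = sizeof(struct lu_object_header)
	},
	{
		.ckd_cache = NULL	/* sentinel terminates the array */
	}
};

/* at module init: rc = lu_kmem_init(example_caches); */
/* at module exit: lu_kmem_fini(example_caches); */
#endif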
/**
 * Temporary solution to be able to assign the fid in ->do_create()
 * till we have fully-functional OST fids.
 */
void lu_object_assign_fid(const struct lu_env *env, struct lu_object *o,
			  const struct lu_fid *fid)
{
	struct lu_site *s = o->lo_dev->ld_site;
	struct lu_fid *old = &o->lo_header->loh_fid;
	struct lu_site_bkt_data *bkt;
	struct lu_object *shadow;
	wait_queue_t waiter;
	cfs_hash_t *hs;
	cfs_hash_bd_t bd;
	__u64 version = 0;

	LASSERT(fid_is_zero(old));

	hs = s->ls_obj_hash;
	cfs_hash_bd_get_and_lock(hs, (void *)fid, &bd, 1);
	shadow = htable_lookup(s, &bd, fid, &waiter, &version);
	/* supposed to be unique */
	LASSERT(IS_ERR(shadow) && PTR_ERR(shadow) == -ENOENT);
	*old = *fid;
	bkt = cfs_hash_bd_extra_get(hs, &bd);
	cfs_hash_bd_add_locked(hs, &bd, &o->lo_header->loh_hash);
	bkt->lsb_busy++;
	cfs_hash_bd_unlock(hs, &bd, 1);
}
EXPORT_SYMBOL(lu_object_assign_fid);
/**
 * Allocates an object with zero (non-assigned) fid.
 * XXX: temporary solution to be able to assign the fid in ->do_create()
 * till we have fully-functional OST fids.
 */
struct lu_object *lu_object_anon(const struct lu_env *env,
				 struct lu_device *dev,
				 const struct lu_object_conf *conf)
{
	struct lu_fid fid;
	struct lu_object *o;

	fid_zero(&fid);
	o = lu_object_alloc(env, dev, &fid, conf);

	return o;
}
EXPORT_SYMBOL(lu_object_anon);

struct lu_buf LU_BUF_NULL = {
	.lb_buf = NULL,
	.lb_len = 0
};
EXPORT_SYMBOL(LU_BUF_NULL);
void lu_buf_free(struct lu_buf *buf)
{
	LASSERT(buf);
	if (buf->lb_buf) {
		LASSERT(buf->lb_len > 0);
		OBD_FREE_LARGE(buf->lb_buf, buf->lb_len);
		buf->lb_buf = NULL;
		buf->lb_len = 0;
	}
}
EXPORT_SYMBOL(lu_buf_free);

void lu_buf_alloc(struct lu_buf *buf, int size)
{
	LASSERT(buf);
	LASSERT(buf->lb_buf == NULL);
	LASSERT(buf->lb_len == 0);
	OBD_ALLOC_LARGE(buf->lb_buf, size);
	if (likely(buf->lb_buf))
		buf->lb_len = size;
}
EXPORT_SYMBOL(lu_buf_alloc);

void lu_buf_realloc(struct lu_buf *buf, int size)
{
	lu_buf_free(buf);
	lu_buf_alloc(buf, size);
}
EXPORT_SYMBOL(lu_buf_realloc);

struct lu_buf *lu_buf_check_and_alloc(struct lu_buf *buf, int len)
{
	if (buf->lb_buf == NULL && buf->lb_len == 0)
		lu_buf_alloc(buf, len);

	if ((len > buf->lb_len) && (buf->lb_buf != NULL))
		lu_buf_realloc(buf, len);

	return buf;
}
EXPORT_SYMBOL(lu_buf_check_and_alloc);

/**
 * Increase the size of the \a buf.
 * Preserves old data in the buffer; the old buffer remains unchanged on
 * error.
 * \retval 0 or -ENOMEM
 */
int lu_buf_check_and_grow(struct lu_buf *buf, int len)
{
	char *ptr;

	if (len <= buf->lb_len)
		return 0;

	OBD_ALLOC_LARGE(ptr, len);
	if (ptr == NULL)
		return -ENOMEM;

	/* Copy the old data over, then free the old buffer. */
	if (buf->lb_buf != NULL) {
		memcpy(ptr, buf->lb_buf, buf->lb_len);
		OBD_FREE_LARGE(buf->lb_buf, buf->lb_len);
	}

	buf->lb_buf = ptr;
	buf->lb_len = len;
	return 0;
}
EXPORT_SYMBOL(lu_buf_check_and_grow);
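/*
 * Usage sketch (illustration only, not from the original code): a lu_buf
 * starts out as LU_BUF_NULL and is grown on demand. lu_buf_check_and_grow()
 * preserves existing contents, while lu_buf_check_and_alloc() may reallocate
 * without copying. The function name is hypothetical.
 */
#if 0
static int example_buf_use(void)
{
	struct lu_buf buf = LU_BUF_NULL;
	int rc;

	rc = lu_buf_check_and_grow(&buf, 1024);	/* first allocation */
	if (rc != 0)
		return rc;

	rc = lu_buf_check_and_grow(&buf, 4096);	/* copies the old 1K */
	lu_buf_free(&buf);			/* always safe to call */
	return rc;
}
#endif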