/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2014, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lustre/obdclass/lu_object.c
 *
 * Lustre Object.
 * These are the only exported functions; they provide some generic
 * infrastructure for managing object devices.
 *
 * Author: Nikita Danilov <nikita.danilov@sun.com>
 */
#define DEBUG_SUBSYSTEM S_CLASS

#include <libcfs/libcfs.h>
#include <linux/module.h>
#include <libcfs/libcfs_hash.h> /* hash_long() */
#include <obd_class.h>
#include <obd_support.h>
#include <lustre_disk.h>
#include <lustre_fid.h>
#include <lu_object.h>
#include <lu_ref.h>
#include <libcfs/list.h>
enum {
	LU_CACHE_PERCENT_MAX	 = 50,
	LU_CACHE_PERCENT_DEFAULT = 20
};

#define LU_CACHE_NR_MAX_ADJUST		128
#define LU_CACHE_NR_UNLIMITED		-1
#define LU_CACHE_NR_DEFAULT		LU_CACHE_NR_UNLIMITED
#define LU_CACHE_NR_LDISKFS_LIMIT	LU_CACHE_NR_UNLIMITED
/** This is set to roughly (20 * OSS_NTHRS_MAX) to prevent thrashing */
#define LU_CACHE_NR_ZFS_LIMIT		10240

#define LU_SITE_BITS_MIN	12
#define LU_SITE_BITS_MAX	24
/**
 * Max 256 buckets; we do not want too many buckets because:
 * - they consume too much memory
 * - they lead to an unbalanced LRU list
 */
#define LU_SITE_BKT_BITS	8
static unsigned int lu_cache_percent = LU_CACHE_PERCENT_DEFAULT;
CFS_MODULE_PARM(lu_cache_percent, "i", int, 0644,
		"Percentage of memory to be used as lu_object cache");

static long lu_cache_nr = LU_CACHE_NR_DEFAULT;
CFS_MODULE_PARM(lu_cache_nr, "l", long, 0644,
		"Maximum number of objects in lu_object cache");
static void lu_object_free(const struct lu_env *env, struct lu_object *o);
static __u32 ls_stats_read(struct lprocfs_stats *stats, int idx);
/**
 * Decrease reference counter on object. If the last reference is freed,
 * return the object to the cache, unless lu_object_is_dying(o) holds. In
 * the latter case, free the object immediately.
 */
void lu_object_put(const struct lu_env *env, struct lu_object *o)
{
	struct lu_site_bkt_data *bkt;
	struct lu_object_header *top;
	struct lu_site *site;
	struct lu_object *orig;
	cfs_hash_bd_t bd;
	const struct lu_fid *fid;

	top  = o->lo_header;
	site = o->lo_dev->ld_site;
	orig = o;

	/*
	 * Till fids-on-OST is fully implemented, anonymous objects are
	 * possible in OSP; such an object isn't listed in the site, so we
	 * should not remove it from the site.
	 */
	fid = lu_object_fid(o);
	if (fid_is_zero(fid)) {
		LASSERT(top->loh_hash.next == NULL
			&& top->loh_hash.pprev == NULL);
		LASSERT(list_empty(&top->loh_lru));
		if (!atomic_dec_and_test(&top->loh_ref))
			return;
		list_for_each_entry_reverse(o, &top->loh_layers, lo_linkage) {
			if (o->lo_ops->loo_object_release != NULL)
				o->lo_ops->loo_object_release(env, o);
		}
		lu_object_free(env, orig);
		return;
	}

	cfs_hash_bd_get(site->ls_obj_hash, &top->loh_fid, &bd);
	bkt = cfs_hash_bd_extra_get(site->ls_obj_hash, &bd);

	if (!cfs_hash_bd_dec_and_lock(site->ls_obj_hash, &bd, &top->loh_ref)) {
		if (lu_object_is_dying(top)) {
			/*
			 * somebody may be waiting for this, currently only
			 * used for cl_object, see cl_object_put_last().
			 */
			wake_up_all(&bkt->lsb_marche_funebre);
		}
		return;
	}

	LASSERT(bkt->lsb_busy > 0);
	bkt->lsb_busy--;
	/*
	 * When the last reference is released, iterate over object
	 * layers, and notify them that the object is no longer busy.
	 */
	list_for_each_entry_reverse(o, &top->loh_layers, lo_linkage) {
		if (o->lo_ops->loo_object_release != NULL)
			o->lo_ops->loo_object_release(env, o);
	}

	if (!lu_object_is_dying(top) &&
	    (lu_object_exists(orig) || lu_object_is_cl(orig))) {
		LASSERT(list_empty(&top->loh_lru));
		list_add_tail(&top->loh_lru, &bkt->lsb_lru);
		cfs_hash_bd_unlock(site->ls_obj_hash, &bd, 1);
		return;
	}

	/*
	 * If the object is dying (will not be cached), remove it
	 * from the hash table and LRU.
	 *
	 * This is done with hash table and LRU lists locked. As the only
	 * way to acquire the first reference to a previously unreferenced
	 * object is through hash-table lookup (lu_object_find()) or LRU
	 * scanning (lu_site_purge()), both done under the hash-table and
	 * LRU lock, no race with a concurrent object lookup is possible
	 * and we can safely destroy the object below.
	 */
	if (!test_and_set_bit(LU_OBJECT_UNHASHED, &top->loh_flags))
		cfs_hash_bd_del_locked(site->ls_obj_hash, &bd, &top->loh_hash);
	cfs_hash_bd_unlock(site->ls_obj_hash, &bd, 1);
	/*
	 * Object was already removed from hash and LRU above, so we can
	 * kill it.
	 */
	lu_object_free(env, orig);
}
EXPORT_SYMBOL(lu_object_put);
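/*
 * Illustrative sketch (not part of the build): the usual pairing with the
 * lookup path. lu_object_find() returns a referenced top object, and the
 * caller drops that reference with lu_object_put() when done. "env", "dev"
 * and "fid" stand for an environment, device and fid owned by the caller.
 *
 *	struct lu_object *o;
 *
 *	o = lu_object_find(env, dev, fid, NULL);
 *	if (!IS_ERR(o)) {
 *		... use the object ...
 *		lu_object_put(env, o);
 *	}
 */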
/**
 * Put object and don't keep it in cache. This is a temporary solution for
 * multi-site objects whose layering is not constant.
 */
void lu_object_put_nocache(const struct lu_env *env, struct lu_object *o)
{
	set_bit(LU_OBJECT_HEARD_BANSHEE, &o->lo_header->loh_flags);
	lu_object_put(env, o);
}
EXPORT_SYMBOL(lu_object_put_nocache);
/**
 * Kill the object and take it out of the LRU cache.
 * Currently used by client code for layout change.
 */
void lu_object_unhash(const struct lu_env *env, struct lu_object *o)
{
	struct lu_object_header *top;

	top = o->lo_header;
	set_bit(LU_OBJECT_HEARD_BANSHEE, &top->loh_flags);
	if (!test_and_set_bit(LU_OBJECT_UNHASHED, &top->loh_flags)) {
		cfs_hash_t *obj_hash = o->lo_dev->ld_site->ls_obj_hash;
		cfs_hash_bd_t bd;

		cfs_hash_bd_get_and_lock(obj_hash, &top->loh_fid, &bd, 1);
		list_del_init(&top->loh_lru);
		cfs_hash_bd_del_locked(obj_hash, &bd, &top->loh_hash);
		cfs_hash_bd_unlock(obj_hash, &bd, 1);
	}
}
EXPORT_SYMBOL(lu_object_unhash);
/**
 * Allocate new object.
 *
 * This follows the object creation protocol, described in the comment
 * within struct lu_device_operations definition.
 */
static struct lu_object *lu_object_alloc(const struct lu_env *env,
					 struct lu_device *dev,
					 const struct lu_fid *f,
					 const struct lu_object_conf *conf)
{
	struct lu_object *scan;
	struct lu_object *top;
	struct list_head *layers;
	unsigned int init_mask = 0;
	unsigned int init_flag;
	int clean;
	int result;
	ENTRY;

	/*
	 * Create top-level object slice. This will also create
	 * lu_object_header.
	 */
	top = dev->ld_ops->ldo_object_alloc(env, NULL, dev);
	if (top == NULL)
		RETURN(ERR_PTR(-ENOMEM));
	if (IS_ERR(top))
		RETURN(top);
	/*
	 * This is the only place where object fid is assigned. It's constant
	 * after this point.
	 */
	top->lo_header->loh_fid = *f;
	layers = &top->lo_header->loh_layers;

	do {
		/*
		 * Call ->loo_object_init() repeatedly, until no more new
		 * object slices are created.
		 */
		clean = 1;
		init_flag = 1;
		list_for_each_entry(scan, layers, lo_linkage) {
			if (init_mask & init_flag)
				goto next;
			clean = 0;
			scan->lo_header = top->lo_header;
			result = scan->lo_ops->loo_object_init(env, scan, conf);
			if (result != 0) {
				lu_object_free(env, top);
				RETURN(ERR_PTR(result));
			}
			init_mask |= init_flag;
next:
			init_flag <<= 1;
		}
	} while (!clean);

	list_for_each_entry_reverse(scan, layers, lo_linkage) {
		if (scan->lo_ops->loo_object_start != NULL) {
			result = scan->lo_ops->loo_object_start(env, scan);
			if (result != 0) {
				lu_object_free(env, top);
				RETURN(ERR_PTR(result));
			}
		}
	}

	lprocfs_counter_incr(dev->ld_site->ls_stats, LU_SS_CREATED);
	RETURN(top);
}
/**
 * Free an object.
 */
static void lu_object_free(const struct lu_env *env, struct lu_object *o)
{
	struct lu_site_bkt_data *bkt;
	struct lu_site *site;
	struct lu_object *scan;
	struct list_head *layers;
	struct list_head splice;

	site = o->lo_dev->ld_site;
	layers = &o->lo_header->loh_layers;
	bkt = lu_site_bkt_from_fid(site, &o->lo_header->loh_fid);
	/*
	 * First call ->loo_object_delete() method to release all resources.
	 */
	list_for_each_entry_reverse(scan, layers, lo_linkage) {
		if (scan->lo_ops->loo_object_delete != NULL)
			scan->lo_ops->loo_object_delete(env, scan);
	}

	/*
	 * Then, splice object layers into stand-alone list, and call
	 * ->loo_object_free() on all layers to free memory. Splice is
	 * necessary, because lu_object_header is freed together with the
	 * top-level slice.
	 */
	INIT_LIST_HEAD(&splice);
	list_splice_init(layers, &splice);
	while (!list_empty(&splice)) {
		/*
		 * Free layers in bottom-to-top order, so that object header
		 * lives as long as possible and ->loo_object_free() methods
		 * can look at its contents.
		 */
		o = container_of0(splice.prev, struct lu_object, lo_linkage);
		list_del_init(&o->lo_linkage);
		LASSERT(o->lo_ops->loo_object_free != NULL);
		o->lo_ops->loo_object_free(env, o);
	}

	if (waitqueue_active(&bkt->lsb_marche_funebre))
		wake_up_all(&bkt->lsb_marche_funebre);
}
/**
 * Free \a nr objects from the cold end of the site LRU list.
 */
int lu_site_purge(const struct lu_env *env, struct lu_site *s, int nr)
{
	struct lu_object_header *h;
	struct lu_object_header *temp;
	struct lu_site_bkt_data *bkt;
	cfs_hash_bd_t bd;
	cfs_hash_bd_t bd2;
	struct list_head dispose;
	int did_sth;
	unsigned int start;
	int count;
	int bnr;
	unsigned int i;

	if (OBD_FAIL_CHECK(OBD_FAIL_OBD_NO_LRU))
		RETURN(0);

	INIT_LIST_HEAD(&dispose);
	/*
	 * Under LRU list lock, scan LRU list and move unreferenced objects to
	 * the dispose list, removing them from LRU and hash table.
	 */
	start = s->ls_purge_start;
	bnr = (nr == ~0) ? -1 : nr / (int)CFS_HASH_NBKT(s->ls_obj_hash) + 1;
again:
	/*
	 * It doesn't make any sense to make purge threads parallel, that can
	 * only bring troubles to us. See LU-5331.
	 */
	mutex_lock(&s->ls_purge_mutex);
	did_sth = 0;
	cfs_hash_for_each_bucket(s->ls_obj_hash, &bd, i) {
		if (i < start)
			continue;
		count = bnr;
		cfs_hash_bd_lock(s->ls_obj_hash, &bd, 1);
		bkt = cfs_hash_bd_extra_get(s->ls_obj_hash, &bd);

		list_for_each_entry_safe(h, temp, &bkt->lsb_lru, loh_lru) {
			LASSERT(atomic_read(&h->loh_ref) == 0);

			cfs_hash_bd_get(s->ls_obj_hash, &h->loh_fid, &bd2);
			LASSERT(bd.bd_bucket == bd2.bd_bucket);

			cfs_hash_bd_del_locked(s->ls_obj_hash,
					       &bd2, &h->loh_hash);
			list_move(&h->loh_lru, &dispose);
			if (did_sth == 0)
				did_sth = 1;

			if (nr != ~0 && --nr == 0)
				break;

			if (count > 0 && --count == 0)
				break;
		}
		cfs_hash_bd_unlock(s->ls_obj_hash, &bd, 1);
		cond_resched();
		/*
		 * Free everything on the dispose list. This is safe against
		 * races due to the reasons described in lu_object_put().
		 */
		while (!list_empty(&dispose)) {
			h = container_of0(dispose.next,
					  struct lu_object_header, loh_lru);
			list_del_init(&h->loh_lru);
			lu_object_free(env, lu_object_top(h));
			lprocfs_counter_incr(s->ls_stats, LU_SS_LRU_PURGED);
		}

		if (nr == 0)
			break;
	}
	mutex_unlock(&s->ls_purge_mutex);

	if (nr != 0 && did_sth && start != 0) {
		start = 0; /* restart from the first bucket */
		goto again;
	}
	/* race on s->ls_purge_start, but nobody cares */
	s->ls_purge_start = i % CFS_HASH_NBKT(s->ls_obj_hash);

	return nr;
}
EXPORT_SYMBOL(lu_site_purge);
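/*
 * Illustrative sketch (not part of the build): callers either trim a fixed
 * number of objects, or pass ~0 to drain the whole LRU, as lu_stack_fini()
 * below does:
 *
 *	lu_site_purge(env, site, 128);	// shrink by at most 128 objects
 *	lu_site_purge(env, site, ~0);	// empty the site cache entirely
 */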
/*
 * Object printing.
 *
 * Code below has to jump through certain loops to output object description
 * into libcfs_debug_msg()-based log. The problem is that lu_object_print()
 * composes object description from strings that are parts of _lines_ of
 * output (i.e., strings that are not terminated by newline). This doesn't
 * fit very well into the libcfs_debug_msg() interface, which assumes that
 * each message supplied to it is a self-contained output line.
 *
 * To work around this, strings are collected in a temporary buffer
 * (implemented as a value of lu_cdebug_key key), until a terminating newline
 * character is detected.
 */

enum {
	/**
	 * Maximal line size.
	 *
	 * XXX overflow is not handled correctly.
	 */
	LU_CDEBUG_LINE = 512
};

struct lu_cdebug_data {
	/**
	 * Temporary buffer.
	 */
	char lck_area[LU_CDEBUG_LINE];
};

/* context key constructor/destructor: lu_global_key_init, lu_global_key_fini */
LU_KEY_INIT_FINI(lu_global, struct lu_cdebug_data);

/**
 * Key, holding temporary buffer. This key is registered very early by
 * lu_global_init().
 */
static struct lu_context_key lu_global_key = {
	.lct_tags = LCT_MD_THREAD | LCT_DT_THREAD |
		    LCT_MG_THREAD | LCT_CL_THREAD | LCT_LOCAL,
	.lct_init = lu_global_key_init,
	.lct_fini = lu_global_key_fini
};
/**
 * Printer function emitting messages through libcfs_debug_msg().
 */
int lu_cdebug_printer(const struct lu_env *env,
		      void *cookie, const char *format, ...)
{
	struct libcfs_debug_msg_data *msgdata = cookie;
	struct lu_cdebug_data *key;
	int used;
	int complete;
	va_list args;

	va_start(args, format);

	key = lu_context_key_get(&env->le_ctx, &lu_global_key);
	LASSERT(key != NULL);

	used = strlen(key->lck_area);
	complete = format[strlen(format) - 1] == '\n';
	/*
	 * Append new chunk to the buffer.
	 */
	vsnprintf(key->lck_area + used,
		  ARRAY_SIZE(key->lck_area) - used, format, args);
	if (complete) {
		if (cfs_cdebug_show(msgdata->msg_mask, msgdata->msg_subsys))
			libcfs_debug_msg(msgdata, "%s\n", key->lck_area);
		key->lck_area[0] = 0;
	}
	va_end(args);
	return 0;
}
EXPORT_SYMBOL(lu_cdebug_printer);
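/*
 * Illustrative sketch (not part of the build): because the printer only
 * flushes on a terminating newline, one output line may be emitted in
 * pieces and still reach the log as a single message:
 *
 *	lu_cdebug_printer(env, msgdata, "fid "DFID, PFID(fid));
 *	lu_cdebug_printer(env, msgdata, " flags %#lx\n", flags);
 *
 * Only the second call, whose format ends in '\n', pushes the accumulated
 * buffer to libcfs_debug_msg(). "msgdata", "fid" and "flags" are assumed
 * to be prepared by the caller.
 */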
/**
 * Print object header.
 */
void lu_object_header_print(const struct lu_env *env, void *cookie,
			    lu_printer_t printer,
			    const struct lu_object_header *hdr)
{
	(*printer)(env, cookie, "header@%p[%#lx, %d, "DFID"%s%s%s]",
		   hdr, hdr->loh_flags, atomic_read(&hdr->loh_ref),
		   PFID(&hdr->loh_fid),
		   hlist_unhashed(&hdr->loh_hash) ? "" : " hash",
		   list_empty((struct list_head *)&hdr->loh_lru) ?
		   "" : " lru",
		   hdr->loh_attr & LOHA_EXISTS ? " exist" : "");
}
EXPORT_SYMBOL(lu_object_header_print);
/**
 * Print human readable representation of the \a o to the \a printer.
 */
void lu_object_print(const struct lu_env *env, void *cookie,
		     lu_printer_t printer, const struct lu_object *o)
{
	static const char ruler[] = "........................................";
	struct lu_object_header *top;
	int depth = 4;

	top = o->lo_header;
	lu_object_header_print(env, cookie, printer, top);
	(*printer)(env, cookie, "{\n");

	list_for_each_entry(o, &top->loh_layers, lo_linkage) {
		/*
		 * print `.' \a depth times followed by type name and address
		 */
		(*printer)(env, cookie, "%*.*s%s@%p", depth, depth, ruler,
			   o->lo_dev->ld_type->ldt_name, o);

		if (o->lo_ops->loo_object_print != NULL)
			(*o->lo_ops->loo_object_print)(env, cookie, printer, o);

		(*printer)(env, cookie, "\n");
	}

	(*printer)(env, cookie, "} header@%p\n", top);
}
EXPORT_SYMBOL(lu_object_print);
/**
 * Check object consistency.
 */
int lu_object_invariant(const struct lu_object *o)
{
	struct lu_object_header *top;

	top = o->lo_header;
	list_for_each_entry(o, &top->loh_layers, lo_linkage) {
		if (o->lo_ops->loo_object_invariant != NULL &&
		    !o->lo_ops->loo_object_invariant(o))
			return 0;
	}
	return 1;
}
EXPORT_SYMBOL(lu_object_invariant);
static struct lu_object *htable_lookup(struct lu_site *s,
				       cfs_hash_bd_t *bd,
				       const struct lu_fid *f,
				       wait_queue_t *waiter,
				       __u64 *version)
{
	struct lu_site_bkt_data *bkt;
	struct lu_object_header *h;
	struct hlist_node *hnode;
	__u64 ver = cfs_hash_bd_version_get(bd);

	if (*version == ver)
		return ERR_PTR(-ENOENT);

	*version = ver;
	bkt = cfs_hash_bd_extra_get(s->ls_obj_hash, bd);
	/* cfs_hash_bd_peek_locked is a somehow "internal" function
	 * of cfs_hash, it doesn't add refcount on object. */
	hnode = cfs_hash_bd_peek_locked(s->ls_obj_hash, bd, (void *)f);
	if (hnode == NULL) {
		lprocfs_counter_incr(s->ls_stats, LU_SS_CACHE_MISS);
		return ERR_PTR(-ENOENT);
	}

	h = container_of0(hnode, struct lu_object_header, loh_hash);
	if (likely(!lu_object_is_dying(h))) {
		cfs_hash_get(s->ls_obj_hash, hnode);
		lprocfs_counter_incr(s->ls_stats, LU_SS_CACHE_HIT);
		list_del_init(&h->loh_lru);
		return lu_object_top(h);
	}

	/*
	 * Lookup found an object being destroyed; this object cannot be
	 * returned (to assure that references to dying objects are eventually
	 * drained), and moreover, lookup has to wait until the object is
	 * freed.
	 */
	if (likely(waiter != NULL)) {
		init_waitqueue_entry_current(waiter);
		add_wait_queue(&bkt->lsb_marche_funebre, waiter);
		set_current_state(TASK_UNINTERRUPTIBLE);
		lprocfs_counter_incr(s->ls_stats, LU_SS_CACHE_DEATH_RACE);
	}

	return ERR_PTR(-EAGAIN);
}
/**
 * Search cache for an object with the fid \a f. If such an object is found,
 * return it. Otherwise, create a new object, insert it into the cache and
 * return it. In any case, an additional reference is acquired on the
 * returned object.
 */
struct lu_object *lu_object_find(const struct lu_env *env,
				 struct lu_device *dev, const struct lu_fid *f,
				 const struct lu_object_conf *conf)
{
	return lu_object_find_at(env, dev->ld_site->ls_top_dev, f, conf);
}
EXPORT_SYMBOL(lu_object_find);
/**
 * Limit the lu_object cache to a maximum of lu_cache_nr objects. Because
 * the calculation for the number of objects to reclaim is not covered by
 * a lock, the maximum number of objects is capped by LU_CACHE_NR_MAX_ADJUST.
 * This ensures that many concurrent threads will not accidentally purge
 * the entire cache.
 */
static void lu_object_limit(const struct lu_env *env,
			    struct lu_device *dev)
{
	__u64 size, nr;

	if (lu_cache_nr == LU_CACHE_NR_UNLIMITED)
		return;

	size = cfs_hash_size_get(dev->ld_site->ls_obj_hash);
	nr = (__u64)lu_cache_nr;
	if (size > nr)
		lu_site_purge(env, dev->ld_site,
			      MIN(size - nr, LU_CACHE_NR_MAX_ADJUST));
}
static struct lu_object *lu_object_new(const struct lu_env *env,
				       struct lu_device *dev,
				       const struct lu_fid *f,
				       const struct lu_object_conf *conf)
{
	struct lu_object *o;
	cfs_hash_t *hs;
	cfs_hash_bd_t bd;
	struct lu_site_bkt_data *bkt;

	o = lu_object_alloc(env, dev, f, conf);
	if (unlikely(IS_ERR(o)))
		return o;

	hs = dev->ld_site->ls_obj_hash;
	cfs_hash_bd_get_and_lock(hs, (void *)f, &bd, 1);
	bkt = cfs_hash_bd_extra_get(hs, &bd);
	cfs_hash_bd_add_locked(hs, &bd, &o->lo_header->loh_hash);
	bkt->lsb_busy++;
	cfs_hash_bd_unlock(hs, &bd, 1);

	lu_object_limit(env, dev);

	return o;
}
/**
 * Core logic of lu_object_find*() functions.
 */
static struct lu_object *lu_object_find_try(const struct lu_env *env,
					    struct lu_device *dev,
					    const struct lu_fid *f,
					    const struct lu_object_conf *conf,
					    wait_queue_t *waiter)
{
	struct lu_object *o;
	struct lu_object *shadow;
	struct lu_site *s;
	cfs_hash_t *hs;
	cfs_hash_bd_t bd;
	__u64 version = 0;

	/*
	 * This uses standard index maintenance protocol:
	 *
	 *     - search index under lock, and return object if found;
	 *     - otherwise, unlock index, allocate new object;
	 *     - lock index and search again;
	 *     - if nothing is found (usual case), insert newly created
	 *       object into index;
	 *     - otherwise (race: other thread inserted object), free
	 *       object just allocated.
	 *     - unlock index;
	 *     - return object.
	 *
	 * For the "LOC_F_NEW" case, we are sure the object is newly
	 * established. It is unnecessary to perform
	 * lookup-alloc-lookup-insert; instead, just alloc and insert
	 * directly.
	 *
	 * If a dying object is found during index search, add @waiter to the
	 * site wait-queue and return ERR_PTR(-EAGAIN).
	 */
	if (conf != NULL && conf->loc_flags & LOC_F_NEW)
		return lu_object_new(env, dev, f, conf);

	s  = dev->ld_site;
	hs = s->ls_obj_hash;
	cfs_hash_bd_get_and_lock(hs, (void *)f, &bd, 1);
	o = htable_lookup(s, &bd, f, waiter, &version);
	cfs_hash_bd_unlock(hs, &bd, 1);
	if (!IS_ERR(o) || PTR_ERR(o) != -ENOENT)
		return o;

	/*
	 * Allocate new object. This may result in rather complicated
	 * operations, including fld queries, inode loading, etc.
	 */
	o = lu_object_alloc(env, dev, f, conf);
	if (unlikely(IS_ERR(o)))
		return o;

	LASSERT(lu_fid_eq(lu_object_fid(o), f));

	cfs_hash_bd_lock(hs, &bd, 1);

	shadow = htable_lookup(s, &bd, f, waiter, &version);
	if (likely(IS_ERR(shadow) && PTR_ERR(shadow) == -ENOENT)) {
		struct lu_site_bkt_data *bkt;

		bkt = cfs_hash_bd_extra_get(hs, &bd);
		cfs_hash_bd_add_locked(hs, &bd, &o->lo_header->loh_hash);
		bkt->lsb_busy++;
		cfs_hash_bd_unlock(hs, &bd, 1);

		lu_object_limit(env, dev);

		return o;
	}

	lprocfs_counter_incr(s->ls_stats, LU_SS_CACHE_RACE);
	cfs_hash_bd_unlock(hs, &bd, 1);
	lu_object_free(env, o);
	return shadow;
}
/**
 * Much like lu_object_find(), but top level device of object is specifically
 * \a dev rather than top level device of the site. This interface allows
 * objects of different "stacking" to be created within the same site.
 */
struct lu_object *lu_object_find_at(const struct lu_env *env,
				    struct lu_device *dev,
				    const struct lu_fid *f,
				    const struct lu_object_conf *conf)
{
	struct lu_site_bkt_data *bkt;
	struct lu_object *obj;
	wait_queue_t wait;

	if (conf != NULL && conf->loc_flags & LOC_F_NOWAIT)
		return lu_object_find_try(env, dev, f, conf, NULL);

	while (1) {
		obj = lu_object_find_try(env, dev, f, conf, &wait);
		if (obj != ERR_PTR(-EAGAIN))
			return obj;
		/*
		 * lu_object_find_try() already added waiter into the
		 * wait queue.
		 */
		waitq_wait(&wait, TASK_UNINTERRUPTIBLE);
		bkt = lu_site_bkt_from_fid(dev->ld_site, (void *)f);
		remove_wait_queue(&bkt->lsb_marche_funebre, &wait);
	}
}
EXPORT_SYMBOL(lu_object_find_at);
/**
 * Find object with given fid, and return its slice belonging to given device.
 */
struct lu_object *lu_object_find_slice(const struct lu_env *env,
				       struct lu_device *dev,
				       const struct lu_fid *f,
				       const struct lu_object_conf *conf)
{
	struct lu_object *top;
	struct lu_object *obj;

	top = lu_object_find(env, dev, f, conf);
	if (IS_ERR(top))
		return top;

	obj = lu_object_locate(top->lo_header, dev->ld_type);
	if (unlikely(obj == NULL)) {
		lu_object_put(env, top);
		obj = ERR_PTR(-ENOENT);
	}

	return obj;
}
EXPORT_SYMBOL(lu_object_find_slice);
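/*
 * Illustrative sketch (not part of the build): a layer that wants its own
 * slice rather than the top of the stack would do something like the
 * following, where "env", "dev" and "fid" are the caller's. The returned
 * slice pins the whole compound object until the reference is dropped.
 *
 *	struct lu_object *slice;
 *
 *	slice = lu_object_find_slice(env, dev, fid, NULL);
 *	if (!IS_ERR(slice)) {
 *		... operate on the slice ...
 *		lu_object_put(env, lu_object_top(slice->lo_header));
 *	}
 */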
/**
 * Global list of all device types.
 */
static struct list_head lu_device_types;

int lu_device_type_init(struct lu_device_type *ldt)
{
	int result = 0;

	atomic_set(&ldt->ldt_device_nr, 0);
	INIT_LIST_HEAD(&ldt->ldt_linkage);
	if (ldt->ldt_ops->ldto_init)
		result = ldt->ldt_ops->ldto_init(ldt);

	if (result == 0) {
		spin_lock(&obd_types_lock);
		list_add(&ldt->ldt_linkage, &lu_device_types);
		spin_unlock(&obd_types_lock);
	}

	return result;
}
EXPORT_SYMBOL(lu_device_type_init);

void lu_device_type_fini(struct lu_device_type *ldt)
{
	spin_lock(&obd_types_lock);
	list_del_init(&ldt->ldt_linkage);
	spin_unlock(&obd_types_lock);
	if (ldt->ldt_ops->ldto_fini)
		ldt->ldt_ops->ldto_fini(ldt);
}
EXPORT_SYMBOL(lu_device_type_fini);
/**
 * Global list of all sites on this node
 */
static struct list_head lu_sites;
static DEFINE_MUTEX(lu_sites_guard);

/**
 * Global environment used by site shrinker.
 */
static struct lu_env lu_shrink_env;

struct lu_site_print_arg {
	struct lu_env *lsp_env;
	void          *lsp_cookie;
	lu_printer_t   lsp_printer;
};

static int
lu_site_obj_print(cfs_hash_t *hs, cfs_hash_bd_t *bd,
		  struct hlist_node *hnode, void *data)
{
	struct lu_site_print_arg *arg = (struct lu_site_print_arg *)data;
	struct lu_object_header *h;

	h = hlist_entry(hnode, struct lu_object_header, loh_hash);
	if (!list_empty(&h->loh_layers)) {
		const struct lu_object *o;

		o = lu_object_top(h);
		lu_object_print(arg->lsp_env, arg->lsp_cookie,
				arg->lsp_printer, o);
	} else {
		lu_object_header_print(arg->lsp_env, arg->lsp_cookie,
				       arg->lsp_printer, h);
	}
	return 0;
}
/**
 * Print all objects in \a s.
 */
void lu_site_print(const struct lu_env *env, struct lu_site *s, void *cookie,
		   lu_printer_t printer)
{
	struct lu_site_print_arg arg = {
		.lsp_env     = (struct lu_env *)env,
		.lsp_cookie  = cookie,
		.lsp_printer = printer,
	};

	cfs_hash_for_each(s->ls_obj_hash, lu_site_obj_print, &arg);
}
EXPORT_SYMBOL(lu_site_print);
/**
 * Return desired hash table order.
 */
static unsigned long lu_htable_order(struct lu_device *top)
{
	unsigned long cache_size;
	unsigned long bits;

	/*
	 * For ZFS based OSDs the cache should be disabled by default. This
	 * allows the ZFS ARC maximum flexibility in determining what buffers
	 * to cache. If Lustre has objects or buffers which it wants to ensure
	 * always stay cached, it must maintain a hold on them.
	 */
	if (strcmp(top->ld_type->ldt_name, LUSTRE_OSD_ZFS_NAME) == 0) {
		lu_cache_percent = 1;
		lu_cache_nr = LU_CACHE_NR_ZFS_LIMIT;
		return LU_SITE_BITS_MIN;
	}

	/*
	 * Calculate hash table size, assuming that we want reasonable
	 * performance when 20% of total memory is occupied by cache of
	 * lu_objects.
	 *
	 * Size of lu_object is (arbitrary) taken as 1K (together with inode).
	 */
	cache_size = totalram_pages;

#if BITS_PER_LONG == 32
	/* limit hashtable size for lowmem systems to low RAM */
	if (cache_size > 1 << (30 - PAGE_CACHE_SHIFT))
		cache_size = 1 << ((30 - PAGE_CACHE_SHIFT) * 3 / 4);
#endif

	/* clear off unreasonable cache setting. */
	if (lu_cache_percent == 0 || lu_cache_percent > LU_CACHE_PERCENT_MAX) {
		CWARN("obdclass: invalid lu_cache_percent: %u, it must be in"
		      " the range of (0, %u]. Will use default value: %u.\n",
		      lu_cache_percent, LU_CACHE_PERCENT_MAX,
		      LU_CACHE_PERCENT_DEFAULT);

		lu_cache_percent = LU_CACHE_PERCENT_DEFAULT;
	}
	cache_size = cache_size / 100 * lu_cache_percent *
		(PAGE_CACHE_SIZE / 1024);

	for (bits = 1; (1 << bits) < cache_size; ++bits) {
		;
	}
	return bits;
}
static unsigned lu_obj_hop_hash(cfs_hash_t *hs,
				const void *key, unsigned mask)
{
	struct lu_fid *fid = (struct lu_fid *)key;
	__u32 hash;

	hash = fid_flatten32(fid);
	hash += (hash >> 4) + (hash << 12); /* mixing oid and seq */
	hash = hash_long(hash, hs->hs_bkt_bits);

	/* give me another random factor */
	hash -= hash_long((unsigned long)hs, fid_oid(fid) % 11 + 3);

	hash <<= hs->hs_cur_bits - hs->hs_bkt_bits;
	hash |= (fid_seq(fid) + fid_oid(fid)) & (CFS_HASH_NBKT(hs) - 1);

	return hash & mask;
}
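/*
 * Note on the layout built above: the low hs_cur_bits - hs_bkt_bits of the
 * result, taken from fid_seq() + fid_oid(), pick the bucket (hence the
 * CFS_HASH_NBKT(hs) - 1 mask), while the mixed hash_long() value shifted
 * into the upper bits picks the hlist inside that bucket.
 */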
static void *lu_obj_hop_object(struct hlist_node *hnode)
{
	return hlist_entry(hnode, struct lu_object_header, loh_hash);
}

static void *lu_obj_hop_key(struct hlist_node *hnode)
{
	struct lu_object_header *h;

	h = hlist_entry(hnode, struct lu_object_header, loh_hash);
	return &h->loh_fid;
}

static int lu_obj_hop_keycmp(const void *key, struct hlist_node *hnode)
{
	struct lu_object_header *h;

	h = hlist_entry(hnode, struct lu_object_header, loh_hash);
	return lu_fid_eq(&h->loh_fid, (struct lu_fid *)key);
}

static void lu_obj_hop_get(cfs_hash_t *hs, struct hlist_node *hnode)
{
	struct lu_object_header *h;

	h = hlist_entry(hnode, struct lu_object_header, loh_hash);
	if (atomic_add_return(1, &h->loh_ref) == 1) {
		struct lu_site_bkt_data *bkt;
		cfs_hash_bd_t bd;

		cfs_hash_bd_get(hs, &h->loh_fid, &bd);
		bkt = cfs_hash_bd_extra_get(hs, &bd);
		bkt->lsb_busy++;
	}
}

static void lu_obj_hop_put_locked(cfs_hash_t *hs, struct hlist_node *hnode)
{
	LBUG(); /* we should never call it */
}

static cfs_hash_ops_t lu_site_hash_ops = {
	.hs_hash	= lu_obj_hop_hash,
	.hs_key		= lu_obj_hop_key,
	.hs_keycmp	= lu_obj_hop_keycmp,
	.hs_object	= lu_obj_hop_object,
	.hs_get		= lu_obj_hop_get,
	.hs_put_locked	= lu_obj_hop_put_locked,
};
void lu_dev_add_linkage(struct lu_site *s, struct lu_device *d)
{
	spin_lock(&s->ls_ld_lock);
	if (list_empty(&d->ld_linkage))
		list_add(&d->ld_linkage, &s->ls_ld_linkage);
	spin_unlock(&s->ls_ld_lock);
}
EXPORT_SYMBOL(lu_dev_add_linkage);

void lu_dev_del_linkage(struct lu_site *s, struct lu_device *d)
{
	spin_lock(&s->ls_ld_lock);
	list_del_init(&d->ld_linkage);
	spin_unlock(&s->ls_ld_lock);
}
EXPORT_SYMBOL(lu_dev_del_linkage);
/**
 * Initialize site \a s, with \a top as the top level device.
 */
int lu_site_init(struct lu_site *s, struct lu_device *top)
{
	struct lu_site_bkt_data *bkt;
	cfs_hash_bd_t bd;
	char name[16];
	unsigned long bits;
	unsigned int i;
	ENTRY;

	memset(s, 0, sizeof *s);
	mutex_init(&s->ls_purge_mutex);
	bits = lu_htable_order(top);
	snprintf(name, sizeof(name), "lu_site_%s", top->ld_type->ldt_name);
	for (bits = clamp_t(typeof(bits), bits,
			    LU_SITE_BITS_MIN, LU_SITE_BITS_MAX);
	     bits >= LU_SITE_BITS_MIN; bits--) {
		s->ls_obj_hash = cfs_hash_create(name, bits, bits,
						 bits - LU_SITE_BKT_BITS,
						 sizeof(*bkt), 0, 0,
						 &lu_site_hash_ops,
						 CFS_HASH_SPIN_BKTLOCK |
						 CFS_HASH_NO_ITEMREF |
						 CFS_HASH_DEPTH |
						 CFS_HASH_ASSERT_EMPTY |
						 CFS_HASH_COUNTER);
		if (s->ls_obj_hash != NULL)
			break;
	}

	if (s->ls_obj_hash == NULL) {
		CERROR("failed to create lu_site hash with bits: %lu\n", bits);
		return -ENOMEM;
	}

	cfs_hash_for_each_bucket(s->ls_obj_hash, &bd, i) {
		bkt = cfs_hash_bd_extra_get(s->ls_obj_hash, &bd);
		INIT_LIST_HEAD(&bkt->lsb_lru);
		init_waitqueue_head(&bkt->lsb_marche_funebre);
	}

	s->ls_stats = lprocfs_alloc_stats(LU_SS_LAST_STAT, 0);
	if (s->ls_stats == NULL) {
		cfs_hash_putref(s->ls_obj_hash);
		s->ls_obj_hash = NULL;
		return -ENOMEM;
	}

	lprocfs_counter_init(s->ls_stats, LU_SS_CREATED,
			     0, "created", "created");
	lprocfs_counter_init(s->ls_stats, LU_SS_CACHE_HIT,
			     0, "cache_hit", "cache_hit");
	lprocfs_counter_init(s->ls_stats, LU_SS_CACHE_MISS,
			     0, "cache_miss", "cache_miss");
	lprocfs_counter_init(s->ls_stats, LU_SS_CACHE_RACE,
			     0, "cache_race", "cache_race");
	lprocfs_counter_init(s->ls_stats, LU_SS_CACHE_DEATH_RACE,
			     0, "cache_death_race", "cache_death_race");
	lprocfs_counter_init(s->ls_stats, LU_SS_LRU_PURGED,
			     0, "lru_purged", "lru_purged");

	INIT_LIST_HEAD(&s->ls_linkage);
	s->ls_top_dev = top;
	top->ld_site = s;
	lu_device_get(top);
	lu_ref_add(&top->ld_reference, "site-top", s);

	INIT_LIST_HEAD(&s->ls_ld_linkage);
	spin_lock_init(&s->ls_ld_lock);

	lu_dev_add_linkage(s, top);

	RETURN(0);
}
EXPORT_SYMBOL(lu_site_init);
/**
 * Finalize \a s and release its resources.
 */
void lu_site_fini(struct lu_site *s)
{
	mutex_lock(&lu_sites_guard);
	list_del_init(&s->ls_linkage);
	mutex_unlock(&lu_sites_guard);

	if (s->ls_obj_hash != NULL) {
		cfs_hash_putref(s->ls_obj_hash);
		s->ls_obj_hash = NULL;
	}

	if (s->ls_top_dev != NULL) {
		s->ls_top_dev->ld_site = NULL;
		lu_ref_del(&s->ls_top_dev->ld_reference, "site-top", s);
		lu_device_put(s->ls_top_dev);
		s->ls_top_dev = NULL;
	}

	if (s->ls_stats != NULL)
		lprocfs_free_stats(&s->ls_stats);
}
EXPORT_SYMBOL(lu_site_fini);
/**
 * Called when initialization of stack for this site is completed.
 */
int lu_site_init_finish(struct lu_site *s)
{
	int result;

	mutex_lock(&lu_sites_guard);
	result = lu_context_refill(&lu_shrink_env.le_ctx);
	if (result == 0)
		list_add(&s->ls_linkage, &lu_sites);
	mutex_unlock(&lu_sites_guard);
	return result;
}
EXPORT_SYMBOL(lu_site_init_finish);
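/*
 * Illustrative sketch (not part of the build): a server stack typically
 * brings a site up and down in this order, with "site" and "top_dev"
 * allocated and prepared by the caller:
 *
 *	rc = lu_site_init(site, top_dev);
 *	if (rc == 0) {
 *		rc = lu_site_init_finish(site);
 *		if (rc != 0)
 *			lu_site_fini(site);
 *	}
 *	...
 *	lu_site_fini(site);
 */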
/**
 * Acquire additional reference on device \a d
 */
void lu_device_get(struct lu_device *d)
{
	atomic_inc(&d->ld_ref);
}
EXPORT_SYMBOL(lu_device_get);

/**
 * Release reference on device \a d.
 */
void lu_device_put(struct lu_device *d)
{
	LASSERT(atomic_read(&d->ld_ref) > 0);
	atomic_dec(&d->ld_ref);
}
EXPORT_SYMBOL(lu_device_put);

/**
 * Initialize device \a d of type \a t.
 */
int lu_device_init(struct lu_device *d, struct lu_device_type *t)
{
	if (atomic_inc_return(&t->ldt_device_nr) == 1 &&
	    t->ldt_ops->ldto_start != NULL)
		t->ldt_ops->ldto_start(t);

	memset(d, 0, sizeof *d);
	d->ld_type = t;
	lu_ref_init(&d->ld_reference);
	INIT_LIST_HEAD(&d->ld_linkage);

	return 0;
}
EXPORT_SYMBOL(lu_device_init);

/**
 * Finalize device \a d.
 */
void lu_device_fini(struct lu_device *d)
{
	struct lu_device_type *t = d->ld_type;

	if (d->ld_obd != NULL) {
		d->ld_obd->obd_lu_dev = NULL;
		d->ld_obd = NULL;
	}

	lu_ref_fini(&d->ld_reference);
	LASSERTF(atomic_read(&d->ld_ref) == 0,
		 "Refcount is %u\n", atomic_read(&d->ld_ref));
	LASSERT(atomic_read(&t->ldt_device_nr) > 0);

	if (atomic_dec_and_test(&t->ldt_device_nr) &&
	    t->ldt_ops->ldto_stop != NULL)
		t->ldt_ops->ldto_stop(t);
}
EXPORT_SYMBOL(lu_device_fini);
/**
 * Initialize object \a o that is part of compound object \a h and was created
 * by device \a d.
 */
int lu_object_init(struct lu_object *o, struct lu_object_header *h,
		   struct lu_device *d)
{
	memset(o, 0, sizeof(*o));
	o->lo_header = h;
	o->lo_dev = d;
	lu_device_get(d);
	lu_ref_add_at(&d->ld_reference, &o->lo_dev_ref, "lu_object", o);
	INIT_LIST_HEAD(&o->lo_linkage);

	return 0;
}
EXPORT_SYMBOL(lu_object_init);

/**
 * Finalize object and release its resources.
 */
void lu_object_fini(struct lu_object *o)
{
	struct lu_device *dev = o->lo_dev;

	LASSERT(list_empty(&o->lo_linkage));

	if (dev != NULL) {
		lu_ref_del_at(&dev->ld_reference, &o->lo_dev_ref,
			      "lu_object", o);
		lu_device_put(dev);
		o->lo_dev = NULL;
	}
}
EXPORT_SYMBOL(lu_object_fini);
/**
 * Add object \a o as first layer of compound object \a h
 *
 * This is typically called by the ->ldo_object_alloc() method of top-level
 * device.
 */
void lu_object_add_top(struct lu_object_header *h, struct lu_object *o)
{
	list_move(&o->lo_linkage, &h->loh_layers);
}
EXPORT_SYMBOL(lu_object_add_top);

/**
 * Add object \a o as a layer of compound object, going after \a before.
 *
 * This is typically called by the ->ldo_object_alloc() method of \a
 * before->lo_dev.
 */
void lu_object_add(struct lu_object *before, struct lu_object *o)
{
	list_move(&o->lo_linkage, &before->lo_linkage);
}
EXPORT_SYMBOL(lu_object_add);
/**
 * Initialize compound object.
 */
int lu_object_header_init(struct lu_object_header *h)
{
	memset(h, 0, sizeof *h);
	atomic_set(&h->loh_ref, 1);
	INIT_HLIST_NODE(&h->loh_hash);
	INIT_LIST_HEAD(&h->loh_lru);
	INIT_LIST_HEAD(&h->loh_layers);
	lu_ref_init(&h->loh_reference);
	return 0;
}
EXPORT_SYMBOL(lu_object_header_init);

/**
 * Finalize compound object.
 */
void lu_object_header_fini(struct lu_object_header *h)
{
	LASSERT(list_empty(&h->loh_layers));
	LASSERT(list_empty(&h->loh_lru));
	LASSERT(hlist_unhashed(&h->loh_hash));
	lu_ref_fini(&h->loh_reference);
}
EXPORT_SYMBOL(lu_object_header_fini);
/**
 * Given a compound object, find its slice, corresponding to the device type
 * \a dtype.
 */
struct lu_object *lu_object_locate(struct lu_object_header *h,
				   const struct lu_device_type *dtype)
{
	struct lu_object *o;

	list_for_each_entry(o, &h->loh_layers, lo_linkage) {
		if (o->lo_dev->ld_type == dtype)
			return o;
	}
	return NULL;
}
EXPORT_SYMBOL(lu_object_locate);
/**
 * Finalize and free devices in the device stack.
 *
 * Finalize device stack by purging object cache, and calling
 * lu_device_type_operations::ldto_device_fini() and
 * lu_device_type_operations::ldto_device_free() on all devices in the stack.
 */
void lu_stack_fini(const struct lu_env *env, struct lu_device *top)
{
	struct lu_site *site = top->ld_site;
	struct lu_device *scan;
	struct lu_device *next;

	lu_site_purge(env, site, ~0);
	for (scan = top; scan != NULL; scan = next) {
		next = scan->ld_type->ldt_ops->ldto_device_fini(env, scan);
		lu_ref_del(&scan->ld_reference, "lu-stack", &lu_site_init);
		lu_device_put(scan);
	}

	/* purge again. */
	lu_site_purge(env, site, ~0);

	for (scan = top; scan != NULL; scan = next) {
		const struct lu_device_type *ldt = scan->ld_type;
		struct obd_type *type;

		next = ldt->ldt_ops->ldto_device_free(env, scan);
		type = ldt->ldt_obd_type;
		if (type != NULL) {
			type->typ_refcnt--;
			class_put_type(type);
		}
	}
}
EXPORT_SYMBOL(lu_stack_fini);
enum {
	/**
	 * Maximal number of tld slots.
	 */
	LU_CONTEXT_KEY_NR = 40
};

static struct lu_context_key *lu_keys[LU_CONTEXT_KEY_NR] = { NULL, };

static DEFINE_SPINLOCK(lu_keys_guard);

/**
 * Global counter incremented whenever key is registered, unregistered,
 * revived or quiesced. This is used to avoid unnecessary calls to
 * lu_context_refill(). No locking is provided, as initialization and shutdown
 * are supposed to be externally serialized.
 */
static unsigned key_set_version = 0;
/**
 * Register new key.
 */
int lu_context_key_register(struct lu_context_key *key)
{
	int result;
	unsigned int i;

	LASSERT(key->lct_init != NULL);
	LASSERT(key->lct_fini != NULL);
	LASSERT(key->lct_tags != 0);
	LASSERT(key->lct_owner != NULL);

	result = -ENFILE;
	spin_lock(&lu_keys_guard);
	for (i = 0; i < ARRAY_SIZE(lu_keys); ++i) {
		if (lu_keys[i] == NULL) {
			key->lct_index = i;
			atomic_set(&key->lct_used, 1);
			lu_keys[i] = key;
			lu_ref_init(&key->lct_reference);
			result = 0;
			++key_set_version;
			break;
		}
	}
	spin_unlock(&lu_keys_guard);
	return result;
}
EXPORT_SYMBOL(lu_context_key_register);
static void key_fini(struct lu_context *ctx, int index)
{
	if (ctx->lc_value != NULL && ctx->lc_value[index] != NULL) {
		struct lu_context_key *key;

		key = lu_keys[index];
		LASSERT(key != NULL);
		LASSERT(key->lct_fini != NULL);
		LASSERT(atomic_read(&key->lct_used) > 1);

		key->lct_fini(ctx, key, ctx->lc_value[index]);
		lu_ref_del(&key->lct_reference, "ctx", ctx);
		atomic_dec(&key->lct_used);

		LASSERT(key->lct_owner != NULL);
		if ((ctx->lc_tags & LCT_NOREF) == 0) {
			LINVRNT(module_refcount(key->lct_owner) > 0);
			module_put(key->lct_owner);
		}
		ctx->lc_value[index] = NULL;
	}
}
/**
 * Deregister key.
 */
void lu_context_key_degister(struct lu_context_key *key)
{
	LASSERT(atomic_read(&key->lct_used) >= 1);
	LINVRNT(0 <= key->lct_index && key->lct_index < ARRAY_SIZE(lu_keys));

	lu_context_key_quiesce(key);

	++key_set_version;
	spin_lock(&lu_keys_guard);
	key_fini(&lu_shrink_env.le_ctx, key->lct_index);
	if (lu_keys[key->lct_index]) {
		lu_keys[key->lct_index] = NULL;
		lu_ref_fini(&key->lct_reference);
	}
	spin_unlock(&lu_keys_guard);

	LASSERTF(atomic_read(&key->lct_used) == 1,
		 "key has instances: %d\n",
		 atomic_read(&key->lct_used));
}
EXPORT_SYMBOL(lu_context_key_degister);
/**
 * Register a number of keys. This has to be called after all keys have been
 * initialized by a call to LU_CONTEXT_KEY_INIT().
 */
int lu_context_key_register_many(struct lu_context_key *k, ...)
{
	struct lu_context_key *key = k;
	va_list args;
	int result;

	va_start(args, k);
	do {
		result = lu_context_key_register(key);
		if (result)
			break;
		key = va_arg(args, struct lu_context_key *);
	} while (key != NULL);
	va_end(args);

	if (result != 0) {
		va_start(args, k);
		while (k != key) {
			lu_context_key_degister(k);
			k = va_arg(args, struct lu_context_key *);
		}
		va_end(args);
	}

	return result;
}
EXPORT_SYMBOL(lu_context_key_register_many);
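/*
 * Illustrative sketch (not part of the build): a module usually declares its
 * keys with the LU_KEY_INIT_FINI()/LU_CONTEXT_KEY_INIT() helpers (see
 * lu_global_key above) and then registers them in one NULL-terminated call:
 *
 *	rc = lu_context_key_register_many(&foo_thread_key,
 *					  &foo_session_key, NULL);
 *
 * "foo_thread_key" and "foo_session_key" are hypothetical keys; on failure
 * the function deregisters the keys it already registered.
 */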
/**
 * De-register a number of keys. This is a dual to
 * lu_context_key_register_many().
 */
void lu_context_key_degister_many(struct lu_context_key *k, ...)
{
	va_list args;

	va_start(args, k);
	do {
		lu_context_key_degister(k);
		k = va_arg(args, struct lu_context_key *);
	} while (k != NULL);
	va_end(args);
}
EXPORT_SYMBOL(lu_context_key_degister_many);

/**
 * Revive a number of keys.
 */
void lu_context_key_revive_many(struct lu_context_key *k, ...)
{
	va_list args;

	va_start(args, k);
	do {
		lu_context_key_revive(k);
		k = va_arg(args, struct lu_context_key *);
	} while (k != NULL);
	va_end(args);
}
EXPORT_SYMBOL(lu_context_key_revive_many);

/**
 * Quiesce a number of keys.
 */
void lu_context_key_quiesce_many(struct lu_context_key *k, ...)
{
	va_list args;

	va_start(args, k);
	do {
		lu_context_key_quiesce(k);
		k = va_arg(args, struct lu_context_key *);
	} while (k != NULL);
	va_end(args);
}
EXPORT_SYMBOL(lu_context_key_quiesce_many);
/**
 * Return value associated with key \a key in context \a ctx.
 */
void *lu_context_key_get(const struct lu_context *ctx,
			 const struct lu_context_key *key)
{
	LINVRNT(ctx->lc_state == LCS_ENTERED);
	LINVRNT(0 <= key->lct_index && key->lct_index < ARRAY_SIZE(lu_keys));
	LASSERT(lu_keys[key->lct_index] == key);
	return ctx->lc_value[key->lct_index];
}
EXPORT_SYMBOL(lu_context_key_get);
/**
 * List of remembered contexts. XXX document me.
 */
static struct list_head lu_context_remembered;
/**
 * Destroy \a key in all remembered contexts. This is used to destroy key
 * values in "shared" contexts (like service threads), when a module owning
 * the key is about to be unloaded.
 */
void lu_context_key_quiesce(struct lu_context_key *key)
{
	struct lu_context *ctx;
	extern unsigned cl_env_cache_purge(unsigned nr);

	if (!(key->lct_tags & LCT_QUIESCENT)) {
		/*
		 * XXX layering violation.
		 */
		cl_env_cache_purge(~0);
		key->lct_tags |= LCT_QUIESCENT;
		/*
		 * XXX memory barrier has to go here.
		 */
		spin_lock(&lu_keys_guard);
		list_for_each_entry(ctx, &lu_context_remembered,
				    lc_remember)
			key_fini(ctx, key->lct_index);
		spin_unlock(&lu_keys_guard);
		++key_set_version;
	}
}
EXPORT_SYMBOL(lu_context_key_quiesce);

void lu_context_key_revive(struct lu_context_key *key)
{
	key->lct_tags &= ~LCT_QUIESCENT;
	++key_set_version;
}
EXPORT_SYMBOL(lu_context_key_revive);
static void keys_fini(struct lu_context *ctx)
{
	unsigned int i;

	if (ctx->lc_value == NULL)
		return;

	for (i = 0; i < ARRAY_SIZE(lu_keys); ++i)
		key_fini(ctx, i);

	OBD_FREE(ctx->lc_value, ARRAY_SIZE(lu_keys) * sizeof ctx->lc_value[0]);
	ctx->lc_value = NULL;
}

static int keys_fill(struct lu_context *ctx)
{
	unsigned int i;

	LINVRNT(ctx->lc_value != NULL);
	for (i = 0; i < ARRAY_SIZE(lu_keys); ++i) {
		struct lu_context_key *key;

		key = lu_keys[i];
		if (ctx->lc_value[i] == NULL && key != NULL &&
		    (key->lct_tags & ctx->lc_tags) &&
		    /*
		     * Don't create values for a LCT_QUIESCENT key, as this
		     * will pin module owning a key.
		     */
		    !(key->lct_tags & LCT_QUIESCENT)) {
			void *value;

			LINVRNT(key->lct_init != NULL);
			LINVRNT(key->lct_index == i);

			value = key->lct_init(ctx, key);
			if (unlikely(IS_ERR(value)))
				return PTR_ERR(value);

			LASSERT(key->lct_owner != NULL);
			if (!(ctx->lc_tags & LCT_NOREF))
				try_module_get(key->lct_owner);
			lu_ref_add_atomic(&key->lct_reference, "ctx", ctx);
			atomic_inc(&key->lct_used);
			/*
			 * This is the only place in the code, where an
			 * element of ctx->lc_value[] array is set to non-NULL
			 * value.
			 */
			ctx->lc_value[i] = value;
			if (key->lct_exit != NULL)
				ctx->lc_tags |= LCT_HAS_EXIT;
		}
		ctx->lc_version = key_set_version;
	}
	return 0;
}

static int keys_init(struct lu_context *ctx)
{
	OBD_ALLOC(ctx->lc_value, ARRAY_SIZE(lu_keys) * sizeof ctx->lc_value[0]);
	if (likely(ctx->lc_value != NULL))
		return keys_fill(ctx);

	return -ENOMEM;
}
/**
 * Initialize context data-structure. Create values for all keys.
 */
int lu_context_init(struct lu_context *ctx, __u32 tags)
{
	int rc;

	memset(ctx, 0, sizeof *ctx);
	ctx->lc_state = LCS_INITIALIZED;
	ctx->lc_tags = tags;
	if (tags & LCT_REMEMBER) {
		spin_lock(&lu_keys_guard);
		list_add(&ctx->lc_remember, &lu_context_remembered);
		spin_unlock(&lu_keys_guard);
	} else {
		INIT_LIST_HEAD(&ctx->lc_remember);
	}

	rc = keys_init(ctx);
	if (rc != 0)
		lu_context_fini(ctx);

	return rc;
}
EXPORT_SYMBOL(lu_context_init);
/**
 * Finalize context data-structure. Destroy key values.
 */
void lu_context_fini(struct lu_context *ctx)
{
	LINVRNT(ctx->lc_state == LCS_INITIALIZED || ctx->lc_state == LCS_LEFT);
	ctx->lc_state = LCS_FINALIZED;

	if ((ctx->lc_tags & LCT_REMEMBER) == 0) {
		LASSERT(list_empty(&ctx->lc_remember));
		keys_fini(ctx);

	} else { /* could race with key degister */
		spin_lock(&lu_keys_guard);
		keys_fini(ctx);
		list_del_init(&ctx->lc_remember);
		spin_unlock(&lu_keys_guard);
	}
}
EXPORT_SYMBOL(lu_context_fini);
/**
 * Called before entering context.
 */
void lu_context_enter(struct lu_context *ctx)
{
	LINVRNT(ctx->lc_state == LCS_INITIALIZED || ctx->lc_state == LCS_LEFT);
	ctx->lc_state = LCS_ENTERED;
}
EXPORT_SYMBOL(lu_context_enter);

/**
 * Called after exiting from \a ctx
 */
void lu_context_exit(struct lu_context *ctx)
{
	unsigned int i;

	LINVRNT(ctx->lc_state == LCS_ENTERED);
	ctx->lc_state = LCS_LEFT;
	if (ctx->lc_tags & LCT_HAS_EXIT && ctx->lc_value != NULL) {
		for (i = 0; i < ARRAY_SIZE(lu_keys); ++i) {
			if (ctx->lc_value[i] != NULL) {
				struct lu_context_key *key;

				key = lu_keys[i];
				LASSERT(key != NULL);
				if (key->lct_exit != NULL)
					key->lct_exit(ctx,
						      key, ctx->lc_value[i]);
			}
		}
	}
}
EXPORT_SYMBOL(lu_context_exit);
/**
 * Allocate for context all missing keys that were registered after context
 * creation. key_set_version is only changed in rare cases when modules
 * are loaded or removed.
 */
int lu_context_refill(struct lu_context *ctx)
{
	return likely(ctx->lc_version == key_set_version) ? 0 : keys_fill(ctx);
}
EXPORT_SYMBOL(lu_context_refill);
/**
 * lu_ctx_tags/lu_ses_tags will be updated if there are new types of
 * obd being added. Currently, this is only used on the client side,
 * specifically for the echo device client. For other stacks (like ptlrpc
 * threads), contexts are predefined when the lu_device types are
 * registered, during the module probe phase.
 */
__u32 lu_context_tags_default = 0;
__u32 lu_session_tags_default = 0;

void lu_context_tags_update(__u32 tags)
{
	spin_lock(&lu_keys_guard);
	lu_context_tags_default |= tags;
	key_set_version++;
	spin_unlock(&lu_keys_guard);
}
EXPORT_SYMBOL(lu_context_tags_update);

void lu_context_tags_clear(__u32 tags)
{
	spin_lock(&lu_keys_guard);
	lu_context_tags_default &= ~tags;
	key_set_version++;
	spin_unlock(&lu_keys_guard);
}
EXPORT_SYMBOL(lu_context_tags_clear);

void lu_session_tags_update(__u32 tags)
{
	spin_lock(&lu_keys_guard);
	lu_session_tags_default |= tags;
	key_set_version++;
	spin_unlock(&lu_keys_guard);
}
EXPORT_SYMBOL(lu_session_tags_update);

void lu_session_tags_clear(__u32 tags)
{
	spin_lock(&lu_keys_guard);
	lu_session_tags_default &= ~tags;
	key_set_version++;
	spin_unlock(&lu_keys_guard);
}
EXPORT_SYMBOL(lu_session_tags_clear);
int lu_env_init(struct lu_env *env, __u32 tags)
{
	int result;

	env->le_ses = NULL;
	result = lu_context_init(&env->le_ctx, tags);
	if (likely(result == 0))
		lu_context_enter(&env->le_ctx);
	return result;
}
EXPORT_SYMBOL(lu_env_init);

void lu_env_fini(struct lu_env *env)
{
	lu_context_exit(&env->le_ctx);
	lu_context_fini(&env->le_ctx);
	env->le_ses = NULL;
}
EXPORT_SYMBOL(lu_env_fini);
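/*
 * Illustrative sketch (not part of the build): a short-lived environment on
 * a service thread follows this pattern; LCT_MD_THREAD is just an example
 * tag:
 *
 *	struct lu_env env;
 *	int rc;
 *
 *	rc = lu_env_init(&env, LCT_MD_THREAD);
 *	if (rc == 0) {
 *		... use env with lu_object_find() and friends ...
 *		lu_env_fini(&env);
 *	}
 *
 * lu_env_init() already enters le_ctx and lu_env_fini() exits it, so no
 * separate lu_context_enter()/lu_context_exit() calls are needed here.
 */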
int lu_env_refill(struct lu_env *env)
{
	int result;

	result = lu_context_refill(&env->le_ctx);
	if (result == 0 && env->le_ses != NULL)
		result = lu_context_refill(env->le_ses);
	return result;
}
EXPORT_SYMBOL(lu_env_refill);
/**
 * Currently, this API will only be used by the echo client. Because the
 * echo client and the normal Lustre client share the same cl_env cache,
 * the echo client needs to refresh the env context after it gets one from
 * the cache, especially when a normal client and an echo client co-exist
 * on the same node.
 */
int lu_env_refill_by_tags(struct lu_env *env, __u32 ctags,
			  __u32 stags)
{
	int result;

	if ((env->le_ctx.lc_tags & ctags) != ctags) {
		env->le_ctx.lc_version = 0;
		env->le_ctx.lc_tags |= ctags;
	}

	if (env->le_ses && (env->le_ses->lc_tags & stags) != stags) {
		env->le_ses->lc_version = 0;
		env->le_ses->lc_tags |= stags;
	}

	result = lu_env_refill(env);

	return result;
}
EXPORT_SYMBOL(lu_env_refill_by_tags);
static struct shrinker *lu_site_shrinker;

typedef struct lu_site_stats {
	unsigned lss_populated;
	unsigned lss_max_search;
	unsigned lss_total;
	unsigned lss_busy;
} lu_site_stats_t;

static void lu_site_stats_get(cfs_hash_t *hs,
			      lu_site_stats_t *stats, int populated)
{
	cfs_hash_bd_t bd;
	unsigned int i;

	cfs_hash_for_each_bucket(hs, &bd, i) {
		struct lu_site_bkt_data *bkt = cfs_hash_bd_extra_get(hs, &bd);
		struct hlist_head *hhead;

		cfs_hash_bd_lock(hs, &bd, 1);
		stats->lss_busy += bkt->lsb_busy;
		stats->lss_total += cfs_hash_bd_count_get(&bd);
		stats->lss_max_search = max((int)stats->lss_max_search,
					    cfs_hash_bd_depmax_get(&bd));
		if (!populated) {
			cfs_hash_bd_unlock(hs, &bd, 1);
			continue;
		}

		cfs_hash_bd_for_each_hlist(hs, &bd, hhead) {
			if (!hlist_empty(hhead))
				stats->lss_populated++;
		}
		cfs_hash_bd_unlock(hs, &bd, 1);
	}
}
static unsigned long lu_cache_shrink_count(struct shrinker *sk,
					   struct shrink_control *sc)
{
	lu_site_stats_t stats;
	struct lu_site *s;
	struct lu_site *tmp;
	unsigned long cached = 0;

	if (!(sc->gfp_mask & __GFP_FS))
		return 0;

	mutex_lock(&lu_sites_guard);
	list_for_each_entry_safe(s, tmp, &lu_sites, ls_linkage) {
		memset(&stats, 0, sizeof(stats));
		lu_site_stats_get(s->ls_obj_hash, &stats, 0);
		cached += stats.lss_total - stats.lss_busy;
	}
	mutex_unlock(&lu_sites_guard);

	cached = (cached / 100) * sysctl_vfs_cache_pressure;
	CDEBUG(D_INODE, "%ld objects cached\n", cached);
	return cached;
}
static unsigned long lu_cache_shrink_scan(struct shrinker *sk,
					  struct shrink_control *sc)
{
	struct lu_site *s;
	struct lu_site *tmp;
	unsigned long remain = sc->nr_to_scan;
	LIST_HEAD(splice);

	if (!(sc->gfp_mask & __GFP_FS))
		/* We must not take the lu_sites_guard lock when
		 * __GFP_FS is *not* set because of the deadlock
		 * possibility detailed above. Additionally,
		 * since we cannot determine the number of
		 * objects in the cache without taking this
		 * lock, we're in a particularly tough spot. As
		 * a result, we'll just lie and say our cache is
		 * empty. This _should_ be ok, as we can't
		 * reclaim objects when __GFP_FS is *not* set
		 * anyway.
		 */
		return SHRINK_STOP;

	mutex_lock(&lu_sites_guard);
	list_for_each_entry_safe(s, tmp, &lu_sites, ls_linkage) {
		remain = lu_site_purge(&lu_shrink_env, s, remain);
		/*
		 * Move just shrunk site to the tail of site list to
		 * assure shrinking fairness.
		 */
		list_move_tail(&s->ls_linkage, &splice);
	}
	list_splice(&splice, lu_sites.prev);
	mutex_unlock(&lu_sites_guard);

	return sc->nr_to_scan - remain;
}
#ifndef HAVE_SHRINKER_COUNT
/*
 * There exists a potential lock inversion deadlock scenario when using
 * Lustre on top of ZFS. This occurs between one of ZFS's
 * buf_hash_table.ht_lock's, and Lustre's lu_sites_guard lock. Essentially,
 * thread A will take the lu_sites_guard lock and sleep on the ht_lock,
 * while thread B will take the ht_lock and sleep on the lu_sites_guard
 * lock. Obviously neither thread will wake and drop their respective hold
 * on their lock.
 *
 * To prevent this from happening we must ensure the lu_sites_guard lock is
 * not taken while down this code path. ZFS reliably does not set the
 * __GFP_FS bit in its code paths, so this can be used to determine if it
 * is safe to take the lu_sites_guard lock.
 *
 * Ideally we should accurately return the remaining number of cached
 * objects without taking the lu_sites_guard lock, but this is not
 * possible in the current implementation.
 */
static int lu_cache_shrink(SHRINKER_ARGS(sc, nr_to_scan, gfp_mask))
{
	int cached = 0;
	struct shrink_control scv = {
		.nr_to_scan = shrink_param(sc, nr_to_scan),
		.gfp_mask   = shrink_param(sc, gfp_mask)
	};
#if !defined(HAVE_SHRINKER_WANT_SHRINK_PTR) && !defined(HAVE_SHRINK_CONTROL)
	struct shrinker *shrinker = NULL;
#endif

	CDEBUG(D_INODE, "Shrink %lu objects\n", scv.nr_to_scan);

	lu_cache_shrink_scan(shrinker, &scv);

	cached = lu_cache_shrink_count(shrinker, &scv);
	if (scv.nr_to_scan == 0)
		CDEBUG(D_INODE, "%d objects cached\n", cached);
	return cached;
}

#endif /* HAVE_SHRINKER_COUNT */
/*
 * Debugging stuff.
 */

/**
 * Environment to be used in debugger, contains all tags.
 */
static struct lu_env lu_debugging_env;

/**
 * Debugging printer function using printk().
 */
int lu_printk_printer(const struct lu_env *env,
		      void *unused, const char *format, ...)
{
	va_list args;

	va_start(args, format);
	vprintk(format, args);
	va_end(args);
	return 0;
}

int lu_debugging_setup(void)
{
	return lu_env_init(&lu_debugging_env, ~0);
}

void lu_context_keys_dump(void)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(lu_keys); ++i) {
		struct lu_context_key *key;

		key = lu_keys[i];
		if (key != NULL) {
			CERROR("[%d]: %p %x (%p,%p,%p) %d %d \"%s\"@%p\n",
			       i, key, key->lct_tags,
			       key->lct_init, key->lct_fini, key->lct_exit,
			       key->lct_index, atomic_read(&key->lct_used),
			       key->lct_owner ? key->lct_owner->name : "",
			       key->lct_owner);
			lu_ref_print(&key->lct_reference);
		}
	}
}
EXPORT_SYMBOL(lu_context_keys_dump);
/**
 * Initialization of global lu_* data.
 */
int lu_global_init(void)
{
	int result;
	DEF_SHRINKER_VAR(shvar, lu_cache_shrink,
			 lu_cache_shrink_count, lu_cache_shrink_scan);

	CDEBUG(D_INFO, "Lustre LU module (%p).\n", &lu_keys);

	INIT_LIST_HEAD(&lu_device_types);
	INIT_LIST_HEAD(&lu_context_remembered);
	INIT_LIST_HEAD(&lu_sites);

	result = lu_ref_global_init();
	if (result != 0)
		return result;

	LU_CONTEXT_KEY_INIT(&lu_global_key);
	result = lu_context_key_register(&lu_global_key);
	if (result != 0)
		return result;

	/*
	 * At this level, we don't know what tags are needed, so allocate them
	 * conservatively. This should not be too bad, because this
	 * environment is global.
	 */
	mutex_lock(&lu_sites_guard);
	result = lu_env_init(&lu_shrink_env, LCT_SHRINKER);
	mutex_unlock(&lu_sites_guard);
	if (result != 0)
		return result;

	/*
	 * seeks estimation: 3 seeks to read a record from oi, one to read
	 * inode, one for ea. Unfortunately setting this high value results in
	 * lu_object/inode cache consuming all the memory.
	 */
	lu_site_shrinker = set_shrinker(DEFAULT_SEEKS, &shvar);
	if (lu_site_shrinker == NULL)
		return -ENOMEM;

	return result;
}
/**
 * Dual to lu_global_init().
 */
void lu_global_fini(void)
{
	if (lu_site_shrinker != NULL) {
		remove_shrinker(lu_site_shrinker);
		lu_site_shrinker = NULL;
	}

	lu_context_key_degister(&lu_global_key);

	/*
	 * Tear shrinker environment down _after_ de-registering
	 * lu_global_key, because the latter has a value in the former.
	 */
	mutex_lock(&lu_sites_guard);
	lu_env_fini(&lu_shrink_env);
	mutex_unlock(&lu_sites_guard);

	lu_ref_global_fini();
}
static __u32 ls_stats_read(struct lprocfs_stats *stats, int idx)
{
#ifdef CONFIG_PROC_FS
	struct lprocfs_counter ret;

	lprocfs_stats_collect(stats, idx, &ret);
	return (__u32)ret.lc_count;
#else
	return 0;
#endif
}
/**
 * Output site statistical counters into a buffer. Suitable for
 * lprocfs_rd_*()-style functions.
 */
int lu_site_stats_seq_print(const struct lu_site *s, struct seq_file *m)
{
	lu_site_stats_t stats;

	memset(&stats, 0, sizeof(stats));
	lu_site_stats_get(s->ls_obj_hash, &stats, 1);

	return seq_printf(m, "%d/%d %d/%d %d %d %d %d %d %d %d\n",
			  stats.lss_busy,
			  stats.lss_total,
			  stats.lss_populated,
			  CFS_HASH_NHLIST(s->ls_obj_hash),
			  stats.lss_max_search,
			  ls_stats_read(s->ls_stats, LU_SS_CREATED),
			  ls_stats_read(s->ls_stats, LU_SS_CACHE_HIT),
			  ls_stats_read(s->ls_stats, LU_SS_CACHE_MISS),
			  ls_stats_read(s->ls_stats, LU_SS_CACHE_RACE),
			  ls_stats_read(s->ls_stats, LU_SS_CACHE_DEATH_RACE),
			  ls_stats_read(s->ls_stats, LU_SS_LRU_PURGED));
}
EXPORT_SYMBOL(lu_site_stats_seq_print);
int lu_site_stats_print(const struct lu_site *s, char *page, int count)
{
	lu_site_stats_t stats;

	memset(&stats, 0, sizeof(stats));
	lu_site_stats_get(s->ls_obj_hash, &stats, 1);

	return snprintf(page, count, "%d/%d %d/%d %d %d %d %d %d %d %d\n",
			stats.lss_busy,
			stats.lss_total,
			stats.lss_populated,
			CFS_HASH_NHLIST(s->ls_obj_hash),
			stats.lss_max_search,
			ls_stats_read(s->ls_stats, LU_SS_CREATED),
			ls_stats_read(s->ls_stats, LU_SS_CACHE_HIT),
			ls_stats_read(s->ls_stats, LU_SS_CACHE_MISS),
			ls_stats_read(s->ls_stats, LU_SS_CACHE_RACE),
			ls_stats_read(s->ls_stats, LU_SS_CACHE_DEATH_RACE),
			ls_stats_read(s->ls_stats, LU_SS_LRU_PURGED));
}
EXPORT_SYMBOL(lu_site_stats_print);
/**
 * Helper function to initialize a number of kmem slab caches at once.
 */
int lu_kmem_init(struct lu_kmem_descr *caches)
{
	int result;
	struct lu_kmem_descr *iter = caches;

	for (result = 0; iter->ckd_cache != NULL; ++iter) {
		*iter->ckd_cache = kmem_cache_create(iter->ckd_name,
						     iter->ckd_size,
						     0, 0, NULL);
		if (*iter->ckd_cache == NULL) {
			result = -ENOMEM;
			/* free all previously allocated caches */
			lu_kmem_fini(caches);
			break;
		}
	}
	return result;
}
EXPORT_SYMBOL(lu_kmem_init);
/**
 * Helper function to finalize a number of kmem slab caches at once. Dual to
 * lu_kmem_init().
 */
void lu_kmem_fini(struct lu_kmem_descr *caches)
{
	for (; caches->ckd_cache != NULL; ++caches) {
		if (*caches->ckd_cache != NULL) {
			kmem_cache_destroy(*caches->ckd_cache);
			*caches->ckd_cache = NULL;
		}
	}
}
EXPORT_SYMBOL(lu_kmem_fini);
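/*
 * Illustrative sketch (not part of the build): callers describe their slabs
 * in a NULL-terminated lu_kmem_descr array. "foo_kmem" and "struct
 * foo_object" are hypothetical names:
 *
 *	static struct kmem_cache *foo_kmem;
 *
 *	static struct lu_kmem_descr foo_caches[] = {
 *		{
 *			.ckd_cache = &foo_kmem,
 *			.ckd_name  = "foo_object_kmem",
 *			.ckd_size  = sizeof(struct foo_object)
 *		},
 *		{
 *			.ckd_cache = NULL
 *		}
 *	};
 *
 *	rc = lu_kmem_init(foo_caches);	... lu_kmem_fini(foo_caches);
 */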
/**
 * Temporary solution to be able to assign fid in ->do_create()
 * till we have fully-functional OST fids
 */
void lu_object_assign_fid(const struct lu_env *env, struct lu_object *o,
			  const struct lu_fid *fid)
{
	struct lu_site *s = o->lo_dev->ld_site;
	struct lu_fid *old = &o->lo_header->loh_fid;
	struct lu_site_bkt_data *bkt;
	struct lu_object *shadow;
	wait_queue_t waiter;
	cfs_hash_t *hs;
	cfs_hash_bd_t bd;
	__u64 version = 0;

	LASSERT(fid_is_zero(old));

	hs = s->ls_obj_hash;
	cfs_hash_bd_get_and_lock(hs, (void *)fid, &bd, 1);
	shadow = htable_lookup(s, &bd, fid, &waiter, &version);
	/* supposed to be unique */
	LASSERT(IS_ERR(shadow) && PTR_ERR(shadow) == -ENOENT);
	*old = *fid;
	bkt = cfs_hash_bd_extra_get(hs, &bd);
	cfs_hash_bd_add_locked(hs, &bd, &o->lo_header->loh_hash);
	bkt->lsb_busy++;
	cfs_hash_bd_unlock(hs, &bd, 1);
}
EXPORT_SYMBOL(lu_object_assign_fid);
/**
 * Allocates object with 0 (non-assigned) fid.
 * XXX: temporary solution to be able to assign fid in ->do_create()
 *      till we have fully-functional OST fids
 */
struct lu_object *lu_object_anon(const struct lu_env *env,
				 struct lu_device *dev,
				 const struct lu_object_conf *conf)
{
	struct lu_fid fid;
	struct lu_object *o;

	fid_zero(&fid);
	o = lu_object_alloc(env, dev, &fid, conf);

	return o;
}
EXPORT_SYMBOL(lu_object_anon);
struct lu_buf LU_BUF_NULL = {
	.lb_buf = NULL,
	.lb_len = 0
};
EXPORT_SYMBOL(LU_BUF_NULL);
void lu_buf_free(struct lu_buf *buf)
{
	LASSERT(buf);
	if (buf->lb_buf) {
		LASSERT(buf->lb_len > 0);
		OBD_FREE_LARGE(buf->lb_buf, buf->lb_len);
		buf->lb_buf = NULL;
		buf->lb_len = 0;
	}
}
EXPORT_SYMBOL(lu_buf_free);

void lu_buf_alloc(struct lu_buf *buf, size_t size)
{
	LASSERT(buf);
	LASSERT(buf->lb_buf == NULL);
	LASSERT(buf->lb_len == 0);
	OBD_ALLOC_LARGE(buf->lb_buf, size);
	if (likely(buf->lb_buf))
		buf->lb_len = size;
}
EXPORT_SYMBOL(lu_buf_alloc);

void lu_buf_realloc(struct lu_buf *buf, size_t size)
{
	lu_buf_free(buf);
	lu_buf_alloc(buf, size);
}
EXPORT_SYMBOL(lu_buf_realloc);
struct lu_buf *lu_buf_check_and_alloc(struct lu_buf *buf, size_t len)
{
	if (buf->lb_buf == NULL && buf->lb_len == 0)
		lu_buf_alloc(buf, len);

	if ((len > buf->lb_len) && (buf->lb_buf != NULL))
		lu_buf_realloc(buf, len);

	return buf;
}
EXPORT_SYMBOL(lu_buf_check_and_alloc);
/**
 * Increase the size of the \a buf.
 * Preserves old data in the buffer; the old buffer remains unchanged on
 * error.
 * \retval 0 or -ENOMEM
 */
int lu_buf_check_and_grow(struct lu_buf *buf, size_t len)
{
	char *ptr;

	if (len <= buf->lb_len)
		return 0;

	OBD_ALLOC_LARGE(ptr, len);
	if (ptr == NULL)
		return -ENOMEM;

	/* Copy and free the old buffer. */
	if (buf->lb_buf != NULL) {
		memcpy(ptr, buf->lb_buf, buf->lb_len);
		OBD_FREE_LARGE(buf->lb_buf, buf->lb_len);
	}

	buf->lb_buf = ptr;
	buf->lb_len = len;
	return 0;
}
EXPORT_SYMBOL(lu_buf_check_and_grow);
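/*
 * Illustrative sketch (not part of the build): typical lu_buf life cycle in
 * a layer that reads variable-sized data into a reusable buffer:
 *
 *	struct lu_buf buf = LU_BUF_NULL;
 *
 *	lu_buf_check_and_alloc(&buf, 1024);	// first use: allocate
 *	...
 *	rc = lu_buf_check_and_grow(&buf, 4096);	// grow, preserving content
 *	...
 *	lu_buf_free(&buf);			// release when done
 */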