/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * Copyright (C) 2006 Cluster File Systems, Inc.
 * Author: Nikita Danilov <nikita@clusterfs.com>
 *
 * This file is part of the Lustre file system, http://www.lustre.org
 * Lustre is a trademark of Cluster File Systems, Inc.
 *
 * You may have signed or agreed to another license before downloading
 * this software. If so, you are bound by the terms and conditions
 * of that agreement, and the following does not apply to you. See the
 * LICENSE file included with this distribution for more information.
 *
 * If you did not agree to a different license, then this copy of Lustre
 * is open source software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * In either case, Lustre is distributed in the hope that it will be
 * useful, but WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * license text for more details.
 *
 * These are the only exported functions; they provide some generic
 * infrastructure for managing object devices.
 */

#define DEBUG_SUBSYSTEM S_CLASS
#ifndef EXPORT_SYMTAB
# define EXPORT_SYMTAB
#endif

#include <linux/seq_file.h>
#include <linux/module.h>
/* nr_free_buffer_pages() */
#include <linux/swap.h>
/* hash_long() */
#include <linux/hash.h>

#include <obd_support.h>
#include <lustre_disk.h>
#include <lustre_fid.h>
#include <lu_object.h>
#include <libcfs/list.h>
/* lu_time_global_{init,fini}() */
#include <lu_time.h>

static void lu_object_free(const struct lu_env *env, struct lu_object *o);

/*
 * Decrease reference counter on object. If last reference is freed, return
 * object to the cache, unless lu_object_is_dying(o) holds. In the latter
 * case, free object immediately.
 */
void lu_object_put(const struct lu_env *env, struct lu_object *o)
{
        struct lu_object_header *top;
        struct lu_site          *site;
        struct lu_object        *orig;
        int                      kill_it;

        top  = o->lo_header;
        site = o->lo_dev->ld_site;
        orig = o;
        kill_it = 0;
        write_lock(&site->ls_guard);
        if (atomic_dec_and_test(&top->loh_ref)) {
                /*
                 * When last reference is released, iterate over object
                 * layers, and notify them that object is no longer busy.
                 */
                list_for_each_entry_reverse(o, &top->loh_layers, lo_linkage) {
                        if (o->lo_ops->loo_object_release != NULL)
                                o->lo_ops->loo_object_release(env, o);
                }
                --site->ls_busy;
                if (lu_object_is_dying(top)) {
                        /*
                         * If object is dying (will not be cached), remove it
                         * from hash table and LRU.
                         *
                         * This is done with hash table and LRU lists
                         * locked. As the only way to acquire first reference
                         * to previously unreferenced object is through
                         * hash-table lookup (lu_object_find()), or LRU
                         * scanning (lu_site_purge()), that are done under
                         * hash-table and LRU lock, no race with concurrent
                         * object lookup is possible and we can safely destroy
                         * object.
                         */
                        hlist_del_init(&top->loh_hash);
                        list_del_init(&top->loh_lru);
                        --site->ls_total;
                        kill_it = 1;
                }
        }
        write_unlock(&site->ls_guard);
        if (kill_it)
                /*
                 * Object was already removed from hash and LRU above; it can
                 * be freed outside the lock.
                 */
                lu_object_free(env, orig);
}
EXPORT_SYMBOL(lu_object_put);

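/*
 * Illustrative usage sketch (not part of the original source): a caller
 * obtains a reference via lu_object_find() and drops it with
 * lu_object_put() when done; the fid value and error handling here are
 * assumptions for the example.
 *
 *      struct lu_object *o;
 *
 *      o = lu_object_find(env, site, fid);
 *      if (!IS_ERR(o)) {
 *              ... use the object ...
 *              lu_object_put(env, o);
 *      }
 */
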
/*
 * Allocate new object.
 *
 * This follows object creation protocol, described in the comment within
 * struct lu_device_operations definition.
 */
static struct lu_object *lu_object_alloc(const struct lu_env *env,
                                         struct lu_site *s,
                                         const struct lu_fid *f)
{
        struct lu_object *scan;
        struct lu_object *top;
        struct list_head *layers;
        int clean;
        int result;
        ENTRY;

        /*
         * Create top-level object slice. This will also create
         * lu_object_header.
         */
        top = s->ls_top_dev->ld_ops->ldo_object_alloc(env,
                                                      NULL, s->ls_top_dev);
        if (top == NULL)
                RETURN(ERR_PTR(-ENOMEM));
        /*
         * This is the only place where object fid is assigned. It's constant
         * after this point.
         */
        LASSERT(fid_is_igif(f) || fid_ver(f) == 0);
        top->lo_header->loh_fid = *f;
        layers = &top->lo_header->loh_layers;
        do {
                /*
                 * Call ->loo_object_init() repeatedly, until no more new
                 * object slices are created.
                 */
                clean = 1;
                list_for_each_entry(scan, layers, lo_linkage) {
                        if (scan->lo_flags & LU_OBJECT_ALLOCATED)
                                continue;
                        clean = 0;
                        scan->lo_header = top->lo_header;
                        result = scan->lo_ops->loo_object_init(env, scan);
                        if (result != 0) {
                                lu_object_free(env, top);
                                RETURN(ERR_PTR(result));
                        }
                        scan->lo_flags |= LU_OBJECT_ALLOCATED;
                }
        } while (!clean);

        list_for_each_entry_reverse(scan, layers, lo_linkage) {
                if (scan->lo_ops->loo_object_start != NULL) {
                        result = scan->lo_ops->loo_object_start(env, scan);
                        if (result != 0) {
                                lu_object_free(env, top);
                                RETURN(ERR_PTR(result));
                        }
                }
        }
        s->ls_stats.s_created++;
        RETURN(top);
}

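/*
 * Illustrative sketch (an assumption, not from this file): how a layer's
 * ->ldo_object_alloc() method typically builds the slice that the
 * initialization loop above picks up. "my_object", "my_object_ops" and
 * "my_device" are hypothetical names.
 *
 *      static struct lu_object *my_object_alloc(const struct lu_env *env,
 *                                               const struct lu_object_header *h,
 *                                               struct lu_device *d)
 *      {
 *              struct my_object *mo;
 *
 *              OBD_ALLOC_PTR(mo);
 *              if (mo == NULL)
 *                      return NULL;
 *              lu_object_init(&mo->mo_obj, NULL, d);
 *              mo->mo_obj.lo_ops = &my_object_ops;
 *              return &mo->mo_obj;
 *      }
 */
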
static void lu_object_free(const struct lu_env *env, struct lu_object *o)
{
        struct list_head  splice;
        struct lu_object *scan;

        /*
         * First call ->loo_object_delete() method to release all resources.
         */
        list_for_each_entry_reverse(scan,
                                    &o->lo_header->loh_layers, lo_linkage) {
                if (scan->lo_ops->loo_object_delete != NULL)
                        scan->lo_ops->loo_object_delete(env, scan);
        }

        /*
         * Then, splice object layers into stand-alone list, and call
         * ->loo_object_free() on all layers to free memory. Splice is
         * necessary, because lu_object_header is freed together with the
         * top-level slice.
         */
        INIT_LIST_HEAD(&splice);
        list_splice_init(&o->lo_header->loh_layers, &splice);
        while (!list_empty(&splice)) {
                o = container_of0(splice.next, struct lu_object, lo_linkage);
                list_del_init(&o->lo_linkage);
                LASSERT(o->lo_ops->loo_object_free != NULL);
                o->lo_ops->loo_object_free(env, o);
        }
}

/*
 * Free @nr objects from the cold end of the site LRU list.
 */
int lu_site_purge(const struct lu_env *env, struct lu_site *s, int nr)
{
        struct list_head         dispose;
        struct lu_object_header *h;
        struct lu_object_header *temp;

        INIT_LIST_HEAD(&dispose);
        /*
         * Under LRU list lock, scan LRU list and move unreferenced objects to
         * the dispose list, removing them from LRU and hash table.
         */
        write_lock(&s->ls_guard);
        list_for_each_entry_safe(h, temp, &s->ls_lru, loh_lru) {
                /*
                 * Objects are sorted in LRU order, and "busy" objects (ones
                 * with h->loh_ref > 0) naturally tend to live near the hot
                 * end that we scan last. Unfortunately, sites usually have a
                 * small (less than ten) number of busy yet rarely accessed
                 * objects (some global objects, accessed directly through
                 * pointers, bypassing hash table). Currently the algorithm
                 * scans them over and over again. Probably we should move
                 * busy objects out of LRU, or we can live with that.
                 */
                if (nr-- == 0)
                        break;
                if (atomic_read(&h->loh_ref) > 0)
                        continue;
                hlist_del_init(&h->loh_hash);
                list_move(&h->loh_lru, &dispose);
                s->ls_total--;
        }
        write_unlock(&s->ls_guard);
        /*
         * Free everything on the dispose list. This is safe against races due
         * to the reasons described in lu_object_put().
         */
        while (!list_empty(&dispose)) {
                h = container_of0(dispose.next,
                                  struct lu_object_header, loh_lru);
                list_del_init(&h->loh_lru);
                lu_object_free(env, lu_object_top(h));
                s->ls_stats.s_lru_purged++;
        }
        return nr;
}
EXPORT_SYMBOL(lu_site_purge);

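/*
 * Usage sketch (an assumption, not from this file): a site owner can drain
 * the entire cache, e.g. before tearing the site down, by passing a very
 * large @nr:
 *
 *      lu_site_purge(env, s, ~0);
 *
 * lu_cache_shrink() below instead uses the return value to spread a
 * smaller quota fairly across all sites.
 */
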
/*
 * Code below has to jump through certain loops to output object description
 * into libcfs_debug_msg-based log. The problem is that lu_object_print()
 * composes object description from strings that are parts of _lines_ of
 * output (i.e., strings that are not terminated by newline). This doesn't fit
 * very well into libcfs_debug_msg() interface that assumes that each message
 * supplied to it is a self-contained output line.
 *
 * To work around this, strings are collected in a temporary buffer
 * (implemented as a value of lu_cdebug_key key), until terminating newline
 * character is detected.
 */

enum {
        /*
         * Maximal line size. XXX overflow is not handled correctly.
         */
        LU_CDEBUG_LINE = 256
};

struct lu_cdebug_data {
        /*
         * Temporary buffer.
         */
        char lck_area[LU_CDEBUG_LINE];
        /*
         * fid staging area used by dt_store_open().
         */
        struct lu_fid_pack lck_pack;
};

/* context key constructor/destructor: lu_global_key_init, lu_global_key_fini */
LU_KEY_INIT_FINI(lu_global, struct lu_cdebug_data);

/*
 * Key, holding temporary buffer. This key is registered very early by
 * lu_global_init().
 */
struct lu_context_key lu_global_key = {
        .lct_tags = LCT_MD_THREAD|LCT_DT_THREAD|LCT_CL_THREAD,
        .lct_init = lu_global_key_init,
        .lct_fini = lu_global_key_fini
};

/*
 * Printer function emitting messages through libcfs_debug_msg().
 */
int lu_cdebug_printer(const struct lu_env *env,
                      void *cookie, const char *format, ...)
{
        struct lu_cdebug_print_info *info = cookie;
        struct lu_cdebug_data       *key;
        int      used;
        int      complete;
        va_list  args;

        va_start(args, format);

        key = lu_context_key_get(&env->le_ctx, &lu_global_key);
        LASSERT(key != NULL);

        used = strlen(key->lck_area);
        complete = format[strlen(format) - 1] == '\n';
        /*
         * Append new chunk to the buffer.
         */
        vsnprintf(key->lck_area + used,
                  ARRAY_SIZE(key->lck_area) - used, format, args);
        if (complete) {
                libcfs_debug_msg(NULL, info->lpi_subsys, info->lpi_mask,
                                 (char *)info->lpi_file, info->lpi_fn,
                                 info->lpi_line, "%s", key->lck_area);
                key->lck_area[0] = 0;
        }
        va_end(args);
        return 0;
}
EXPORT_SYMBOL(lu_cdebug_printer);

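/*
 * Illustrative sketch (an assumption, not from this file): lu_cdebug_printer
 * is meant to be passed as the lu_printer_t callback, with a
 * lu_cdebug_print_info describing the debug-log position as the cookie:
 *
 *      struct lu_cdebug_print_info info = {
 *              .lpi_subsys = DEBUG_SUBSYSTEM,
 *              .lpi_mask   = D_OTHER,
 *              .lpi_file   = __FILE__,
 *              .lpi_fn     = __FUNCTION__,
 *              .lpi_line   = __LINE__
 *      };
 *
 *      lu_object_print(env, &info, lu_cdebug_printer, o);
 */
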
/*
 * Print object header.
 */
static void lu_object_header_print(const struct lu_env *env,
                                   void *cookie, lu_printer_t printer,
                                   const struct lu_object_header *hdr)
{
        (*printer)(env, cookie, "header@%p[%#lx, %d, "DFID"%s%s%s]",
                   hdr, hdr->loh_flags, atomic_read(&hdr->loh_ref),
                   PFID(&hdr->loh_fid),
                   hlist_unhashed(&hdr->loh_hash) ? "" : " hash",
                   list_empty(&hdr->loh_lru) ? "" : " lru",
                   hdr->loh_attr & LOHA_EXISTS ? " exist" : "");
}

/*
 * Print human readable representation of the @o to the @printer.
 */
void lu_object_print(const struct lu_env *env, void *cookie,
                     lu_printer_t printer, const struct lu_object *o)
{
        static const char ruler[] = "........................................";
        struct lu_object_header *top;
        int depth;

        top = o->lo_header;
        lu_object_header_print(env, cookie, printer, top);
        (*printer)(env, cookie, "\n");
        list_for_each_entry(o, &top->loh_layers, lo_linkage) {
                depth = o->lo_depth + 4;
                LASSERT(o->lo_ops->loo_object_print != NULL);
                /*
                 * print `.' @depth times.
                 */
                (*printer)(env, cookie, "%*.*s", depth, depth, ruler);
                o->lo_ops->loo_object_print(env, cookie, printer, o);
                (*printer)(env, cookie, "\n");
        }
}
EXPORT_SYMBOL(lu_object_print);

/*
 * Check object consistency.
 */
int lu_object_invariant(const struct lu_object *o)
{
        struct lu_object_header *top;

        top = o->lo_header;
        list_for_each_entry(o, &top->loh_layers, lo_linkage) {
                if (o->lo_ops->loo_object_invariant != NULL &&
                    !o->lo_ops->loo_object_invariant(o))
                        return 0;
        }
        return 1;
}
EXPORT_SYMBOL(lu_object_invariant);

static struct lu_object *htable_lookup(struct lu_site *s,
                                       const struct hlist_head *bucket,
                                       const struct lu_fid *f)
{
        struct lu_object_header *h;
        struct hlist_node       *scan;

        hlist_for_each_entry(h, scan, bucket, loh_hash) {
                s->ls_stats.s_cache_check++;
                if (likely(lu_fid_eq(&h->loh_fid, f) &&
                           !lu_object_is_dying(h))) {
                        /* bump reference count... */
                        if (atomic_add_return(1, &h->loh_ref) == 1)
                                ++s->ls_busy;
                        /* ...and move to the head of the LRU */
                        /*
                         * XXX temporarily disabled to measure effects of
                         * read-write locking.
                         */
                        /* list_move_tail(&h->loh_lru, &s->ls_lru); */
                        s->ls_stats.s_cache_hit++;
                        return lu_object_top(h);
                }
        }
        s->ls_stats.s_cache_miss++;
        return NULL;
}

static __u32 fid_hash(const struct lu_fid *f, int bits)
{
        /* all objects with same id and different versions will belong to same
         * collisions list. */
        return hash_long(fid_flatten(f), bits);
}

/*
 * Search cache for an object with the fid @f. If such object is found, return
 * it. Otherwise, create new object, insert it into cache and return it. In
 * any case, additional reference is acquired on the returned object.
 */
struct lu_object *lu_object_find(const struct lu_env *env,
                                 struct lu_site *s, const struct lu_fid *f)
{
        struct lu_object  *o;
        struct lu_object  *shadow;
        struct hlist_head *bucket;

        /*
         * This uses standard index maintenance protocol:
         *
         *     - search index under lock, and return object if found;
         *     - otherwise, unlock index, allocate new object;
         *     - lock index and search again;
         *     - if nothing is found (usual case), insert newly created
         *       object into index;
         *     - otherwise (race: other thread inserted object), free
         *       object just allocated.
         *     - unlock index;
         *     - return object.
         */

        bucket = s->ls_hash + fid_hash(f, s->ls_hash_bits);

        read_lock(&s->ls_guard);
        o = htable_lookup(s, bucket, f);
        read_unlock(&s->ls_guard);

        if (o != NULL)
                return o;

        /*
         * Allocate new object. This may result in rather complicated
         * operations, including fld queries, inode loading, etc.
         */
        o = lu_object_alloc(env, s, f);
        if (unlikely(IS_ERR(o)))
                return o;

        LASSERT(lu_fid_eq(lu_object_fid(o), f));

        write_lock(&s->ls_guard);
        shadow = htable_lookup(s, bucket, f);
        if (likely(shadow == NULL)) {
                hlist_add_head(&o->lo_header->loh_hash, bucket);
                list_add_tail(&o->lo_header->loh_lru, &s->ls_lru);
                ++s->ls_busy;
                ++s->ls_total;
                shadow = o;
                o = NULL;
        } else
                s->ls_stats.s_cache_race++;
        write_unlock(&s->ls_guard);
        if (o != NULL)
                /*
                 * Lost the race: free the object just allocated.
                 */
                lu_object_free(env, o);
        return shadow;
}
EXPORT_SYMBOL(lu_object_find);

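/*
 * Illustrative sketch (an assumption, not from this file): callers usually
 * locate the slice of a particular layer inside the compound object
 * returned by lu_object_find(), using lu_object_locate() defined below.
 * "my_device_type" is a hypothetical name.
 *
 *      struct lu_object *top;
 *      struct lu_object *slice;
 *
 *      top = lu_object_find(env, s, f);
 *      if (!IS_ERR(top)) {
 *              slice = lu_object_locate(top->lo_header, &my_device_type);
 *              ... operate on the layer-specific slice ...
 *              lu_object_put(env, top);
 *      }
 */
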
/*
 * Global list of all sites on this node
 */
static LIST_HEAD(lu_sites);
static DECLARE_MUTEX(lu_sites_guard);

/*
 * Global environment used by site shrinker.
 */
static struct lu_env lu_shrink_env;

/*
 * Print all objects in @s.
 */
void lu_site_print(const struct lu_env *env, struct lu_site *s, void *cookie,
                   lu_printer_t printer)
{
        int i;

        for (i = 0; i < s->ls_hash_size; ++i) {
                struct lu_object_header *h;
                struct hlist_node       *scan;

                read_lock(&s->ls_guard);
                hlist_for_each_entry(h, scan, &s->ls_hash[i], loh_hash) {
                        if (!list_empty(&h->loh_layers)) {
                                const struct lu_object *obj;

                                obj = lu_object_top(h);
                                lu_object_print(env, cookie, printer, obj);
                        } else
                                lu_object_header_print(env, cookie, printer, h);
                }
                read_unlock(&s->ls_guard);
        }
}
EXPORT_SYMBOL(lu_site_print);

enum {
        LU_CACHE_PERCENT = 30,
};

/*
 * Return desired hash table order.
 */
static int lu_htable_order(void)
{
        int bits;
        unsigned long cache_size;

        /*
         * Calculate hash table size, assuming that we want reasonable
         * performance when 30% of available memory is occupied by cache of
         * lu_objects.
         *
         * Size of lu_object is (arbitrary) taken as 1K (together with inode).
         */
        cache_size = nr_free_buffer_pages() / 100 *
                LU_CACHE_PERCENT * (CFS_PAGE_SIZE / 1024);

        for (bits = 1; (1 << bits) < cache_size; ++bits)
                ;
        return bits;
}

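/*
 * Worked example (assuming 1 GB of free buffer pages and a CFS_PAGE_SIZE
 * of 4096): nr_free_buffer_pages() = 262144, so
 *
 *      cache_size = 262144 / 100 * 30 * (4096 / 1024) = 314520
 *
 * and the smallest bits with (1 << bits) >= 314520 is 19, i.e. a hash
 * table of 2^19 buckets.
 */
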
/*
 * Initialize site @s, with @top as the top level device.
 */
int lu_site_init(struct lu_site *s, struct lu_device *top)
{
        int bits;
        int size;
        int i;

        memset(s, 0, sizeof *s);
        rwlock_init(&s->ls_guard);
        CFS_INIT_LIST_HEAD(&s->ls_lru);
        CFS_INIT_LIST_HEAD(&s->ls_linkage);
        s->ls_top_dev = top;
        top->ld_site = s;
        lu_device_get(top);

        for (bits = lu_htable_order(), size = 1 << bits;
             (s->ls_hash =
              cfs_alloc_large(size * sizeof s->ls_hash[0])) == NULL;
             --bits, size >>= 1) {
                /*
                 * Scale hash table down, until allocation succeeds.
                 */
        }

        s->ls_hash_size = size;
        s->ls_hash_bits = bits;
        s->ls_hash_mask = size - 1;

        for (i = 0; i < size; i++)
                INIT_HLIST_HEAD(&s->ls_hash[i]);

        return 0;
}
EXPORT_SYMBOL(lu_site_init);

/*
 * Finalize @s and release its resources.
 */
void lu_site_fini(struct lu_site *s)
{
        LASSERT(list_empty(&s->ls_lru));
        LASSERT(s->ls_total == 0);

        down(&lu_sites_guard);
        list_del_init(&s->ls_linkage);
        up(&lu_sites_guard);

        if (s->ls_hash != NULL) {
                int i;

                for (i = 0; i < s->ls_hash_size; i++)
                        LASSERT(hlist_empty(&s->ls_hash[i]));
                cfs_free_large(s->ls_hash);
                s->ls_hash = NULL;
        }
        if (s->ls_top_dev != NULL) {
                s->ls_top_dev->ld_site = NULL;
                lu_device_put(s->ls_top_dev);
                s->ls_top_dev = NULL;
        }
}
EXPORT_SYMBOL(lu_site_fini);

/*
 * Called when initialization of stack for this site is completed.
 */
int lu_site_init_finish(struct lu_site *s)
{
        int result;

        down(&lu_sites_guard);
        result = lu_context_refill(&lu_shrink_env.le_ctx);
        if (result == 0)
                list_add(&s->ls_linkage, &lu_sites);
        up(&lu_sites_guard);
        return result;
}
EXPORT_SYMBOL(lu_site_init_finish);

/*
 * Acquire additional reference on device @d.
 */
void lu_device_get(struct lu_device *d)
{
        atomic_inc(&d->ld_ref);
}
EXPORT_SYMBOL(lu_device_get);

/*
 * Release reference on device @d.
 */
void lu_device_put(struct lu_device *d)
{
        atomic_dec(&d->ld_ref);
}
EXPORT_SYMBOL(lu_device_put);

/*
 * Initialize device @d of type @t.
 */
int lu_device_init(struct lu_device *d, struct lu_device_type *t)
{
        memset(d, 0, sizeof *d);
        atomic_set(&d->ld_ref, 0);
        d->ld_type = t;
        return 0;
}
EXPORT_SYMBOL(lu_device_init);

/*
 * Finalize device @d.
 */
void lu_device_fini(struct lu_device *d)
{
        if (d->ld_obd != NULL)
                lprocfs_obd_cleanup(d->ld_obd);

        LASSERTF(atomic_read(&d->ld_ref) == 0,
                 "Refcount is %u\n", atomic_read(&d->ld_ref));
}
EXPORT_SYMBOL(lu_device_fini);

/*
 * Initialize object @o that is part of compound object @h and was created by
 * device @d.
 */
int lu_object_init(struct lu_object *o,
                   struct lu_object_header *h, struct lu_device *d)
{
        memset(o, 0, sizeof *o);
        o->lo_header = h;
        o->lo_dev    = d;
        lu_device_get(d);
        CFS_INIT_LIST_HEAD(&o->lo_linkage);
        return 0;
}
EXPORT_SYMBOL(lu_object_init);

/*
 * Finalize object and release its resources.
 */
void lu_object_fini(struct lu_object *o)
{
        LASSERT(list_empty(&o->lo_linkage));

        if (o->lo_dev != NULL) {
                lu_device_put(o->lo_dev);
                o->lo_dev = NULL;
        }
}
EXPORT_SYMBOL(lu_object_fini);

/*
 * Add object @o as first layer of compound object @h.
 *
 * This is typically called by the ->ldo_object_alloc() method of top-level
 * device.
 */
void lu_object_add_top(struct lu_object_header *h, struct lu_object *o)
{
        list_move(&o->lo_linkage, &h->loh_layers);
}
EXPORT_SYMBOL(lu_object_add_top);

/*
 * Add object @o as a layer of compound object, going after @before.
 *
 * This is typically called by the ->ldo_object_alloc() method of
 * @before->lo_dev.
 */
void lu_object_add(struct lu_object *before, struct lu_object *o)
{
        list_move(&o->lo_linkage, &before->lo_linkage);
}
EXPORT_SYMBOL(lu_object_add);

/*
 * Initialize compound object.
 */
int lu_object_header_init(struct lu_object_header *h)
{
        memset(h, 0, sizeof *h);
        atomic_set(&h->loh_ref, 1);
        INIT_HLIST_NODE(&h->loh_hash);
        CFS_INIT_LIST_HEAD(&h->loh_lru);
        CFS_INIT_LIST_HEAD(&h->loh_layers);
        return 0;
}
EXPORT_SYMBOL(lu_object_header_init);

/*
 * Finalize compound object.
 */
void lu_object_header_fini(struct lu_object_header *h)
{
        LASSERT(list_empty(&h->loh_layers));
        LASSERT(list_empty(&h->loh_lru));
        LASSERT(hlist_unhashed(&h->loh_hash));
}
EXPORT_SYMBOL(lu_object_header_fini);

/*
 * Given a compound object, find its slice, corresponding to the device type
 * @dtype.
 */
struct lu_object *lu_object_locate(struct lu_object_header *h,
                                   struct lu_device_type *dtype)
{
        struct lu_object *o;

        list_for_each_entry(o, &h->loh_layers, lo_linkage) {
                if (o->lo_dev->ld_type == dtype)
                        return o;
        }
        return NULL;
}
EXPORT_SYMBOL(lu_object_locate);

enum {
        /*
         * Maximal number of tld slots.
         */
        LU_CONTEXT_KEY_NR = 16
};

static struct lu_context_key *lu_keys[LU_CONTEXT_KEY_NR] = { NULL, };

static spinlock_t lu_keys_guard = SPIN_LOCK_UNLOCKED;

/*
 * Register new key.
 */
int lu_context_key_register(struct lu_context_key *key)
{
        int result;
        int i;

        LASSERT(key->lct_init != NULL);
        LASSERT(key->lct_fini != NULL);
        LASSERT(key->lct_tags != 0);
        LASSERT(key->lct_owner != NULL);

        result = -ENFILE;
        spin_lock(&lu_keys_guard);
        for (i = 0; i < ARRAY_SIZE(lu_keys); ++i) {
                if (lu_keys[i] == NULL) {
                        key->lct_index = i;
                        atomic_set(&key->lct_used, 1);
                        lu_keys[i] = key;
                        result = 0;
                        break;
                }
        }
        spin_unlock(&lu_keys_guard);
        return result;
}
EXPORT_SYMBOL(lu_context_key_register);

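/*
 * Illustrative sketch (an assumption, not from this file): a module defines
 * its per-context data with LU_KEY_INIT_FINI() and registers the key at
 * module init, mirroring how lu_global_key is set up above. The "my" names
 * are hypothetical.
 *
 *      struct my_thread_info {
 *              char mti_scratch[128];
 *      };
 *
 *      LU_KEY_INIT_FINI(my, struct my_thread_info);
 *
 *      struct lu_context_key my_thread_key = {
 *              .lct_tags = LCT_MD_THREAD,
 *              .lct_init = my_key_init,
 *              .lct_fini = my_key_fini
 *      };
 *
 *      ...
 *      LU_CONTEXT_KEY_INIT(&my_thread_key);
 *      result = lu_context_key_register(&my_thread_key);
 */
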
static void key_fini(struct lu_context *ctx, int index)
{
        if (ctx->lc_value[index] != NULL) {
                struct lu_context_key *key;

                key = lu_keys[index];
                LASSERT(key != NULL);
                LASSERT(key->lct_fini != NULL);
                LASSERT(atomic_read(&key->lct_used) > 1);

                key->lct_fini(ctx, key, ctx->lc_value[index]);
                atomic_dec(&key->lct_used);
                LASSERT(key->lct_owner != NULL);
                if (!(ctx->lc_tags & LCT_NOREF)) {
                        LASSERT(module_refcount(key->lct_owner) > 0);
                        module_put(key->lct_owner);
                }
                ctx->lc_value[index] = NULL;
        }
}

/*
 * Deregister key.
 */
void lu_context_key_degister(struct lu_context_key *key)
{
        LASSERT(atomic_read(&key->lct_used) >= 1);
        LASSERT(0 <= key->lct_index && key->lct_index < ARRAY_SIZE(lu_keys));

        key_fini(&lu_shrink_env.le_ctx, key->lct_index);

        if (atomic_read(&key->lct_used) > 1)
                CERROR("key has instances.\n");
        spin_lock(&lu_keys_guard);
        lu_keys[key->lct_index] = NULL;
        spin_unlock(&lu_keys_guard);
}
EXPORT_SYMBOL(lu_context_key_degister);

/*
 * Return value associated with key @key in context @ctx.
 */
void *lu_context_key_get(const struct lu_context *ctx,
                         struct lu_context_key *key)
{
        LASSERT(0 <= key->lct_index && key->lct_index < ARRAY_SIZE(lu_keys));
        return ctx->lc_value[key->lct_index];
}
EXPORT_SYMBOL(lu_context_key_get);

static void keys_fini(struct lu_context *ctx)
{
        int i;

        if (ctx->lc_value != NULL) {
                for (i = 0; i < ARRAY_SIZE(lu_keys); ++i)
                        key_fini(ctx, i);
                OBD_FREE(ctx->lc_value,
                         ARRAY_SIZE(lu_keys) * sizeof ctx->lc_value[0]);
                ctx->lc_value = NULL;
        }
}

static int keys_fill(const struct lu_context *ctx)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(lu_keys); ++i) {
                struct lu_context_key *key;

                key = lu_keys[i];
                if (ctx->lc_value[i] == NULL &&
                    key != NULL && key->lct_tags & ctx->lc_tags) {
                        void *value;

                        LASSERT(key->lct_init != NULL);
                        LASSERT(key->lct_index == i);

                        value = key->lct_init(ctx, key);
                        if (unlikely(IS_ERR(value)))
                                return PTR_ERR(value);
                        LASSERT(key->lct_owner != NULL);
                        if (!(ctx->lc_tags & LCT_NOREF))
                                try_module_get(key->lct_owner);
                        atomic_inc(&key->lct_used);
                        ctx->lc_value[i] = value;
                }
        }
        return 0;
}

static int keys_init(struct lu_context *ctx)
{
        int result;

        OBD_ALLOC(ctx->lc_value, ARRAY_SIZE(lu_keys) * sizeof ctx->lc_value[0]);
        if (likely(ctx->lc_value != NULL))
                result = keys_fill(ctx);
        else
                result = -ENOMEM;

        if (result != 0)
                keys_fini(ctx);
        return result;
}

/*
 * Initialize context data-structure. Create values for all keys.
 */
int lu_context_init(struct lu_context *ctx, __u32 tags)
{
        memset(ctx, 0, sizeof *ctx);
        ctx->lc_tags = tags;
        return keys_init(ctx);
}
EXPORT_SYMBOL(lu_context_init);

/*
 * Finalize context data-structure. Destroy key values.
 */
void lu_context_fini(struct lu_context *ctx)
{
        keys_fini(ctx);
}
EXPORT_SYMBOL(lu_context_fini);

/*
 * Called before entering context.
 */
void lu_context_enter(struct lu_context *ctx)
{
}
EXPORT_SYMBOL(lu_context_enter);

/*
 * Called after exiting from @ctx.
 */
void lu_context_exit(struct lu_context *ctx)
{
        int i;

        if (ctx->lc_value != NULL) {
                for (i = 0; i < ARRAY_SIZE(lu_keys); ++i) {
                        if (ctx->lc_value[i] != NULL) {
                                struct lu_context_key *key;

                                key = lu_keys[i];
                                LASSERT(key != NULL);
                                if (key->lct_exit != NULL)
                                        key->lct_exit(ctx,
                                                      key, ctx->lc_value[i]);
                        }
                }
        }
}
EXPORT_SYMBOL(lu_context_exit);

/*
 * Allocate for context all missing keys that were registered after context
 * creation.
 */
int lu_context_refill(const struct lu_context *ctx)
{
        LASSERT(ctx->lc_value != NULL);
        return keys_fill(ctx);
}
EXPORT_SYMBOL(lu_context_refill);

static int lu_env_setup(struct lu_env *env, struct lu_context *ses,
                        __u32 tags, int noref)
{
        int result;

        LASSERT(ergo(!noref, !(tags & LCT_NOREF)));

        env->le_ses = ses;
        result = lu_context_init(&env->le_ctx, tags);
        if (likely(result == 0))
                lu_context_enter(&env->le_ctx);
        return result;
}

static int lu_env_init_noref(struct lu_env *env, struct lu_context *ses,
                             __u32 tags)
{
        return lu_env_setup(env, ses, tags, 1);
}

int lu_env_init(struct lu_env *env, struct lu_context *ses, __u32 tags)
{
        return lu_env_setup(env, ses, tags, 0);
}
EXPORT_SYMBOL(lu_env_init);

void lu_env_fini(struct lu_env *env)
{
        lu_context_exit(&env->le_ctx);
        lu_context_fini(&env->le_ctx);
        env->le_ses = NULL;
}
EXPORT_SYMBOL(lu_env_fini);

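/*
 * Usage sketch (an assumption, not from this file): the usual environment
 * lifecycle around a piece of work; the LCT_MD_THREAD tag is just an
 * example.
 *
 *      struct lu_env env;
 *      int rc;
 *
 *      rc = lu_env_init(&env, NULL, LCT_MD_THREAD);
 *      if (rc == 0) {
 *              ... do work that needs an environment ...
 *              lu_env_fini(&env);
 *      }
 */
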
static int lu_cache_shrink(int nr, unsigned int gfp_mask)
{
        struct lu_site *s;
        struct lu_site *tmp;
        int cached = 0;
        int remain = nr;
        CFS_LIST_HEAD(splice);

        if (nr != 0 && !(gfp_mask & __GFP_FS))
                return -1;

        down(&lu_sites_guard);
        list_for_each_entry_safe(s, tmp, &lu_sites, ls_linkage) {
                if (nr != 0) {
                        remain = lu_site_purge(&lu_shrink_env, s, remain);
                        /*
                         * Move just shrunk site to the tail of site list to
                         * assure shrinking fairness.
                         */
                        list_move_tail(&s->ls_linkage, &splice);
                }
                read_lock(&s->ls_guard);
                cached += s->ls_total - s->ls_busy;
                read_unlock(&s->ls_guard);
                if (nr != 0 && remain <= 0)
                        break;
        }
        list_splice(&splice, lu_sites.prev);
        up(&lu_sites_guard);
        return cached;
}

static struct shrinker *lu_site_shrinker = NULL;

/*
 * Initialization of global lu_* data.
 */
int lu_global_init(void)
{
        int result;

        LU_CONTEXT_KEY_INIT(&lu_global_key);
        result = lu_context_key_register(&lu_global_key);
        if (result != 0)
                return result;
        /*
         * At this level, we don't know what tags are needed, so
         * allocate them conservatively. This should not be too bad,
         * because this environment is global.
         */
        down(&lu_sites_guard);
        result = lu_env_init_noref(&lu_shrink_env, NULL, LCT_SHRINKER);
        up(&lu_sites_guard);
        if (result != 0)
                return result;
        /*
         * seeks estimation: 3 seeks to read a record from oi,
         * one to read inode, one for ea. Unfortunately
         * setting this high value results in lu_object/inode
         * cache consuming all the memory.
         */
        lu_site_shrinker = set_shrinker(DEFAULT_SEEKS, lu_cache_shrink);
        if (lu_site_shrinker == NULL)
                return -ENOMEM;

        result = lu_time_global_init();
        return result;
}

/*
 * Dual to lu_global_init().
 */
void lu_global_fini(void)
{
        lu_time_global_fini();
        if (lu_site_shrinker != NULL) {
                remove_shrinker(lu_site_shrinker);
                lu_site_shrinker = NULL;
        }

        lu_context_key_degister(&lu_global_key);

        /*
         * Tear shrinker environment down _after_ de-registering
         * lu_global_key, because the latter has a value in the former.
         */
        down(&lu_sites_guard);
        lu_env_fini(&lu_shrink_env);
        up(&lu_sites_guard);
}

struct lu_buf LU_BUF_NULL = {
        .lb_buf = NULL,
        .lb_len = 0
};
EXPORT_SYMBOL(LU_BUF_NULL);

/*
 * XXX: Functions below logically belong to fid module, but they are used by
 * dt_store_open(). Put them here until better place is found.
 */

void fid_pack(struct lu_fid_pack *pack, const struct lu_fid *fid,
              struct lu_fid *befider)
{
        int recsize;
        __u64 seq;
        __u32 oid;

        seq = fid_seq(fid);
        oid = fid_oid(fid);

        /*
         * Two cases: compact 6 bytes representation for a common case, and
         * full 17 byte representation for "unusual" fid.
         */

        /*
         * Check that usual case is really usual.
         */
        CLASSERT(LUSTRE_SEQ_MAX_WIDTH < 0xffffull);

        if (fid_is_igif(fid) ||
            seq > 0xffffffull || oid > 0xffff || fid_ver(fid) != 0) {
                fid_cpu_to_be(befider, fid);
                recsize = sizeof *befider;
        } else {
                unsigned char *small_befider;

                small_befider = (unsigned char *)befider;

                small_befider[0] = seq >> 16;
                small_befider[1] = seq >> 8;
                small_befider[2] = seq;

                small_befider[3] = oid >> 8;
                small_befider[4] = oid;

                recsize = 5;
        }
        memcpy(pack->fp_area, befider, recsize);
        pack->fp_len = recsize + 1;
}
EXPORT_SYMBOL(fid_pack);

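/*
 * Round-trip sketch (an assumption, not from this file): packing a fid
 * into a lu_fid_pack and recovering it with fid_unpack() below.
 *
 *      struct lu_fid_pack pack;
 *      struct lu_fid      scratch;
 *      struct lu_fid      out;
 *      int                rc;
 *
 *      fid_pack(&pack, fid, &scratch);
 *      rc = fid_unpack(&pack, &out);
 *      LASSERT(rc == 0 && lu_fid_eq(fid, &out));
 */
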
int fid_unpack(const struct lu_fid_pack *pack, struct lu_fid *fid)
{
        int result = 0;

        switch (pack->fp_len) {
        case sizeof *fid + 1:
                memcpy(fid, pack->fp_area, sizeof *fid);
                fid_be_to_cpu(fid, fid);
                break;
        case 6: {
                const unsigned char *area;

                area = pack->fp_area;
                fid->f_seq = (area[0] << 16) | (area[1] << 8) | area[2];
                fid->f_oid = (area[3] << 8) | area[4];
                fid->f_ver = 0;
                break;
        }
        default:
                CERROR("Unexpected packed fid size: %d\n", pack->fp_len);
                result = -EIO;
        }
        return result;
}
EXPORT_SYMBOL(fid_unpack);

const char *lu_time_names[LU_TIME_NR] = {
        [LU_TIME_FIND_LOOKUP] = "find_lookup",
        [LU_TIME_FIND_ALLOC]  = "find_alloc",
        [LU_TIME_FIND_INSERT] = "find_insert"
};
EXPORT_SYMBOL(lu_time_names);