/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * Copyright (C) 2006 Cluster File Systems, Inc.
 * Author: Nikita Danilov <nikita@clusterfs.com>
 *
 * This file is part of the Lustre file system, http://www.lustre.org
 * Lustre is a trademark of Cluster File Systems, Inc.
 *
 * You may have signed or agreed to another license before downloading
 * this software. If so, you are bound by the terms and conditions
 * of that agreement, and the following does not apply to you. See the
 * LICENSE file included with this distribution for more information.
 *
 * If you did not agree to a different license, then this copy of Lustre
 * is open source software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * In either case, Lustre is distributed in the hope that it will be
 * useful, but WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * license text for more details.
 */
/*
 * These are the only exported functions; they provide some generic
 * infrastructure for managing object devices.
 */

#define DEBUG_SUBSYSTEM S_CLASS
#ifndef EXPORT_SYMTAB
# define EXPORT_SYMTAB
#endif

#include <linux/seq_file.h>
#include <linux/module.h>
#include <linux/swap.h>
#include <linux/hash.h>

#include <obd_support.h>
#include <lustre_disk.h>
#include <lustre_fid.h>
#include <lu_object.h>
#include <libcfs/list.h>
/* lu_time_global_{init,fini}() */
#include <lu_time.h>
static void lu_object_free(const struct lu_env *env, struct lu_object *o);
/*
 * Decrease reference counter on object. If last reference is freed, return
 * object to the cache, unless lu_object_is_dying(o) holds. In the latter
 * case, free object immediately.
 */
void lu_object_put(const struct lu_env *env, struct lu_object *o)
{
        struct lu_object_header *top;
        struct lu_site          *site;
        struct lu_object        *orig;
        int                      kill_it;

        top = o->lo_header;
        site = o->lo_dev->ld_site;
        orig = o;
        kill_it = 0;
        write_lock(&site->ls_guard);
        if (atomic_dec_and_test(&top->loh_ref)) {
                /*
                 * When last reference is released, iterate over object
                 * layers, and notify them that object is no longer busy.
                 */
                list_for_each_entry_reverse(o, &top->loh_layers, lo_linkage) {
                        if (o->lo_ops->loo_object_release != NULL)
                                o->lo_ops->loo_object_release(env, o);
                }
                -- site->ls_busy;
                if (lu_object_is_dying(top)) {
                        /*
                         * If object is dying (will not be cached), remove it
                         * from hash table and LRU.
                         *
                         * This is done with hash table and LRU lists
                         * locked. As the only way to acquire first reference
                         * to previously unreferenced object is through
                         * hash-table lookup (lu_object_find()) or LRU
                         * scanning (lu_site_purge()), both of which are done
                         * under the hash-table and LRU lock, no race with a
                         * concurrent object lookup is possible and we can
                         * safely destroy the object being freed.
                         */
                        hlist_del_init(&top->loh_hash);
                        list_del_init(&top->loh_lru);
                        -- site->ls_total;
                        kill_it = 1;
                }
        }
        write_unlock(&site->ls_guard);
        if (kill_it)
                /*
                 * Object was already removed from hash and LRU above and
                 * can be freed safely.
                 */
                lu_object_free(env, orig);
}
EXPORT_SYMBOL(lu_object_put);
/*
 * Allocate new object.
 *
 * This follows object creation protocol, described in the comment within
 * struct lu_device_operations definition.
 */
static struct lu_object *lu_object_alloc(const struct lu_env *env,
                                         struct lu_site *s,
                                         const struct lu_fid *f)
{
        struct lu_object *scan;
        struct lu_object *top;
        struct list_head *layers;
        int clean;
        int result;

        /*
         * Create top-level object slice. This will also create
         * lu_object_header.
         */
        top = s->ls_top_dev->ld_ops->ldo_object_alloc(env,
                                                      NULL, s->ls_top_dev);
        if (top == NULL)
                RETURN(ERR_PTR(-ENOMEM));
        /*
         * This is the only place where object fid is assigned. It's constant
         * after this point.
         */
        LASSERT(fid_is_igif(f) || fid_ver(f) == 0);
        top->lo_header->loh_fid = *f;
        layers = &top->lo_header->loh_layers;
        do {
                /*
                 * Call ->loo_object_init() repeatedly, until no more new
                 * object slices are created.
                 */
                clean = 1;
                list_for_each_entry(scan, layers, lo_linkage) {
                        if (scan->lo_flags & LU_OBJECT_ALLOCATED)
                                continue;
                        clean = 0;
                        scan->lo_header = top->lo_header;
                        result = scan->lo_ops->loo_object_init(env, scan);
                        if (result != 0) {
                                lu_object_free(env, top);
                                RETURN(ERR_PTR(result));
                        }
                        scan->lo_flags |= LU_OBJECT_ALLOCATED;
                }
        } while (!clean);

        list_for_each_entry_reverse(scan, layers, lo_linkage) {
                if (scan->lo_ops->loo_object_start != NULL) {
                        result = scan->lo_ops->loo_object_start(env, scan);
                        if (result != 0) {
                                lu_object_free(env, top);
                                RETURN(ERR_PTR(result));
                        }
                }
        }

        s->ls_stats.s_created++;
        RETURN(top);
}
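/*
 * Illustration (not part of the original file): the allocation protocol
 * above expects each layer's ->loo_object_init() to optionally allocate the
 * slice below it and chain it in with lu_object_add(); lu_object_alloc()
 * then initializes the new slice on its next pass. A hypothetical middle
 * layer could look like this; "my_object_init" and "my_child_device" are
 * made-up names.
 *
 *      static int my_object_init(const struct lu_env *env,
 *                                struct lu_object *o)
 *      {
 *              struct lu_device *under = my_child_device(o->lo_dev);
 *              struct lu_object *below;
 *
 *              below = under->ld_ops->ldo_object_alloc(env,
 *                                                      o->lo_header, under);
 *              if (below == NULL)
 *                      return -ENOMEM;
 *              lu_object_add(o, below);
 *              return 0;
 *      }
 */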
/*
 * Free an object.
 */
static void lu_object_free(const struct lu_env *env, struct lu_object *o)
{
        struct list_head  splice;
        struct lu_object *scan;

        /*
         * First call ->loo_object_delete() method to release all resources.
         */
        list_for_each_entry_reverse(scan,
                                    &o->lo_header->loh_layers, lo_linkage) {
                if (scan->lo_ops->loo_object_delete != NULL)
                        scan->lo_ops->loo_object_delete(env, scan);
        }

        /*
         * Then, splice object layers into stand-alone list, and call
         * ->loo_object_free() on all layers to free memory. Splice is
         * necessary, because lu_object_header is freed together with the
         * top-level slice.
         */
        CFS_INIT_LIST_HEAD(&splice);
        list_splice_init(&o->lo_header->loh_layers, &splice);
        while (!list_empty(&splice)) {
                o = container_of0(splice.next, struct lu_object, lo_linkage);
                list_del_init(&o->lo_linkage);
                LASSERT(o->lo_ops->loo_object_free != NULL);
                o->lo_ops->loo_object_free(env, o);
        }
}
/*
 * Free @nr objects from the cold end of the site LRU list.
 */
int lu_site_purge(const struct lu_env *env, struct lu_site *s, int nr)
{
        struct list_head         dispose;
        struct lu_object_header *h;
        struct lu_object_header *temp;

        CFS_INIT_LIST_HEAD(&dispose);
        /*
         * Under LRU list lock, scan LRU list and move unreferenced objects to
         * the dispose list, removing them from LRU and hash table.
         */
        write_lock(&s->ls_guard);
        list_for_each_entry_safe(h, temp, &s->ls_lru, loh_lru) {
                /*
                 * Objects are sorted in LRU order, and "busy" objects (ones
                 * with h->loh_ref > 0) naturally tend to live near the hot
                 * end that we scan last. Unfortunately, sites usually have a
                 * small (fewer than ten) number of busy yet rarely accessed
                 * objects (some global objects, accessed directly through
                 * pointers, bypassing the hash table). The current algorithm
                 * scans them over and over again. Probably we should move
                 * busy objects out of LRU, or we can live with that.
                 */
                if (nr-- == 0)
                        break;
                if (atomic_read(&h->loh_ref) > 0)
                        continue;
                hlist_del_init(&h->loh_hash);
                list_move(&h->loh_lru, &dispose);
                -- s->ls_total;
        }
        write_unlock(&s->ls_guard);
        /*
         * Free everything on the dispose list. This is safe against races due
         * to the reasons described in lu_object_put().
         */
        while (!list_empty(&dispose)) {
                h = container_of0(dispose.next,
                                  struct lu_object_header, loh_lru);
                list_del_init(&h->loh_lru);
                lu_object_free(env, lu_object_top(h));
                s->ls_stats.s_lru_purged++;
        }
        return nr;
}
EXPORT_SYMBOL(lu_site_purge);
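/*
 * Usage sketch (not from the original file): to drain a site completely,
 * callers in this file pass ~0 as @nr, as lu_stack_fini() does:
 *
 *      lu_site_purge(env, site, ~0);
 *
 * while the cache shrinker purges a bounded number and keeps the remainder:
 *
 *      remain = lu_site_purge(&lu_shrink_env, s, remain);
 */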
/*
 * Code below has to jump through certain loops to output object description
 * into libcfs_debug_msg-based log. The problem is that lu_object_print()
 * composes object description from strings that are parts of _lines_ of
 * output (i.e., strings that are not terminated by newline). This doesn't fit
 * very well into libcfs_debug_msg() interface that assumes that each message
 * supplied to it is a self-contained output line.
 *
 * To work around this, strings are collected in a temporary buffer
 * (implemented as a value of lu_cdebug_key key), until a terminating newline
 * character is detected.
 *
 * XXX overflow is not handled correctly.
 */

enum {
        /*
         * Maximal line size.
         */
        LU_CDEBUG_LINE = 256
};

struct lu_cdebug_data {
        /*
         * Temporary buffer.
         */
        char lck_area[LU_CDEBUG_LINE];
        /*
         * fid staging area used by dt_store_open().
         */
        struct lu_fid_pack lck_pack;
};
/* context key constructor/destructor: lu_global_key_init, lu_global_key_fini */
LU_KEY_INIT_FINI(lu_global, struct lu_cdebug_data);

/*
 * Key, holding temporary buffer. This key is registered very early by
 * lu_global_init().
 */
struct lu_context_key lu_global_key = {
        .lct_tags = LCT_MD_THREAD|LCT_DT_THREAD|LCT_CL_THREAD,
        .lct_init = lu_global_key_init,
        .lct_fini = lu_global_key_fini
};
/*
 * Printer function emitting messages through libcfs_debug_msg().
 */
int lu_cdebug_printer(const struct lu_env *env,
                      void *cookie, const char *format, ...)
{
        struct lu_cdebug_print_info *info = cookie;
        struct lu_cdebug_data       *key;
        int used;
        int complete;
        va_list args;

        va_start(args, format);

        key = lu_context_key_get(&env->le_ctx, &lu_global_key);
        LASSERT(key != NULL);

        used = strlen(key->lck_area);
        complete = format[strlen(format) - 1] == '\n';
        /*
         * Append new chunk to the buffer.
         */
        vsnprintf(key->lck_area + used,
                  ARRAY_SIZE(key->lck_area) - used, format, args);
        if (complete) {
                libcfs_debug_msg(NULL, info->lpi_subsys, info->lpi_mask,
                                 (char *)info->lpi_file, info->lpi_fn,
                                 info->lpi_line, "%s", key->lck_area);
                key->lck_area[0] = 0;
        }
        va_end(args);
        return 0;
}
EXPORT_SYMBOL(lu_cdebug_printer);
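/*
 * Usage sketch (not from the original file): lu_cdebug_printer() is meant to
 * be passed as the lu_printer_t argument of the printing routines below,
 * together with a cookie describing the debug-log location, e.g.:
 *
 *      static DECLARE_LU_CDEBUG_PRINT_INFO(info, D_INFO);
 *
 *      lu_object_print(env, &info, lu_cdebug_printer, obj);
 *
 * where "obj" is a hypothetical object the caller holds a reference on.
 */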
/*
 * Print object header.
 */
static void lu_object_header_print(const struct lu_env *env,
                                   void *cookie, lu_printer_t printer,
                                   const struct lu_object_header *hdr)
{
        (*printer)(env, cookie, "header@%p[%#lx, %d, "DFID"%s%s%s]",
                   hdr, hdr->loh_flags, atomic_read(&hdr->loh_ref),
                   PFID(&hdr->loh_fid),
                   hlist_unhashed(&hdr->loh_hash) ? "" : " hash",
                   list_empty(&hdr->loh_lru) ? "" : " lru",
                   hdr->loh_attr & LOHA_EXISTS ? " exist":"");
}
/*
 * Print human readable representation of the @o to the @printer.
 */
void lu_object_print(const struct lu_env *env, void *cookie,
                     lu_printer_t printer, const struct lu_object *o)
{
        static const char ruler[] = "........................................";
        struct lu_object_header *top;
        int depth;

        top = o->lo_header;
        lu_object_header_print(env, cookie, printer, top);
        (*printer)(env, cookie, "\n");
        list_for_each_entry(o, &top->loh_layers, lo_linkage) {
                depth = o->lo_depth + 4;
                LASSERT(o->lo_ops->loo_object_print != NULL);
                /*
                 * print `.' @depth times.
                 */
                (*printer)(env, cookie, "%*.*s", depth, depth, ruler);
                o->lo_ops->loo_object_print(env, cookie, printer, o);
                (*printer)(env, cookie, "\n");
        }
}
EXPORT_SYMBOL(lu_object_print);
/*
 * Check object consistency.
 */
int lu_object_invariant(const struct lu_object *o)
{
        struct lu_object_header *top;

        top = o->lo_header;
        list_for_each_entry(o, &top->loh_layers, lo_linkage) {
                if (o->lo_ops->loo_object_invariant != NULL &&
                    !o->lo_ops->loo_object_invariant(o))
                        return 0;
        }
        return 1;
}
EXPORT_SYMBOL(lu_object_invariant);
static struct lu_object *htable_lookup(struct lu_site *s,
                                       const struct hlist_head *bucket,
                                       const struct lu_fid *f)
{
        struct lu_object_header *h;
        struct hlist_node       *scan;

        hlist_for_each_entry(h, scan, bucket, loh_hash) {
                s->ls_stats.s_cache_check++;
                if (likely(lu_fid_eq(&h->loh_fid, f) &&
                           !lu_object_is_dying(h))) {
                        /* bump reference count... */
                        if (atomic_add_return(1, &h->loh_ref) == 1)
                                ++ s->ls_busy;
                        /* and move to the head of the LRU */
                        /*
                         * XXX temporarily disabled to measure effects of
                         * read-write locking.
                         */
                        /* list_move_tail(&h->loh_lru, &s->ls_lru); */
                        s->ls_stats.s_cache_hit++;
                        return lu_object_top(h);
                }
        }
        s->ls_stats.s_cache_miss++;
        return NULL;
}
static __u32 fid_hash(const struct lu_fid *f, int bits)
{
        /* all objects with same id and different versions will belong to the
         * same collision list. */
        return hash_long(fid_flatten(f), bits);
}
/*
 * Search cache for an object with the fid @f. If such object is found, return
 * it. Otherwise, create new object, insert it into cache and return it. In
 * any case, additional reference is acquired on the returned object.
 */
struct lu_object *lu_object_find(const struct lu_env *env,
                                 struct lu_site *s, const struct lu_fid *f)
{
        struct lu_object  *o;
        struct lu_object  *shadow;
        struct hlist_head *bucket;

        /*
         * This uses standard index maintenance protocol:
         *
         *     - search index under lock, and return object if found;
         *     - otherwise, unlock index, allocate new object;
         *     - lock index and search again;
         *     - if nothing is found (usual case), insert newly created
         *       object into index;
         *     - otherwise (race: other thread inserted object), free
         *       object just allocated;
         *     - unlock index;
         *     - return object.
         */

        bucket = s->ls_hash + fid_hash(f, s->ls_hash_bits);

        read_lock(&s->ls_guard);
        o = htable_lookup(s, bucket, f);
        read_unlock(&s->ls_guard);

        if (o != NULL)
                return o;

        /*
         * Allocate new object. This may result in rather complicated
         * operations, including fld queries, inode loading, etc.
         */
        o = lu_object_alloc(env, s, f);
        if (unlikely(IS_ERR(o)))
                return o;

        LASSERT(lu_fid_eq(lu_object_fid(o), f));

        write_lock(&s->ls_guard);
        shadow = htable_lookup(s, bucket, f);
        if (likely(shadow == NULL)) {
                hlist_add_head(&o->lo_header->loh_hash, bucket);
                list_add_tail(&o->lo_header->loh_lru, &s->ls_lru);
                ++ s->ls_busy;
                ++ s->ls_total;
                shadow = o;
                o = NULL;
        } else
                s->ls_stats.s_cache_race++;
        write_unlock(&s->ls_guard);
        if (o != NULL)
                lu_object_free(env, o);
        return shadow;
}
EXPORT_SYMBOL(lu_object_find);
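/*
 * Usage sketch (not from the original file): a typical caller sets up an
 * environment, looks the object up, and drops its reference when done.
 * "mysite" and "fid" are hypothetical.
 *
 *      struct lu_env     env;
 *      struct lu_object *obj;
 *      int rc;
 *
 *      rc = lu_env_init(&env, NULL, LCT_MD_THREAD);
 *      if (rc == 0) {
 *              obj = lu_object_find(&env, mysite, fid);
 *              if (!IS_ERR(obj)) {
 *                      ... use the object ...
 *                      lu_object_put(&env, obj);
 *              }
 *              lu_env_fini(&env);
 *      }
 */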
/*
 * Global list of all sites on this node.
 */
static CFS_LIST_HEAD(lu_sites);
static DECLARE_MUTEX(lu_sites_guard);

/*
 * Global environment used by site shrinker.
 */
static struct lu_env lu_shrink_env;
/*
 * Print all objects in @s.
 */
void lu_site_print(const struct lu_env *env, struct lu_site *s, void *cookie,
                   lu_printer_t printer)
{
        int i;

        for (i = 0; i < s->ls_hash_size; ++i) {
                struct lu_object_header *h;
                struct hlist_node       *scan;

                read_lock(&s->ls_guard);
                hlist_for_each_entry(h, scan, &s->ls_hash[i], loh_hash) {
                        if (!list_empty(&h->loh_layers)) {
                                const struct lu_object *obj;

                                obj = lu_object_top(h);
                                lu_object_print(env, cookie, printer, obj);
                        } else
                                lu_object_header_print(env, cookie,
                                                       printer, h);
                }
                read_unlock(&s->ls_guard);
        }
}
EXPORT_SYMBOL(lu_site_print);
enum {
        LU_CACHE_PERCENT = 30,
};

/*
 * Return desired hash table order.
 */
static int lu_htable_order(void)
{
        int bits;
        unsigned long cache_size;

        /*
         * Calculate hash table size, assuming that we want reasonable
         * performance when 30% of available memory is occupied by cache of
         * lu_objects.
         *
         * Size of lu_object is (arbitrary) taken as 1K (together with inode).
         */
        cache_size = ll_nr_free_buffer_pages() / 100 *
                LU_CACHE_PERCENT * (CFS_PAGE_SIZE / 1024);

        for (bits = 1; (1 << bits) < cache_size; ++bits) {
                ;
        }
        return bits;
}
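/*
 * Worked example (not from the original file): on a node with 1 GiB of free
 * buffer pages and a CFS_PAGE_SIZE of 4 KiB, ll_nr_free_buffer_pages()
 * would return about 262144, so
 *
 *      cache_size = 262144 / 100 * 30 * (4096 / 1024) = 314520
 *
 * i.e. roughly 300K cacheable 1K objects, and the loop above yields
 * bits = 19, since 2^19 = 524288 is the first power of two >= 314520.
 */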
/*
 * Initialize site @s, with @top as the top level device.
 */
int lu_site_init(struct lu_site *s, struct lu_device *top)
{
        int bits;
        int size;
        int i;

        memset(s, 0, sizeof *s);
        rwlock_init(&s->ls_guard);
        CFS_INIT_LIST_HEAD(&s->ls_lru);
        CFS_INIT_LIST_HEAD(&s->ls_linkage);
        s->ls_top_dev = top;
        top->ld_site = s;
        lu_device_get(top);

        for (bits = lu_htable_order(), size = 1 << bits;
             (s->ls_hash =
              cfs_alloc_large(size * sizeof s->ls_hash[0])) == NULL;
             --bits, size >>= 1) {
                /*
                 * Scale hash table down, until allocation succeeds.
                 */
                LASSERT(bits > 0);
        }

        s->ls_hash_size = size;
        s->ls_hash_bits = bits;
        s->ls_hash_mask = size - 1;

        for (i = 0; i < size; i++)
                INIT_HLIST_HEAD(&s->ls_hash[i]);

        return 0;
}
EXPORT_SYMBOL(lu_site_init);
/*
 * Finalize @s and release its resources.
 */
void lu_site_fini(struct lu_site *s)
{
        LASSERT(list_empty(&s->ls_lru));
        LASSERT(s->ls_total == 0);

        down(&lu_sites_guard);
        list_del_init(&s->ls_linkage);
        up(&lu_sites_guard);

        if (s->ls_hash != NULL) {
                int i;

                for (i = 0; i < s->ls_hash_size; i++)
                        LASSERT(hlist_empty(&s->ls_hash[i]));
                cfs_free_large(s->ls_hash);
                s->ls_hash = NULL;
        }
        if (s->ls_top_dev != NULL) {
                s->ls_top_dev->ld_site = NULL;
                lu_device_put(s->ls_top_dev);
                s->ls_top_dev = NULL;
        }
}
EXPORT_SYMBOL(lu_site_fini);
/*
 * Called when initialization of stack for this site is completed.
 */
int lu_site_init_finish(struct lu_site *s)
{
        int result;

        down(&lu_sites_guard);
        result = lu_context_refill(&lu_shrink_env.le_ctx);
        if (result == 0)
                list_add(&s->ls_linkage, &lu_sites);
        up(&lu_sites_guard);
        return result;
}
EXPORT_SYMBOL(lu_site_init_finish);
/*
 * Acquire additional reference on device @d.
 */
void lu_device_get(struct lu_device *d)
{
        atomic_inc(&d->ld_ref);
}
EXPORT_SYMBOL(lu_device_get);

/*
 * Release reference on device @d.
 */
void lu_device_put(struct lu_device *d)
{
        atomic_dec(&d->ld_ref);
}
EXPORT_SYMBOL(lu_device_put);
/*
 * Initialize device @d of type @t.
 */
int lu_device_init(struct lu_device *d, struct lu_device_type *t)
{
        memset(d, 0, sizeof *d);
        atomic_set(&d->ld_ref, 0);
        d->ld_type = t;
        return 0;
}
EXPORT_SYMBOL(lu_device_init);

/*
 * Finalize device @d.
 */
void lu_device_fini(struct lu_device *d)
{
        if (d->ld_obd != NULL)
                /* finish lprocfs */
                lprocfs_obd_cleanup(d->ld_obd);

        LASSERTF(atomic_read(&d->ld_ref) == 0,
                 "Refcount is %u\n", atomic_read(&d->ld_ref));
}
EXPORT_SYMBOL(lu_device_fini);
/*
 * Initialize object @o that is part of compound object @h and was created by
 * device @d.
 */
int lu_object_init(struct lu_object *o,
                   struct lu_object_header *h, struct lu_device *d)
{
        memset(o, 0, sizeof *o);
        o->lo_header = h;
        o->lo_dev    = d;
        lu_device_get(d);
        CFS_INIT_LIST_HEAD(&o->lo_linkage);
        return 0;
}
EXPORT_SYMBOL(lu_object_init);

/*
 * Finalize object and release its resources.
 */
void lu_object_fini(struct lu_object *o)
{
        LASSERT(list_empty(&o->lo_linkage));

        if (o->lo_dev != NULL) {
                lu_device_put(o->lo_dev);
                o->lo_dev = NULL;
        }
}
EXPORT_SYMBOL(lu_object_fini);
/*
 * Add object @o as first layer of compound object @h.
 *
 * This is typically called by the ->ldo_object_alloc() method of top-level
 * device.
 */
void lu_object_add_top(struct lu_object_header *h, struct lu_object *o)
{
        list_move(&o->lo_linkage, &h->loh_layers);
}
EXPORT_SYMBOL(lu_object_add_top);

/*
 * Add object @o as a layer of compound object, going after @before.
 *
 * This is typically called by the ->ldo_object_alloc() method of a
 * lower device.
 */
void lu_object_add(struct lu_object *before, struct lu_object *o)
{
        list_move(&o->lo_linkage, &before->lo_linkage);
}
EXPORT_SYMBOL(lu_object_add);
/*
 * Initialize compound object.
 */
int lu_object_header_init(struct lu_object_header *h)
{
        memset(h, 0, sizeof *h);
        atomic_set(&h->loh_ref, 1);
        INIT_HLIST_NODE(&h->loh_hash);
        CFS_INIT_LIST_HEAD(&h->loh_lru);
        CFS_INIT_LIST_HEAD(&h->loh_layers);
        return 0;
}
EXPORT_SYMBOL(lu_object_header_init);

/*
 * Finalize compound object.
 */
void lu_object_header_fini(struct lu_object_header *h)
{
        LASSERT(list_empty(&h->loh_layers));
        LASSERT(list_empty(&h->loh_lru));
        LASSERT(hlist_unhashed(&h->loh_hash));
}
EXPORT_SYMBOL(lu_object_header_fini);
/*
 * Given a compound object, find its slice, corresponding to the device type
 * @dtype.
 */
struct lu_object *lu_object_locate(struct lu_object_header *h,
                                   struct lu_device_type *dtype)
{
        struct lu_object *o;

        list_for_each_entry(o, &h->loh_layers, lo_linkage) {
                if (o->lo_dev->ld_type == dtype)
                        return o;
        }
        return NULL;
}
EXPORT_SYMBOL(lu_object_locate);
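/*
 * Usage sketch (not from the original file): given a compound object, a
 * layer can fetch the slice belonging to a particular device type, e.g.
 * with a hypothetical type "my_device_type":
 *
 *      struct lu_object *slice;
 *
 *      slice = lu_object_locate(obj->lo_header, &my_device_type);
 *      if (slice != NULL)
 *              ... slice is the layer created by a device of that type ...
 */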
/*
 * Finalize and free devices in the device stack.
 *
 * Finalize device stack by purging object cache, and calling
 * lu_device_type_operations::ldto_device_fini() and
 * lu_device_type_operations::ldto_device_free() on all devices in the stack.
 */
void lu_stack_fini(const struct lu_env *env, struct lu_device *top)
{
        struct lu_site   *site = top->ld_site;
        struct lu_device *scan;
        struct lu_device *next;

        lu_site_purge(env, site, ~0);
        for (scan = top; scan != NULL; scan = next)
                next = scan->ld_type->ldt_ops->ldto_device_fini(env, scan);

        /* purge again. */
        lu_site_purge(env, site, ~0);

        if (!list_empty(&site->ls_lru) || site->ls_total != 0) {
                /*
                 * Uh-oh, objects still exist.
                 */
                static DECLARE_LU_CDEBUG_PRINT_INFO(cookie, D_ERROR);

                lu_site_print(env, site, &cookie, lu_cdebug_printer);
        }

        for (scan = top; scan != NULL; scan = next) {
                const struct lu_device_type *ldt = scan->ld_type;
                struct obd_type             *type;

                next = ldt->ldt_ops->ldto_device_free(env, scan);
                type = ldt->ldt_obd_type;
                type->typ_refcnt--;
                class_put_type(type);
        }
}
EXPORT_SYMBOL(lu_stack_fini);
enum {
        /*
         * Maximal number of tld slots.
         */
        LU_CONTEXT_KEY_NR = 16
};

static struct lu_context_key *lu_keys[LU_CONTEXT_KEY_NR] = { NULL, };

static spinlock_t lu_keys_guard = SPIN_LOCK_UNLOCKED;
/*
 * Register global context key.
 */
int lu_context_key_register(struct lu_context_key *key)
{
        int result;
        int i;

        LASSERT(key->lct_init != NULL);
        LASSERT(key->lct_fini != NULL);
        LASSERT(key->lct_tags != 0);
        LASSERT(key->lct_owner != NULL);

        result = -ENFILE;
        spin_lock(&lu_keys_guard);
        for (i = 0; i < ARRAY_SIZE(lu_keys); ++i) {
                if (lu_keys[i] == NULL) {
                        key->lct_index = i;
                        atomic_set(&key->lct_used, 1);
                        lu_keys[i] = key;
                        result = 0;
                        break;
                }
        }
        spin_unlock(&lu_keys_guard);
        return result;
}
EXPORT_SYMBOL(lu_context_key_register);
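/*
 * Usage sketch (not from the original file): a module defines a key for its
 * per-context scratch data and registers it at module init time, mirroring
 * what lu_global_init() does for lu_global_key below. The names
 * "my_thread_info", "my_thread" and "my_mod_init" are hypothetical.
 *
 *      struct my_thread_info {
 *              char mti_buf[64];
 *      };
 *
 *      // generates my_thread_key_init()/my_thread_key_fini()
 *      LU_KEY_INIT_FINI(my_thread, struct my_thread_info);
 *
 *      static struct lu_context_key my_thread_key = {
 *              .lct_tags = LCT_MD_THREAD,
 *              .lct_init = my_thread_key_init,
 *              .lct_fini = my_thread_key_fini
 *      };
 *
 *      static int my_mod_init(void)
 *      {
 *              LU_CONTEXT_KEY_INIT(&my_thread_key);
 *              return lu_context_key_register(&my_thread_key);
 *      }
 */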
static void key_fini(struct lu_context *ctx, int index)
{
        if (ctx->lc_value[index] != NULL) {
                struct lu_context_key *key;

                key = lu_keys[index];
                LASSERT(key != NULL);
                LASSERT(key->lct_fini != NULL);
                LASSERT(atomic_read(&key->lct_used) > 1);

                key->lct_fini(ctx, key, ctx->lc_value[index]);
                atomic_dec(&key->lct_used);
                LASSERT(key->lct_owner != NULL);
                if (!(ctx->lc_tags & LCT_NOREF)) {
                        LASSERT(module_refcount(key->lct_owner) > 0);
                        module_put(key->lct_owner);
                }
                ctx->lc_value[index] = NULL;
        }
}
/*
 * Deregister key.
 */
void lu_context_key_degister(struct lu_context_key *key)
{
        LASSERT(atomic_read(&key->lct_used) >= 1);
        LASSERT(0 <= key->lct_index && key->lct_index < ARRAY_SIZE(lu_keys));

        key_fini(&lu_shrink_env.le_ctx, key->lct_index);

        if (atomic_read(&key->lct_used) > 1)
                CERROR("key has instances.\n");
        spin_lock(&lu_keys_guard);
        lu_keys[key->lct_index] = NULL;
        spin_unlock(&lu_keys_guard);
}
EXPORT_SYMBOL(lu_context_key_degister);
/*
 * Return value associated with key @key in context @ctx.
 */
void *lu_context_key_get(const struct lu_context *ctx,
                         struct lu_context_key *key)
{
        LASSERT(ctx->lc_state == LCS_ENTERED);
        LASSERT(0 <= key->lct_index && key->lct_index < ARRAY_SIZE(lu_keys));
        return ctx->lc_value[key->lct_index];
}
EXPORT_SYMBOL(lu_context_key_get);
static void keys_fini(struct lu_context *ctx)
{
        int i;

        if (ctx->lc_value != NULL) {
                for (i = 0; i < ARRAY_SIZE(lu_keys); ++i)
                        key_fini(ctx, i);
                OBD_FREE(ctx->lc_value,
                         ARRAY_SIZE(lu_keys) * sizeof ctx->lc_value[0]);
                ctx->lc_value = NULL;
        }
}
static int keys_fill(const struct lu_context *ctx)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(lu_keys); ++i) {
                struct lu_context_key *key;

                key = lu_keys[i];
                if (ctx->lc_value[i] == NULL &&
                    key != NULL && key->lct_tags & ctx->lc_tags) {
                        void *value;

                        LASSERT(key->lct_init != NULL);
                        LASSERT(key->lct_index == i);

                        value = key->lct_init(ctx, key);
                        if (unlikely(IS_ERR(value)))
                                return PTR_ERR(value);
                        LASSERT(key->lct_owner != NULL);
                        if (!(ctx->lc_tags & LCT_NOREF))
                                try_module_get(key->lct_owner);
                        atomic_inc(&key->lct_used);
                        ctx->lc_value[i] = value;
                }
        }
        return 0;
}
static int keys_init(struct lu_context *ctx)
{
        int result;

        OBD_ALLOC(ctx->lc_value,
                  ARRAY_SIZE(lu_keys) * sizeof ctx->lc_value[0]);
        if (likely(ctx->lc_value != NULL))
                result = keys_fill(ctx);
        else
                result = -ENOMEM;
        return result;
}
/*
 * Initialize context data-structure. Create values for all keys.
 */
int lu_context_init(struct lu_context *ctx, __u32 tags)
{
        memset(ctx, 0, sizeof *ctx);
        ctx->lc_state = LCS_INITIALIZED;
        ctx->lc_tags = tags;
        return keys_init(ctx);
}
EXPORT_SYMBOL(lu_context_init);

/*
 * Finalize context data-structure. Destroy key values.
 */
void lu_context_fini(struct lu_context *ctx)
{
        LASSERT(ctx->lc_state == LCS_INITIALIZED || ctx->lc_state == LCS_LEFT);
        ctx->lc_state = LCS_FINALIZED;
        keys_fini(ctx);
}
EXPORT_SYMBOL(lu_context_fini);
/*
 * Called before entering context.
 */
void lu_context_enter(struct lu_context *ctx)
{
        LASSERT(ctx->lc_state == LCS_INITIALIZED || ctx->lc_state == LCS_LEFT);
        ctx->lc_state = LCS_ENTERED;
}
EXPORT_SYMBOL(lu_context_enter);

/*
 * Called after exiting from @ctx.
 */
void lu_context_exit(struct lu_context *ctx)
{
        int i;

        LASSERT(ctx->lc_state == LCS_ENTERED);
        ctx->lc_state = LCS_LEFT;
        if (ctx->lc_value != NULL) {
                for (i = 0; i < ARRAY_SIZE(lu_keys); ++i) {
                        if (ctx->lc_value[i] != NULL) {
                                struct lu_context_key *key;

                                key = lu_keys[i];
                                LASSERT(key != NULL);
                                if (key->lct_exit != NULL)
                                        key->lct_exit(ctx,
                                                      key, ctx->lc_value[i]);
                        }
                }
        }
}
EXPORT_SYMBOL(lu_context_exit);
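/*
 * Lifecycle sketch (not from the original file): a context is initialized
 * once, entered and exited around each period of use, and finalized when no
 * longer needed; key values may only be fetched between enter and exit.
 * "my_thread_key" is the hypothetical key from the example above.
 *
 *      struct lu_context ctx;
 *
 *      if (lu_context_init(&ctx, LCT_MD_THREAD) == 0) {
 *              lu_context_enter(&ctx);
 *              info = lu_context_key_get(&ctx, &my_thread_key);
 *              ... use info ...
 *              lu_context_exit(&ctx);
 *              lu_context_fini(&ctx);
 *      }
 */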
/*
 * Allocate for context all missing keys that were registered after context
 * creation.
 */
int lu_context_refill(const struct lu_context *ctx)
{
        LASSERT(ctx->lc_value != NULL);
        return keys_fill(ctx);
}
EXPORT_SYMBOL(lu_context_refill);
static int lu_env_setup(struct lu_env *env, struct lu_context *ses,
                        __u32 tags, int noref)
{
        int result;

        LASSERT(ergo(!noref, !(tags & LCT_NOREF)));

        env->le_ses = ses;
        result = lu_context_init(&env->le_ctx, tags);
        if (likely(result == 0))
                lu_context_enter(&env->le_ctx);
        return result;
}

static int lu_env_init_noref(struct lu_env *env, struct lu_context *ses,
                             __u32 tags)
{
        return lu_env_setup(env, ses, tags, 1);
}

int lu_env_init(struct lu_env *env, struct lu_context *ses, __u32 tags)
{
        return lu_env_setup(env, ses, tags, 0);
}
EXPORT_SYMBOL(lu_env_init);

void lu_env_fini(struct lu_env *env)
{
        lu_context_exit(&env->le_ctx);
        lu_context_fini(&env->le_ctx);
        env->le_ses = NULL;
}
EXPORT_SYMBOL(lu_env_fini);
static int lu_cache_shrink(int nr, unsigned int gfp_mask)
{
        struct lu_site *s;
        struct lu_site *tmp;
        int cached = 0;
        int remain = nr;
        CFS_LIST_HEAD(splice);

        if (nr != 0 && !(gfp_mask & __GFP_FS))
                return -1;

        down(&lu_sites_guard);
        list_for_each_entry_safe(s, tmp, &lu_sites, ls_linkage) {
                if (nr != 0) {
                        remain = lu_site_purge(&lu_shrink_env, s, remain);
                        /*
                         * Move just shrunk site to the tail of site list to
                         * assure shrinking fairness.
                         */
                        list_move_tail(&s->ls_linkage, &splice);
                }
                read_lock(&s->ls_guard);
                cached += s->ls_total - s->ls_busy;
                read_unlock(&s->ls_guard);
        }
        list_splice(&splice, lu_sites.prev);
        up(&lu_sites_guard);
        return cached;
}
static struct shrinker *lu_site_shrinker = NULL;
/*
 * Initialization of global lu_* data.
 */
int lu_global_init(void)
{
        int result;

        LU_CONTEXT_KEY_INIT(&lu_global_key);
        result = lu_context_key_register(&lu_global_key);
        if (result != 0)
                return result;
        /*
         * At this level, we don't know what tags are needed, so
         * allocate them conservatively. This should not be too bad,
         * because this environment is global.
         */
        down(&lu_sites_guard);
        result = lu_env_init_noref(&lu_shrink_env, NULL, LCT_SHRINKER);
        up(&lu_sites_guard);
        if (result != 0)
                return result;
        /*
         * seeks estimation: 3 seeks to read a record from oi,
         * one to read inode, one for ea. Unfortunately
         * setting this high value results in lu_object/inode
         * cache consuming all the memory.
         */
        lu_site_shrinker = set_shrinker(DEFAULT_SEEKS, lu_cache_shrink);
        if (lu_site_shrinker == NULL)
                return -ENOMEM;

        result = lu_time_global_init();
        return result;
}
/*
 * Dual to lu_global_init().
 */
void lu_global_fini(void)
{
        lu_time_global_fini();
        if (lu_site_shrinker != NULL) {
                remove_shrinker(lu_site_shrinker);
                lu_site_shrinker = NULL;
        }

        lu_context_key_degister(&lu_global_key);

        /*
         * Tear shrinker environment down _after_ de-registering
         * lu_global_key, because the latter has a value in the former.
         */
        down(&lu_sites_guard);
        lu_env_fini(&lu_shrink_env);
        up(&lu_sites_guard);
}
struct lu_buf LU_BUF_NULL = {
        .lb_buf = NULL,
        .lb_len = 0
};
EXPORT_SYMBOL(LU_BUF_NULL);
/*
 * XXX: Functions below logically belong to fid module, but they are used by
 * dt_store_open(). Put them here until better place is found.
 */

void fid_pack(struct lu_fid_pack *pack, const struct lu_fid *fid,
              struct lu_fid *befider)
{
        int recsize;
        __u64 seq;
        __u64 oid;

        seq = fid_seq(fid);
        oid = fid_oid(fid);

        /*
         * Two cases: compact 6 bytes representation for a common case, and
         * full 17 byte representation for "unusual" fid.
         */

        /*
         * Check that usual case is really usual.
         */
        CLASSERT(LUSTRE_SEQ_MAX_WIDTH < 0xffffull);

        if (fid_is_igif(fid) ||
            seq > 0xffffffull || oid > 0xffff || fid_ver(fid) != 0) {
                fid_cpu_to_be(befider, fid);
                recsize = sizeof *befider;
        } else {
                unsigned char *small_befider;

                small_befider = (unsigned char *)befider;

                small_befider[0] = seq >> 16;
                small_befider[1] = seq >> 8;
                small_befider[2] = seq;

                small_befider[3] = oid >> 8;
                small_befider[4] = oid;

                recsize = 5;
        }
        memcpy(pack->fp_area, befider, recsize);
        pack->fp_len = recsize + 1;
}
EXPORT_SYMBOL(fid_pack);
int fid_unpack(const struct lu_fid_pack *pack, struct lu_fid *fid)
{
        int result;

        result = 0;
        switch (pack->fp_len) {
        case sizeof *fid + 1:
                memcpy(fid, pack->fp_area, sizeof *fid);
                fid_be_to_cpu(fid, fid);
                break;
        case 6: {
                const unsigned char *area;

                area = pack->fp_area;
                fid->f_seq = (area[0] << 16) | (area[1] << 8) | area[2];
                fid->f_oid = (area[3] << 8) | area[4];
                fid->f_ver = 0;
                break;
        }
        default:
                CERROR("Unexpected packed fid size: %d\n", pack->fp_len);
                result = -EIO;
        }
        return result;
}
EXPORT_SYMBOL(fid_unpack);
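/*
 * Round-trip sketch (not from the original file): packing a fid and
 * unpacking it again yields the original value; a fid with small seq/oid
 * that is not an igif takes the compact form (fp_len == 6), other fids the
 * full 17-byte form. The seq value below is illustrative only.
 *
 *      struct lu_fid      fid = { .f_seq = seq, .f_oid = 7, .f_ver = 0 };
 *      struct lu_fid      scratch;
 *      struct lu_fid_pack pack;
 *      struct lu_fid      out;
 *
 *      fid_pack(&pack, &fid, &scratch);
 *      if (fid_unpack(&pack, &out) == 0)
 *              LASSERT(lu_fid_eq(&fid, &out));
 */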
const char *lu_time_names[LU_TIME_NR] = {
        [LU_TIME_FIND_LOOKUP] = "find_lookup",
        [LU_TIME_FIND_ALLOC]  = "find_alloc",
        [LU_TIME_FIND_INSERT] = "find_insert"
};
EXPORT_SYMBOL(lu_time_names);