LU-1017 handle -EAGAIN properly in lu_object_find_try()
fs/lustre-release.git: lustre/obdclass/lu_object.c
/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 */
/*
 * Copyright (c) 2011 Whamcloud, Inc.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lustre/obdclass/lu_object.c
 *
 * Lustre Object.
 * These are the only exported functions; they provide generic
 * infrastructure for managing object devices.
 *
 *   Author: Nikita Danilov <nikita.danilov@sun.com>
 */

#define DEBUG_SUBSYSTEM S_CLASS
#ifndef EXPORT_SYMTAB
# define EXPORT_SYMTAB
#endif

#include <libcfs/libcfs.h>

#ifdef __KERNEL__
# include <linux/module.h>
#endif

/* hash_long() */
#include <libcfs/libcfs_hash.h>
#include <obd_class.h>
#include <obd_support.h>
#include <lustre_disk.h>
#include <lustre_fid.h>
#include <lu_object.h>
#include <libcfs/list.h>
/* lu_time_global_{init,fini}() */
#include <lu_time.h>

static void lu_object_free(const struct lu_env *env, struct lu_object *o);

/**
 * Decrease reference counter on object. If last reference is freed, return
 * object to the cache, unless lu_object_is_dying(o) holds. In the latter
 * case, free object immediately.
 */
void lu_object_put(const struct lu_env *env, struct lu_object *o)
{
        struct lu_site_bkt_data *bkt;
        struct lu_object_header *top;
        struct lu_site          *site;
        struct lu_object        *orig;
        cfs_hash_bd_t            bd;

        top  = o->lo_header;
        site = o->lo_dev->ld_site;
        orig = o;

        cfs_hash_bd_get(site->ls_obj_hash, &top->loh_fid, &bd);
        bkt = cfs_hash_bd_extra_get(site->ls_obj_hash, &bd);

        if (!cfs_hash_bd_dec_and_lock(site->ls_obj_hash, &bd, &top->loh_ref)) {
                if (lu_object_is_dying(top)) {

                        /*
                         * somebody may be waiting for this, currently only
                         * used for cl_object, see cl_object_put_last().
                         */
                        cfs_waitq_broadcast(&bkt->lsb_marche_funebre);
                }
                return;
        }

        LASSERT(bkt->lsb_busy > 0);
        bkt->lsb_busy--;
        /*
         * When last reference is released, iterate over object
         * layers, and notify them that object is no longer busy.
         */
        cfs_list_for_each_entry_reverse(o, &top->loh_layers, lo_linkage) {
                if (o->lo_ops->loo_object_release != NULL)
                        o->lo_ops->loo_object_release(env, o);
        }

        if (!lu_object_is_dying(top)) {
                LASSERT(cfs_list_empty(&top->loh_lru));
                cfs_list_add_tail(&top->loh_lru, &bkt->lsb_lru);
                cfs_hash_bd_unlock(site->ls_obj_hash, &bd, 1);
                return;
        }

        /*
         * If object is dying (will not be cached), remove it
         * from hash table and LRU.
         *
         * This is done with hash table and LRU lists locked. As the only
         * way to acquire first reference to previously unreferenced
         * object is through hash-table lookup (lu_object_find()),
         * or LRU scanning (lu_site_purge()), both done under the hash-table
         * and LRU locks, no race with concurrent object lookup is possible
         * and we can safely destroy object below.
         */
        cfs_hash_bd_del_locked(site->ls_obj_hash, &bd, &top->loh_hash);
        cfs_hash_bd_unlock(site->ls_obj_hash, &bd, 1);
        /*
         * Object was already removed from hash and lru above, can
         * kill it.
         */
        lu_object_free(env, orig);
}
EXPORT_SYMBOL(lu_object_put);
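
/*
 * A usage sketch (hypothetical caller, not part of this file): callers
 * normally obtain a reference through one of the lu_object_find*()
 * functions below and drop it with lu_object_put() when done:
 *
 *      obj = lu_object_find(env, dev, fid, conf);
 *      if (!IS_ERR(obj)) {
 *              ... use the object ...
 *              lu_object_put(env, obj);
 *      }
 */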

/**
 * Allocate new object.
 *
 * This follows object creation protocol, described in the comment within
 * struct lu_device_operations definition.
 */
static struct lu_object *lu_object_alloc(const struct lu_env *env,
                                         struct lu_device *dev,
                                         const struct lu_fid *f,
                                         const struct lu_object_conf *conf)
{
        struct lu_object *scan;
        struct lu_object *top;
        cfs_list_t *layers;
        int clean;
        int result;
        ENTRY;

        /*
         * Create top-level object slice. This will also create
         * lu_object_header.
         */
        top = dev->ld_ops->ldo_object_alloc(env, NULL, dev);
        if (top == NULL)
                RETURN(ERR_PTR(-ENOMEM));
        /*
         * This is the only place where object fid is assigned. It's constant
         * after this point.
         */
        LASSERT(fid_is_igif(f) || fid_ver(f) == 0);
        top->lo_header->loh_fid = *f;
        layers = &top->lo_header->loh_layers;
        do {
                /*
                 * Call ->loo_object_init() repeatedly, until no more new
                 * object slices are created.
                 */
                clean = 1;
                cfs_list_for_each_entry(scan, layers, lo_linkage) {
                        if (scan->lo_flags & LU_OBJECT_ALLOCATED)
                                continue;
                        clean = 0;
                        scan->lo_header = top->lo_header;
                        result = scan->lo_ops->loo_object_init(env, scan, conf);
                        if (result != 0) {
                                lu_object_free(env, top);
                                RETURN(ERR_PTR(result));
                        }
                        scan->lo_flags |= LU_OBJECT_ALLOCATED;
                }
        } while (!clean);

        cfs_list_for_each_entry_reverse(scan, layers, lo_linkage) {
                if (scan->lo_ops->loo_object_start != NULL) {
                        result = scan->lo_ops->loo_object_start(env, scan);
                        if (result != 0) {
                                lu_object_free(env, top);
                                RETURN(ERR_PTR(result));
                        }
                }
        }

        lprocfs_counter_incr(dev->ld_site->ls_stats, LU_SS_CREATED);
        RETURN(top);
}

/**
 * Free an object.
 */
static void lu_object_free(const struct lu_env *env, struct lu_object *o)
{
        struct lu_site_bkt_data *bkt;
        struct lu_site          *site;
        struct lu_object        *scan;
        cfs_list_t              *layers;
        cfs_list_t               splice;

        site   = o->lo_dev->ld_site;
        layers = &o->lo_header->loh_layers;
        bkt    = lu_site_bkt_from_fid(site, &o->lo_header->loh_fid);
        /*
         * First call ->loo_object_delete() method to release all resources.
         */
        cfs_list_for_each_entry_reverse(scan, layers, lo_linkage) {
                if (scan->lo_ops->loo_object_delete != NULL)
                        scan->lo_ops->loo_object_delete(env, scan);
        }

        /*
         * Then, splice object layers into stand-alone list, and call
         * ->loo_object_free() on all layers to free memory. Splice is
         * necessary, because lu_object_header is freed together with the
         * top-level slice.
         */
        CFS_INIT_LIST_HEAD(&splice);
        cfs_list_splice_init(layers, &splice);
        while (!cfs_list_empty(&splice)) {
                /*
                 * Free layers in bottom-to-top order, so that object header
                 * lives as long as possible and ->loo_object_free() methods
                 * can look at its contents.
                 */
                o = container_of0(splice.prev, struct lu_object, lo_linkage);
                cfs_list_del_init(&o->lo_linkage);
                LASSERT(o->lo_ops->loo_object_free != NULL);
                o->lo_ops->loo_object_free(env, o);
        }

        if (cfs_waitq_active(&bkt->lsb_marche_funebre))
                cfs_waitq_broadcast(&bkt->lsb_marche_funebre);
}

/**
 * Free \a nr objects from the cold end of the site LRU list.
 */
int lu_site_purge(const struct lu_env *env, struct lu_site *s, int nr)
{
        struct lu_object_header *h;
        struct lu_object_header *temp;
        struct lu_site_bkt_data *bkt;
        cfs_hash_bd_t            bd;
        cfs_hash_bd_t            bd2;
        cfs_list_t               dispose;
        int                      did_sth;
        int                      start;
        int                      count;
        int                      bnr;
        int                      i;

        CFS_INIT_LIST_HEAD(&dispose);
        /*
         * Under LRU list lock, scan LRU list and move unreferenced objects to
         * the dispose list, removing them from LRU and hash table.
         */
        start = s->ls_purge_start;
        bnr = (nr == ~0) ? -1 : nr / CFS_HASH_NBKT(s->ls_obj_hash) + 1;
 again:
        did_sth = 0;
        cfs_hash_for_each_bucket(s->ls_obj_hash, &bd, i) {
                if (i < start)
                        continue;
                count = bnr;
                cfs_hash_bd_lock(s->ls_obj_hash, &bd, 1);
                bkt = cfs_hash_bd_extra_get(s->ls_obj_hash, &bd);

                cfs_list_for_each_entry_safe(h, temp, &bkt->lsb_lru, loh_lru) {
                        LASSERT(cfs_atomic_read(&h->loh_ref) == 0);

                        cfs_hash_bd_get(s->ls_obj_hash, &h->loh_fid, &bd2);
                        LASSERT(bd.bd_bucket == bd2.bd_bucket);

                        cfs_hash_bd_del_locked(s->ls_obj_hash,
                                               &bd2, &h->loh_hash);
                        cfs_list_move(&h->loh_lru, &dispose);
                        if (did_sth == 0)
                                did_sth = 1;

                        if (nr != ~0 && --nr == 0)
                                break;

                        if (count > 0 && --count == 0)
                                break;

                }
                cfs_hash_bd_unlock(s->ls_obj_hash, &bd, 1);
                cfs_cond_resched();
                /*
                 * Free everything on the dispose list. This is safe against
                 * races due to the reasons described in lu_object_put().
                 */
                while (!cfs_list_empty(&dispose)) {
                        h = container_of0(dispose.next,
                                          struct lu_object_header, loh_lru);
                        cfs_list_del_init(&h->loh_lru);
                        lu_object_free(env, lu_object_top(h));
                        lprocfs_counter_incr(s->ls_stats, LU_SS_LRU_PURGED);
                }

                if (nr == 0)
                        break;
        }

        if (nr != 0 && did_sth && start != 0) {
                start = 0; /* restart from the first bucket */
                goto again;
        }
        /* race on s->ls_purge_start, but nobody cares */
        s->ls_purge_start = i % CFS_HASH_NBKT(s->ls_obj_hash);

        return nr;
}
EXPORT_SYMBOL(lu_site_purge);
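
/*
 * A usage sketch: passing ~0 asks lu_site_purge() to drain the whole cache.
 * lu_stack_fini() below does exactly this to empty a site before the device
 * stack is torn down:
 *
 *      lu_site_purge(env, site, ~0);
 */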

/*
 * Object printing.
 *
 * Code below has to jump through certain hoops to output object description
 * into libcfs_debug_msg-based log. The problem is that lu_object_print()
 * composes object description from strings that are parts of _lines_ of
 * output (i.e., strings that are not terminated by newline). This doesn't fit
 * very well into the libcfs_debug_msg() interface, which assumes that each
 * message supplied to it is a self-contained output line.
 *
 * To work around this, strings are collected in a temporary buffer
 * (implemented as a value of lu_cdebug_key key), until a terminating newline
 * character is detected.
 */

enum {
        /**
         * Maximal line size.
         *
         * XXX overflow is not handled correctly.
         */
        LU_CDEBUG_LINE = 512
};

struct lu_cdebug_data {
        /**
         * Temporary buffer.
         */
        char lck_area[LU_CDEBUG_LINE];
};

/* context key constructor/destructor: lu_global_key_init, lu_global_key_fini */
LU_KEY_INIT_FINI(lu_global, struct lu_cdebug_data);

/**
 * Key, holding temporary buffer. This key is registered very early by
 * lu_global_init().
 */
struct lu_context_key lu_global_key = {
        .lct_tags = LCT_MD_THREAD|LCT_DT_THREAD|LCT_CL_THREAD,
        .lct_init = lu_global_key_init,
        .lct_fini = lu_global_key_fini
};

/**
 * Printer function emitting messages through libcfs_debug_msg().
 */
int lu_cdebug_printer(const struct lu_env *env,
                      void *cookie, const char *format, ...)
{
        struct lu_cdebug_print_info *info = cookie;
        struct lu_cdebug_data       *key;
        int used;
        int complete;
        va_list args;

        va_start(args, format);

        key = lu_context_key_get(&env->le_ctx, &lu_global_key);
        LASSERT(key != NULL);

        used = strlen(key->lck_area);
        complete = format[strlen(format) - 1] == '\n';
        /*
         * Append new chunk to the buffer.
         */
        vsnprintf(key->lck_area + used,
                  ARRAY_SIZE(key->lck_area) - used, format, args);
        if (complete) {
                if (cfs_cdebug_show(info->lpi_mask, info->lpi_subsys))
                        libcfs_debug_msg(NULL, info->lpi_subsys, info->lpi_mask,
                                         (char *)info->lpi_file, info->lpi_fn,
                                         info->lpi_line, "%s", key->lck_area);
                key->lck_area[0] = 0;
        }
        va_end(args);
        return 0;
}
EXPORT_SYMBOL(lu_cdebug_printer);
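
/*
 * A usage sketch: lu_cdebug_printer() is normally passed as the printer
 * callback together with a lu_cdebug_print_info cookie; see lu_stack_fini()
 * below for a real caller:
 *
 *      static DECLARE_LU_CDEBUG_PRINT_INFO(cookie, D_ERROR);
 *
 *      lu_site_print(env, site, &cookie, lu_cdebug_printer);
 */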

/**
 * Print object header.
 */
void lu_object_header_print(const struct lu_env *env, void *cookie,
                            lu_printer_t printer,
                            const struct lu_object_header *hdr)
{
        (*printer)(env, cookie, "header@%p[%#lx, %d, "DFID"%s%s%s]",
                   hdr, hdr->loh_flags, cfs_atomic_read(&hdr->loh_ref),
                   PFID(&hdr->loh_fid),
                   cfs_hlist_unhashed(&hdr->loh_hash) ? "" : " hash",
                   cfs_list_empty((cfs_list_t *)&hdr->loh_lru) ? "" : " lru",
                   hdr->loh_attr & LOHA_EXISTS ? " exist":"");
}
EXPORT_SYMBOL(lu_object_header_print);

/**
 * Print human readable representation of the \a o to the \a printer.
 */
void lu_object_print(const struct lu_env *env, void *cookie,
                     lu_printer_t printer, const struct lu_object *o)
{
        static const char ruler[] = "........................................";
        struct lu_object_header *top;
        int depth;

        top = o->lo_header;
        lu_object_header_print(env, cookie, printer, top);
        (*printer)(env, cookie, "{ \n");
        cfs_list_for_each_entry(o, &top->loh_layers, lo_linkage) {
                depth = o->lo_depth + 4;

                /*
                 * print `.' \a depth times followed by type name and address
                 */
                (*printer)(env, cookie, "%*.*s%s@%p", depth, depth, ruler,
                           o->lo_dev->ld_type->ldt_name, o);
                if (o->lo_ops->loo_object_print != NULL)
                        o->lo_ops->loo_object_print(env, cookie, printer, o);
                (*printer)(env, cookie, "\n");
        }
        (*printer)(env, cookie, "} header@%p\n", top);
}
EXPORT_SYMBOL(lu_object_print);

/**
 * Check object consistency.
 */
int lu_object_invariant(const struct lu_object *o)
{
        struct lu_object_header *top;

        top = o->lo_header;
        cfs_list_for_each_entry(o, &top->loh_layers, lo_linkage) {
                if (o->lo_ops->loo_object_invariant != NULL &&
                    !o->lo_ops->loo_object_invariant(o))
                        return 0;
        }
        return 1;
}
EXPORT_SYMBOL(lu_object_invariant);

static struct lu_object *htable_lookup(struct lu_site *s,
                                       cfs_hash_bd_t *bd,
                                       const struct lu_fid *f,
                                       cfs_waitlink_t *waiter,
                                       __u64 *version)
{
        struct lu_site_bkt_data *bkt;
        struct lu_object_header *h;
        cfs_hlist_node_t        *hnode;
        __u64  ver = cfs_hash_bd_version_get(bd);

        if (*version == ver)
                return NULL;

        *version = ver;
        bkt = cfs_hash_bd_extra_get(s->ls_obj_hash, bd);
        /* cfs_hash_bd_lookup_intent is a somewhat "internal" function
         * of cfs_hash, but we don't want a refcount on the object right now */
        hnode = cfs_hash_bd_lookup_locked(s->ls_obj_hash, bd, (void *)f);
        if (hnode == NULL) {
                lprocfs_counter_incr(s->ls_stats, LU_SS_CACHE_MISS);
                return NULL;
        }

        h = container_of0(hnode, struct lu_object_header, loh_hash);
        if (likely(!lu_object_is_dying(h))) {
                lprocfs_counter_incr(s->ls_stats, LU_SS_CACHE_HIT);
                cfs_list_del_init(&h->loh_lru);
                return lu_object_top(h);
        }

        /*
         * Lookup found an object being destroyed; this object cannot be
         * returned (to assure that references to dying objects are eventually
         * drained), and moreover, lookup has to wait until the object is
         * freed.
         */
        cfs_atomic_dec(&h->loh_ref);

        cfs_waitlink_init(waiter);
        cfs_waitq_add(&bkt->lsb_marche_funebre, waiter);
        cfs_set_current_state(CFS_TASK_UNINT);
        lprocfs_counter_incr(s->ls_stats, LU_SS_CACHE_DEATH_RACE);
        return ERR_PTR(-EAGAIN);
}
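
/*
 * A sketch of the -EAGAIN contract above (see lu_object_find_at() below for
 * the real caller): on ERR_PTR(-EAGAIN), @waiter has already been queued on
 * the bucket's lsb_marche_funebre wait-queue, so the caller must sleep and
 * then dequeue itself before retrying:
 *
 *      o = lu_object_find_try(env, dev, f, conf, &wait);
 *      if (o == ERR_PTR(-EAGAIN)) {
 *              cfs_waitq_wait(&wait, CFS_TASK_UNINT);
 *              bkt = lu_site_bkt_from_fid(dev->ld_site, f);
 *              cfs_waitq_del(&bkt->lsb_marche_funebre, &wait);
 *              ... retry the lookup ...
 *      }
 */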

/**
 * Search cache for an object with the fid \a f. If such an object is found,
 * return it. Otherwise, create a new object, insert it into the cache and
 * return it. In any case, an additional reference is acquired on the
 * returned object.
 */
struct lu_object *lu_object_find(const struct lu_env *env,
                                 struct lu_device *dev, const struct lu_fid *f,
                                 const struct lu_object_conf *conf)
{
        return lu_object_find_at(env, dev->ld_site->ls_top_dev, f, conf);
}
EXPORT_SYMBOL(lu_object_find);

static struct lu_object *lu_object_new(const struct lu_env *env,
                                       struct lu_device *dev,
                                       const struct lu_fid *f,
                                       const struct lu_object_conf *conf)
{
        struct lu_object        *o;
        cfs_hash_t              *hs;
        cfs_hash_bd_t            bd;
        struct lu_site_bkt_data *bkt;

        o = lu_object_alloc(env, dev, f, conf);
        if (unlikely(IS_ERR(o)))
                return o;

        hs = dev->ld_site->ls_obj_hash;
        cfs_hash_bd_get_and_lock(hs, (void *)f, &bd, 1);
        bkt = cfs_hash_bd_extra_get(hs, &bd);
        cfs_hash_bd_add_locked(hs, &bd, &o->lo_header->loh_hash);
        bkt->lsb_busy++;
        cfs_hash_bd_unlock(hs, &bd, 1);
        return o;
}

/**
 * Core logic of lu_object_find*() functions.
 */
static struct lu_object *lu_object_find_try(const struct lu_env *env,
                                            struct lu_device *dev,
                                            const struct lu_fid *f,
                                            const struct lu_object_conf *conf,
                                            cfs_waitlink_t *waiter)
{
        struct lu_object      *o;
        struct lu_object      *shadow;
        struct lu_site        *s;
        cfs_hash_t            *hs;
        cfs_hash_bd_t          bd;
        __u64                  version = 0;

        /*
         * This uses standard index maintenance protocol:
         *
         *     - search index under lock, and return object if found;
         *     - otherwise, unlock index, allocate new object;
         *     - lock index and search again;
         *     - if nothing is found (usual case), insert newly created
         *       object into index;
         *     - otherwise (race: other thread inserted object), free
         *       object just allocated;
         *     - unlock index;
         *     - return object.
         *
         * For the "LOC_F_NEW" case, we are sure the object is newly
         * established. It is unnecessary to perform lookup-alloc-lookup-insert;
         * instead, just allocate and insert directly.
         *
         * If a dying object is found during index search, add @waiter to the
         * site wait-queue and return ERR_PTR(-EAGAIN).
         */
        if (conf != NULL && conf->loc_flags & LOC_F_NEW)
                return lu_object_new(env, dev, f, conf);

        s  = dev->ld_site;
        hs = s->ls_obj_hash;
        cfs_hash_bd_get_and_lock(hs, (void *)f, &bd, 1);
        o = htable_lookup(s, &bd, f, waiter, &version);
        cfs_hash_bd_unlock(hs, &bd, 1);
        if (o != NULL)
                return o;

        /*
         * Allocate new object. This may result in rather complicated
         * operations, including fld queries, inode loading, etc.
         */
        o = lu_object_alloc(env, dev, f, conf);
        if (unlikely(IS_ERR(o)))
                return o;

        LASSERT(lu_fid_eq(lu_object_fid(o), f));

        cfs_hash_bd_lock(hs, &bd, 1);

        shadow = htable_lookup(s, &bd, f, waiter, &version);
        if (shadow == NULL) {
                struct lu_site_bkt_data *bkt;

                bkt = cfs_hash_bd_extra_get(hs, &bd);
                cfs_hash_bd_add_locked(hs, &bd, &o->lo_header->loh_hash);
                bkt->lsb_busy++;
                cfs_hash_bd_unlock(hs, &bd, 1);
                return o;
        } else {
                if (!cfs_list_empty(&shadow->lo_header->loh_lru))
                        cfs_list_del_init(&shadow->lo_header->loh_lru);
                lprocfs_counter_incr(s->ls_stats, LU_SS_CACHE_RACE);
                cfs_hash_bd_unlock(hs, &bd, 1);
                lu_object_free(env, o);
                return shadow;
        }
}

/**
 * Much like lu_object_find(), but top level device of object is specifically
 * \a dev rather than top level device of the site. This interface allows
 * objects of different "stacking" to be created within the same site.
 */
struct lu_object *lu_object_find_at(const struct lu_env *env,
                                    struct lu_device *dev,
                                    const struct lu_fid *f,
                                    const struct lu_object_conf *conf)
{
        struct lu_site_bkt_data *bkt;
        struct lu_object        *obj;
        cfs_waitlink_t           wait;

        while (1) {
                obj = lu_object_find_try(env, dev, f, conf, &wait);
                if (obj != ERR_PTR(-EAGAIN))
                        return obj;
                /*
                 * lu_object_find_try() already added waiter into the
                 * wait queue.
                 */
                cfs_waitq_wait(&wait, CFS_TASK_UNINT);
                bkt = lu_site_bkt_from_fid(dev->ld_site, (void *)f);
                cfs_waitq_del(&bkt->lsb_marche_funebre, &wait);
        }
}
EXPORT_SYMBOL(lu_object_find_at);

/**
 * Find object with given fid, and return its slice belonging to given device.
 */
struct lu_object *lu_object_find_slice(const struct lu_env *env,
                                       struct lu_device *dev,
                                       const struct lu_fid *f,
                                       const struct lu_object_conf *conf)
{
        struct lu_object *top;
        struct lu_object *obj;

        top = lu_object_find(env, dev, f, conf);
        if (!IS_ERR(top)) {
                obj = lu_object_locate(top->lo_header, dev->ld_type);
                if (obj == NULL)
                        lu_object_put(env, top);
        } else
                obj = top;
        return obj;
}
EXPORT_SYMBOL(lu_object_find_slice);

/**
 * Global list of all device types.
 */
static CFS_LIST_HEAD(lu_device_types);

int lu_device_type_init(struct lu_device_type *ldt)
{
        int result;

        CFS_INIT_LIST_HEAD(&ldt->ldt_linkage);
        result = ldt->ldt_ops->ldto_init(ldt);
        if (result == 0)
                cfs_list_add(&ldt->ldt_linkage, &lu_device_types);
        return result;
}
EXPORT_SYMBOL(lu_device_type_init);

void lu_device_type_fini(struct lu_device_type *ldt)
{
        cfs_list_del_init(&ldt->ldt_linkage);
        ldt->ldt_ops->ldto_fini(ldt);
}
EXPORT_SYMBOL(lu_device_type_fini);

void lu_types_stop(void)
{
        struct lu_device_type *ldt;

        cfs_list_for_each_entry(ldt, &lu_device_types, ldt_linkage) {
                if (ldt->ldt_device_nr == 0)
                        ldt->ldt_ops->ldto_stop(ldt);
        }
}
EXPORT_SYMBOL(lu_types_stop);

/**
 * Global list of all sites on this node
 */
static CFS_LIST_HEAD(lu_sites);
static CFS_DECLARE_MUTEX(lu_sites_guard);

/**
 * Global environment used by site shrinker.
 */
static struct lu_env lu_shrink_env;

struct lu_site_print_arg {
        struct lu_env   *lsp_env;
        void            *lsp_cookie;
        lu_printer_t     lsp_printer;
};

static int
lu_site_obj_print(cfs_hash_t *hs, cfs_hash_bd_t *bd,
                  cfs_hlist_node_t *hnode, void *data)
{
        struct lu_site_print_arg *arg = (struct lu_site_print_arg *)data;
        struct lu_object_header  *h;

        h = cfs_hlist_entry(hnode, struct lu_object_header, loh_hash);
        if (!cfs_list_empty(&h->loh_layers)) {
                const struct lu_object *o;

                o = lu_object_top(h);
                lu_object_print(arg->lsp_env, arg->lsp_cookie,
                                arg->lsp_printer, o);
        } else {
                lu_object_header_print(arg->lsp_env, arg->lsp_cookie,
                                       arg->lsp_printer, h);
        }
        return 0;
}

/**
 * Print all objects in \a s.
 */
void lu_site_print(const struct lu_env *env, struct lu_site *s, void *cookie,
                   lu_printer_t printer)
{
        struct lu_site_print_arg arg = {
                .lsp_env     = (struct lu_env *)env,
                .lsp_cookie  = cookie,
                .lsp_printer = printer,
        };

        cfs_hash_for_each(s->ls_obj_hash, lu_site_obj_print, &arg);
}
EXPORT_SYMBOL(lu_site_print);

enum {
        LU_CACHE_PERCENT_MAX     = 50,
        LU_CACHE_PERCENT_DEFAULT = 20
};

static unsigned int lu_cache_percent = LU_CACHE_PERCENT_DEFAULT;
CFS_MODULE_PARM(lu_cache_percent, "i", int, 0644,
                "Percentage of memory to be used as lu_object cache");

/**
 * Return desired hash table order.
 */
static int lu_htable_order(void)
{
        unsigned long cache_size;
        int bits;

        /*
         * Calculate hash table size, assuming that we want reasonable
         * performance when 20% of total memory is occupied by cache of
         * lu_objects.
         *
         * Size of lu_object is (arbitrarily) taken as 1K (together with
         * inode).
         */
        cache_size = cfs_num_physpages;

#if BITS_PER_LONG == 32
        /* limit hashtable size for lowmem systems to low RAM */
        if (cache_size > 1 << (30 - CFS_PAGE_SHIFT))
                cache_size = 1 << (30 - CFS_PAGE_SHIFT) * 3 / 4;
#endif

        /* reject unreasonable cache settings */
        if (lu_cache_percent == 0 || lu_cache_percent > LU_CACHE_PERCENT_MAX) {
                CWARN("obdclass: invalid lu_cache_percent: %u, it must be in"
                      " the range of (0, %u]. Will use default value: %u.\n",
                      lu_cache_percent, LU_CACHE_PERCENT_MAX,
                      LU_CACHE_PERCENT_DEFAULT);

                lu_cache_percent = LU_CACHE_PERCENT_DEFAULT;
        }
        cache_size = cache_size / 100 * lu_cache_percent *
                (CFS_PAGE_SIZE / 1024);

        for (bits = 1; (1 << bits) < cache_size; ++bits) {
                ;
        }
        return bits;
}
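
/*
 * A worked example (illustrative numbers, not from the source): on a 64-bit
 * node with 4 GiB of RAM and 4 KiB pages, cfs_num_physpages is 2^20, so with
 * the default lu_cache_percent of 20:
 *
 *      cache_size = 2^20 / 100 * 20 * (4096 / 1024) = 838800
 *
 * and the loop above picks bits = 20, the smallest order with
 * 2^bits >= cache_size (2^19 = 524288 is too small, 2^20 = 1048576 suffices).
 */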

static unsigned lu_obj_hop_hash(cfs_hash_t *hs,
                                const void *key, unsigned mask)
{
        struct lu_fid  *fid = (struct lu_fid *)key;
        __u32           hash;

        hash = fid_flatten32(fid);
        hash += (hash >> 4) + (hash << 12); /* mixing oid and seq */
        hash = cfs_hash_long(hash, hs->hs_bkt_bits);

        /* give me another random factor */
        hash -= cfs_hash_long((unsigned long)hs, fid_oid(fid) % 11 + 3);

        hash <<= hs->hs_cur_bits - hs->hs_bkt_bits;
        hash |= (fid_seq(fid) + fid_oid(fid)) & (CFS_HASH_NBKT(hs) - 1);

        return hash & mask;
}

static void *lu_obj_hop_object(cfs_hlist_node_t *hnode)
{
        return cfs_hlist_entry(hnode, struct lu_object_header, loh_hash);
}

static void *lu_obj_hop_key(cfs_hlist_node_t *hnode)
{
        struct lu_object_header *h;

        h = cfs_hlist_entry(hnode, struct lu_object_header, loh_hash);
        return &h->loh_fid;
}

static int lu_obj_hop_keycmp(const void *key, cfs_hlist_node_t *hnode)
{
        struct lu_object_header *h;

        h = cfs_hlist_entry(hnode, struct lu_object_header, loh_hash);
        return lu_fid_eq(&h->loh_fid, (struct lu_fid *)key);
}

static void lu_obj_hop_get(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
{
        struct lu_object_header *h;

        h = cfs_hlist_entry(hnode, struct lu_object_header, loh_hash);
        if (cfs_atomic_add_return(1, &h->loh_ref) == 1) {
                struct lu_site_bkt_data *bkt;
                cfs_hash_bd_t            bd;

                cfs_hash_bd_get(hs, &h->loh_fid, &bd);
                bkt = cfs_hash_bd_extra_get(hs, &bd);
                bkt->lsb_busy++;
        }
}

static void lu_obj_hop_put_locked(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
{
        LBUG(); /* we should never call it */
}

cfs_hash_ops_t lu_site_hash_ops = {
        .hs_hash        = lu_obj_hop_hash,
        .hs_key         = lu_obj_hop_key,
        .hs_keycmp      = lu_obj_hop_keycmp,
        .hs_object      = lu_obj_hop_object,
        .hs_get         = lu_obj_hop_get,
        .hs_put_locked  = lu_obj_hop_put_locked,
};

/**
 * Initialize site \a s, with \a d as the top level device.
 */
#define LU_SITE_BITS_MIN    12
#define LU_SITE_BITS_MAX    24
/**
 * 256 buckets in total; we don't want too many buckets because:
 * - they consume too much memory
 * - they lead to unbalanced LRU lists
 */
#define LU_SITE_BKT_BITS    8

int lu_site_init(struct lu_site *s, struct lu_device *top)
{
        struct lu_site_bkt_data *bkt;
        cfs_hash_bd_t bd;
        char name[16];
        int bits;
        int i;
        ENTRY;

        memset(s, 0, sizeof *s);
        bits = lu_htable_order();
        snprintf(name, 16, "lu_site_%s", top->ld_type->ldt_name);
        for (bits = min(max(LU_SITE_BITS_MIN, bits), LU_SITE_BITS_MAX);
             bits >= LU_SITE_BITS_MIN; bits--) {
                s->ls_obj_hash = cfs_hash_create(name, bits, bits,
                                                 bits - LU_SITE_BKT_BITS,
                                                 sizeof(*bkt), 0, 0,
                                                 &lu_site_hash_ops,
                                                 CFS_HASH_SPIN_BKTLOCK |
                                                 CFS_HASH_NO_ITEMREF |
                                                 CFS_HASH_DEPTH |
                                                 CFS_HASH_ASSERT_EMPTY);
                if (s->ls_obj_hash != NULL)
                        break;
        }

        if (s->ls_obj_hash == NULL) {
                CERROR("failed to create lu_site hash with bits: %d\n", bits);
                return -ENOMEM;
        }

        cfs_hash_for_each_bucket(s->ls_obj_hash, &bd, i) {
                bkt = cfs_hash_bd_extra_get(s->ls_obj_hash, &bd);
                CFS_INIT_LIST_HEAD(&bkt->lsb_lru);
                cfs_waitq_init(&bkt->lsb_marche_funebre);
        }

        s->ls_stats = lprocfs_alloc_stats(LU_SS_LAST_STAT, 0);
        if (s->ls_stats == NULL) {
                cfs_hash_putref(s->ls_obj_hash);
                s->ls_obj_hash = NULL;
                return -ENOMEM;
        }

        lprocfs_counter_init(s->ls_stats, LU_SS_CREATED,
                             0, "created", "created");
        lprocfs_counter_init(s->ls_stats, LU_SS_CACHE_HIT,
                             0, "cache_hit", "cache_hit");
        lprocfs_counter_init(s->ls_stats, LU_SS_CACHE_MISS,
                             0, "cache_miss", "cache_miss");
        lprocfs_counter_init(s->ls_stats, LU_SS_CACHE_RACE,
                             0, "cache_race", "cache_race");
        lprocfs_counter_init(s->ls_stats, LU_SS_CACHE_DEATH_RACE,
                             0, "cache_death_race", "cache_death_race");
        lprocfs_counter_init(s->ls_stats, LU_SS_LRU_PURGED,
                             0, "lru_purged", "lru_purged");

        CFS_INIT_LIST_HEAD(&s->ls_linkage);
        s->ls_top_dev = top;
        top->ld_site = s;
        lu_device_get(top);
        lu_ref_add(&top->ld_reference, "site-top", s);

        CFS_INIT_LIST_HEAD(&s->ls_ld_linkage);
        cfs_spin_lock_init(&s->ls_ld_lock);

        cfs_spin_lock(&s->ls_ld_lock);
        cfs_list_add(&top->ld_linkage, &s->ls_ld_linkage);
        cfs_spin_unlock(&s->ls_ld_lock);

        RETURN(0);
}
EXPORT_SYMBOL(lu_site_init);

/**
 * Finalize \a s and release its resources.
 */
void lu_site_fini(struct lu_site *s)
{
        cfs_down(&lu_sites_guard);
        cfs_list_del_init(&s->ls_linkage);
        cfs_up(&lu_sites_guard);

        if (s->ls_obj_hash != NULL) {
                cfs_hash_putref(s->ls_obj_hash);
                s->ls_obj_hash = NULL;
        }

        if (s->ls_top_dev != NULL) {
                s->ls_top_dev->ld_site = NULL;
                lu_ref_del(&s->ls_top_dev->ld_reference, "site-top", s);
                lu_device_put(s->ls_top_dev);
                s->ls_top_dev = NULL;
        }

        if (s->ls_stats != NULL)
                lprocfs_free_stats(&s->ls_stats);
}
EXPORT_SYMBOL(lu_site_fini);

/**
 * Called when initialization of stack for this site is completed.
 */
int lu_site_init_finish(struct lu_site *s)
{
        int result;
        cfs_down(&lu_sites_guard);
        result = lu_context_refill(&lu_shrink_env.le_ctx);
        if (result == 0)
                cfs_list_add(&s->ls_linkage, &lu_sites);
        cfs_up(&lu_sites_guard);
        return result;
}
EXPORT_SYMBOL(lu_site_init_finish);

/**
 * Acquire additional reference on device \a d
 */
void lu_device_get(struct lu_device *d)
{
        cfs_atomic_inc(&d->ld_ref);
}
EXPORT_SYMBOL(lu_device_get);

/**
 * Release reference on device \a d.
 */
void lu_device_put(struct lu_device *d)
{
        LASSERT(cfs_atomic_read(&d->ld_ref) > 0);
        cfs_atomic_dec(&d->ld_ref);
}
EXPORT_SYMBOL(lu_device_put);

/**
 * Initialize device \a d of type \a t.
 */
int lu_device_init(struct lu_device *d, struct lu_device_type *t)
{
        if (t->ldt_device_nr++ == 0 && t->ldt_ops->ldto_start != NULL)
                t->ldt_ops->ldto_start(t);
        memset(d, 0, sizeof *d);
        cfs_atomic_set(&d->ld_ref, 0);
        d->ld_type = t;
        lu_ref_init(&d->ld_reference);
        CFS_INIT_LIST_HEAD(&d->ld_linkage);
        return 0;
}
EXPORT_SYMBOL(lu_device_init);

/**
 * Finalize device \a d.
 */
void lu_device_fini(struct lu_device *d)
{
        struct lu_device_type *t;

        t = d->ld_type;
        if (d->ld_obd != NULL) {
                d->ld_obd->obd_lu_dev = NULL;
                d->ld_obd = NULL;
        }

        lu_ref_fini(&d->ld_reference);
        LASSERTF(cfs_atomic_read(&d->ld_ref) == 0,
                 "Refcount is %u\n", cfs_atomic_read(&d->ld_ref));
        LASSERT(t->ldt_device_nr > 0);
        if (--t->ldt_device_nr == 0 && t->ldt_ops->ldto_stop != NULL)
                t->ldt_ops->ldto_stop(t);
}
EXPORT_SYMBOL(lu_device_fini);

/**
 * Initialize object \a o that is part of compound object \a h and was created
 * by device \a d.
 */
int lu_object_init(struct lu_object *o,
                   struct lu_object_header *h, struct lu_device *d)
{
        memset(o, 0, sizeof *o);
        o->lo_header = h;
        o->lo_dev    = d;
        lu_device_get(d);
        o->lo_dev_ref = lu_ref_add(&d->ld_reference, "lu_object", o);
        CFS_INIT_LIST_HEAD(&o->lo_linkage);
        return 0;
}
EXPORT_SYMBOL(lu_object_init);

/**
 * Finalize object and release its resources.
 */
void lu_object_fini(struct lu_object *o)
{
        struct lu_device *dev = o->lo_dev;

        LASSERT(cfs_list_empty(&o->lo_linkage));

        if (dev != NULL) {
                lu_ref_del_at(&dev->ld_reference,
                              o->lo_dev_ref, "lu_object", o);
                lu_device_put(dev);
                o->lo_dev = NULL;
        }
}
EXPORT_SYMBOL(lu_object_fini);

/**
 * Add object \a o as first layer of compound object \a h
 *
 * This is typically called by the ->ldo_object_alloc() method of top-level
 * device.
 */
void lu_object_add_top(struct lu_object_header *h, struct lu_object *o)
{
        cfs_list_move(&o->lo_linkage, &h->loh_layers);
}
EXPORT_SYMBOL(lu_object_add_top);

/**
 * Add object \a o as a layer of compound object, going after \a before.
 *
 * This is typically called by the ->ldo_object_alloc() method of \a
 * before->lo_dev.
 */
void lu_object_add(struct lu_object *before, struct lu_object *o)
{
        cfs_list_move(&o->lo_linkage, &before->lo_linkage);
}
EXPORT_SYMBOL(lu_object_add);

/**
 * Initialize compound object.
 */
int lu_object_header_init(struct lu_object_header *h)
{
        memset(h, 0, sizeof *h);
        cfs_atomic_set(&h->loh_ref, 1);
        CFS_INIT_HLIST_NODE(&h->loh_hash);
        CFS_INIT_LIST_HEAD(&h->loh_lru);
        CFS_INIT_LIST_HEAD(&h->loh_layers);
        lu_ref_init(&h->loh_reference);
        return 0;
}
EXPORT_SYMBOL(lu_object_header_init);

/**
 * Finalize compound object.
 */
void lu_object_header_fini(struct lu_object_header *h)
{
        LASSERT(cfs_list_empty(&h->loh_layers));
        LASSERT(cfs_list_empty(&h->loh_lru));
        LASSERT(cfs_hlist_unhashed(&h->loh_hash));
        lu_ref_fini(&h->loh_reference);
}
EXPORT_SYMBOL(lu_object_header_fini);

/**
 * Given a compound object, find its slice, corresponding to the device type
 * \a dtype.
 */
struct lu_object *lu_object_locate(struct lu_object_header *h,
                                   const struct lu_device_type *dtype)
{
        struct lu_object *o;

        cfs_list_for_each_entry(o, &h->loh_layers, lo_linkage) {
                if (o->lo_dev->ld_type == dtype)
                        return o;
        }
        return NULL;
}
EXPORT_SYMBOL(lu_object_locate);

/**
 * Finalize and free devices in the device stack.
 *
 * Finalize device stack by purging object cache, and calling
 * lu_device_type_operations::ldto_device_fini() and
 * lu_device_type_operations::ldto_device_free() on all devices in the stack.
 */
void lu_stack_fini(const struct lu_env *env, struct lu_device *top)
{
        struct lu_site   *site = top->ld_site;
        struct lu_device *scan;
        struct lu_device *next;

        lu_site_purge(env, site, ~0);
        for (scan = top; scan != NULL; scan = next) {
                next = scan->ld_type->ldt_ops->ldto_device_fini(env, scan);
                lu_ref_del(&scan->ld_reference, "lu-stack", &lu_site_init);
                lu_device_put(scan);
        }

        /* purge again. */
        lu_site_purge(env, site, ~0);

        if (!cfs_hash_is_empty(site->ls_obj_hash)) {
                /*
                 * Uh-oh, objects still exist.
                 */
                static DECLARE_LU_CDEBUG_PRINT_INFO(cookie, D_ERROR);

                lu_site_print(env, site, &cookie, lu_cdebug_printer);
        }

        for (scan = top; scan != NULL; scan = next) {
                const struct lu_device_type *ldt = scan->ld_type;
                struct obd_type             *type;

                next = ldt->ldt_ops->ldto_device_free(env, scan);
                type = ldt->ldt_obd_type;
                if (type != NULL) {
                        type->typ_refcnt--;
                        class_put_type(type);
                }
        }
}
EXPORT_SYMBOL(lu_stack_fini);

enum {
        /**
         * Maximal number of tld slots.
         */
        LU_CONTEXT_KEY_NR = 32
};

static struct lu_context_key *lu_keys[LU_CONTEXT_KEY_NR] = { NULL, };

static cfs_spinlock_t lu_keys_guard = CFS_SPIN_LOCK_UNLOCKED;

/**
 * Global counter incremented whenever key is registered, unregistered,
 * revived or quiesced. This is used to avoid unnecessary calls to
 * lu_context_refill(). No locking is provided, as initialization and shutdown
 * are supposed to be externally serialized.
 */
static unsigned key_set_version = 0;

/**
 * Register new key.
 */
int lu_context_key_register(struct lu_context_key *key)
{
        int result;
        int i;

        LASSERT(key->lct_init != NULL);
        LASSERT(key->lct_fini != NULL);
        LASSERT(key->lct_tags != 0);
        LASSERT(key->lct_owner != NULL);

        result = -ENFILE;
        cfs_spin_lock(&lu_keys_guard);
        for (i = 0; i < ARRAY_SIZE(lu_keys); ++i) {
                if (lu_keys[i] == NULL) {
                        key->lct_index = i;
                        cfs_atomic_set(&key->lct_used, 1);
                        lu_keys[i] = key;
                        lu_ref_init(&key->lct_reference);
                        result = 0;
                        ++key_set_version;
                        break;
                }
        }
        cfs_spin_unlock(&lu_keys_guard);
        return result;
}
EXPORT_SYMBOL(lu_context_key_register);

static void key_fini(struct lu_context *ctx, int index)
{
        if (ctx->lc_value != NULL && ctx->lc_value[index] != NULL) {
                struct lu_context_key *key;

                key = lu_keys[index];
                LASSERT(key != NULL);
                LASSERT(key->lct_fini != NULL);
                LASSERT(cfs_atomic_read(&key->lct_used) > 1);

                key->lct_fini(ctx, key, ctx->lc_value[index]);
                lu_ref_del(&key->lct_reference, "ctx", ctx);
                cfs_atomic_dec(&key->lct_used);
                LASSERT(key->lct_owner != NULL);
                if (!(ctx->lc_tags & LCT_NOREF)) {
                        LASSERT(cfs_module_refcount(key->lct_owner) > 0);
                        cfs_module_put(key->lct_owner);
                }
                ctx->lc_value[index] = NULL;
        }
}

/**
 * Deregister key.
 */
void lu_context_key_degister(struct lu_context_key *key)
{
        LASSERT(cfs_atomic_read(&key->lct_used) >= 1);
        LINVRNT(0 <= key->lct_index && key->lct_index < ARRAY_SIZE(lu_keys));

        lu_context_key_quiesce(key);

        ++key_set_version;
        cfs_spin_lock(&lu_keys_guard);
        key_fini(&lu_shrink_env.le_ctx, key->lct_index);
        if (lu_keys[key->lct_index]) {
                lu_keys[key->lct_index] = NULL;
                lu_ref_fini(&key->lct_reference);
        }
        cfs_spin_unlock(&lu_keys_guard);

        LASSERTF(cfs_atomic_read(&key->lct_used) == 1,
                 "key has instances: %d\n",
                 cfs_atomic_read(&key->lct_used));
}
EXPORT_SYMBOL(lu_context_key_degister);

/**
 * Register a number of keys. This has to be called after all keys have been
 * initialized by a call to LU_CONTEXT_KEY_INIT().
 */
int lu_context_key_register_many(struct lu_context_key *k, ...)
{
        struct lu_context_key *key = k;
        va_list args;
        int result;

        va_start(args, k);
        do {
                result = lu_context_key_register(key);
                if (result)
                        break;
                key = va_arg(args, struct lu_context_key *);
        } while (key != NULL);
        va_end(args);

        if (result != 0) {
                va_start(args, k);
                while (k != key) {
                        lu_context_key_degister(k);
                        k = va_arg(args, struct lu_context_key *);
                }
                va_end(args);
        }

        return result;
}
EXPORT_SYMBOL(lu_context_key_register_many);
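
/*
 * A usage sketch (hypothetical key names): the argument list must be
 * NULL-terminated, and on failure any keys registered so far are rolled
 * back, so callers only need to check the single return value:
 *
 *      result = lu_context_key_register_many(&foo_thread_key,
 *                                            &bar_thread_key, NULL);
 *      if (result != 0)
 *              return result;
 */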

/**
 * De-register a number of keys. This is a dual to
 * lu_context_key_register_many().
 */
void lu_context_key_degister_many(struct lu_context_key *k, ...)
{
        va_list args;

        va_start(args, k);
        do {
                lu_context_key_degister(k);
                k = va_arg(args, struct lu_context_key *);
        } while (k != NULL);
        va_end(args);
}
EXPORT_SYMBOL(lu_context_key_degister_many);

/**
 * Revive a number of keys.
 */
void lu_context_key_revive_many(struct lu_context_key *k, ...)
{
        va_list args;

        va_start(args, k);
        do {
                lu_context_key_revive(k);
                k = va_arg(args, struct lu_context_key *);
        } while (k != NULL);
        va_end(args);
}
EXPORT_SYMBOL(lu_context_key_revive_many);

/**
 * Quiesce a number of keys.
 */
void lu_context_key_quiesce_many(struct lu_context_key *k, ...)
{
        va_list args;

        va_start(args, k);
        do {
                lu_context_key_quiesce(k);
                k = va_arg(args, struct lu_context_key *);
        } while (k != NULL);
        va_end(args);
}
EXPORT_SYMBOL(lu_context_key_quiesce_many);
1410
1411 /**
1412  * Return value associated with key \a key in context \a ctx.
1413  */
1414 void *lu_context_key_get(const struct lu_context *ctx,
1415                          const struct lu_context_key *key)
1416 {
1417         LINVRNT(ctx->lc_state == LCS_ENTERED);
1418         LINVRNT(0 <= key->lct_index && key->lct_index < ARRAY_SIZE(lu_keys));
1419         LASSERT(lu_keys[key->lct_index] == key);
1420         return ctx->lc_value[key->lct_index];
1421 }
1422 EXPORT_SYMBOL(lu_context_key_get);
1423
1424 /**
1425  * List of remembered contexts. XXX document me.
1426  */
1427 static CFS_LIST_HEAD(lu_context_remembered);
1428
1429 /**
1430  * Destroy \a key in all remembered contexts. This is used to destroy key
1431  * values in "shared" contexts (like service threads), when a module owning
1432  * the key is about to be unloaded.
1433  */
1434 void lu_context_key_quiesce(struct lu_context_key *key)
1435 {
1436         struct lu_context *ctx;
1437         extern unsigned cl_env_cache_purge(unsigned nr);
1438
1439         if (!(key->lct_tags & LCT_QUIESCENT)) {
1440                 /*
1441                  * XXX layering violation.
1442                  */
1443                 cl_env_cache_purge(~0);
1444                 key->lct_tags |= LCT_QUIESCENT;
1445                 /*
1446                  * XXX memory barrier has to go here.
1447                  */
1448                 cfs_spin_lock(&lu_keys_guard);
1449                 cfs_list_for_each_entry(ctx, &lu_context_remembered,
1450                                         lc_remember)
1451                         key_fini(ctx, key->lct_index);
1452                 cfs_spin_unlock(&lu_keys_guard);
1453                 ++key_set_version;
1454         }
1455 }
1456 EXPORT_SYMBOL(lu_context_key_quiesce);
1457
1458 void lu_context_key_revive(struct lu_context_key *key)
1459 {
1460         key->lct_tags &= ~LCT_QUIESCENT;
1461         ++key_set_version;
1462 }
1463 EXPORT_SYMBOL(lu_context_key_revive);
1464
1465 static void keys_fini(struct lu_context *ctx)
1466 {
1467         int i;
1468
1469         cfs_spin_lock(&lu_keys_guard);
1470         if (ctx->lc_value != NULL) {
1471                 for (i = 0; i < ARRAY_SIZE(lu_keys); ++i)
1472                         key_fini(ctx, i);
1473                 OBD_FREE(ctx->lc_value,
1474                          ARRAY_SIZE(lu_keys) * sizeof ctx->lc_value[0]);
1475                 ctx->lc_value = NULL;
1476         }
1477         cfs_spin_unlock(&lu_keys_guard);
1478 }
1479
1480 static int keys_fill(struct lu_context *ctx)
1481 {
1482         int i;
1483
1484         for (i = 0; i < ARRAY_SIZE(lu_keys); ++i) {
1485                 struct lu_context_key *key;
1486
1487                 key = lu_keys[i];
1488                 if (ctx->lc_value[i] == NULL && key != NULL &&
1489                     (key->lct_tags & ctx->lc_tags) &&
1490                     /*
1491                      * Don't create values for a LCT_QUIESCENT key, as this
1492                      * will pin module owning a key.
1493                      */
1494                     !(key->lct_tags & LCT_QUIESCENT)) {
1495                         void *value;
1496
1497                         LINVRNT(key->lct_init != NULL);
1498                         LINVRNT(key->lct_index == i);
1499
1500                         value = key->lct_init(ctx, key);
1501                         if (unlikely(IS_ERR(value)))
1502                                 return PTR_ERR(value);
1503
1504                         LASSERT(key->lct_owner != NULL);
1505                         if (!(ctx->lc_tags & LCT_NOREF))
1506                                 cfs_try_module_get(key->lct_owner);
1507                         lu_ref_add_atomic(&key->lct_reference, "ctx", ctx);
1508                         cfs_atomic_inc(&key->lct_used);
1509                         /*
1510                          * This is the only place in the code, where an
1511                          * element of ctx->lc_value[] array is set to non-NULL
1512                          * value.
1513                          */
1514                         ctx->lc_value[i] = value;
1515                         if (key->lct_exit != NULL)
1516                                 ctx->lc_tags |= LCT_HAS_EXIT;
1517                 }
1518                 ctx->lc_version = key_set_version;
1519         }
1520         return 0;
1521 }
1522
1523 static int keys_init(struct lu_context *ctx)
1524 {
1525         int result;
1526
1527         OBD_ALLOC(ctx->lc_value, ARRAY_SIZE(lu_keys) * sizeof ctx->lc_value[0]);
1528         if (likely(ctx->lc_value != NULL))
1529                 result = keys_fill(ctx);
1530         else
1531                 result = -ENOMEM;
1532
1533         if (result != 0)
1534                 keys_fini(ctx);
1535         return result;
1536 }
1537
1538 /**
1539  * Initialize context data-structure. Create values for all keys.
1540  */
1541 int lu_context_init(struct lu_context *ctx, __u32 tags)
1542 {
1543         memset(ctx, 0, sizeof *ctx);
1544         ctx->lc_state = LCS_INITIALIZED;
1545         ctx->lc_tags = tags;
1546         if (tags & LCT_REMEMBER) {
1547                 cfs_spin_lock(&lu_keys_guard);
1548                 cfs_list_add(&ctx->lc_remember, &lu_context_remembered);
1549                 cfs_spin_unlock(&lu_keys_guard);
1550         } else
1551                 CFS_INIT_LIST_HEAD(&ctx->lc_remember);
1552         return keys_init(ctx);
1553 }
1554 EXPORT_SYMBOL(lu_context_init);
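/**
 * A minimal sketch of the context life-cycle implied by the functions in
 * this section; the tag value and error handling are illustrative:
 *
 * \code
 * struct lu_context ctx;
 * int rc;
 *
 * rc = lu_context_init(&ctx, LCT_MD_THREAD);
 * if (rc == 0) {
 *         lu_context_enter(&ctx);
 *         ... access per-key values with lu_context_key_get() ...
 *         lu_context_exit(&ctx);
 *         lu_context_fini(&ctx);
 * }
 * \endcode
 */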
1555
1556 /**
1557  * Finalize context data-structure. Destroy key values.
1558  */
1559 void lu_context_fini(struct lu_context *ctx)
1560 {
1561         LINVRNT(ctx->lc_state == LCS_INITIALIZED || ctx->lc_state == LCS_LEFT);
1562         ctx->lc_state = LCS_FINALIZED;
1563         keys_fini(ctx);
1564         cfs_spin_lock(&lu_keys_guard);
1565         cfs_list_del_init(&ctx->lc_remember);
1566         cfs_spin_unlock(&lu_keys_guard);
1567 }
1568 EXPORT_SYMBOL(lu_context_fini);
1569
1570 /**
1571  * Called before entering context.
1572  */
1573 void lu_context_enter(struct lu_context *ctx)
1574 {
1575         LINVRNT(ctx->lc_state == LCS_INITIALIZED || ctx->lc_state == LCS_LEFT);
1576         ctx->lc_state = LCS_ENTERED;
1577 }
1578 EXPORT_SYMBOL(lu_context_enter);
1579
1580 /**
1581  * Called after exiting from \a ctx.
1582  */
1583 void lu_context_exit(struct lu_context *ctx)
1584 {
1585         int i;
1586
1587         LINVRNT(ctx->lc_state == LCS_ENTERED);
1588         ctx->lc_state = LCS_LEFT;
1589         if ((ctx->lc_tags & LCT_HAS_EXIT) && ctx->lc_value != NULL) {
1590                 for (i = 0; i < ARRAY_SIZE(lu_keys); ++i) {
1591                         if (ctx->lc_value[i] != NULL) {
1592                                 struct lu_context_key *key;
1593
1594                                 key = lu_keys[i];
1595                                 LASSERT(key != NULL);
1596                                 if (key->lct_exit != NULL)
1597                                         key->lct_exit(ctx,
1598                                                       key, ctx->lc_value[i]);
1599                         }
1600                 }
1601         }
1602 }
1603 EXPORT_SYMBOL(lu_context_exit);
1604
1605 /**
1606  * Allocate values in the context for all missing keys that were
1607  * registered after the context was created.
1608  */
1609 int lu_context_refill(struct lu_context *ctx)
1610 {
1611         LINVRNT(ctx->lc_value != NULL);
1612         return ctx->lc_version == key_set_version ? 0 : keys_fill(ctx);
1613 }
1614 EXPORT_SYMBOL(lu_context_refill);
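/**
 * A sketch of refilling a long-lived context after new keys may have been
 * registered; "foo_key" is hypothetical:
 *
 * \code
 * void *value;
 *
 * if (lu_context_refill(&ctx) == 0)
 *         value = lu_context_key_get(&ctx, &foo_key);
 * \endcode
 */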
1615
1616 /**
1617  * lu_ctx_tags/lu_ses_tags are updated when new types of obd are added.
1618  * Currently this is only used on the client side, specifically by the
1619  * echo device client; for other stacks (like ptlrpc threads), contexts
1620  * are predefined when the lu_device types are registered, during the
1621  * module probe phase.
1622  */
1623 __u32 lu_context_tags_default = 0;
1624 __u32 lu_session_tags_default = 0;
1625
1626 void lu_context_tags_update(__u32 tags)
1627 {
1628         cfs_spin_lock(&lu_keys_guard);
1629         lu_context_tags_default |= tags;
1630         key_set_version++;
1631         cfs_spin_unlock(&lu_keys_guard);
1632 }
1633 EXPORT_SYMBOL(lu_context_tags_update);
1634
1635 void lu_context_tags_clear(__u32 tags)
1636 {
1637         cfs_spin_lock(&lu_keys_guard);
1638         lu_context_tags_default &= ~tags;
1639         key_set_version++;
1640         cfs_spin_unlock(&lu_keys_guard);
1641 }
1642 EXPORT_SYMBOL(lu_context_tags_clear);
1643
1644 void lu_session_tags_update(__u32 tags)
1645 {
1646         cfs_spin_lock(&lu_keys_guard);
1647         lu_session_tags_default |= tags;
1648         key_set_version++;
1649         cfs_spin_unlock(&lu_keys_guard);
1650 }
1651 EXPORT_SYMBOL(lu_session_tags_update);
1652
1653 void lu_session_tags_clear(__u32 tags)
1654 {
1655         cfs_spin_lock(&lu_keys_guard);
1656         lu_session_tags_default &= ~tags;
1657         key_set_version++;
1658         cfs_spin_unlock(&lu_keys_guard);
1659 }
1660 EXPORT_SYMBOL(lu_session_tags_clear);
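/*
 * A sketch of how a dynamically added device type (the echo client case
 * described above) might widen the defaults; the tag values are
 * illustrative:
 *
 *      lu_context_tags_update(LCT_CL_THREAD);
 *      lu_session_tags_update(LCT_SESSION);
 */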
1661
1662 int lu_env_init(struct lu_env *env, __u32 tags)
1663 {
1664         int result;
1665
1666         env->le_ses = NULL;
1667         result = lu_context_init(&env->le_ctx, tags);
1668         if (likely(result == 0))
1669                 lu_context_enter(&env->le_ctx);
1670         return result;
1671 }
1672 EXPORT_SYMBOL(lu_env_init);
1673
1674 void lu_env_fini(struct lu_env *env)
1675 {
1676         lu_context_exit(&env->le_ctx);
1677         lu_context_fini(&env->le_ctx);
1678         env->le_ses = NULL;
1679 }
1680 EXPORT_SYMBOL(lu_env_fini);
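/**
 * A minimal lu_env usage sketch matching lu_env_init()/lu_env_fini()
 * above; note that lu_env_init() enters the context itself, so no
 * explicit lu_context_enter() is needed. The tag value is illustrative:
 *
 * \code
 * struct lu_env env;
 * int rc;
 *
 * rc = lu_env_init(&env, LCT_LOCAL);
 * if (rc == 0) {
 *         ... call interfaces taking "const struct lu_env *" ...
 *         lu_env_fini(&env);
 * }
 * \endcode
 */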
1681
1682 int lu_env_refill(struct lu_env *env)
1683 {
1684         int result;
1685
1686         result = lu_context_refill(&env->le_ctx);
1687         if (result == 0 && env->le_ses != NULL)
1688                 result = lu_context_refill(env->le_ses);
1689         return result;
1690 }
1691 EXPORT_SYMBOL(lu_env_refill);
1692
1693 /**
1694  * Currently, this API is only used by the echo client. Because the echo
1695  * client and the normal Lustre client share the same cl_env cache, the
1696  * echo client needs to refresh the env context after it gets one from
1697  * the cache, especially when a normal client and an echo client
1698  * co-exist on the same node.
1699  */
1700 int lu_env_refill_by_tags(struct lu_env *env, __u32 ctags,
1701                           __u32 stags)
1702 {
1703         int    result;
1704
1705         if ((env->le_ctx.lc_tags & ctags) != ctags) {
1706                 env->le_ctx.lc_version = 0;
1707                 env->le_ctx.lc_tags |= ctags;
1708         }
1709
1710         if (env->le_ses && (env->le_ses->lc_tags & stags) != stags) {
1711                 env->le_ses->lc_version = 0;
1712                 env->le_ses->lc_tags |= stags;
1713         }
1714
1715         result = lu_env_refill(env);
1716
1717         return result;
1718 }
1719 EXPORT_SYMBOL(lu_env_refill_by_tags);
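/**
 * A sketch of the echo-client-style call described above; the tag values
 * are illustrative:
 *
 * \code
 * int rc;
 *
 * rc = lu_env_refill_by_tags(env, LCT_DT_THREAD, LCT_SESSION);
 * \endcode
 */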
1720
1721 static struct cfs_shrinker *lu_site_shrinker = NULL;
1722
1723 typedef struct lu_site_stats {
1724         unsigned        lss_populated;
1725         unsigned        lss_max_search;
1726         unsigned        lss_total;
1727         unsigned        lss_busy;
1728 } lu_site_stats_t;
1729
1730 static void lu_site_stats_get(cfs_hash_t *hs,
1731                               lu_site_stats_t *stats, int populated)
1732 {
1733         cfs_hash_bd_t bd;
1734         int           i;
1735
1736         cfs_hash_for_each_bucket(hs, &bd, i) {
1737                 struct lu_site_bkt_data *bkt = cfs_hash_bd_extra_get(hs, &bd);
1738                 cfs_hlist_head_t        *hhead;
1739
1740                 cfs_hash_bd_lock(hs, &bd, 1);
1741                 stats->lss_busy  += bkt->lsb_busy;
1742                 stats->lss_total += cfs_hash_bd_count_get(&bd);
1743                 stats->lss_max_search = max((int)stats->lss_max_search,
1744                                             cfs_hash_bd_depmax_get(&bd));
1745                 if (!populated) {
1746                         cfs_hash_bd_unlock(hs, &bd, 1);
1747                         continue;
1748                 }
1749
1750                 cfs_hash_bd_for_each_hlist(hs, &bd, hhead) {
1751                         if (!cfs_hlist_empty(hhead))
1752                                 stats->lss_populated++;
1753                 }
1754                 cfs_hash_bd_unlock(hs, &bd, 1);
1755         }
1756 }
1757
1758 #ifdef __KERNEL__
1759
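/*
 * A note on the shrinker contract wrapped by SHRINKER_ARGS()/shrink_param()
 * below: a zero nr_to_scan is a query, and the return value estimates the
 * number of freeable (cached but not busy) objects, scaled by
 * sysctl_vfs_cache_pressure; a non-zero nr_to_scan asks to purge up to that
 * many objects, and -1 is returned when the allocation context lacks
 * __GFP_FS, to avoid recursing into filesystem code.
 */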
1760 static int lu_cache_shrink(SHRINKER_ARGS(sc, nr_to_scan, gfp_mask))
1761 {
1762         lu_site_stats_t stats;
1763         struct lu_site *s;
1764         struct lu_site *tmp;
1765         int cached = 0;
1766         int remain = shrink_param(sc, nr_to_scan);
1767         CFS_LIST_HEAD(splice);
1768
1769         if (remain != 0) {
1770                 if (!(shrink_param(sc, gfp_mask) & __GFP_FS))
1771                         return -1;
1772                 CDEBUG(D_INODE, "Shrink %d objects\n", remain);
1773         }
1774
1775         cfs_down(&lu_sites_guard);
1776         cfs_list_for_each_entry_safe(s, tmp, &lu_sites, ls_linkage) {
1777                 if (shrink_param(sc, nr_to_scan) != 0) {
1778                         remain = lu_site_purge(&lu_shrink_env, s, remain);
1779                         /*
1780                          * Move the just-shrunk site to the tail of the site
1781                          * list to ensure shrinking fairness.
1782                          */
1783                         cfs_list_move_tail(&s->ls_linkage, &splice);
1784                 }
1785
1786                 memset(&stats, 0, sizeof(stats));
1787                 lu_site_stats_get(s->ls_obj_hash, &stats, 0);
1788                 cached += stats.lss_total - stats.lss_busy;
1789                 if (shrink_param(sc, nr_to_scan) && remain <= 0)
1790                         break;
1791         }
1792         cfs_list_splice(&splice, lu_sites.prev);
1793         cfs_up(&lu_sites_guard);
1794
1795         cached = (cached / 100) * sysctl_vfs_cache_pressure;
1796         if (shrink_param(sc, nr_to_scan) == 0)
1797                 CDEBUG(D_INODE, "%d objects cached\n", cached);
1798         return cached;
1799 }
1800
1801 /*
1802  * Debugging stuff.
1803  */
1804
1805 /**
1806  * Environment to be used in debugger, contains all tags.
1807  */
1808 struct lu_env lu_debugging_env;
1809
1810 /**
1811  * Debugging printer function using printk().
1812  */
1813 int lu_printk_printer(const struct lu_env *env,
1814                       void *unused, const char *format, ...)
1815 {
1816         va_list args;
1817
1818         va_start(args, format);
1819         vprintk(format, args);
1820         va_end(args);
1821         return 0;
1822 }
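/*
 * A sketch of using the printer above from a debugger context; "o" stands
 * for some object of interest and is illustrative:
 *
 *      lu_object_print(&lu_debugging_env, NULL, lu_printk_printer, o);
 */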
1823
1824 void lu_debugging_setup(void)
1825 {
1826         lu_env_init(&lu_debugging_env, ~0);
1827 }
1828
1829 void lu_context_keys_dump(void)
1830 {
1831         int i;
1832
1833         for (i = 0; i < ARRAY_SIZE(lu_keys); ++i) {
1834                 struct lu_context_key *key;
1835
1836                 key = lu_keys[i];
1837                 if (key != NULL) {
1838                         CERROR("[%d]: %p %x (%p,%p,%p) %d %d \"%s\"@%p\n",
1839                                i, key, key->lct_tags,
1840                                key->lct_init, key->lct_fini, key->lct_exit,
1841                                key->lct_index, cfs_atomic_read(&key->lct_used),
1842                                key->lct_owner ? key->lct_owner->name : "",
1843                                key->lct_owner);
1844                         lu_ref_print(&key->lct_reference);
1845                 }
1846         }
1847 }
1848 EXPORT_SYMBOL(lu_context_keys_dump);
1849 #else  /* !__KERNEL__ */
1850 static int lu_cache_shrink(int nr, unsigned int gfp_mask)
1851 {
1852         return 0;
1853 }
1854 #endif /* __KERNEL__ */
1855
1856 int  cl_global_init(void);
1857 void cl_global_fini(void);
1858 int  lu_ref_global_init(void);
1859 void lu_ref_global_fini(void);
1860
1861 int dt_global_init(void);
1862 void dt_global_fini(void);
1863
1864 int llo_global_init(void);
1865 void llo_global_fini(void);
1866
1867 /**
1868  * Initialization of global lu_* data.
1869  */
1870 int lu_global_init(void)
1871 {
1872         int result;
1873
1874         CDEBUG(D_INFO, "Lustre LU module (%p).\n", &lu_keys);
1875
1876         result = lu_ref_global_init();
1877         if (result != 0)
1878                 return result;
1879
1880         LU_CONTEXT_KEY_INIT(&lu_global_key);
1881         result = lu_context_key_register(&lu_global_key);
1882         if (result != 0)
1883                 return result;
1884         /*
1885          * At this level, we don't know what tags are needed, so allocate them
1886          * conservatively. This should not be too bad, because this
1887          * environment is global.
1888          */
1889         cfs_down(&lu_sites_guard);
1890         result = lu_env_init(&lu_shrink_env, LCT_SHRINKER);
1891         cfs_up(&lu_sites_guard);
1892         if (result != 0)
1893                 return result;
1894
1895         /*
1896          * Seeks estimation: 3 seeks to read a record from the OI, one to
1897          * read the inode, one for the EA. Unfortunately, setting such a high
1898          * value results in the lu_object/inode cache consuming all the memory.
1899          */
1900         lu_site_shrinker = cfs_set_shrinker(CFS_DEFAULT_SEEKS, lu_cache_shrink);
1901         if (lu_site_shrinker == NULL)
1902                 return -ENOMEM;
1903
1904         result = lu_time_global_init();
1905         if (result)
1906                 GOTO(out, result);
1907
1908 #ifdef __KERNEL__
1909         result = dt_global_init();
1910         if (result)
1911                 GOTO(out, result);
1912
1913         result = llo_global_init();
1914         if (result)
1915                 GOTO(out, result);
1916 #endif
1917         result = cl_global_init();
1918 out:
1919
1920         return result;
1921 }
1922
1923 /**
1924  * Dual to lu_global_init().
1925  */
1926 void lu_global_fini(void)
1927 {
1928         cl_global_fini();
1929 #ifdef __KERNEL__
1930         llo_global_fini();
1931         dt_global_fini();
1932 #endif
1933         lu_time_global_fini();
1934         if (lu_site_shrinker != NULL) {
1935                 cfs_remove_shrinker(lu_site_shrinker);
1936                 lu_site_shrinker = NULL;
1937         }
1938
1939         lu_context_key_degister(&lu_global_key);
1940
1941         /*
1942          * Tear shrinker environment down _after_ de-registering
1943          * lu_global_key, because the latter has a value in the former.
1944          */
1945         cfs_down(&lu_sites_guard);
1946         lu_env_fini(&lu_shrink_env);
1947         cfs_up(&lu_sites_guard);
1948
1949         lu_ref_global_fini();
1950 }
1951
1952 struct lu_buf LU_BUF_NULL = {
1953         .lb_buf = NULL,
1954         .lb_len = 0
1955 };
1956 EXPORT_SYMBOL(LU_BUF_NULL);
1957
1958 static __u32 ls_stats_read(struct lprocfs_stats *stats, int idx)
1959 {
1960 #ifdef LPROCFS
1961         struct lprocfs_counter ret;
1962
1963         lprocfs_stats_collect(stats, idx, &ret);
1964         return (__u32)ret.lc_count;
1965 #else
1966         return 0;
1967 #endif
1968 }
1969
1970 /**
1971  * Output site statistical counters into a buffer. Suitable for
1972  * lprocfs_rd_*()-style functions.
1973  */
1974 int lu_site_stats_print(const struct lu_site *s, char *page, int count)
1975 {
1976         lu_site_stats_t stats;
1977
1978         memset(&stats, 0, sizeof(stats));
1979         lu_site_stats_get(s->ls_obj_hash, &stats, 1);
1980
1981         return snprintf(page, count, "%d/%d %d/%d %d %d %d %d %d %d %d\n",
1982                         stats.lss_busy,
1983                         stats.lss_total,
1984                         stats.lss_populated,
1985                         CFS_HASH_NHLIST(s->ls_obj_hash),
1986                         stats.lss_max_search,
1987                         ls_stats_read(s->ls_stats, LU_SS_CREATED),
1988                         ls_stats_read(s->ls_stats, LU_SS_CACHE_HIT),
1989                         ls_stats_read(s->ls_stats, LU_SS_CACHE_MISS),
1990                         ls_stats_read(s->ls_stats, LU_SS_CACHE_RACE),
1991                         ls_stats_read(s->ls_stats, LU_SS_CACHE_DEATH_RACE),
1992                         ls_stats_read(s->ls_stats, LU_SS_LRU_PURGED));
1993 }
1994 EXPORT_SYMBOL(lu_site_stats_print);
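/*
 * For reference, the columns printed above are, in order:
 * busy/total populated/buckets max_search created hit miss race
 * death_race purged.
 */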
1995
1996 const char *lu_time_names[LU_TIME_NR] = {
1997         [LU_TIME_FIND_LOOKUP] = "find_lookup",
1998         [LU_TIME_FIND_ALLOC]  = "find_alloc",
1999         [LU_TIME_FIND_INSERT] = "find_insert"
2000 };
2001 EXPORT_SYMBOL(lu_time_names);
2002
2003 /**
2004  * Helper function to initialize a number of kmem slab caches at once.
2005  */
2006 int lu_kmem_init(struct lu_kmem_descr *caches)
2007 {
2008         int result;
2009         struct lu_kmem_descr *iter = caches;
2010
2011         for (result = 0; iter->ckd_cache != NULL; ++iter) {
2012                 *iter->ckd_cache = cfs_mem_cache_create(iter->ckd_name,
2013                                                         iter->ckd_size,
2014                                                         0, 0);
2015                 if (*iter->ckd_cache == NULL) {
2016                         result = -ENOMEM;
2017                         /* free all previously allocated caches */
2018                         lu_kmem_fini(caches);
2019                         break;
2020                 }
2021         }
2022         return result;
2023 }
2024 EXPORT_SYMBOL(lu_kmem_init);
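/**
 * A sketch of a typical descriptor table consumed by lu_kmem_init() and
 * lu_kmem_fini(); the cache variable, name, and object type are
 * hypothetical:
 *
 * \code
 * static cfs_mem_cache_t *foo_object_kmem;
 *
 * static struct lu_kmem_descr foo_caches[] = {
 *         {
 *                 .ckd_cache = &foo_object_kmem,
 *                 .ckd_name  = "foo_object_kmem",
 *                 .ckd_size  = sizeof(struct foo_object)
 *         },
 *         {
 *                 .ckd_cache = NULL
 *         }
 * };
 * \endcode
 *
 * A module would then call lu_kmem_init(foo_caches) at load time and
 * lu_kmem_fini(foo_caches) on unload.
 */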
2025
2026 /**
2027  * Helper function to finalize a number of kmem slab caches at once. Dual to
2028  * lu_kmem_init().
2029  */
2030 void lu_kmem_fini(struct lu_kmem_descr *caches)
2031 {
2032         int rc;
2033
2034         for (; caches->ckd_cache != NULL; ++caches) {
2035                 if (*caches->ckd_cache != NULL) {
2036                         rc = cfs_mem_cache_destroy(*caches->ckd_cache);
2037                         LASSERTF(rc == 0, "couldn't destroy %s slab\n",
2038                                  caches->ckd_name);
2039                         *caches->ckd_cache = NULL;
2040                 }
2041         }
2042 }
2043 EXPORT_SYMBOL(lu_kmem_fini);