/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2012, Whamcloud, Inc.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lustre/obdclass/lu_object.c
 *
 * Lustre Object.
 * These are the only exported functions; they provide some generic
 * infrastructure for managing object devices.
 *
 *   Author: Nikita Danilov <nikita.danilov@sun.com>
 */

#define DEBUG_SUBSYSTEM S_CLASS
#ifndef EXPORT_SYMTAB
# define EXPORT_SYMTAB
#endif

#include <libcfs/libcfs.h>

#ifdef __KERNEL__
# include <linux/module.h>
#endif

/* hash_long() */
#include <libcfs/libcfs_hash.h>
#include <obd_class.h>
#include <obd_support.h>
#include <lustre_disk.h>
#include <lustre_fid.h>
#include <lu_object.h>
#include <libcfs/list.h>
/* lu_time_global_{init,fini}() */
#include <lu_time.h>

static void lu_object_free(const struct lu_env *env, struct lu_object *o);

/**
 * Decrease reference counter on object. If last reference is freed, return
 * object to the cache, unless lu_object_is_dying(o) holds. In the latter
 * case, free object immediately.
 */
void lu_object_put(const struct lu_env *env, struct lu_object *o)
{
        struct lu_site_bkt_data *bkt;
        struct lu_object_header *top;
        struct lu_site          *site;
        struct lu_object        *orig;
        cfs_hash_bd_t            bd;

        top  = o->lo_header;
        site = o->lo_dev->ld_site;
        orig = o;

        cfs_hash_bd_get(site->ls_obj_hash, &top->loh_fid, &bd);
        bkt = cfs_hash_bd_extra_get(site->ls_obj_hash, &bd);

        if (!cfs_hash_bd_dec_and_lock(site->ls_obj_hash, &bd, &top->loh_ref)) {
                if (lu_object_is_dying(top)) {

                        /*
                         * somebody may be waiting for this, currently only
                         * used for cl_object, see cl_object_put_last().
                         */
                        cfs_waitq_broadcast(&bkt->lsb_marche_funebre);
                }
                return;
        }

        LASSERT(bkt->lsb_busy > 0);
        bkt->lsb_busy--;
        /*
         * When last reference is released, iterate over object
         * layers, and notify them that object is no longer busy.
         */
        cfs_list_for_each_entry_reverse(o, &top->loh_layers, lo_linkage) {
                if (o->lo_ops->loo_object_release != NULL)
                        o->lo_ops->loo_object_release(env, o);
        }

        if (!lu_object_is_dying(top)) {
                LASSERT(cfs_list_empty(&top->loh_lru));
                cfs_list_add_tail(&top->loh_lru, &bkt->lsb_lru);
                cfs_hash_bd_unlock(site->ls_obj_hash, &bd, 1);
                return;
        }
        /*
         * If object is dying (will not be cached), remove it
         * from hash table and LRU.
         *
         * This is done with hash table and LRU lists locked. As the only
         * way to acquire first reference to previously unreferenced
         * object is through hash-table lookup (lu_object_find()),
         * or LRU scanning (lu_site_purge()), both of which are done under
         * hash-table and LRU lock, no race with concurrent object lookup is
         * possible and we can safely destroy object below.
         */
        cfs_hash_bd_del_locked(site->ls_obj_hash, &bd, &top->loh_hash);
        cfs_hash_bd_unlock(site->ls_obj_hash, &bd, 1);
        /*
         * Object was already removed from hash and lru above, can
         * kill it.
         */
        lu_object_free(env, orig);
}
EXPORT_SYMBOL(lu_object_put);

/**
 * Allocate new object.
 *
 * This follows object creation protocol, described in the comment within
 * struct lu_device_operations definition.
 */
static struct lu_object *lu_object_alloc(const struct lu_env *env,
                                         struct lu_device *dev,
                                         const struct lu_fid *f,
                                         const struct lu_object_conf *conf)
{
        struct lu_object *scan;
        struct lu_object *top;
        cfs_list_t *layers;
        int clean;
        int result;
        ENTRY;

        /*
         * Create top-level object slice. This will also create
         * lu_object_header.
         */
        top = dev->ld_ops->ldo_object_alloc(env, NULL, dev);
        if (top == NULL)
                RETURN(ERR_PTR(-ENOMEM));
        /*
         * This is the only place where object fid is assigned. It's constant
         * after this point.
         */
        LASSERT(fid_is_igif(f) || fid_ver(f) == 0);
        top->lo_header->loh_fid = *f;
        layers = &top->lo_header->loh_layers;
        do {
                /*
                 * Call ->loo_object_init() repeatedly, until no more new
                 * object slices are created.
                 */
                clean = 1;
                cfs_list_for_each_entry(scan, layers, lo_linkage) {
                        if (scan->lo_flags & LU_OBJECT_ALLOCATED)
                                continue;
                        clean = 0;
                        scan->lo_header = top->lo_header;
                        result = scan->lo_ops->loo_object_init(env, scan, conf);
                        if (result != 0) {
                                lu_object_free(env, top);
                                RETURN(ERR_PTR(result));
                        }
                        scan->lo_flags |= LU_OBJECT_ALLOCATED;
                }
        } while (!clean);

        cfs_list_for_each_entry_reverse(scan, layers, lo_linkage) {
                if (scan->lo_ops->loo_object_start != NULL) {
                        result = scan->lo_ops->loo_object_start(env, scan);
                        if (result != 0) {
                                lu_object_free(env, top);
                                RETURN(ERR_PTR(result));
                        }
                }
        }

        lprocfs_counter_incr(dev->ld_site->ls_stats, LU_SS_CREATED);
        RETURN(top);
}

/**
 * Free an object.
 */
static void lu_object_free(const struct lu_env *env, struct lu_object *o)
{
        struct lu_site_bkt_data *bkt;
        struct lu_site          *site;
        struct lu_object        *scan;
        cfs_list_t              *layers;
        cfs_list_t               splice;

        site   = o->lo_dev->ld_site;
        layers = &o->lo_header->loh_layers;
        bkt    = lu_site_bkt_from_fid(site, &o->lo_header->loh_fid);
        /*
         * First call ->loo_object_delete() method to release all resources.
         */
        cfs_list_for_each_entry_reverse(scan, layers, lo_linkage) {
                if (scan->lo_ops->loo_object_delete != NULL)
                        scan->lo_ops->loo_object_delete(env, scan);
        }

        /*
         * Then, splice object layers into stand-alone list, and call
         * ->loo_object_free() on all layers to free memory. Splice is
         * necessary, because lu_object_header is freed together with the
         * top-level slice.
         */
        CFS_INIT_LIST_HEAD(&splice);
        cfs_list_splice_init(layers, &splice);
        while (!cfs_list_empty(&splice)) {
                /*
                 * Free layers in bottom-to-top order, so that object header
                 * lives as long as possible and ->loo_object_free() methods
                 * can look at its contents.
                 */
                o = container_of0(splice.prev, struct lu_object, lo_linkage);
                cfs_list_del_init(&o->lo_linkage);
                LASSERT(o->lo_ops->loo_object_free != NULL);
                o->lo_ops->loo_object_free(env, o);
        }

        if (cfs_waitq_active(&bkt->lsb_marche_funebre))
                cfs_waitq_broadcast(&bkt->lsb_marche_funebre);
}

/**
 * Free \a nr objects from the cold end of the site LRU list.
 */
int lu_site_purge(const struct lu_env *env, struct lu_site *s, int nr)
{
        struct lu_object_header *h;
        struct lu_object_header *temp;
        struct lu_site_bkt_data *bkt;
        cfs_hash_bd_t            bd;
        cfs_hash_bd_t            bd2;
        cfs_list_t               dispose;
        int                      did_sth;
        int                      start;
        int                      count;
        int                      bnr;
        int                      i;

        CFS_INIT_LIST_HEAD(&dispose);
        /*
         * Under LRU list lock, scan LRU list and move unreferenced objects to
         * the dispose list, removing them from LRU and hash table.
         */
        start = s->ls_purge_start;
        bnr = (nr == ~0) ? -1 : nr / CFS_HASH_NBKT(s->ls_obj_hash) + 1;
 again:
        did_sth = 0;
        cfs_hash_for_each_bucket(s->ls_obj_hash, &bd, i) {
                if (i < start)
                        continue;
                count = bnr;
                cfs_hash_bd_lock(s->ls_obj_hash, &bd, 1);
                bkt = cfs_hash_bd_extra_get(s->ls_obj_hash, &bd);

                cfs_list_for_each_entry_safe(h, temp, &bkt->lsb_lru, loh_lru) {
                        LASSERT(cfs_atomic_read(&h->loh_ref) == 0);

                        cfs_hash_bd_get(s->ls_obj_hash, &h->loh_fid, &bd2);
                        LASSERT(bd.bd_bucket == bd2.bd_bucket);

                        cfs_hash_bd_del_locked(s->ls_obj_hash,
                                               &bd2, &h->loh_hash);
                        cfs_list_move(&h->loh_lru, &dispose);
                        if (did_sth == 0)
                                did_sth = 1;

                        if (nr != ~0 && --nr == 0)
                                break;

                        if (count > 0 && --count == 0)
                                break;
                }
                cfs_hash_bd_unlock(s->ls_obj_hash, &bd, 1);
                cfs_cond_resched();
                /*
                 * Free everything on the dispose list. This is safe against
                 * races due to the reasons described in lu_object_put().
                 */
                while (!cfs_list_empty(&dispose)) {
                        h = container_of0(dispose.next,
                                          struct lu_object_header, loh_lru);
                        cfs_list_del_init(&h->loh_lru);
                        lu_object_free(env, lu_object_top(h));
                        lprocfs_counter_incr(s->ls_stats, LU_SS_LRU_PURGED);
                }

                if (nr == 0)
                        break;
        }

        if (nr != 0 && did_sth && start != 0) {
                start = 0; /* restart from the first bucket */
                goto again;
        }
        /* race on s->ls_purge_start, but nobody cares */
        s->ls_purge_start = i % CFS_HASH_NBKT(s->ls_obj_hash);

        return nr;
}
EXPORT_SYMBOL(lu_site_purge);
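
/*
 * Illustrative sketch (hypothetical caller, names assumed): trimming the
 * site cache. lu_stack_fini() below drains the whole cache by passing ~0;
 * a memory shrinker would pass a finite count instead.
 */
#if 0
static void example_trim_site(const struct lu_env *env, struct lu_site *site)
{
        /* free up to 128 unreferenced objects from the cold end of the LRU */
        lu_site_purge(env, site, 128);

        /* or drain everything, as lu_stack_fini() does */
        lu_site_purge(env, site, ~0);
}
#endif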

/*
 * Object printing.
 *
 * Code below has to jump through certain hoops to output object description
 * into libcfs_debug_msg-based log. The problem is that lu_object_print()
 * composes object description from strings that are parts of _lines_ of
 * output (i.e., strings that are not terminated by newline). This doesn't fit
 * very well into the libcfs_debug_msg() interface, which assumes that each
 * message supplied to it is a self-contained output line.
 *
 * To work around this, strings are collected in a temporary buffer
 * (implemented as a value of lu_cdebug_key key), until a terminating newline
 * character is detected.
 */

enum {
        /**
         * Maximal line size.
         *
         * XXX overflow is not handled correctly.
         */
        LU_CDEBUG_LINE = 512
};

struct lu_cdebug_data {
        /**
         * Temporary buffer.
         */
        char lck_area[LU_CDEBUG_LINE];
};

/* context key constructor/destructor: lu_global_key_init, lu_global_key_fini */
LU_KEY_INIT_FINI(lu_global, struct lu_cdebug_data);

/**
 * Key, holding temporary buffer. This key is registered very early by
 * lu_global_init().
 */
struct lu_context_key lu_global_key = {
        .lct_tags = LCT_MD_THREAD | LCT_DT_THREAD |
                    LCT_MG_THREAD | LCT_CL_THREAD,
        .lct_init = lu_global_key_init,
        .lct_fini = lu_global_key_fini
};
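
/*
 * Illustrative sketch (hypothetical module, names assumed): defining and
 * registering a private context key, following the lu_global_key pattern
 * above. LU_KEY_INIT_FINI() generates example_key_init()/example_key_fini()
 * that allocate and free a struct example_thread_info per context.
 */
#if 0
struct example_thread_info {
        char eti_buf[LU_CDEBUG_LINE];
};

LU_KEY_INIT_FINI(example, struct example_thread_info);

struct lu_context_key example_thread_key = {
        /* a key registered at runtime would also set .lct_owner */
        .lct_tags = LCT_MD_THREAD,
        .lct_init = example_key_init,
        .lct_fini = example_key_fini
};
#endif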

/**
 * Printer function emitting messages through libcfs_debug_msg().
 */
int lu_cdebug_printer(const struct lu_env *env,
                      void *cookie, const char *format, ...)
{
        struct libcfs_debug_msg_data *msgdata = cookie;
        struct lu_cdebug_data        *key;
        int used;
        int complete;
        va_list args;

        va_start(args, format);

        key = lu_context_key_get(&env->le_ctx, &lu_global_key);
        LASSERT(key != NULL);

        used = strlen(key->lck_area);
        complete = format[strlen(format) - 1] == '\n';
        /*
         * Append new chunk to the buffer.
         */
        vsnprintf(key->lck_area + used,
                  ARRAY_SIZE(key->lck_area) - used, format, args);
        if (complete) {
                if (cfs_cdebug_show(msgdata->msg_mask, msgdata->msg_subsys))
                        libcfs_debug_msg(msgdata, "%s", key->lck_area);
                key->lck_area[0] = 0;
        }
        va_end(args);
        return 0;
}
EXPORT_SYMBOL(lu_cdebug_printer);
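
/*
 * Illustrative sketch (hypothetical caller): emitting an object description
 * through the debug log, mirroring what lu_stack_fini() does further below
 * for a whole site.
 */
#if 0
static void example_dump_object(const struct lu_env *env,
                                const struct lu_object *o)
{
        LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, D_ERROR, NULL);

        lu_object_print(env, &msgdata, lu_cdebug_printer, o);
}
#endif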

/**
 * Print object header.
 */
void lu_object_header_print(const struct lu_env *env, void *cookie,
                            lu_printer_t printer,
                            const struct lu_object_header *hdr)
{
        (*printer)(env, cookie, "header@%p[%#lx, %d, "DFID"%s%s%s]",
                   hdr, hdr->loh_flags, cfs_atomic_read(&hdr->loh_ref),
                   PFID(&hdr->loh_fid),
                   cfs_hlist_unhashed(&hdr->loh_hash) ? "" : " hash",
                   cfs_list_empty((cfs_list_t *)&hdr->loh_lru) ? "" : " lru",
                   hdr->loh_attr & LOHA_EXISTS ? " exist" : "");
}
EXPORT_SYMBOL(lu_object_header_print);

/**
 * Print human readable representation of the \a o to the \a printer.
 */
void lu_object_print(const struct lu_env *env, void *cookie,
                     lu_printer_t printer, const struct lu_object *o)
{
        static const char ruler[] = "........................................";
        struct lu_object_header *top;
        int depth;

        top = o->lo_header;
        lu_object_header_print(env, cookie, printer, top);
        (*printer)(env, cookie, "{ \n");
        cfs_list_for_each_entry(o, &top->loh_layers, lo_linkage) {
                depth = o->lo_depth + 4;

                /*
                 * print `.' \a depth times followed by type name and address
                 */
                (*printer)(env, cookie, "%*.*s%s@%p", depth, depth, ruler,
                           o->lo_dev->ld_type->ldt_name, o);
                if (o->lo_ops->loo_object_print != NULL)
                        o->lo_ops->loo_object_print(env, cookie, printer, o);
                (*printer)(env, cookie, "\n");
        }
        (*printer)(env, cookie, "} header@%p\n", top);
}
EXPORT_SYMBOL(lu_object_print);

/**
 * Check object consistency.
 */
int lu_object_invariant(const struct lu_object *o)
{
        struct lu_object_header *top;

        top = o->lo_header;
        cfs_list_for_each_entry(o, &top->loh_layers, lo_linkage) {
                if (o->lo_ops->loo_object_invariant != NULL &&
                    !o->lo_ops->loo_object_invariant(o))
                        return 0;
        }
        return 1;
}
EXPORT_SYMBOL(lu_object_invariant);

static struct lu_object *htable_lookup(struct lu_site *s,
                                       cfs_hash_bd_t *bd,
                                       const struct lu_fid *f,
                                       cfs_waitlink_t *waiter,
                                       __u64 *version)
{
        struct lu_site_bkt_data *bkt;
        struct lu_object_header *h;
        cfs_hlist_node_t        *hnode;
        __u64  ver = cfs_hash_bd_version_get(bd);

        if (*version == ver)
                return NULL;

        *version = ver;
        bkt = cfs_hash_bd_extra_get(s->ls_obj_hash, bd);
        /* cfs_hash_bd_lookup_intent() is somewhat of an "internal" function
         * of cfs_hash, but we don't want a refcount on the object right now */
        hnode = cfs_hash_bd_lookup_locked(s->ls_obj_hash, bd, (void *)f);
        if (hnode == NULL) {
                lprocfs_counter_incr(s->ls_stats, LU_SS_CACHE_MISS);
                return NULL;
        }

        h = container_of0(hnode, struct lu_object_header, loh_hash);
        if (likely(!lu_object_is_dying(h))) {
                lprocfs_counter_incr(s->ls_stats, LU_SS_CACHE_HIT);
                cfs_list_del_init(&h->loh_lru);
                return lu_object_top(h);
        }

        /*
         * Lookup found an object being destroyed; this object cannot be
         * returned (to assure that references to dying objects are eventually
         * drained), and moreover, lookup has to wait until the object is
         * freed.
         */
        cfs_atomic_dec(&h->loh_ref);

        cfs_waitlink_init(waiter);
        cfs_waitq_add(&bkt->lsb_marche_funebre, waiter);
        cfs_set_current_state(CFS_TASK_UNINT);
        lprocfs_counter_incr(s->ls_stats, LU_SS_CACHE_DEATH_RACE);
        return ERR_PTR(-EAGAIN);
}

/**
 * Search cache for an object with the fid \a f. If such object is found,
 * return it. Otherwise, create new object, insert it into cache and return
 * it. In any case, additional reference is acquired on the returned object.
 */
struct lu_object *lu_object_find(const struct lu_env *env,
                                 struct lu_device *dev, const struct lu_fid *f,
                                 const struct lu_object_conf *conf)
{
        return lu_object_find_at(env, dev->ld_site->ls_top_dev, f, conf);
}
EXPORT_SYMBOL(lu_object_find);
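
/*
 * Illustrative sketch (hypothetical caller): the usual find/put pairing.
 * Every successful lu_object_find() must eventually be balanced by
 * lu_object_put().
 */
#if 0
static int example_lookup(const struct lu_env *env, struct lu_device *dev,
                          const struct lu_fid *fid)
{
        struct lu_object *o;

        o = lu_object_find(env, dev, fid, NULL);
        if (IS_ERR(o))
                return PTR_ERR(o);
        /* ... use the object ... */
        lu_object_put(env, o);
        return 0;
}
#endif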

static struct lu_object *lu_object_new(const struct lu_env *env,
                                       struct lu_device *dev,
                                       const struct lu_fid *f,
                                       const struct lu_object_conf *conf)
{
        struct lu_object        *o;
        cfs_hash_t              *hs;
        cfs_hash_bd_t            bd;
        struct lu_site_bkt_data *bkt;

        o = lu_object_alloc(env, dev, f, conf);
        if (unlikely(IS_ERR(o)))
                return o;

        hs = dev->ld_site->ls_obj_hash;
        cfs_hash_bd_get_and_lock(hs, (void *)f, &bd, 1);
        bkt = cfs_hash_bd_extra_get(hs, &bd);
        cfs_hash_bd_add_locked(hs, &bd, &o->lo_header->loh_hash);
        bkt->lsb_busy++;
        cfs_hash_bd_unlock(hs, &bd, 1);
        return o;
}

/**
 * Core logic of lu_object_find*() functions.
 */
static struct lu_object *lu_object_find_try(const struct lu_env *env,
                                            struct lu_device *dev,
                                            const struct lu_fid *f,
                                            const struct lu_object_conf *conf,
                                            cfs_waitlink_t *waiter)
{
        struct lu_object      *o;
        struct lu_object      *shadow;
        struct lu_site        *s;
        cfs_hash_t            *hs;
        cfs_hash_bd_t          bd;
        __u64                  version = 0;

        /*
         * This uses standard index maintenance protocol:
         *
         *     - search index under lock, and return object if found;
         *     - otherwise, unlock index, allocate new object;
         *     - lock index and search again;
         *     - if nothing is found (usual case), insert newly created
         *       object into index;
         *     - otherwise (race: other thread inserted object), free
         *       object just allocated.
         *     - unlock index;
         *     - return object.
         *
         * In the "LOC_F_NEW" case we know the object is newly created,
         * so it is unnecessary to perform lookup-alloc-lookup-insert;
         * instead, just allocate and insert directly.
         *
         * If a dying object is found during index search, add @waiter to the
         * site wait-queue and return ERR_PTR(-EAGAIN).
         */
        if (conf != NULL && conf->loc_flags & LOC_F_NEW)
                return lu_object_new(env, dev, f, conf);

        s  = dev->ld_site;
        hs = s->ls_obj_hash;
        cfs_hash_bd_get_and_lock(hs, (void *)f, &bd, 1);
        o = htable_lookup(s, &bd, f, waiter, &version);
        cfs_hash_bd_unlock(hs, &bd, 1);
        if (o != NULL)
                return o;

        /*
         * Allocate new object. This may result in rather complicated
         * operations, including fld queries, inode loading, etc.
         */
        o = lu_object_alloc(env, dev, f, conf);
        if (unlikely(IS_ERR(o)))
                return o;

        LASSERT(lu_fid_eq(lu_object_fid(o), f));

        cfs_hash_bd_lock(hs, &bd, 1);

        shadow = htable_lookup(s, &bd, f, waiter, &version);
        if (likely(shadow == NULL)) {
                struct lu_site_bkt_data *bkt;

                bkt = cfs_hash_bd_extra_get(hs, &bd);
                cfs_hash_bd_add_locked(hs, &bd, &o->lo_header->loh_hash);
                bkt->lsb_busy++;
                cfs_hash_bd_unlock(hs, &bd, 1);
                return o;
        }

        lprocfs_counter_incr(s->ls_stats, LU_SS_CACHE_RACE);
        cfs_hash_bd_unlock(hs, &bd, 1);
        lu_object_free(env, o);
        return shadow;
}

/**
 * Much like lu_object_find(), but top level device of object is specifically
 * \a dev rather than top level device of the site. This interface allows
 * objects of different "stacking" to be created within the same site.
 */
struct lu_object *lu_object_find_at(const struct lu_env *env,
                                    struct lu_device *dev,
                                    const struct lu_fid *f,
                                    const struct lu_object_conf *conf)
{
        struct lu_site_bkt_data *bkt;
        struct lu_object        *obj;
        cfs_waitlink_t           wait;

        while (1) {
                obj = lu_object_find_try(env, dev, f, conf, &wait);
                if (obj != ERR_PTR(-EAGAIN))
                        return obj;
                /*
                 * lu_object_find_try() already added waiter into the
                 * wait queue.
                 */
                cfs_waitq_wait(&wait, CFS_TASK_UNINT);
                bkt = lu_site_bkt_from_fid(dev->ld_site, (void *)f);
                cfs_waitq_del(&bkt->lsb_marche_funebre, &wait);
        }
}
EXPORT_SYMBOL(lu_object_find_at);

/**
 * Find object with given fid, and return its slice belonging to given device.
 */
struct lu_object *lu_object_find_slice(const struct lu_env *env,
                                       struct lu_device *dev,
                                       const struct lu_fid *f,
                                       const struct lu_object_conf *conf)
{
        struct lu_object *top;
        struct lu_object *obj;

        top = lu_object_find(env, dev, f, conf);
        if (!IS_ERR(top)) {
                obj = lu_object_locate(top->lo_header, dev->ld_type);
                if (obj == NULL)
                        lu_object_put(env, top);
        } else
                obj = top;
        return obj;
}
EXPORT_SYMBOL(lu_object_find_slice);

/**
 * Global list of all device types.
 */
static CFS_LIST_HEAD(lu_device_types);

int lu_device_type_init(struct lu_device_type *ldt)
{
        int result;

        CFS_INIT_LIST_HEAD(&ldt->ldt_linkage);
        result = ldt->ldt_ops->ldto_init(ldt);
        if (result == 0)
                cfs_list_add(&ldt->ldt_linkage, &lu_device_types);
        return result;
}
EXPORT_SYMBOL(lu_device_type_init);

void lu_device_type_fini(struct lu_device_type *ldt)
{
        cfs_list_del_init(&ldt->ldt_linkage);
        ldt->ldt_ops->ldto_fini(ldt);
}
EXPORT_SYMBOL(lu_device_type_fini);

void lu_types_stop(void)
{
        struct lu_device_type *ldt;

        cfs_list_for_each_entry(ldt, &lu_device_types, ldt_linkage) {
                if (ldt->ldt_device_nr == 0)
                        ldt->ldt_ops->ldto_stop(ldt);
        }
}
EXPORT_SYMBOL(lu_types_stop);

/**
 * Global list of all sites on this node
 */
static CFS_LIST_HEAD(lu_sites);
static CFS_DEFINE_MUTEX(lu_sites_guard);

/**
 * Global environment used by site shrinker.
 */
static struct lu_env lu_shrink_env;

struct lu_site_print_arg {
        struct lu_env   *lsp_env;
        void            *lsp_cookie;
        lu_printer_t     lsp_printer;
};

static int
lu_site_obj_print(cfs_hash_t *hs, cfs_hash_bd_t *bd,
                  cfs_hlist_node_t *hnode, void *data)
{
        struct lu_site_print_arg *arg = (struct lu_site_print_arg *)data;
        struct lu_object_header  *h;

        h = cfs_hlist_entry(hnode, struct lu_object_header, loh_hash);
        if (!cfs_list_empty(&h->loh_layers)) {
                const struct lu_object *o;

                o = lu_object_top(h);
                lu_object_print(arg->lsp_env, arg->lsp_cookie,
                                arg->lsp_printer, o);
        } else {
                lu_object_header_print(arg->lsp_env, arg->lsp_cookie,
                                       arg->lsp_printer, h);
        }
        return 0;
}

/**
 * Print all objects in \a s.
 */
void lu_site_print(const struct lu_env *env, struct lu_site *s, void *cookie,
                   lu_printer_t printer)
{
        struct lu_site_print_arg arg = {
                .lsp_env     = (struct lu_env *)env,
                .lsp_cookie  = cookie,
                .lsp_printer = printer,
        };

        cfs_hash_for_each(s->ls_obj_hash, lu_site_obj_print, &arg);
}
EXPORT_SYMBOL(lu_site_print);

enum {
        LU_CACHE_PERCENT_MAX     = 50,
        LU_CACHE_PERCENT_DEFAULT = 20
};

static unsigned int lu_cache_percent = LU_CACHE_PERCENT_DEFAULT;
CFS_MODULE_PARM(lu_cache_percent, "i", int, 0644,
                "Percentage of memory to be used as lu_object cache");

/**
 * Return desired hash table order.
 */
static int lu_htable_order(void)
{
        unsigned long cache_size;
        int bits;

        /*
         * Calculate hash table size, assuming that we want reasonable
         * performance when 20% of total memory is occupied by cache of
         * lu_objects.
         *
         * Size of lu_object is (arbitrary) taken as 1K (together with inode).
         */
        cache_size = cfs_num_physpages;

#if BITS_PER_LONG == 32
        /* limit hashtable size for lowmem systems to low RAM */
        if (cache_size > 1 << (30 - CFS_PAGE_SHIFT))
                cache_size = (1 << (30 - CFS_PAGE_SHIFT)) * 3 / 4;
#endif

        /* clear off unreasonable cache setting. */
        if (lu_cache_percent == 0 || lu_cache_percent > LU_CACHE_PERCENT_MAX) {
                CWARN("obdclass: invalid lu_cache_percent: %u, it must be in"
                      " the range of (0, %u]. Will use default value: %u.\n",
                      lu_cache_percent, LU_CACHE_PERCENT_MAX,
                      LU_CACHE_PERCENT_DEFAULT);

                lu_cache_percent = LU_CACHE_PERCENT_DEFAULT;
        }
        cache_size = cache_size / 100 * lu_cache_percent *
                (CFS_PAGE_SIZE / 1024);

        for (bits = 1; (1 << bits) < cache_size; ++bits) {
                ;
        }
        return bits;
}
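
/*
 * Worked example (assuming a 64-bit node with 4GB of RAM and 4KB pages):
 * cfs_num_physpages is 2^20, so with the default lu_cache_percent of 20 the
 * target is 1048576 / 100 * 20 * (4096 / 1024) = 838800 slots, and the loop
 * above returns bits = 20, since 2^19 < 838800 <= 2^20.
 */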

static unsigned lu_obj_hop_hash(cfs_hash_t *hs,
                                const void *key, unsigned mask)
{
        struct lu_fid  *fid = (struct lu_fid *)key;
        __u32           hash;

        hash = fid_flatten32(fid);
        hash += (hash >> 4) + (hash << 12); /* mixing oid and seq */
        hash = cfs_hash_long(hash, hs->hs_bkt_bits);

        /* give me another random factor */
        hash -= cfs_hash_long((unsigned long)hs, fid_oid(fid) % 11 + 3);

        hash <<= hs->hs_cur_bits - hs->hs_bkt_bits;
        hash |= (fid_seq(fid) + fid_oid(fid)) & (CFS_HASH_NBKT(hs) - 1);

        return hash & mask;
}

static void *lu_obj_hop_object(cfs_hlist_node_t *hnode)
{
        return cfs_hlist_entry(hnode, struct lu_object_header, loh_hash);
}

static void *lu_obj_hop_key(cfs_hlist_node_t *hnode)
{
        struct lu_object_header *h;

        h = cfs_hlist_entry(hnode, struct lu_object_header, loh_hash);
        return &h->loh_fid;
}

static int lu_obj_hop_keycmp(const void *key, cfs_hlist_node_t *hnode)
{
        struct lu_object_header *h;

        h = cfs_hlist_entry(hnode, struct lu_object_header, loh_hash);
        return lu_fid_eq(&h->loh_fid, (struct lu_fid *)key);
}

static void lu_obj_hop_get(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
{
        struct lu_object_header *h;

        h = cfs_hlist_entry(hnode, struct lu_object_header, loh_hash);
        if (cfs_atomic_add_return(1, &h->loh_ref) == 1) {
                struct lu_site_bkt_data *bkt;
                cfs_hash_bd_t            bd;

                cfs_hash_bd_get(hs, &h->loh_fid, &bd);
                bkt = cfs_hash_bd_extra_get(hs, &bd);
                bkt->lsb_busy++;
        }
}

static void lu_obj_hop_put_locked(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
{
        LBUG(); /* we should never be called */
}

cfs_hash_ops_t lu_site_hash_ops = {
        .hs_hash        = lu_obj_hop_hash,
        .hs_key         = lu_obj_hop_key,
        .hs_keycmp      = lu_obj_hop_keycmp,
        .hs_object      = lu_obj_hop_object,
        .hs_get         = lu_obj_hop_get,
        .hs_put_locked  = lu_obj_hop_put_locked,
};

/**
 * Initialize site \a s, with \a top as the top-level device.
 */
#define LU_SITE_BITS_MIN    12
#define LU_SITE_BITS_MAX    24
/**
 * 256 buckets in total; we don't want too many buckets because:
 * - they consume too much memory
 * - they make the per-bucket LRU lists unbalanced
 */
#define LU_SITE_BKT_BITS    8

int lu_site_init(struct lu_site *s, struct lu_device *top)
{
        struct lu_site_bkt_data *bkt;
        cfs_hash_bd_t bd;
        char name[16];
        int bits;
        int i;
        ENTRY;

        memset(s, 0, sizeof *s);
        bits = lu_htable_order();
        snprintf(name, 16, "lu_site_%s", top->ld_type->ldt_name);
        for (bits = min(max(LU_SITE_BITS_MIN, bits), LU_SITE_BITS_MAX);
             bits >= LU_SITE_BITS_MIN; bits--) {
                s->ls_obj_hash = cfs_hash_create(name, bits, bits,
                                                 bits - LU_SITE_BKT_BITS,
                                                 sizeof(*bkt), 0, 0,
                                                 &lu_site_hash_ops,
                                                 CFS_HASH_SPIN_BKTLOCK |
                                                 CFS_HASH_NO_ITEMREF |
                                                 CFS_HASH_DEPTH |
                                                 CFS_HASH_ASSERT_EMPTY);
                if (s->ls_obj_hash != NULL)
                        break;
        }

        if (s->ls_obj_hash == NULL) {
                CERROR("failed to create lu_site hash with bits: %d\n", bits);
                return -ENOMEM;
        }

        cfs_hash_for_each_bucket(s->ls_obj_hash, &bd, i) {
                bkt = cfs_hash_bd_extra_get(s->ls_obj_hash, &bd);
                CFS_INIT_LIST_HEAD(&bkt->lsb_lru);
                cfs_waitq_init(&bkt->lsb_marche_funebre);
        }

        s->ls_stats = lprocfs_alloc_stats(LU_SS_LAST_STAT, 0);
        if (s->ls_stats == NULL) {
                cfs_hash_putref(s->ls_obj_hash);
                s->ls_obj_hash = NULL;
                return -ENOMEM;
        }

        lprocfs_counter_init(s->ls_stats, LU_SS_CREATED,
                             0, "created", "created");
        lprocfs_counter_init(s->ls_stats, LU_SS_CACHE_HIT,
                             0, "cache_hit", "cache_hit");
        lprocfs_counter_init(s->ls_stats, LU_SS_CACHE_MISS,
                             0, "cache_miss", "cache_miss");
        lprocfs_counter_init(s->ls_stats, LU_SS_CACHE_RACE,
                             0, "cache_race", "cache_race");
        lprocfs_counter_init(s->ls_stats, LU_SS_CACHE_DEATH_RACE,
                             0, "cache_death_race", "cache_death_race");
        lprocfs_counter_init(s->ls_stats, LU_SS_LRU_PURGED,
                             0, "lru_purged", "lru_purged");

        CFS_INIT_LIST_HEAD(&s->ls_linkage);
        s->ls_top_dev = top;
        top->ld_site = s;
        lu_device_get(top);
        lu_ref_add(&top->ld_reference, "site-top", s);

        CFS_INIT_LIST_HEAD(&s->ls_ld_linkage);
        cfs_spin_lock_init(&s->ls_ld_lock);

        cfs_spin_lock(&s->ls_ld_lock);
        cfs_list_add(&top->ld_linkage, &s->ls_ld_linkage);
        cfs_spin_unlock(&s->ls_ld_lock);

        RETURN(0);
}
EXPORT_SYMBOL(lu_site_init);
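
/*
 * Illustrative sketch (hypothetical caller): a site is initialized against
 * the top-level device of a stack and then announced with
 * lu_site_init_finish() once the stack is ready.
 */
#if 0
static int example_site_setup(struct lu_site *s, struct lu_device *top)
{
        int rc;

        rc = lu_site_init(s, top);
        if (rc != 0)
                return rc;
        rc = lu_site_init_finish(s);
        if (rc != 0)
                lu_site_fini(s);
        return rc;
}
#endif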

/**
 * Finalize \a s and release its resources.
 */
void lu_site_fini(struct lu_site *s)
{
        cfs_mutex_lock(&lu_sites_guard);
        cfs_list_del_init(&s->ls_linkage);
        cfs_mutex_unlock(&lu_sites_guard);

        if (s->ls_obj_hash != NULL) {
                cfs_hash_putref(s->ls_obj_hash);
                s->ls_obj_hash = NULL;
        }

        if (s->ls_top_dev != NULL) {
                s->ls_top_dev->ld_site = NULL;
                lu_ref_del(&s->ls_top_dev->ld_reference, "site-top", s);
                lu_device_put(s->ls_top_dev);
                s->ls_top_dev = NULL;
        }

        if (s->ls_stats != NULL)
                lprocfs_free_stats(&s->ls_stats);
}
EXPORT_SYMBOL(lu_site_fini);

/**
 * Called when initialization of stack for this site is completed.
 */
int lu_site_init_finish(struct lu_site *s)
{
        int result;

        cfs_mutex_lock(&lu_sites_guard);
        result = lu_context_refill(&lu_shrink_env.le_ctx);
        if (result == 0)
                cfs_list_add(&s->ls_linkage, &lu_sites);
        cfs_mutex_unlock(&lu_sites_guard);
        return result;
}
EXPORT_SYMBOL(lu_site_init_finish);

/**
 * Acquire additional reference on device \a d
 */
void lu_device_get(struct lu_device *d)
{
        cfs_atomic_inc(&d->ld_ref);
}
EXPORT_SYMBOL(lu_device_get);

/**
 * Release reference on device \a d.
 */
void lu_device_put(struct lu_device *d)
{
        LASSERT(cfs_atomic_read(&d->ld_ref) > 0);
        cfs_atomic_dec(&d->ld_ref);
}
EXPORT_SYMBOL(lu_device_put);

/**
 * Initialize device \a d of type \a t.
 */
int lu_device_init(struct lu_device *d, struct lu_device_type *t)
{
        if (t->ldt_device_nr++ == 0 && t->ldt_ops->ldto_start != NULL)
                t->ldt_ops->ldto_start(t);
        memset(d, 0, sizeof *d);
        cfs_atomic_set(&d->ld_ref, 0);
        d->ld_type = t;
        lu_ref_init(&d->ld_reference);
        CFS_INIT_LIST_HEAD(&d->ld_linkage);
        return 0;
}
EXPORT_SYMBOL(lu_device_init);

/**
 * Finalize device \a d.
 */
void lu_device_fini(struct lu_device *d)
{
        struct lu_device_type *t;

        t = d->ld_type;
        if (d->ld_obd != NULL) {
                d->ld_obd->obd_lu_dev = NULL;
                d->ld_obd = NULL;
        }

        lu_ref_fini(&d->ld_reference);
        LASSERTF(cfs_atomic_read(&d->ld_ref) == 0,
                 "Refcount is %u\n", cfs_atomic_read(&d->ld_ref));
        LASSERT(t->ldt_device_nr > 0);
        if (--t->ldt_device_nr == 0 && t->ldt_ops->ldto_stop != NULL)
                t->ldt_ops->ldto_stop(t);
}
EXPORT_SYMBOL(lu_device_fini);

/**
 * Initialize object \a o that is part of compound object \a h and was created
 * by device \a d.
 */
int lu_object_init(struct lu_object *o,
                   struct lu_object_header *h, struct lu_device *d)
{
        memset(o, 0, sizeof *o);
        o->lo_header = h;
        o->lo_dev    = d;
        lu_device_get(d);
        o->lo_dev_ref = lu_ref_add(&d->ld_reference, "lu_object", o);
        CFS_INIT_LIST_HEAD(&o->lo_linkage);
        return 0;
}
EXPORT_SYMBOL(lu_object_init);

/**
 * Finalize object and release its resources.
 */
void lu_object_fini(struct lu_object *o)
{
        struct lu_device *dev = o->lo_dev;

        LASSERT(cfs_list_empty(&o->lo_linkage));

        if (dev != NULL) {
                lu_ref_del_at(&dev->ld_reference,
                              o->lo_dev_ref, "lu_object", o);
                lu_device_put(dev);
                o->lo_dev = NULL;
        }
}
EXPORT_SYMBOL(lu_object_fini);

/**
 * Add object \a o as first layer of compound object \a h
 *
 * This is typically called by the ->ldo_object_alloc() method of top-level
 * device.
 */
void lu_object_add_top(struct lu_object_header *h, struct lu_object *o)
{
        cfs_list_move(&o->lo_linkage, &h->loh_layers);
}
EXPORT_SYMBOL(lu_object_add_top);

/**
 * Add object \a o as a layer of compound object, going after \a before.
 *
 * This is typically called by the ->ldo_object_alloc() method of \a
 * before->lo_dev.
 */
void lu_object_add(struct lu_object *before, struct lu_object *o)
{
        cfs_list_move(&o->lo_linkage, &before->lo_linkage);
}
EXPORT_SYMBOL(lu_object_add);

/**
 * Initialize compound object.
 */
int lu_object_header_init(struct lu_object_header *h)
{
        memset(h, 0, sizeof *h);
        cfs_atomic_set(&h->loh_ref, 1);
        CFS_INIT_HLIST_NODE(&h->loh_hash);
        CFS_INIT_LIST_HEAD(&h->loh_lru);
        CFS_INIT_LIST_HEAD(&h->loh_layers);
        lu_ref_init(&h->loh_reference);
        return 0;
}
EXPORT_SYMBOL(lu_object_header_init);

/**
 * Finalize compound object.
 */
void lu_object_header_fini(struct lu_object_header *h)
{
        LASSERT(cfs_list_empty(&h->loh_layers));
        LASSERT(cfs_list_empty(&h->loh_lru));
        LASSERT(cfs_hlist_unhashed(&h->loh_hash));
        lu_ref_fini(&h->loh_reference);
}
EXPORT_SYMBOL(lu_object_header_fini);

/**
 * Given a compound object, find its slice, corresponding to the device type
 * \a dtype.
 */
struct lu_object *lu_object_locate(struct lu_object_header *h,
                                   const struct lu_device_type *dtype)
{
        struct lu_object *o;

        cfs_list_for_each_entry(o, &h->loh_layers, lo_linkage) {
                if (o->lo_dev->ld_type == dtype)
                        return o;
        }
        return NULL;
}
EXPORT_SYMBOL(lu_object_locate);

/**
 * Finalize and free devices in the device stack.
 *
 * Finalize device stack by purging object cache, and calling
 * lu_device_type_operations::ldto_device_fini() and
 * lu_device_type_operations::ldto_device_free() on all devices in the stack.
 */
void lu_stack_fini(const struct lu_env *env, struct lu_device *top)
{
        struct lu_site   *site = top->ld_site;
        struct lu_device *scan;
        struct lu_device *next;

        lu_site_purge(env, site, ~0);
        for (scan = top; scan != NULL; scan = next) {
                next = scan->ld_type->ldt_ops->ldto_device_fini(env, scan);
                lu_ref_del(&scan->ld_reference, "lu-stack", &lu_site_init);
                lu_device_put(scan);
        }

        /* purge again. */
        lu_site_purge(env, site, ~0);

        if (!cfs_hash_is_empty(site->ls_obj_hash)) {
                /*
                 * Uh-oh, objects still exist.
                 */
                LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, D_ERROR, NULL);

                lu_site_print(env, site, &msgdata, lu_cdebug_printer);
        }

        for (scan = top; scan != NULL; scan = next) {
                const struct lu_device_type *ldt = scan->ld_type;
                struct obd_type             *type;

                next = ldt->ldt_ops->ldto_device_free(env, scan);
                type = ldt->ldt_obd_type;
                if (type != NULL) {
                        type->typ_refcnt--;
                        class_put_type(type);
                }
        }
}
EXPORT_SYMBOL(lu_stack_fini);

enum {
        /**
         * Maximal number of tld slots.
         */
        LU_CONTEXT_KEY_NR = 32
};

static struct lu_context_key *lu_keys[LU_CONTEXT_KEY_NR] = { NULL, };

static cfs_spinlock_t lu_keys_guard = CFS_SPIN_LOCK_UNLOCKED;

/**
 * Global counter incremented whenever a key is registered, unregistered,
 * revived or quiesced. This is used to avoid unnecessary calls to
 * lu_context_refill(). No locking is provided, as initialization and shutdown
 * are supposed to be externally serialized.
 */
static unsigned key_set_version = 0;

/**
 * Register new key.
 */
int lu_context_key_register(struct lu_context_key *key)
{
        int result;
        int i;

        LASSERT(key->lct_init != NULL);
        LASSERT(key->lct_fini != NULL);
        LASSERT(key->lct_tags != 0);
        LASSERT(key->lct_owner != NULL);

        result = -ENFILE;
        cfs_spin_lock(&lu_keys_guard);
        for (i = 0; i < ARRAY_SIZE(lu_keys); ++i) {
                if (lu_keys[i] == NULL) {
                        key->lct_index = i;
                        cfs_atomic_set(&key->lct_used, 1);
                        lu_keys[i] = key;
                        lu_ref_init(&key->lct_reference);
                        result = 0;
                        ++key_set_version;
                        break;
                }
        }
        cfs_spin_unlock(&lu_keys_guard);
        return result;
}
EXPORT_SYMBOL(lu_context_key_register);

static void key_fini(struct lu_context *ctx, int index)
{
        if (ctx->lc_value != NULL && ctx->lc_value[index] != NULL) {
                struct lu_context_key *key;

                key = lu_keys[index];
                LASSERT(key != NULL);
                LASSERT(key->lct_fini != NULL);
                LASSERT(cfs_atomic_read(&key->lct_used) > 1);

                key->lct_fini(ctx, key, ctx->lc_value[index]);
                lu_ref_del(&key->lct_reference, "ctx", ctx);
                cfs_atomic_dec(&key->lct_used);
                LASSERT(key->lct_owner != NULL);
                if (!(ctx->lc_tags & LCT_NOREF)) {
                        LASSERT(cfs_module_refcount(key->lct_owner) > 0);
                        cfs_module_put(key->lct_owner);
                }
                ctx->lc_value[index] = NULL;
        }
}

/**
 * Deregister key.
 */
void lu_context_key_degister(struct lu_context_key *key)
{
        LASSERT(cfs_atomic_read(&key->lct_used) >= 1);
        LINVRNT(0 <= key->lct_index && key->lct_index < ARRAY_SIZE(lu_keys));

        lu_context_key_quiesce(key);

        ++key_set_version;
        cfs_spin_lock(&lu_keys_guard);
        key_fini(&lu_shrink_env.le_ctx, key->lct_index);
        if (lu_keys[key->lct_index]) {
                lu_keys[key->lct_index] = NULL;
                lu_ref_fini(&key->lct_reference);
        }
        cfs_spin_unlock(&lu_keys_guard);

        LASSERTF(cfs_atomic_read(&key->lct_used) == 1,
                 "key has instances: %d\n",
                 cfs_atomic_read(&key->lct_used));
}
EXPORT_SYMBOL(lu_context_key_degister);

/**
 * Register a number of keys. This has to be called after all keys have been
 * initialized by a call to LU_CONTEXT_KEY_INIT().
 */
int lu_context_key_register_many(struct lu_context_key *k, ...)
{
        struct lu_context_key *key = k;
        va_list args;
        int result;

        va_start(args, k);
        do {
                result = lu_context_key_register(key);
                if (result)
                        break;
                key = va_arg(args, struct lu_context_key *);
        } while (key != NULL);
        va_end(args);

        if (result != 0) {
                va_start(args, k);
                while (k != key) {
                        lu_context_key_degister(k);
                        k = va_arg(args, struct lu_context_key *);
                }
                va_end(args);
        }

        return result;
}
EXPORT_SYMBOL(lu_context_key_register_many);
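
/*
 * Illustrative sketch (hypothetical keys): the argument list is
 * NULL-terminated, and on failure everything registered so far is rolled
 * back, so callers only need to check the return value.
 */
#if 0
        rc = lu_context_key_register_many(&example_thread_key,
                                          &another_thread_key,
                                          NULL);
        if (rc != 0)
                return rc;
#endif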

/**
 * De-register a number of keys. This is a dual to
 * lu_context_key_register_many().
 */
void lu_context_key_degister_many(struct lu_context_key *k, ...)
{
        va_list args;

        va_start(args, k);
        do {
                lu_context_key_degister(k);
                k = va_arg(args, struct lu_context_key *);
        } while (k != NULL);
        va_end(args);
}
EXPORT_SYMBOL(lu_context_key_degister_many);

/**
 * Revive a number of keys.
 */
void lu_context_key_revive_many(struct lu_context_key *k, ...)
{
        va_list args;

        va_start(args, k);
        do {
                lu_context_key_revive(k);
                k = va_arg(args, struct lu_context_key *);
        } while (k != NULL);
        va_end(args);
}
EXPORT_SYMBOL(lu_context_key_revive_many);

/**
 * Quiesce a number of keys.
 */
void lu_context_key_quiesce_many(struct lu_context_key *k, ...)
{
        va_list args;

        va_start(args, k);
        do {
                lu_context_key_quiesce(k);
                k = va_arg(args, struct lu_context_key *);
        } while (k != NULL);
        va_end(args);
}
EXPORT_SYMBOL(lu_context_key_quiesce_many);

/**
 * Return value associated with key \a key in context \a ctx.
 */
void *lu_context_key_get(const struct lu_context *ctx,
                         const struct lu_context_key *key)
{
        LINVRNT(ctx->lc_state == LCS_ENTERED);
        LINVRNT(0 <= key->lct_index && key->lct_index < ARRAY_SIZE(lu_keys));
        LASSERT(lu_keys[key->lct_index] == key);
        return ctx->lc_value[key->lct_index];
}
EXPORT_SYMBOL(lu_context_key_get);
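
/*
 * Illustrative sketch: retrieving per-context data, as lu_cdebug_printer()
 * does above with lu_global_key. This is only valid between
 * lu_context_enter() and lu_context_exit().
 */
#if 0
        struct lu_cdebug_data *info;

        info = lu_context_key_get(&env->le_ctx, &lu_global_key);
        LASSERT(info != NULL);
#endif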
1417
1418 /**
1419  * List of remembered contexts. XXX document me.
1420  */
1421 static CFS_LIST_HEAD(lu_context_remembered);
1422
1423 /**
1424  * Destroy \a key in all remembered contexts. This is used to destroy key
1425  * values in "shared" contexts (like service threads), when a module owning
1426  * the key is about to be unloaded.
1427  */
1428 void lu_context_key_quiesce(struct lu_context_key *key)
1429 {
1430         struct lu_context *ctx;
1431         extern unsigned cl_env_cache_purge(unsigned nr);
1432
1433         if (!(key->lct_tags & LCT_QUIESCENT)) {
1434                 /*
1435                  * XXX layering violation.
1436                  */
1437                 cl_env_cache_purge(~0);
1438                 key->lct_tags |= LCT_QUIESCENT;
1439                 /*
1440                  * XXX memory barrier has to go here.
1441                  */
1442                 cfs_spin_lock(&lu_keys_guard);
1443                 cfs_list_for_each_entry(ctx, &lu_context_remembered,
1444                                         lc_remember)
1445                         key_fini(ctx, key->lct_index);
1446                 cfs_spin_unlock(&lu_keys_guard);
1447                 ++key_set_version;
1448         }
1449 }
1450 EXPORT_SYMBOL(lu_context_key_quiesce);
1451
1452 void lu_context_key_revive(struct lu_context_key *key)
1453 {
1454         key->lct_tags &= ~LCT_QUIESCENT;
1455         ++key_set_version;
1456 }
1457 EXPORT_SYMBOL(lu_context_key_revive);
1458
1459 static void keys_fini(struct lu_context *ctx)
1460 {
1461         int i;
1462
1463         cfs_spin_lock(&lu_keys_guard);
1464         if (ctx->lc_value != NULL) {
1465                 for (i = 0; i < ARRAY_SIZE(lu_keys); ++i)
1466                         key_fini(ctx, i);
1467                 OBD_FREE(ctx->lc_value,
1468                          ARRAY_SIZE(lu_keys) * sizeof ctx->lc_value[0]);
1469                 ctx->lc_value = NULL;
1470         }
1471         cfs_spin_unlock(&lu_keys_guard);
1472 }
1473
1474 static int keys_fill(struct lu_context *ctx)
1475 {
1476         int i;
1477
1478         LINVRNT(ctx->lc_value != NULL);
1479         for (i = 0; i < ARRAY_SIZE(lu_keys); ++i) {
1480                 struct lu_context_key *key;
1481
1482                 key = lu_keys[i];
1483                 if (ctx->lc_value[i] == NULL && key != NULL &&
1484                     (key->lct_tags & ctx->lc_tags) &&
1485                     /*
1486                      * Don't create values for a LCT_QUIESCENT key, as this
1487                      * will pin module owning a key.
1488                      */
1489                     !(key->lct_tags & LCT_QUIESCENT)) {
1490                         void *value;
1491
1492                         LINVRNT(key->lct_init != NULL);
1493                         LINVRNT(key->lct_index == i);
1494
1495                         value = key->lct_init(ctx, key);
1496                         if (unlikely(IS_ERR(value)))
1497                                 return PTR_ERR(value);
1498
1499                         LASSERT(key->lct_owner != NULL);
1500                         if (!(ctx->lc_tags & LCT_NOREF))
1501                                 cfs_try_module_get(key->lct_owner);
1502                         lu_ref_add_atomic(&key->lct_reference, "ctx", ctx);
1503                         cfs_atomic_inc(&key->lct_used);
1504                         /*
1505                          * This is the only place in the code, where an
1506                          * element of ctx->lc_value[] array is set to non-NULL
1507                          * value.
1508                          */
1509                         ctx->lc_value[i] = value;
1510                         if (key->lct_exit != NULL)
1511                                 ctx->lc_tags |= LCT_HAS_EXIT;
1512                 }
1513                 ctx->lc_version = key_set_version;
1514         }
1515         return 0;
1516 }
1517
1518 static int keys_init(struct lu_context *ctx)
1519 {
1520         int result;
1521
1522         OBD_ALLOC(ctx->lc_value, ARRAY_SIZE(lu_keys) * sizeof ctx->lc_value[0]);
1523         if (likely(ctx->lc_value != NULL))
1524                 result = keys_fill(ctx);
1525         else
1526                 result = -ENOMEM;
1527
1528         if (result != 0)
1529                 keys_fini(ctx);
1530         return result;
1531 }
1532
1533 /**
1534  * Initialize context data-structure. Create values for all keys.
1535  */
1536 int lu_context_init(struct lu_context *ctx, __u32 tags)
1537 {
1538         memset(ctx, 0, sizeof *ctx);
1539         ctx->lc_state = LCS_INITIALIZED;
1540         ctx->lc_tags = tags;
1541         if (tags & LCT_REMEMBER) {
1542                 cfs_spin_lock(&lu_keys_guard);
1543                 cfs_list_add(&ctx->lc_remember, &lu_context_remembered);
1544                 cfs_spin_unlock(&lu_keys_guard);
1545         } else
1546                 CFS_INIT_LIST_HEAD(&ctx->lc_remember);
1547         return keys_init(ctx);
1548 }
1549 EXPORT_SYMBOL(lu_context_init);
1550
1551 /**
1552  * Finalize the context data structure. Destroy key values.
1553  */
1554 void lu_context_fini(struct lu_context *ctx)
1555 {
1556         LINVRNT(ctx->lc_state == LCS_INITIALIZED || ctx->lc_state == LCS_LEFT);
1557         ctx->lc_state = LCS_FINALIZED;
1558         keys_fini(ctx);
1559         cfs_spin_lock(&lu_keys_guard);
1560         cfs_list_del_init(&ctx->lc_remember);
1561         cfs_spin_unlock(&lu_keys_guard);
1562 }
1563 EXPORT_SYMBOL(lu_context_fini);
1564
1565 /**
1566  * Called before entering context.
1567  * Called before entering the context.
1568 void lu_context_enter(struct lu_context *ctx)
1569 {
1570         LINVRNT(ctx->lc_state == LCS_INITIALIZED || ctx->lc_state == LCS_LEFT);
1571         ctx->lc_state = LCS_ENTERED;
1572 }
1573 EXPORT_SYMBOL(lu_context_enter);
1574
1575 /**
1576  * Called after exiting from \a ctx.
1577  */
1578 void lu_context_exit(struct lu_context *ctx)
1579 {
1580         int i;
1581
1582         LINVRNT(ctx->lc_state == LCS_ENTERED);
1583         ctx->lc_state = LCS_LEFT;
1584         if (ctx->lc_tags & LCT_HAS_EXIT && ctx->lc_value != NULL) {
1585                 for (i = 0; i < ARRAY_SIZE(lu_keys); ++i) {
1586                         if (ctx->lc_value[i] != NULL) {
1587                                 struct lu_context_key *key;
1588
1589                                 key = lu_keys[i];
1590                                 LASSERT(key != NULL);
1591                                 if (key->lct_exit != NULL)
1592                                         key->lct_exit(ctx,
1593                                                       key, ctx->lc_value[i]);
1594                         }
1595                 }
1596         }
1597 }
1598 EXPORT_SYMBOL(lu_context_exit);
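/*
 * A minimal lifecycle sketch for the four calls above (LCT_MD_THREAD is just
 * one possible tag; error handling elided):
 *
 *	struct lu_context ctx;
 *	int rc;
 *
 *	rc = lu_context_init(&ctx, LCT_MD_THREAD);
 *	if (rc == 0) {
 *		lu_context_enter(&ctx);
 *		... key values are now reachable via lu_context_key_get() ...
 *		lu_context_exit(&ctx);
 *		lu_context_fini(&ctx);
 *	}
 */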
1599
1600 /**
1601  * Allocate values in the context for all missing keys that were registered
1602  * after the context was created. key_set_version only changes in the rare
1603  * case when modules are loaded or removed.
1604  */
1605 int lu_context_refill(struct lu_context *ctx)
1606 {
1607         return likely(ctx->lc_version == key_set_version) ? 0 : keys_fill(ctx);
1608 }
1609 EXPORT_SYMBOL(lu_context_refill);
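/*
 * A sketch of the intended use (illustrative): a cached, long-lived context
 * is refilled before being re-entered, so that keys registered since the
 * context was created get values as well:
 *
 *	rc = lu_context_refill(&ctx);
 *	if (rc == 0)
 *		lu_context_enter(&ctx);
 */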
1610
1611 /**
1612  * lu_context_tags_default/lu_session_tags_default are updated when new
1613  * types of obd devices are added. Currently this is only used on the client
1614  * side, specifically by the echo device client. For other stacks (such as
1615  * ptlrpc threads) the contexts are predefined when the lu_device type is
1616  * registered, during the module probe phase.
1617  */
1618 __u32 lu_context_tags_default = 0;
1619 __u32 lu_session_tags_default = 0;
1620
1621 void lu_context_tags_update(__u32 tags)
1622 {
1623         cfs_spin_lock(&lu_keys_guard);
1624         lu_context_tags_default |= tags;
1625         key_set_version++;
1626         cfs_spin_unlock(&lu_keys_guard);
1627 }
1628 EXPORT_SYMBOL(lu_context_tags_update);
1629
1630 void lu_context_tags_clear(__u32 tags)
1631 {
1632         cfs_spin_lock(&lu_keys_guard);
1633         lu_context_tags_default &= ~tags;
1634         key_set_version++;
1635         cfs_spin_unlock(&lu_keys_guard);
1636 }
1637 EXPORT_SYMBOL(lu_context_tags_clear);
1638
1639 void lu_session_tags_update(__u32 tags)
1640 {
1641         cfs_spin_lock(&lu_keys_guard);
1642         lu_session_tags_default |= tags;
1643         key_set_version++;
1644         cfs_spin_unlock(&lu_keys_guard);
1645 }
1646 EXPORT_SYMBOL(lu_session_tags_update);
1647
1648 void lu_session_tags_clear(__u32 tags)
1649 {
1650         cfs_spin_lock(&lu_keys_guard);
1651         lu_session_tags_default &= ~tags;
1652         key_set_version++;
1653         cfs_spin_unlock(&lu_keys_guard);
1654 }
1655 EXPORT_SYMBOL(lu_session_tags_clear);
1656
1657 int lu_env_init(struct lu_env *env, __u32 tags)
1658 {
1659         int result;
1660
1661         env->le_ses = NULL;
1662         result = lu_context_init(&env->le_ctx, tags);
1663         if (likely(result == 0))
1664                 lu_context_enter(&env->le_ctx);
1665         return result;
1666 }
1667 EXPORT_SYMBOL(lu_env_init);
1668
1669 void lu_env_fini(struct lu_env *env)
1670 {
1671         lu_context_exit(&env->le_ctx);
1672         lu_context_fini(&env->le_ctx);
1673         env->le_ses = NULL;
1674 }
1675 EXPORT_SYMBOL(lu_env_fini);
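/*
 * A minimal lu_env sketch (illustrative; LCT_LOCAL is just one possible tag).
 * lu_env_init() enters the embedded context on success, so the caller only
 * needs lu_env_fini() on the way out:
 *
 *	struct lu_env env;
 *	int rc;
 *
 *	rc = lu_env_init(&env, LCT_LOCAL);
 *	if (rc == 0) {
 *		... pass &env down to obd methods ...
 *		lu_env_fini(&env);
 *	}
 */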
1676
1677 int lu_env_refill(struct lu_env *env)
1678 {
1679         int result;
1680
1681         result = lu_context_refill(&env->le_ctx);
1682         if (result == 0 && env->le_ses != NULL)
1683                 result = lu_context_refill(env->le_ses);
1684         return result;
1685 }
1686 EXPORT_SYMBOL(lu_env_refill);
1687
1688 /**
1689  * Currently, this API is only used by the echo client. Because the echo
1690  * client and the normal lustre client share the same cl_env cache, the echo
1691  * client needs to refresh the env context after it gets one from the cache,
1692  * especially when a normal client and an echo client co-exist on the same
1693  * node.
1694  */
1695 int lu_env_refill_by_tags(struct lu_env *env, __u32 ctags,
1696                           __u32 stags)
1697 {
1698         int    result;
1699
1700         if ((env->le_ctx.lc_tags & ctags) != ctags) {
1701                 env->le_ctx.lc_version = 0;
1702                 env->le_ctx.lc_tags |= ctags;
1703         }
1704
1705         if (env->le_ses && (env->le_ses->lc_tags & stags) != stags) {
1706                 env->le_ses->lc_version = 0;
1707                 env->le_ses->lc_tags |= stags;
1708         }
1709
1710         result = lu_env_refill(env);
1711
1712         return result;
1713 }
1714 EXPORT_SYMBOL(lu_env_refill_by_tags);
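/*
 * A sketch of the echo-client usage described above (tags illustrative):
 * widen the tags of an env obtained from the cl_env cache, then refill it so
 * the new tags take effect:
 *
 *	rc = lu_env_refill_by_tags(env, LCT_DT_THREAD, LCT_SESSION);
 */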
1715
1716 static struct cfs_shrinker *lu_site_shrinker = NULL;
1717
1718 typedef struct lu_site_stats {
1719         unsigned        lss_populated;
1720         unsigned        lss_max_search;
1721         unsigned        lss_total;
1722         unsigned        lss_busy;
1723 } lu_site_stats_t;
1724
1725 static void lu_site_stats_get(cfs_hash_t *hs,
1726                               lu_site_stats_t *stats, int populated)
1727 {
1728         cfs_hash_bd_t bd;
1729         int           i;
1730
1731         cfs_hash_for_each_bucket(hs, &bd, i) {
1732                 struct lu_site_bkt_data *bkt = cfs_hash_bd_extra_get(hs, &bd);
1733                 cfs_hlist_head_t        *hhead;
1734
1735                 cfs_hash_bd_lock(hs, &bd, 1);
1736                 stats->lss_busy  += bkt->lsb_busy;
1737                 stats->lss_total += cfs_hash_bd_count_get(&bd);
1738                 stats->lss_max_search = max((int)stats->lss_max_search,
1739                                             cfs_hash_bd_depmax_get(&bd));
1740                 if (!populated) {
1741                         cfs_hash_bd_unlock(hs, &bd, 1);
1742                         continue;
1743                 }
1744
1745                 cfs_hash_bd_for_each_hlist(hs, &bd, hhead) {
1746                         if (!cfs_hlist_empty(hhead))
1747                                 stats->lss_populated++;
1748                 }
1749                 cfs_hash_bd_unlock(hs, &bd, 1);
1750         }
1751 }
1752
1753 #ifdef __KERNEL__
1754
1755 static int lu_cache_shrink(SHRINKER_ARGS(sc, nr_to_scan, gfp_mask))
1756 {
1757         lu_site_stats_t stats;
1758         struct lu_site *s;
1759         struct lu_site *tmp;
1760         int cached = 0;
1761         int remain = shrink_param(sc, nr_to_scan);
1762         CFS_LIST_HEAD(splice);
1763
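        /*
         * Shrinker contract, as implemented below: a zero nr_to_scan only
         * queries the cache size; a non-zero value asks us to free that many
         * objects, which is refused (by returning -1) unless __GFP_FS is
         * set, because purging may re-enter the filesystem.
         */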
1764         if (remain != 0) {
1765                 if (!(shrink_param(sc, gfp_mask) & __GFP_FS))
1766                         return -1;
1767                 CDEBUG(D_INODE, "Shrink %d objects\n", remain);
1768         }
1769
1770         cfs_mutex_lock(&lu_sites_guard);
1771         cfs_list_for_each_entry_safe(s, tmp, &lu_sites, ls_linkage) {
1772                 if (shrink_param(sc, nr_to_scan) != 0) {
1773                         remain = lu_site_purge(&lu_shrink_env, s, remain);
1774                         /*
1775                          * Move the just-shrunk site to the tail of the
1776                          * site list to ensure shrinking fairness.
1777                          */
1778                         cfs_list_move_tail(&s->ls_linkage, &splice);
1779                 }
1780
1781                 memset(&stats, 0, sizeof(stats));
1782                 lu_site_stats_get(s->ls_obj_hash, &stats, 0);
1783                 cached += stats.lss_total - stats.lss_busy;
1784                 if (shrink_param(sc, nr_to_scan) && remain <= 0)
1785                         break;
1786         }
1787         cfs_list_splice(&splice, lu_sites.prev);
1788         cfs_mutex_unlock(&lu_sites_guard);
1789
1790         cached = (cached / 100) * sysctl_vfs_cache_pressure;
1791         if (shrink_param(sc, nr_to_scan) == 0)
1792                 CDEBUG(D_INODE, "%d objects cached\n", cached);
1793         return cached;
1794 }
1795
1796 /*
1797  * Debugging stuff.
1798  */
1799
1800 /**
1801  * Environment to be used in the debugger; it contains all tags.
1802  */
1803 struct lu_env lu_debugging_env;
1804
1805 /**
1806  * Debugging printer function using printk().
1807  */
1808 int lu_printk_printer(const struct lu_env *env,
1809                       void *unused, const char *format, ...)
1810 {
1811         va_list args;
1812
1813         va_start(args, format);
1814         vprintk(format, args);
1815         va_end(args);
1816         return 0;
1817 }
1818
1819 void lu_debugging_setup(void)
1820 {
1821         lu_env_init(&lu_debugging_env, ~0);
1822 }
1823
1824 void lu_context_keys_dump(void)
1825 {
1826         int i;
1827
1828         for (i = 0; i < ARRAY_SIZE(lu_keys); ++i) {
1829                 struct lu_context_key *key;
1830
1831                 key = lu_keys[i];
1832                 if (key != NULL) {
1833                         CERROR("[%d]: %p %x (%p,%p,%p) %d %d \"%s\"@%p\n",
1834                                i, key, key->lct_tags,
1835                                key->lct_init, key->lct_fini, key->lct_exit,
1836                                key->lct_index, cfs_atomic_read(&key->lct_used),
1837                                key->lct_owner ? key->lct_owner->name : "",
1838                                key->lct_owner);
1839                         lu_ref_print(&key->lct_reference);
1840                 }
1841         }
1842 }
1843 EXPORT_SYMBOL(lu_context_keys_dump);
1844 #else  /* !__KERNEL__ */
1845 static int lu_cache_shrink(int nr, unsigned int gfp_mask)
1846 {
1847         return 0;
1848 }
1849 #endif /* __KERNEL__ */
1850
1851 int  cl_global_init(void);
1852 void cl_global_fini(void);
1853 int  lu_ref_global_init(void);
1854 void lu_ref_global_fini(void);
1855
1856 int dt_global_init(void);
1857 void dt_global_fini(void);
1858
1859 int llo_global_init(void);
1860 void llo_global_fini(void);
1861
1862 /**
1863  * Initialization of global lu_* data.
1864  */
1865 int lu_global_init(void)
1866 {
1867         int result;
1868
1869         CDEBUG(D_INFO, "Lustre LU module (%p).\n", &lu_keys);
1870
1871         result = lu_ref_global_init();
1872         if (result != 0)
1873                 return result;
1874
1875         LU_CONTEXT_KEY_INIT(&lu_global_key);
1876         result = lu_context_key_register(&lu_global_key);
1877         if (result != 0)
1878                 return result;
1879         /*
1880          * At this level, we don't know what tags are needed, so allocate them
1881          * conservatively. This should not be too bad, because this
1882          * environment is global.
1883          */
1884         cfs_mutex_lock(&lu_sites_guard);
1885         result = lu_env_init(&lu_shrink_env, LCT_SHRINKER);
1886         cfs_mutex_unlock(&lu_sites_guard);
1887         if (result != 0)
1888                 return result;
1889
1890         /*
1891          * Seeks estimation: 3 seeks to read a record from the oi, one to
1892          * read the inode, one for the ea. Unfortunately, setting such a high
1893          * value results in the lu_object/inode cache consuming all the memory.
1894          */
1895         lu_site_shrinker = cfs_set_shrinker(CFS_DEFAULT_SEEKS, lu_cache_shrink);
1896         if (lu_site_shrinker == NULL)
1897                 return -ENOMEM;
1898
1899         result = lu_time_global_init();
1900         if (result)
1901                 GOTO(out, result);
1902
1903 #ifdef __KERNEL__
1904         result = dt_global_init();
1905         if (result)
1906                 GOTO(out, result);
1907
1908         result = llo_global_init();
1909         if (result)
1910                 GOTO(out, result);
1911 #endif
1912         result = cl_global_init();
1913 out:
1914
1915         return result;
1916 }
1917
1918 /**
1919  * Dual to lu_global_init().
1920  */
1921 void lu_global_fini(void)
1922 {
1923         cl_global_fini();
1924 #ifdef __KERNEL__
1925         llo_global_fini();
1926         dt_global_fini();
1927 #endif
1928         lu_time_global_fini();
1929         if (lu_site_shrinker != NULL) {
1930                 cfs_remove_shrinker(lu_site_shrinker);
1931                 lu_site_shrinker = NULL;
1932         }
1933
1934         lu_context_key_degister(&lu_global_key);
1935
1936         /*
1937          * Tear shrinker environment down _after_ de-registering
1938          * lu_global_key, because the latter has a value in the former.
1939          */
1940         cfs_mutex_lock(&lu_sites_guard);
1941         lu_env_fini(&lu_shrink_env);
1942         cfs_mutex_unlock(&lu_sites_guard);
1943
1944         lu_ref_global_fini();
1945 }
1946
1947 struct lu_buf LU_BUF_NULL = {
1948         .lb_buf = NULL,
1949         .lb_len = 0
1950 };
1951 EXPORT_SYMBOL(LU_BUF_NULL);
1952
1953 static __u32 ls_stats_read(struct lprocfs_stats *stats, int idx)
1954 {
1955 #ifdef LPROCFS
1956         struct lprocfs_counter ret;
1957
1958         lprocfs_stats_collect(stats, idx, &ret);
1959         return (__u32)ret.lc_count;
1960 #else
1961         return 0;
1962 #endif
1963 }
1964
1965 /**
1966  * Output site statistical counters into a buffer. Suitable for
1967  * lprocfs_rd_*()-style functions.
1968  */
1969 int lu_site_stats_print(const struct lu_site *s, char *page, int count)
1970 {
1971         lu_site_stats_t stats;
1972
1973         memset(&stats, 0, sizeof(stats));
1974         lu_site_stats_get(s->ls_obj_hash, &stats, 1);
1975
1976         return snprintf(page, count, "%d/%d %d/%d %d %d %d %d %d %d %d\n",
1977                         stats.lss_busy,
1978                         stats.lss_total,
1979                         stats.lss_populated,
1980                         CFS_HASH_NHLIST(s->ls_obj_hash),
1981                         stats.lss_max_search,
1982                         ls_stats_read(s->ls_stats, LU_SS_CREATED),
1983                         ls_stats_read(s->ls_stats, LU_SS_CACHE_HIT),
1984                         ls_stats_read(s->ls_stats, LU_SS_CACHE_MISS),
1985                         ls_stats_read(s->ls_stats, LU_SS_CACHE_RACE),
1986                         ls_stats_read(s->ls_stats, LU_SS_CACHE_DEATH_RACE),
1987                         ls_stats_read(s->ls_stats, LU_SS_LRU_PURGED));
1988 }
1989 EXPORT_SYMBOL(lu_site_stats_print);
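/*
 * The eleven columns printed above are, in order (following the snprintf()
 * arguments): busy/total objects, populated/total hash buckets, maximal
 * bucket search depth, then the created, cache_hit, cache_miss, cache_race,
 * cache_death_race and lru_purged counters.
 */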
1990
1991 const char *lu_time_names[LU_TIME_NR] = {
1992         [LU_TIME_FIND_LOOKUP] = "find_lookup",
1993         [LU_TIME_FIND_ALLOC]  = "find_alloc",
1994         [LU_TIME_FIND_INSERT] = "find_insert"
1995 };
1996 EXPORT_SYMBOL(lu_time_names);
1997
1998 /**
1999  * Helper function to initialize a number of kmem slab caches at once.
2000  */
2001 int lu_kmem_init(struct lu_kmem_descr *caches)
2002 {
2003         int result;
2004         struct lu_kmem_descr *iter = caches;
2005
2006         for (result = 0; iter->ckd_cache != NULL; ++iter) {
2007                 *iter->ckd_cache = cfs_mem_cache_create(iter->ckd_name,
2008                                                         iter->ckd_size,
2009                                                         0, 0);
2010                 if (*iter->ckd_cache == NULL) {
2011                         result = -ENOMEM;
2012                         /* free all previously allocated caches */
2013                         lu_kmem_fini(caches);
2014                         break;
2015                 }
2016         }
2017         return result;
2018 }
2019 EXPORT_SYMBOL(lu_kmem_init);
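/*
 * A usage sketch (all "foo" names hypothetical): the descriptor array is
 * terminated by an entry with a NULL ckd_cache, and lu_kmem_fini() releases
 * whatever lu_kmem_init() managed to create:
 *
 *	static cfs_mem_cache_t *foo_object_kmem;
 *
 *	static struct lu_kmem_descr foo_caches[] = {
 *		{
 *			.ckd_cache = &foo_object_kmem,
 *			.ckd_name  = "foo_object_kmem",
 *			.ckd_size  = sizeof(struct foo_object)
 *		},
 *		{
 *			.ckd_cache = NULL
 *		}
 *	};
 *
 *	rc = lu_kmem_init(foo_caches);
 *	...
 *	lu_kmem_fini(foo_caches);
 */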
2020
2021 /**
2022  * Helper function to finalize a number of kmem slab caches at once. Dual to
2023  * lu_kmem_init().
2024  */
2025 void lu_kmem_fini(struct lu_kmem_descr *caches)
2026 {
2027         int rc;
2028
2029         for (; caches->ckd_cache != NULL; ++caches) {
2030                 if (*caches->ckd_cache != NULL) {
2031                         rc = cfs_mem_cache_destroy(*caches->ckd_cache);
2032                         LASSERTF(rc == 0, "couldn't destroy %s slab\n",
2033                                  caches->ckd_name);
2034                         *caches->ckd_cache = NULL;
2035                 }
2036         }
2037 }
2038 EXPORT_SYMBOL(lu_kmem_fini);