/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 */
/*
 * Copyright (c) 2011 Whamcloud, Inc.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lustre/obdclass/lu_object.c
 *
 * Lustre Object.
 * These are the only exported functions; they provide some generic
 * infrastructure for managing object devices.
 *
 *   Author: Nikita Danilov <nikita.danilov@sun.com>
 */

#define DEBUG_SUBSYSTEM S_CLASS
#ifndef EXPORT_SYMTAB
# define EXPORT_SYMTAB
#endif

#include <libcfs/libcfs.h>

#ifdef __KERNEL__
# include <linux/module.h>
#endif

/* hash_long() */
#include <libcfs/libcfs_hash.h>
#include <obd_class.h>
#include <obd_support.h>
#include <lustre_disk.h>
#include <lustre_fid.h>
#include <lu_object.h>
#include <libcfs/list.h>
/* lu_time_global_{init,fini}() */
#include <lu_time.h>
static void lu_object_free(const struct lu_env *env, struct lu_object *o);

/**
 * Decrease reference counter on object. If last reference is freed, return
 * object to the cache, unless lu_object_is_dying(o) holds. In the latter
 * case, free object immediately.
 */
void lu_object_put(const struct lu_env *env, struct lu_object *o)
{
        struct lu_site_bkt_data *bkt;
        struct lu_object_header *top;
        struct lu_site          *site;
        struct lu_object        *orig;
        cfs_hash_bd_t            bd;

        top  = o->lo_header;
        site = o->lo_dev->ld_site;
        orig = o;

        cfs_hash_bd_get(site->ls_obj_hash, &top->loh_fid, &bd);
        bkt = cfs_hash_bd_extra_get(site->ls_obj_hash, &bd);

        if (!cfs_hash_bd_dec_and_lock(site->ls_obj_hash, &bd, &top->loh_ref)) {
                if (lu_object_is_dying(top)) {
                        /*
                         * somebody may be waiting for this, currently only
                         * used for cl_object, see cl_object_put_last().
                         */
                        cfs_waitq_broadcast(&bkt->lsb_marche_funebre);
                }
                return;
        }

        LASSERT(bkt->lsb_busy > 0);
        bkt->lsb_busy--;
        /*
         * When last reference is released, iterate over object
         * layers, and notify them that object is no longer busy.
         */
        cfs_list_for_each_entry_reverse(o, &top->loh_layers, lo_linkage) {
                if (o->lo_ops->loo_object_release != NULL)
                        o->lo_ops->loo_object_release(env, o);
        }

        if (!lu_object_is_dying(top)) {
                cfs_hash_bd_unlock(site->ls_obj_hash, &bd, 1);
                return;
        }

        /*
         * If the object is dying (will not be cached), remove it
         * from the hash table and LRU.
         *
         * This is done with the hash table and LRU lists locked. As the only
         * ways to acquire the first reference to a previously unreferenced
         * object are hash-table lookup (lu_object_find()) and LRU scanning
         * (lu_site_purge()), both of which are done under the hash-table and
         * LRU locks, no race with a concurrent lookup is possible and we can
         * safely destroy the object below.
         */
        cfs_hash_bd_del_locked(site->ls_obj_hash, &bd, &top->loh_hash);
        cfs_list_del_init(&top->loh_lru);
        cfs_hash_bd_unlock(site->ls_obj_hash, &bd, 1);
        /*
         * Object was already removed from hash and LRU above; we can
         * kill it.
         */
        lu_object_free(env, orig);
}
EXPORT_SYMBOL(lu_object_put);

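/*
 * Usage sketch (illustrative, not part of this file): a caller that looks
 * an object up acquires a reference and must drop it with lu_object_put()
 * when done. The dev and fid variables are assumptions for the example.
 *
 *      struct lu_object *o;
 *
 *      o = lu_object_find(env, dev, fid, NULL);
 *      if (!IS_ERR(o)) {
 *              ... use the object ...
 *              lu_object_put(env, o);
 *      }
 */
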
/**
 * Allocate new object.
 *
 * This follows object creation protocol, described in the comment within
 * struct lu_device_operations definition.
 */
static struct lu_object *lu_object_alloc(const struct lu_env *env,
                                         struct lu_device *dev,
                                         const struct lu_fid *f,
                                         const struct lu_object_conf *conf)
{
        struct lu_object *scan;
        struct lu_object *top;
        cfs_list_t *layers;
        int clean;
        int result;
        ENTRY;

        /*
         * Create top-level object slice. This will also create
         * lu_object_header.
         */
        top = dev->ld_ops->ldo_object_alloc(env, NULL, dev);
        if (top == NULL)
                RETURN(ERR_PTR(-ENOMEM));
        /*
         * This is the only place where object fid is assigned. It's constant
         * after this point.
         */
        LASSERT(fid_is_igif(f) || fid_ver(f) == 0);
        top->lo_header->loh_fid = *f;
        layers = &top->lo_header->loh_layers;
        do {
                /*
                 * Call ->loo_object_init() repeatedly, until no more new
                 * object slices are created.
                 */
                clean = 1;
                cfs_list_for_each_entry(scan, layers, lo_linkage) {
                        if (scan->lo_flags & LU_OBJECT_ALLOCATED)
                                continue;
                        clean = 0;
                        scan->lo_header = top->lo_header;
                        result = scan->lo_ops->loo_object_init(env, scan, conf);
                        if (result != 0) {
                                lu_object_free(env, top);
                                RETURN(ERR_PTR(result));
                        }
                        scan->lo_flags |= LU_OBJECT_ALLOCATED;
                }
        } while (!clean);

        cfs_list_for_each_entry_reverse(scan, layers, lo_linkage) {
                if (scan->lo_ops->loo_object_start != NULL) {
                        result = scan->lo_ops->loo_object_start(env, scan);
                        if (result != 0) {
                                lu_object_free(env, top);
                                RETURN(ERR_PTR(result));
                        }
                }
        }

        lprocfs_counter_incr(dev->ld_site->ls_stats, LU_SS_CREATED);
        RETURN(top);
}

/**
 * Free an object.
 */
static void lu_object_free(const struct lu_env *env, struct lu_object *o)
{
        struct lu_site_bkt_data *bkt;
        struct lu_site          *site;
        struct lu_object        *scan;
        cfs_list_t              *layers;
        cfs_list_t               splice;

        site   = o->lo_dev->ld_site;
        layers = &o->lo_header->loh_layers;
        bkt    = lu_site_bkt_from_fid(site, &o->lo_header->loh_fid);
        /*
         * First call ->loo_object_delete() method to release all resources.
         */
        cfs_list_for_each_entry_reverse(scan, layers, lo_linkage) {
                if (scan->lo_ops->loo_object_delete != NULL)
                        scan->lo_ops->loo_object_delete(env, scan);
        }

        /*
         * Then, splice object layers into stand-alone list, and call
         * ->loo_object_free() on all layers to free memory. Splice is
         * necessary, because lu_object_header is freed together with the
         * top-level slice.
         */
        CFS_INIT_LIST_HEAD(&splice);
        cfs_list_splice_init(layers, &splice);
        while (!cfs_list_empty(&splice)) {
                /*
                 * Free layers in bottom-to-top order, so that object header
                 * lives as long as possible and ->loo_object_free() methods
                 * can look at its contents.
                 */
                o = container_of0(splice.prev, struct lu_object, lo_linkage);
                cfs_list_del_init(&o->lo_linkage);
                LASSERT(o->lo_ops->loo_object_free != NULL);
                o->lo_ops->loo_object_free(env, o);
        }

        if (cfs_waitq_active(&bkt->lsb_marche_funebre))
                cfs_waitq_broadcast(&bkt->lsb_marche_funebre);
}

/**
 * Free \a nr objects from the cold end of the site LRU list.
 */
int lu_site_purge(const struct lu_env *env, struct lu_site *s, int nr)
{
        struct lu_object_header *h;
        struct lu_object_header *temp;
        struct lu_site_bkt_data *bkt;
        cfs_hash_bd_t            bd;
        cfs_hash_bd_t            bd2;
        cfs_list_t               dispose;
        int                      did_sth;
        int                      start;
        int                      count;
        int                      bnr;
        int                      i;

        CFS_INIT_LIST_HEAD(&dispose);
        /*
         * Under LRU list lock, scan LRU list and move unreferenced objects to
         * the dispose list, removing them from LRU and hash table.
         */
        start = s->ls_purge_start;
        bnr = (nr == ~0) ? -1 : nr / CFS_HASH_NBKT(s->ls_obj_hash) + 1;
 again:
        did_sth = 0;
        cfs_hash_for_each_bucket(s->ls_obj_hash, &bd, i) {
                if (i < start)
                        continue;
                count = bnr;
                cfs_hash_bd_lock(s->ls_obj_hash, &bd, 1);
                bkt = cfs_hash_bd_extra_get(s->ls_obj_hash, &bd);

                cfs_list_for_each_entry_safe(h, temp, &bkt->lsb_lru, loh_lru) {
                        /*
                         * Objects are sorted in LRU order, and "busy"
                         * objects (ones with h->loh_ref > 0) naturally tend
                         * to live near the hot end that we scan last.
                         * Unfortunately, sites usually have a small (fewer
                         * than ten) number of busy yet rarely accessed
                         * objects (some global objects, accessed directly
                         * through pointers, bypassing the hash table). The
                         * current algorithm scans them over and over again.
                         * Probably we should move busy objects out of LRU,
                         * or we can live with that.
                         */
                        if (cfs_atomic_read(&h->loh_ref) > 0)
                                continue;

                        cfs_hash_bd_get(s->ls_obj_hash, &h->loh_fid, &bd2);
                        LASSERT(bd.bd_bucket == bd2.bd_bucket);

                        cfs_hash_bd_del_locked(s->ls_obj_hash,
                                               &bd2, &h->loh_hash);
                        cfs_list_move(&h->loh_lru, &dispose);
                        if (did_sth == 0)
                                did_sth = 1;

                        if (nr != ~0 && --nr == 0)
                                break;

                        if (count > 0 && --count == 0)
                                break;
                }
                cfs_hash_bd_unlock(s->ls_obj_hash, &bd, 1);
                cfs_cond_resched();
                /*
                 * Free everything on the dispose list. This is safe against
                 * races due to the reasons described in lu_object_put().
                 */
                while (!cfs_list_empty(&dispose)) {
                        h = container_of0(dispose.next,
                                          struct lu_object_header, loh_lru);
                        cfs_list_del_init(&h->loh_lru);
                        lu_object_free(env, lu_object_top(h));
                        lprocfs_counter_incr(s->ls_stats, LU_SS_LRU_PURGED);
                }

                if (nr == 0)
                        break;
        }

        if (nr != 0 && did_sth && start != 0) {
                start = 0; /* restart from the first bucket */
                goto again;
        }
        /* race on s->ls_purge_start, but nobody cares */
        s->ls_purge_start = i % CFS_HASH_NBKT(s->ls_obj_hash);

        return nr;
}
EXPORT_SYMBOL(lu_site_purge);

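/*
 * Usage sketch: passing ~0 as \a nr asks lu_site_purge() to drop every
 * unreferenced cached object, which is how lu_stack_fini() below empties
 * the cache before tearing a device stack down:
 *
 *      lu_site_purge(env, site, ~0);
 */
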
/*
 * Object printing.
 *
 * The code below has to jump through certain hoops to output an object
 * description into a libcfs_debug_msg-based log. The problem is that
 * lu_object_print() composes the description from strings that are parts of
 * _lines_ of output (i.e., strings that are not terminated by a newline).
 * This doesn't fit very well into the libcfs_debug_msg() interface, which
 * assumes that each message supplied to it is a self-contained output line.
 *
 * To work around this, strings are collected in a temporary buffer
 * (implemented as a value of the lu_cdebug_key key), until a terminating
 * newline character is detected.
 */

enum {
        /**
         * Maximal line size.
         *
         * XXX overflow is not handled correctly.
         */
        LU_CDEBUG_LINE = 256
};

struct lu_cdebug_data {
        /**
         * Temporary buffer.
         */
        char lck_area[LU_CDEBUG_LINE];
};

/* context key constructor/destructor: lu_global_key_init, lu_global_key_fini */
LU_KEY_INIT_FINI(lu_global, struct lu_cdebug_data);

/**
 * Key, holding temporary buffer. This key is registered very early by
 * lu_global_init().
 */
struct lu_context_key lu_global_key = {
        .lct_tags = LCT_MD_THREAD|LCT_DT_THREAD|LCT_CL_THREAD,
        .lct_init = lu_global_key_init,
        .lct_fini = lu_global_key_fini
};

/**
 * Printer function emitting messages through libcfs_debug_msg().
 */
int lu_cdebug_printer(const struct lu_env *env,
                      void *cookie, const char *format, ...)
{
        struct lu_cdebug_print_info *info = cookie;
        struct lu_cdebug_data       *key;
        int used;
        int complete;
        va_list args;

        va_start(args, format);

        key = lu_context_key_get(&env->le_ctx, &lu_global_key);
        LASSERT(key != NULL);

        used = strlen(key->lck_area);
        complete = format[strlen(format) - 1] == '\n';
        /*
         * Append new chunk to the buffer.
         */
        vsnprintf(key->lck_area + used,
                  ARRAY_SIZE(key->lck_area) - used, format, args);
        if (complete) {
                if (cfs_cdebug_show(info->lpi_mask, info->lpi_subsys))
                        libcfs_debug_msg(NULL, info->lpi_subsys, info->lpi_mask,
                                         (char *)info->lpi_file, info->lpi_fn,
                                         info->lpi_line, "%s", key->lck_area);
                key->lck_area[0] = 0;
        }
        va_end(args);
        return 0;
}
EXPORT_SYMBOL(lu_cdebug_printer);
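
/*
 * Usage sketch: lu_cdebug_printer() is meant to be passed as the printer
 * callback together with a cookie built by DECLARE_LU_CDEBUG_PRINT_INFO(),
 * exactly as lu_stack_fini() below does when dumping leaked objects:
 *
 *      static DECLARE_LU_CDEBUG_PRINT_INFO(cookie, D_ERROR);
 *
 *      lu_site_print(env, site, &cookie, lu_cdebug_printer);
 */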

/**
 * Print object header.
 */
void lu_object_header_print(const struct lu_env *env, void *cookie,
                            lu_printer_t printer,
                            const struct lu_object_header *hdr)
{
        (*printer)(env, cookie, "header@%p[%#lx, %d, "DFID"%s%s%s]",
                   hdr, hdr->loh_flags, cfs_atomic_read(&hdr->loh_ref),
                   PFID(&hdr->loh_fid),
                   cfs_hlist_unhashed(&hdr->loh_hash) ? "" : " hash",
                   cfs_list_empty((cfs_list_t *)&hdr->loh_lru) ? "" : " lru",
                   hdr->loh_attr & LOHA_EXISTS ? " exist" : "");
}
EXPORT_SYMBOL(lu_object_header_print);

/**
 * Print human-readable representation of \a o to the \a printer.
 */
void lu_object_print(const struct lu_env *env, void *cookie,
                     lu_printer_t printer, const struct lu_object *o)
{
        static const char ruler[] = "........................................";
        struct lu_object_header *top;
        int depth;

        top = o->lo_header;
        lu_object_header_print(env, cookie, printer, top);
        (*printer)(env, cookie, "{ \n");
        cfs_list_for_each_entry(o, &top->loh_layers, lo_linkage) {
                depth = o->lo_depth + 4;

                /*
                 * print `.' \a depth times followed by type name and address
                 */
                (*printer)(env, cookie, "%*.*s%s@%p", depth, depth, ruler,
                           o->lo_dev->ld_type->ldt_name, o);
                if (o->lo_ops->loo_object_print != NULL)
                        o->lo_ops->loo_object_print(env, cookie, printer, o);
                (*printer)(env, cookie, "\n");
        }
        (*printer)(env, cookie, "} header@%p\n", top);
}
EXPORT_SYMBOL(lu_object_print);

/**
 * Check object consistency.
 */
int lu_object_invariant(const struct lu_object *o)
{
        struct lu_object_header *top;

        top = o->lo_header;
        cfs_list_for_each_entry(o, &top->loh_layers, lo_linkage) {
                if (o->lo_ops->loo_object_invariant != NULL &&
                    !o->lo_ops->loo_object_invariant(o))
                        return 0;
        }
        return 1;
}
EXPORT_SYMBOL(lu_object_invariant);

static struct lu_object *htable_lookup(struct lu_site *s,
                                       cfs_hash_bd_t *bd,
                                       const struct lu_fid *f,
                                       cfs_waitlink_t *waiter,
                                       __u64 *version)
{
        struct lu_site_bkt_data *bkt;
        struct lu_object_header *h;
        cfs_hlist_node_t        *hnode;
        __u64  ver = cfs_hash_bd_version_get(bd);

        if (*version == ver)
                return NULL;

        *version = ver;
        bkt = cfs_hash_bd_extra_get(s->ls_obj_hash, bd);
        /* cfs_hash_bd_lookup_intent is a somewhat "internal" function
         * of cfs_hash, but we don't want a refcount on the object right now */
        hnode = cfs_hash_bd_lookup_locked(s->ls_obj_hash, bd, (void *)f);
        if (hnode == NULL) {
                lprocfs_counter_incr(s->ls_stats, LU_SS_CACHE_MISS);
                return NULL;
        }

        h = container_of0(hnode, struct lu_object_header, loh_hash);
        if (likely(!lu_object_is_dying(h))) {
                lprocfs_counter_incr(s->ls_stats, LU_SS_CACHE_HIT);
                return lu_object_top(h);
        }

        /*
         * Lookup found an object being destroyed; this object cannot be
         * returned (to assure that references to dying objects are eventually
         * drained), and moreover, the lookup has to wait until the object is
         * freed.
         */
        cfs_atomic_dec(&h->loh_ref);

        cfs_waitlink_init(waiter);
        cfs_waitq_add(&bkt->lsb_marche_funebre, waiter);
        cfs_set_current_state(CFS_TASK_UNINT);
        lprocfs_counter_incr(s->ls_stats, LU_SS_CACHE_DEATH_RACE);
        return ERR_PTR(-EAGAIN);
}

/**
 * Search cache for an object with the fid \a f. If such object is found,
 * return it. Otherwise, create new object, insert it into cache and return
 * it. In any case, additional reference is acquired on the returned object.
 */
struct lu_object *lu_object_find(const struct lu_env *env,
                                 struct lu_device *dev, const struct lu_fid *f,
                                 const struct lu_object_conf *conf)
{
        return lu_object_find_at(env, dev->ld_site->ls_top_dev, f, conf);
}
EXPORT_SYMBOL(lu_object_find);

static struct lu_object *lu_object_new(const struct lu_env *env,
                                       struct lu_device *dev,
                                       const struct lu_fid *f,
                                       const struct lu_object_conf *conf)
{
        struct lu_object        *o;
        cfs_hash_t              *hs;
        cfs_hash_bd_t            bd;
        struct lu_site_bkt_data *bkt;

        o = lu_object_alloc(env, dev, f, conf);
        if (unlikely(IS_ERR(o)))
                return o;

        hs = dev->ld_site->ls_obj_hash;
        cfs_hash_bd_get_and_lock(hs, (void *)f, &bd, 1);
        bkt = cfs_hash_bd_extra_get(hs, &bd);
        cfs_hash_bd_add_locked(hs, &bd, &o->lo_header->loh_hash);
        cfs_list_add_tail(&o->lo_header->loh_lru, &bkt->lsb_lru);
        bkt->lsb_busy++;
        cfs_hash_bd_unlock(hs, &bd, 1);
        return o;
}

/**
 * Core logic of lu_object_find*() functions.
 */
static struct lu_object *lu_object_find_try(const struct lu_env *env,
                                            struct lu_device *dev,
                                            const struct lu_fid *f,
                                            const struct lu_object_conf *conf,
                                            cfs_waitlink_t *waiter)
{
        struct lu_object      *o;
        struct lu_object      *shadow;
        struct lu_site        *s;
        cfs_hash_t            *hs;
        cfs_hash_bd_t          bd;
        __u64                  version = 0;

        /*
         * This uses standard index maintenance protocol:
         *
         *     - search index under lock, and return object if found;
         *     - otherwise, unlock index, allocate new object;
         *     - lock index and search again;
         *     - if nothing is found (usual case), insert newly created
         *       object into index;
         *     - otherwise (race: other thread inserted object), free
         *       object just allocated.
         *     - unlock index;
         *     - return object.
         *
         * In the "LOC_F_NEW" case, we know the object is newly established,
         * so it is unnecessary to perform the lookup-alloc-lookup-insert
         * sequence; just allocate and insert directly.
         *
         * If a dying object is found during index search, add @waiter to the
         * site wait-queue and return ERR_PTR(-EAGAIN).
         */
        if (conf != NULL && conf->loc_flags & LOC_F_NEW)
                return lu_object_new(env, dev, f, conf);

        s  = dev->ld_site;
        hs = s->ls_obj_hash;
        cfs_hash_bd_get_and_lock(hs, (void *)f, &bd, 1);
        o = htable_lookup(s, &bd, f, waiter, &version);
        cfs_hash_bd_unlock(hs, &bd, 1);
        if (o != NULL)
                return o;

        /*
         * Allocate new object. This may result in rather complicated
         * operations, including fld queries, inode loading, etc.
         */
        o = lu_object_alloc(env, dev, f, conf);
        if (unlikely(IS_ERR(o)))
                return o;

        LASSERT(lu_fid_eq(lu_object_fid(o), f));

        cfs_hash_bd_lock(hs, &bd, 1);

        shadow = htable_lookup(s, &bd, f, waiter, &version);
        if (likely(shadow == NULL)) {
                struct lu_site_bkt_data *bkt;

                bkt = cfs_hash_bd_extra_get(hs, &bd);
                cfs_hash_bd_add_locked(hs, &bd, &o->lo_header->loh_hash);
                cfs_list_add_tail(&o->lo_header->loh_lru, &bkt->lsb_lru);
                bkt->lsb_busy++;
                cfs_hash_bd_unlock(hs, &bd, 1);
                return o;
        }

        lprocfs_counter_incr(s->ls_stats, LU_SS_CACHE_RACE);
        cfs_hash_bd_unlock(hs, &bd, 1);
        lu_object_free(env, o);
        return shadow;
}

/**
 * Much like lu_object_find(), but the top-level device of the object is
 * specifically \a dev rather than the top-level device of the site. This
 * interface allows objects of different "stacking" to be created within the
 * same site.
 */
struct lu_object *lu_object_find_at(const struct lu_env *env,
                                    struct lu_device *dev,
                                    const struct lu_fid *f,
                                    const struct lu_object_conf *conf)
{
        struct lu_site_bkt_data *bkt;
        struct lu_object        *obj;
        cfs_waitlink_t           wait;

        while (1) {
                obj = lu_object_find_try(env, dev, f, conf, &wait);
                if (obj != ERR_PTR(-EAGAIN))
                        return obj;
                /*
                 * lu_object_find_try() already added waiter into the
                 * wait queue.
                 */
                cfs_waitq_wait(&wait, CFS_TASK_UNINT);
                bkt = lu_site_bkt_from_fid(dev->ld_site, (void *)f);
                cfs_waitq_del(&bkt->lsb_marche_funebre, &wait);
        }
}
EXPORT_SYMBOL(lu_object_find_at);

/**
 * Find object with given fid, and return its slice belonging to given device.
 */
struct lu_object *lu_object_find_slice(const struct lu_env *env,
                                       struct lu_device *dev,
                                       const struct lu_fid *f,
                                       const struct lu_object_conf *conf)
{
        struct lu_object *top;
        struct lu_object *obj;

        top = lu_object_find(env, dev, f, conf);
        if (!IS_ERR(top)) {
                obj = lu_object_locate(top->lo_header, dev->ld_type);
                if (obj == NULL)
                        lu_object_put(env, top);
        } else
                obj = top;
        return obj;
}
EXPORT_SYMBOL(lu_object_find_slice);

/**
 * Global list of all device types.
 */
static CFS_LIST_HEAD(lu_device_types);

int lu_device_type_init(struct lu_device_type *ldt)
{
        int result;

        CFS_INIT_LIST_HEAD(&ldt->ldt_linkage);
        result = ldt->ldt_ops->ldto_init(ldt);
        if (result == 0)
                cfs_list_add(&ldt->ldt_linkage, &lu_device_types);
        return result;
}
EXPORT_SYMBOL(lu_device_type_init);

void lu_device_type_fini(struct lu_device_type *ldt)
{
        cfs_list_del_init(&ldt->ldt_linkage);
        ldt->ldt_ops->ldto_fini(ldt);
}
EXPORT_SYMBOL(lu_device_type_fini);

void lu_types_stop(void)
{
        struct lu_device_type *ldt;

        cfs_list_for_each_entry(ldt, &lu_device_types, ldt_linkage) {
                if (ldt->ldt_device_nr == 0)
                        ldt->ldt_ops->ldto_stop(ldt);
        }
}
EXPORT_SYMBOL(lu_types_stop);

/**
 * Global list of all sites on this node
 */
static CFS_LIST_HEAD(lu_sites);
static CFS_DECLARE_MUTEX(lu_sites_guard);

/**
 * Global environment used by site shrinker.
 */
static struct lu_env lu_shrink_env;

struct lu_site_print_arg {
        struct lu_env   *lsp_env;
        void            *lsp_cookie;
        lu_printer_t     lsp_printer;
};

static int
lu_site_obj_print(cfs_hash_t *hs, cfs_hash_bd_t *bd,
                  cfs_hlist_node_t *hnode, void *data)
{
        struct lu_site_print_arg *arg = (struct lu_site_print_arg *)data;
        struct lu_object_header  *h;

        h = cfs_hlist_entry(hnode, struct lu_object_header, loh_hash);
        if (!cfs_list_empty(&h->loh_layers)) {
                const struct lu_object *o;

                o = lu_object_top(h);
                lu_object_print(arg->lsp_env, arg->lsp_cookie,
                                arg->lsp_printer, o);
        } else {
                lu_object_header_print(arg->lsp_env, arg->lsp_cookie,
                                       arg->lsp_printer, h);
        }
        return 0;
}

/**
 * Print all objects in \a s.
 */
void lu_site_print(const struct lu_env *env, struct lu_site *s, void *cookie,
                   lu_printer_t printer)
{
        struct lu_site_print_arg arg = {
                .lsp_env     = (struct lu_env *)env,
                .lsp_cookie  = cookie,
                .lsp_printer = printer,
        };

        cfs_hash_for_each(s->ls_obj_hash, lu_site_obj_print, &arg);
}
EXPORT_SYMBOL(lu_site_print);

enum {
        LU_CACHE_PERCENT_MAX     = 50,
        LU_CACHE_PERCENT_DEFAULT = 20
};

static unsigned int lu_cache_percent = LU_CACHE_PERCENT_DEFAULT;
CFS_MODULE_PARM(lu_cache_percent, "i", int, 0644,
                "Percentage of memory to be used as lu_object cache");
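
/*
 * Tuning sketch (assumptions, not code from this file): if CFS_MODULE_PARM
 * maps to the Linux module_param machinery, the 0644 mode above should make
 * lu_cache_percent settable at module load time or via sysfs, e.g.:
 *
 *      modprobe obdclass lu_cache_percent=30
 *      echo 30 > /sys/module/obdclass/parameters/lu_cache_percent
 *
 * The value is consulted in lu_htable_order() below, i.e., when a site's
 * hash table is sized.
 */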

/**
 * Return desired hash table order.
 */
static int lu_htable_order(void)
{
        unsigned long cache_size;
        int bits;

        /*
         * Calculate hash table size, assuming that we want reasonable
         * performance when 20% of total memory is occupied by a cache of
         * lu_objects.
         *
         * The size of an lu_object is (arbitrarily) taken as 1K (together
         * with its inode).
         */
        cache_size = cfs_num_physpages;

#if BITS_PER_LONG == 32
        /* limit hashtable size for lowmem systems to low RAM */
        if (cache_size > 1 << (30 - CFS_PAGE_SHIFT))
                cache_size = 1 << (30 - CFS_PAGE_SHIFT) * 3 / 4;
#endif

        /* clear off unreasonable cache setting. */
        if (lu_cache_percent == 0 || lu_cache_percent > LU_CACHE_PERCENT_MAX) {
                CWARN("obdclass: invalid lu_cache_percent: %u, it must be in"
                      " the range of (0, %u]. Will use default value: %u.\n",
                      lu_cache_percent, LU_CACHE_PERCENT_MAX,
                      LU_CACHE_PERCENT_DEFAULT);

                lu_cache_percent = LU_CACHE_PERCENT_DEFAULT;
        }
        cache_size = cache_size / 100 * lu_cache_percent *
                (CFS_PAGE_SIZE / 1024);

        for (bits = 1; (1 << bits) < cache_size; ++bits) {
                ;
        }
        return bits;
}
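
/*
 * Worked example (illustrative numbers, not measured): on a machine with
 * 4 GiB of RAM and 4 KiB pages, cfs_num_physpages is 1048576. With the
 * default lu_cache_percent of 20, integer arithmetic gives
 *
 *      cache_size = 1048576 / 100 * 20 * (4096 / 1024) = 838800
 *
 * and the smallest bits with (1 << bits) >= 838800 is 20, so the hash
 * table order would be 20.
 */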

static unsigned lu_obj_hop_hash(cfs_hash_t *hs,
                                const void *key, unsigned mask)
{
        struct lu_fid  *fid = (struct lu_fid *)key;
        unsigned        hash;

        hash = (fid_seq(fid) + fid_oid(fid)) & (CFS_HASH_NBKT(hs) - 1);
        hash += fid_hash(fid, hs->hs_bkt_bits) << hs->hs_bkt_bits;
        return hash & mask;
}

static void *lu_obj_hop_object(cfs_hlist_node_t *hnode)
{
        return cfs_hlist_entry(hnode, struct lu_object_header, loh_hash);
}

static void *lu_obj_hop_key(cfs_hlist_node_t *hnode)
{
        struct lu_object_header *h;

        h = cfs_hlist_entry(hnode, struct lu_object_header, loh_hash);
        return &h->loh_fid;
}

static int lu_obj_hop_keycmp(const void *key, cfs_hlist_node_t *hnode)
{
        struct lu_object_header *h;

        h = cfs_hlist_entry(hnode, struct lu_object_header, loh_hash);
        return lu_fid_eq(&h->loh_fid, (struct lu_fid *)key);
}

static void lu_obj_hop_get(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
{
        struct lu_object_header *h;

        h = cfs_hlist_entry(hnode, struct lu_object_header, loh_hash);
        if (cfs_atomic_add_return(1, &h->loh_ref) == 1) {
                struct lu_site_bkt_data *bkt;
                cfs_hash_bd_t            bd;

                cfs_hash_bd_get(hs, &h->loh_fid, &bd);
                bkt = cfs_hash_bd_extra_get(hs, &bd);
                bkt->lsb_busy++;
        }
}

static void lu_obj_hop_put_locked(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
{
        LBUG(); /* this should never be called */
}

cfs_hash_ops_t lu_site_hash_ops = {
        .hs_hash        = lu_obj_hop_hash,
        .hs_key         = lu_obj_hop_key,
        .hs_keycmp      = lu_obj_hop_keycmp,
        .hs_object      = lu_obj_hop_object,
        .hs_get         = lu_obj_hop_get,
        .hs_put_locked  = lu_obj_hop_put_locked,
};

/**
 * Initialize site \a s, with \a top as the top level device.
 */
#define LU_SITE_BITS_MIN    12
#define LU_SITE_BITS_MAX    23
/**
 * 128 buckets in total; we don't want too many buckets because they:
 * - consume too much memory
 * - lead to unbalanced LRU lists
 */
#define LU_SITE_BKT_BITS    7

int lu_site_init(struct lu_site *s, struct lu_device *top)
{
        struct lu_site_bkt_data *bkt;
        cfs_hash_bd_t bd;
        int bits;
        int i;
        ENTRY;

        memset(s, 0, sizeof *s);
        bits = lu_htable_order();
        for (bits = min(max(LU_SITE_BITS_MIN, bits), LU_SITE_BITS_MAX);
             bits >= LU_SITE_BITS_MIN; bits--) {
                s->ls_obj_hash = cfs_hash_create("lu_site", bits, bits,
                                                 bits - LU_SITE_BKT_BITS,
                                                 sizeof(*bkt), 0, 0,
                                                 &lu_site_hash_ops,
                                                 CFS_HASH_SPIN_BKTLOCK |
                                                 CFS_HASH_NO_ITEMREF |
                                                 CFS_HASH_DEPTH |
                                                 CFS_HASH_ASSERT_EMPTY);
                if (s->ls_obj_hash != NULL)
                        break;
        }

        if (s->ls_obj_hash == NULL) {
                CERROR("failed to create lu_site hash with bits: %d\n", bits);
                return -ENOMEM;
        }

        cfs_hash_for_each_bucket(s->ls_obj_hash, &bd, i) {
                bkt = cfs_hash_bd_extra_get(s->ls_obj_hash, &bd);
                CFS_INIT_LIST_HEAD(&bkt->lsb_lru);
                cfs_waitq_init(&bkt->lsb_marche_funebre);
        }

        s->ls_stats = lprocfs_alloc_stats(LU_SS_LAST_STAT, 0);
        if (s->ls_stats == NULL) {
                cfs_hash_putref(s->ls_obj_hash);
                s->ls_obj_hash = NULL;
                return -ENOMEM;
        }

        lprocfs_counter_init(s->ls_stats, LU_SS_CREATED,
                             0, "created", "created");
        lprocfs_counter_init(s->ls_stats, LU_SS_CACHE_HIT,
                             0, "cache_hit", "cache_hit");
        lprocfs_counter_init(s->ls_stats, LU_SS_CACHE_MISS,
                             0, "cache_miss", "cache_miss");
        lprocfs_counter_init(s->ls_stats, LU_SS_CACHE_RACE,
                             0, "cache_race", "cache_race");
        lprocfs_counter_init(s->ls_stats, LU_SS_CACHE_DEATH_RACE,
                             0, "cache_death_race", "cache_death_race");
        lprocfs_counter_init(s->ls_stats, LU_SS_LRU_PURGED,
                             0, "lru_purged", "lru_purged");

        CFS_INIT_LIST_HEAD(&s->ls_linkage);
        s->ls_top_dev = top;
        top->ld_site = s;
        lu_device_get(top);
        lu_ref_add(&top->ld_reference, "site-top", s);

        RETURN(0);
}
EXPORT_SYMBOL(lu_site_init);

/**
 * Finalize \a s and release its resources.
 */
void lu_site_fini(struct lu_site *s)
{
        cfs_down(&lu_sites_guard);
        cfs_list_del_init(&s->ls_linkage);
        cfs_up(&lu_sites_guard);

        if (s->ls_obj_hash != NULL) {
                cfs_hash_putref(s->ls_obj_hash);
                s->ls_obj_hash = NULL;
        }

        if (s->ls_top_dev != NULL) {
                s->ls_top_dev->ld_site = NULL;
                lu_ref_del(&s->ls_top_dev->ld_reference, "site-top", s);
                lu_device_put(s->ls_top_dev);
                s->ls_top_dev = NULL;
        }

        if (s->ls_stats != NULL)
                lprocfs_free_stats(&s->ls_stats);
}
EXPORT_SYMBOL(lu_site_fini);

/**
 * Called when initialization of stack for this site is completed.
 */
int lu_site_init_finish(struct lu_site *s)
{
        int result;
        cfs_down(&lu_sites_guard);
        result = lu_context_refill(&lu_shrink_env.le_ctx);
        if (result == 0)
                cfs_list_add(&s->ls_linkage, &lu_sites);
        cfs_up(&lu_sites_guard);
        return result;
}
EXPORT_SYMBOL(lu_site_init_finish);

/**
 * Acquire additional reference on device \a d
 */
void lu_device_get(struct lu_device *d)
{
        cfs_atomic_inc(&d->ld_ref);
}
EXPORT_SYMBOL(lu_device_get);

/**
 * Release reference on device \a d.
 */
void lu_device_put(struct lu_device *d)
{
        LASSERT(cfs_atomic_read(&d->ld_ref) > 0);
        cfs_atomic_dec(&d->ld_ref);
}
EXPORT_SYMBOL(lu_device_put);

/**
 * Initialize device \a d of type \a t.
 */
int lu_device_init(struct lu_device *d, struct lu_device_type *t)
{
        if (t->ldt_device_nr++ == 0 && t->ldt_ops->ldto_start != NULL)
                t->ldt_ops->ldto_start(t);
        memset(d, 0, sizeof *d);
        cfs_atomic_set(&d->ld_ref, 0);
        d->ld_type = t;
        lu_ref_init(&d->ld_reference);
        return 0;
}
EXPORT_SYMBOL(lu_device_init);

/**
 * Finalize device \a d.
 */
void lu_device_fini(struct lu_device *d)
{
        struct lu_device_type *t;

        t = d->ld_type;
        if (d->ld_obd != NULL) {
                d->ld_obd->obd_lu_dev = NULL;
                d->ld_obd = NULL;
        }

        lu_ref_fini(&d->ld_reference);
        LASSERTF(cfs_atomic_read(&d->ld_ref) == 0,
                 "Refcount is %u\n", cfs_atomic_read(&d->ld_ref));
        LASSERT(t->ldt_device_nr > 0);
        if (--t->ldt_device_nr == 0 && t->ldt_ops->ldto_stop != NULL)
                t->ldt_ops->ldto_stop(t);
}
EXPORT_SYMBOL(lu_device_fini);

/**
 * Initialize object \a o that is part of compound object \a h and was created
 * by device \a d.
 */
int lu_object_init(struct lu_object *o,
                   struct lu_object_header *h, struct lu_device *d)
{
        memset(o, 0, sizeof *o);
        o->lo_header = h;
        o->lo_dev    = d;
        lu_device_get(d);
        o->lo_dev_ref = lu_ref_add(&d->ld_reference, "lu_object", o);
        CFS_INIT_LIST_HEAD(&o->lo_linkage);
        return 0;
}
EXPORT_SYMBOL(lu_object_init);

/**
 * Finalize object and release its resources.
 */
void lu_object_fini(struct lu_object *o)
{
        struct lu_device *dev = o->lo_dev;

        LASSERT(cfs_list_empty(&o->lo_linkage));

        if (dev != NULL) {
                lu_ref_del_at(&dev->ld_reference,
                              o->lo_dev_ref, "lu_object", o);
                lu_device_put(dev);
                o->lo_dev = NULL;
        }
}
EXPORT_SYMBOL(lu_object_fini);

/**
 * Add object \a o as first layer of compound object \a h
 *
 * This is typically called by the ->ldo_object_alloc() method of top-level
 * device.
 */
void lu_object_add_top(struct lu_object_header *h, struct lu_object *o)
{
        cfs_list_move(&o->lo_linkage, &h->loh_layers);
}
EXPORT_SYMBOL(lu_object_add_top);

/**
 * Add object \a o as a layer of compound object, going after \a before.
 *
 * This is typically called by the ->ldo_object_alloc() method of \a
 * before->lo_dev.
 */
void lu_object_add(struct lu_object *before, struct lu_object *o)
{
        cfs_list_move(&o->lo_linkage, &before->lo_linkage);
}
EXPORT_SYMBOL(lu_object_add);

/**
 * Initialize compound object.
 */
int lu_object_header_init(struct lu_object_header *h)
{
        memset(h, 0, sizeof *h);
        cfs_atomic_set(&h->loh_ref, 1);
        CFS_INIT_HLIST_NODE(&h->loh_hash);
        CFS_INIT_LIST_HEAD(&h->loh_lru);
        CFS_INIT_LIST_HEAD(&h->loh_layers);
        lu_ref_init(&h->loh_reference);
        return 0;
}
EXPORT_SYMBOL(lu_object_header_init);

/**
 * Finalize compound object.
 */
void lu_object_header_fini(struct lu_object_header *h)
{
        LASSERT(cfs_list_empty(&h->loh_layers));
        LASSERT(cfs_list_empty(&h->loh_lru));
        LASSERT(cfs_hlist_unhashed(&h->loh_hash));
        lu_ref_fini(&h->loh_reference);
}
EXPORT_SYMBOL(lu_object_header_fini);

/**
 * Given a compound object, find its slice, corresponding to the device type
 * \a dtype.
 */
struct lu_object *lu_object_locate(struct lu_object_header *h,
                                   const struct lu_device_type *dtype)
{
        struct lu_object *o;

        cfs_list_for_each_entry(o, &h->loh_layers, lo_linkage) {
                if (o->lo_dev->ld_type == dtype)
                        return o;
        }
        return NULL;
}
EXPORT_SYMBOL(lu_object_locate);

/**
 * Finalize and free devices in the device stack.
 *
 * Finalize device stack by purging object cache, and calling
 * lu_device_type_operations::ldto_device_fini() and
 * lu_device_type_operations::ldto_device_free() on all devices in the stack.
 */
void lu_stack_fini(const struct lu_env *env, struct lu_device *top)
{
        struct lu_site   *site = top->ld_site;
        struct lu_device *scan;
        struct lu_device *next;

        lu_site_purge(env, site, ~0);
        for (scan = top; scan != NULL; scan = next) {
                next = scan->ld_type->ldt_ops->ldto_device_fini(env, scan);
                lu_ref_del(&scan->ld_reference, "lu-stack", &lu_site_init);
                lu_device_put(scan);
        }

        /* purge again. */
        lu_site_purge(env, site, ~0);

        if (!cfs_hash_is_empty(site->ls_obj_hash)) {
                /*
                 * Uh-oh, objects still exist.
                 */
                static DECLARE_LU_CDEBUG_PRINT_INFO(cookie, D_ERROR);

                lu_site_print(env, site, &cookie, lu_cdebug_printer);
        }

        for (scan = top; scan != NULL; scan = next) {
                const struct lu_device_type *ldt = scan->ld_type;
                struct obd_type             *type;

                next = ldt->ldt_ops->ldto_device_free(env, scan);
                type = ldt->ldt_obd_type;
                if (type != NULL) {
                        type->typ_refcnt--;
                        class_put_type(type);
                }
        }
}
EXPORT_SYMBOL(lu_stack_fini);

enum {
        /**
         * Maximal number of tld slots.
         */
        LU_CONTEXT_KEY_NR = 32
};

static struct lu_context_key *lu_keys[LU_CONTEXT_KEY_NR] = { NULL, };

static cfs_spinlock_t lu_keys_guard = CFS_SPIN_LOCK_UNLOCKED;

/**
 * Global counter incremented whenever a key is registered, unregistered,
 * revived or quiesced. This is used to avoid unnecessary calls to
 * lu_context_refill(). No locking is provided, as initialization and shutdown
 * are supposed to be externally serialized.
 */
static unsigned key_set_version = 0;

/**
 * Register new key.
 */
int lu_context_key_register(struct lu_context_key *key)
{
        int result;
        int i;

        LASSERT(key->lct_init != NULL);
        LASSERT(key->lct_fini != NULL);
        LASSERT(key->lct_tags != 0);
        LASSERT(key->lct_owner != NULL);

        result = -ENFILE;
        cfs_spin_lock(&lu_keys_guard);
        for (i = 0; i < ARRAY_SIZE(lu_keys); ++i) {
                if (lu_keys[i] == NULL) {
                        key->lct_index = i;
                        cfs_atomic_set(&key->lct_used, 1);
                        lu_keys[i] = key;
                        lu_ref_init(&key->lct_reference);
                        result = 0;
                        ++key_set_version;
                        break;
                }
        }
        cfs_spin_unlock(&lu_keys_guard);
        return result;
}
EXPORT_SYMBOL(lu_context_key_register);

static void key_fini(struct lu_context *ctx, int index)
{
        if (ctx->lc_value != NULL && ctx->lc_value[index] != NULL) {
                struct lu_context_key *key;

                key = lu_keys[index];
                LASSERT(key != NULL);
                LASSERT(key->lct_fini != NULL);
                LASSERT(cfs_atomic_read(&key->lct_used) > 1);

                key->lct_fini(ctx, key, ctx->lc_value[index]);
                lu_ref_del(&key->lct_reference, "ctx", ctx);
                cfs_atomic_dec(&key->lct_used);
                LASSERT(key->lct_owner != NULL);
                if (!(ctx->lc_tags & LCT_NOREF)) {
                        LASSERT(cfs_module_refcount(key->lct_owner) > 0);
                        cfs_module_put(key->lct_owner);
                }
                ctx->lc_value[index] = NULL;
        }
}

/**
 * Deregister key.
 */
void lu_context_key_degister(struct lu_context_key *key)
{
        LASSERT(cfs_atomic_read(&key->lct_used) >= 1);
        LINVRNT(0 <= key->lct_index && key->lct_index < ARRAY_SIZE(lu_keys));

        lu_context_key_quiesce(key);

        ++key_set_version;
        cfs_spin_lock(&lu_keys_guard);
        key_fini(&lu_shrink_env.le_ctx, key->lct_index);
        if (lu_keys[key->lct_index]) {
                lu_keys[key->lct_index] = NULL;
                lu_ref_fini(&key->lct_reference);
        }
        cfs_spin_unlock(&lu_keys_guard);

        LASSERTF(cfs_atomic_read(&key->lct_used) == 1,
                 "key has instances: %d\n",
                 cfs_atomic_read(&key->lct_used));
}
EXPORT_SYMBOL(lu_context_key_degister);

/**
 * Register a number of keys. This has to be called after all keys have been
 * initialized by a call to LU_CONTEXT_KEY_INIT().
 */
int lu_context_key_register_many(struct lu_context_key *k, ...)
{
        struct lu_context_key *key = k;
        va_list args;
        int result;

        va_start(args, k);
        do {
                result = lu_context_key_register(key);
                if (result)
                        break;
                key = va_arg(args, struct lu_context_key *);
        } while (key != NULL);
        va_end(args);

        if (result != 0) {
                va_start(args, k);
                while (k != key) {
                        lu_context_key_degister(k);
                        k = va_arg(args, struct lu_context_key *);
                }
                va_end(args);
        }

        return result;
}
EXPORT_SYMBOL(lu_context_key_register_many);
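
/*
 * Usage sketch: the variadic key list must be NULL-terminated, and partial
 * registrations unwind automatically on failure. The key names here are
 * assumptions for the example.
 *
 *      result = lu_context_key_register_many(&foo_thread_key,
 *                                            &foo_session_key, NULL);
 */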

/**
 * De-register a number of keys. This is a dual to
 * lu_context_key_register_many().
 */
void lu_context_key_degister_many(struct lu_context_key *k, ...)
{
        va_list args;

        va_start(args, k);
        do {
                lu_context_key_degister(k);
                k = va_arg(args, struct lu_context_key *);
        } while (k != NULL);
        va_end(args);
}
EXPORT_SYMBOL(lu_context_key_degister_many);

/**
 * Revive a number of keys.
 */
void lu_context_key_revive_many(struct lu_context_key *k, ...)
{
        va_list args;

        va_start(args, k);
        do {
                lu_context_key_revive(k);
                k = va_arg(args, struct lu_context_key *);
        } while (k != NULL);
        va_end(args);
}
EXPORT_SYMBOL(lu_context_key_revive_many);

/**
 * Quiesce a number of keys.
 */
1391 void lu_context_key_quiesce_many(struct lu_context_key *k, ...)
1392 {
1393         va_list args;
1394
1395         va_start(args, k);
1396         do {
1397                 lu_context_key_quiesce(k);
1398                 k = va_arg(args, struct lu_context_key*);
1399         } while (k != NULL);
1400         va_end(args);
1401 }
1402 EXPORT_SYMBOL(lu_context_key_quiesce_many);
1403
1404 /**
1405  * Return value associated with key \a key in context \a ctx.
1406  */
1407 void *lu_context_key_get(const struct lu_context *ctx,
1408                          const struct lu_context_key *key)
1409 {
1410         LINVRNT(ctx->lc_state == LCS_ENTERED);
1411         LINVRNT(0 <= key->lct_index && key->lct_index < ARRAY_SIZE(lu_keys));
1412         LASSERT(lu_keys[key->lct_index] == key);
1413         return ctx->lc_value[key->lct_index];
1414 }
1415 EXPORT_SYMBOL(lu_context_key_get);
1416
1417 /**
1418  * List of remembered contexts. XXX document me.
1419  */
1420 static CFS_LIST_HEAD(lu_context_remembered);
1421
1422 /**
1423  * Destroy \a key in all remembered contexts. This is used to destroy key
1424  * values in "shared" contexts (like service threads), when a module owning
1425  * the key is about to be unloaded.
1426  */
1427 void lu_context_key_quiesce(struct lu_context_key *key)
1428 {
1429         struct lu_context *ctx;
1430         extern unsigned cl_env_cache_purge(unsigned nr);
1431
1432         if (!(key->lct_tags & LCT_QUIESCENT)) {
1433                 /*
1434                  * XXX layering violation.
1435                  */
1436                 cl_env_cache_purge(~0);
1437                 key->lct_tags |= LCT_QUIESCENT;
1438                 /*
1439                  * XXX memory barrier has to go here.
1440                  */
1441                 cfs_spin_lock(&lu_keys_guard);
1442                 cfs_list_for_each_entry(ctx, &lu_context_remembered,
1443                                         lc_remember)
1444                         key_fini(ctx, key->lct_index);
1445                 cfs_spin_unlock(&lu_keys_guard);
1446                 ++key_set_version;
1447         }
1448 }
1449 EXPORT_SYMBOL(lu_context_key_quiesce);
1450
1451 void lu_context_key_revive(struct lu_context_key *key)
1452 {
1453         key->lct_tags &= ~LCT_QUIESCENT;
1454         ++key_set_version;
1455 }
1456 EXPORT_SYMBOL(lu_context_key_revive);

static void keys_fini(struct lu_context *ctx)
{
        int i;

        cfs_spin_lock(&lu_keys_guard);
        if (ctx->lc_value != NULL) {
                for (i = 0; i < ARRAY_SIZE(lu_keys); ++i)
                        key_fini(ctx, i);
                OBD_FREE(ctx->lc_value,
                         ARRAY_SIZE(lu_keys) * sizeof ctx->lc_value[0]);
                ctx->lc_value = NULL;
        }
        cfs_spin_unlock(&lu_keys_guard);
}

static int keys_fill(struct lu_context *ctx)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(lu_keys); ++i) {
                struct lu_context_key *key;

                key = lu_keys[i];
                if (ctx->lc_value[i] == NULL && key != NULL &&
                    (key->lct_tags & ctx->lc_tags) &&
                    /*
                     * Don't create values for a LCT_QUIESCENT key, as doing
                     * so would pin the module owning the key.
                     */
                    !(key->lct_tags & LCT_QUIESCENT)) {
                        void *value;

                        LINVRNT(key->lct_init != NULL);
                        LINVRNT(key->lct_index == i);

                        value = key->lct_init(ctx, key);
                        if (unlikely(IS_ERR(value)))
                                return PTR_ERR(value);

                        LASSERT(key->lct_owner != NULL);
                        if (!(ctx->lc_tags & LCT_NOREF))
                                cfs_try_module_get(key->lct_owner);
                        lu_ref_add_atomic(&key->lct_reference, "ctx", ctx);
                        cfs_atomic_inc(&key->lct_used);
                        /*
                         * This is the only place in the code where an
                         * element of the ctx->lc_value[] array is set to a
                         * non-NULL value.
                         */
                        ctx->lc_value[i] = value;
                        if (key->lct_exit != NULL)
                                ctx->lc_tags |= LCT_HAS_EXIT;
                }
                ctx->lc_version = key_set_version;
        }
        return 0;
}

static int keys_init(struct lu_context *ctx)
{
        int result;

        OBD_ALLOC(ctx->lc_value, ARRAY_SIZE(lu_keys) * sizeof ctx->lc_value[0]);
        if (likely(ctx->lc_value != NULL))
                result = keys_fill(ctx);
        else
                result = -ENOMEM;

        if (result != 0)
                keys_fini(ctx);
        return result;
}

/**
 * Initialize the context data structure and create values for all
 * registered keys that match \a tags.
 */
int lu_context_init(struct lu_context *ctx, __u32 tags)
{
        memset(ctx, 0, sizeof *ctx);
        ctx->lc_state = LCS_INITIALIZED;
        ctx->lc_tags = tags;
        if (tags & LCT_REMEMBER) {
                cfs_spin_lock(&lu_keys_guard);
                cfs_list_add(&ctx->lc_remember, &lu_context_remembered);
                cfs_spin_unlock(&lu_keys_guard);
        } else
                CFS_INIT_LIST_HEAD(&ctx->lc_remember);
        return keys_init(ctx);
}
EXPORT_SYMBOL(lu_context_init);
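
/*
 * Note: contexts initialized with LCT_REMEMBER are linked onto the global
 * lu_context_remembered list; this is what allows lu_context_key_quiesce()
 * above to find them and finalize their values for a key that is going
 * away.
 */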

/**
 * Finalize the context data structure and destroy all key values.
 */
void lu_context_fini(struct lu_context *ctx)
{
        LINVRNT(ctx->lc_state == LCS_INITIALIZED || ctx->lc_state == LCS_LEFT);
        ctx->lc_state = LCS_FINALIZED;
        keys_fini(ctx);
        cfs_spin_lock(&lu_keys_guard);
        cfs_list_del_init(&ctx->lc_remember);
        cfs_spin_unlock(&lu_keys_guard);
}
EXPORT_SYMBOL(lu_context_fini);
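
/*
 * Lifecycle sketch (illustrative only; LCT_MD_THREAD is just one example
 * tag): a context is initialized once, entered around each period of use,
 * and finalized when no longer needed.
 *
 *      struct lu_context ctx;
 *      int rc;
 *
 *      rc = lu_context_init(&ctx, LCT_MD_THREAD);
 *      if (rc == 0) {
 *              lu_context_enter(&ctx);
 *              ... use the values attached to ctx ...
 *              lu_context_exit(&ctx);
 *              lu_context_fini(&ctx);
 *      }
 */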

/**
 * Called before entering the context.
 */
void lu_context_enter(struct lu_context *ctx)
{
        LINVRNT(ctx->lc_state == LCS_INITIALIZED || ctx->lc_state == LCS_LEFT);
        ctx->lc_state = LCS_ENTERED;
}
EXPORT_SYMBOL(lu_context_enter);

/**
 * Called after exiting from \a ctx.
 */
void lu_context_exit(struct lu_context *ctx)
{
        int i;

        LINVRNT(ctx->lc_state == LCS_ENTERED);
        ctx->lc_state = LCS_LEFT;
        if (ctx->lc_tags & LCT_HAS_EXIT && ctx->lc_value != NULL) {
                for (i = 0; i < ARRAY_SIZE(lu_keys); ++i) {
                        if (ctx->lc_value[i] != NULL) {
                                struct lu_context_key *key;

                                key = lu_keys[i];
                                LASSERT(key != NULL);
                                if (key->lct_exit != NULL)
                                        key->lct_exit(ctx,
                                                      key, ctx->lc_value[i]);
                        }
                }
        }
}
EXPORT_SYMBOL(lu_context_exit);

/**
 * Allocate values in \a ctx for all missing keys that were registered after
 * the context was created.
 */
int lu_context_refill(struct lu_context *ctx)
{
        LINVRNT(ctx->lc_value != NULL);
        return ctx->lc_version == key_set_version ? 0 : keys_fill(ctx);
}
EXPORT_SYMBOL(lu_context_refill);
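
/*
 * Note: key_set_version is incremented whenever the key set changes (a key
 * is registered, quiesced or revived), so the comparison above makes
 * lu_context_refill() a cheap no-op on the common path. A long-lived,
 * cached context would typically be refilled before reuse, e.g.:
 *
 *      if (lu_context_refill(&ctx) == 0)
 *              lu_context_enter(&ctx);
 */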

int lu_env_init(struct lu_env *env, __u32 tags)
{
        int result;

        env->le_ses = NULL;
        result = lu_context_init(&env->le_ctx, tags);
        if (likely(result == 0))
                lu_context_enter(&env->le_ctx);
        return result;
}
EXPORT_SYMBOL(lu_env_init);

void lu_env_fini(struct lu_env *env)
{
        lu_context_exit(&env->le_ctx);
        lu_context_fini(&env->le_ctx);
        env->le_ses = NULL;
}
EXPORT_SYMBOL(lu_env_fini);
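
/*
 * Usage sketch (illustrative only; LCT_LOCAL is just an example tag): an
 * lu_env bundles a thread context with an optional session context, so a
 * unit of work is normally bracketed by lu_env_init()/lu_env_fini().
 *
 *      struct lu_env env;
 *      int rc;
 *
 *      rc = lu_env_init(&env, LCT_LOCAL);
 *      if (rc == 0) {
 *              ... pass &env down to the lu_object_* calls ...
 *              lu_env_fini(&env);
 *      }
 */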

int lu_env_refill(struct lu_env *env)
{
        int result;

        result = lu_context_refill(&env->le_ctx);
        if (result == 0 && env->le_ses != NULL)
                result = lu_context_refill(env->le_ses);
        return result;
}
EXPORT_SYMBOL(lu_env_refill);

static struct cfs_shrinker *lu_site_shrinker = NULL;

typedef struct lu_site_stats {
        unsigned        lss_populated;
        unsigned        lss_max_search;
        unsigned        lss_total;
        unsigned        lss_busy;
} lu_site_stats_t;

static void lu_site_stats_get(cfs_hash_t *hs,
                              lu_site_stats_t *stats, int populated)
{
        cfs_hash_bd_t bd;
        int           i;

        cfs_hash_for_each_bucket(hs, &bd, i) {
                struct lu_site_bkt_data *bkt = cfs_hash_bd_extra_get(hs, &bd);
                cfs_hlist_head_t        *hhead;

                cfs_hash_bd_lock(hs, &bd, 1);
                stats->lss_busy  += bkt->lsb_busy;
                stats->lss_total += cfs_hash_bd_count_get(&bd);
                stats->lss_max_search = max((int)stats->lss_max_search,
                                            cfs_hash_bd_depmax_get(&bd));
                if (!populated) {
                        cfs_hash_bd_unlock(hs, &bd, 1);
                        continue;
                }

                cfs_hash_bd_for_each_hlist(hs, &bd, hhead) {
                        if (!cfs_hlist_empty(hhead))
                                stats->lss_populated++;
                }
                cfs_hash_bd_unlock(hs, &bd, 1);
        }
}

#ifdef __KERNEL__
static int lu_cache_shrink(SHRINKER_FIRST_ARG int nr_to_scan,
                           unsigned int gfp_mask)
{
        lu_site_stats_t stats;
        struct lu_site *s;
        struct lu_site *tmp;
        int cached = 0;
        int remain = nr_to_scan;
        CFS_LIST_HEAD(splice);

        if (nr_to_scan != 0) {
                if (!(gfp_mask & __GFP_FS))
                        return -1;
                CDEBUG(D_INODE, "Shrink %d objects\n", nr_to_scan);
        }

        cfs_down(&lu_sites_guard);
        cfs_list_for_each_entry_safe(s, tmp, &lu_sites, ls_linkage) {
                if (nr_to_scan != 0) {
                        remain = lu_site_purge(&lu_shrink_env, s, remain);
                        /*
                         * Move the just-shrunk site to the tail of the site
                         * list to ensure shrinking fairness.
                         */
                        cfs_list_move_tail(&s->ls_linkage, &splice);
                }

                memset(&stats, 0, sizeof(stats));
                lu_site_stats_get(s->ls_obj_hash, &stats, 0);
                cached += stats.lss_total - stats.lss_busy;
                if (nr_to_scan && remain <= 0)
                        break;
        }
        cfs_list_splice(&splice, lu_sites.prev);
        cfs_up(&lu_sites_guard);

        cached = (cached / 100) * sysctl_vfs_cache_pressure;
        if (nr_to_scan == 0)
                CDEBUG(D_INODE, "%d objects cached\n", cached);
        return cached;
}
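
/*
 * A worked example of the scaling above: with the default
 * sysctl_vfs_cache_pressure of 100 the reported count is returned
 * unchanged ((cached / 100) * 100), while a pressure of 200 doubles it,
 * making the VM reclaim lu_objects twice as eagerly relative to page
 * reclaim, and a pressure of 50 halves it.
 */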

/*
 * Debugging stuff.
 */

/**
 * Environment to be used in a debugger; contains all tags.
 */
struct lu_env lu_debugging_env;

/**
 * Debugging printer function using printk().
 */
int lu_printk_printer(const struct lu_env *env,
                      void *unused, const char *format, ...)
{
        va_list args;

        va_start(args, format);
        vprintk(format, args);
        va_end(args);
        return 0;
}

void lu_debugging_setup(void)
{
        lu_env_init(&lu_debugging_env, ~0);
}

void lu_context_keys_dump(void)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(lu_keys); ++i) {
                struct lu_context_key *key;

                key = lu_keys[i];
                if (key != NULL) {
                        CERROR("[%d]: %p %x (%p,%p,%p) %d %d \"%s\"@%p\n",
                               i, key, key->lct_tags,
                               key->lct_init, key->lct_fini, key->lct_exit,
                               key->lct_index, cfs_atomic_read(&key->lct_used),
                               key->lct_owner ? key->lct_owner->name : "",
                               key->lct_owner);
                        lu_ref_print(&key->lct_reference);
                }
        }
}
EXPORT_SYMBOL(lu_context_keys_dump);
#else  /* !__KERNEL__ */
static int lu_cache_shrink(int nr, unsigned int gfp_mask)
{
        return 0;
}
#endif /* __KERNEL__ */

int  cl_global_init(void);
void cl_global_fini(void);
int  lu_ref_global_init(void);
void lu_ref_global_fini(void);

int dt_global_init(void);
void dt_global_fini(void);

int llo_global_init(void);
void llo_global_fini(void);

/**
 * Initialization of global lu_* data.
 */
int lu_global_init(void)
{
        int result;

        CDEBUG(D_INFO, "Lustre LU module (%p).\n", &lu_keys);

        result = lu_ref_global_init();
        if (result != 0)
                return result;

        LU_CONTEXT_KEY_INIT(&lu_global_key);
        result = lu_context_key_register(&lu_global_key);
        if (result != 0)
                return result;
        /*
         * At this level, we don't know what tags are needed, so allocate
         * them conservatively. This should not be too bad, because this
         * environment is global.
         */
        cfs_down(&lu_sites_guard);
        result = lu_env_init(&lu_shrink_env, LCT_SHRINKER);
        cfs_up(&lu_sites_guard);
        if (result != 0)
                return result;

        /*
         * Seeks estimation: 3 seeks to read a record from the OI, one to
         * read the inode, and one for the EA. Unfortunately, setting such a
         * high value results in the lu_object/inode cache consuming all of
         * memory.
         */
        lu_site_shrinker = cfs_set_shrinker(CFS_DEFAULT_SEEKS, lu_cache_shrink);
        if (lu_site_shrinker == NULL)
                return -ENOMEM;

        result = lu_time_global_init();
        if (result)
                GOTO(out, result);

#ifdef __KERNEL__
        result = dt_global_init();
        if (result)
                GOTO(out, result);

        result = llo_global_init();
        if (result)
                GOTO(out, result);
#endif
        result = cl_global_init();
out:

        return result;
}

/**
 * Dual to lu_global_init().
 */
void lu_global_fini(void)
{
        cl_global_fini();
#ifdef __KERNEL__
        llo_global_fini();
        dt_global_fini();
#endif
        lu_time_global_fini();
        if (lu_site_shrinker != NULL) {
                cfs_remove_shrinker(lu_site_shrinker);
                lu_site_shrinker = NULL;
        }

        lu_context_key_degister(&lu_global_key);

        /*
         * Tear the shrinker environment down _after_ de-registering
         * lu_global_key, because the latter has a value in the former.
         */
        cfs_down(&lu_sites_guard);
        lu_env_fini(&lu_shrink_env);
        cfs_up(&lu_sites_guard);

        lu_ref_global_fini();
}

struct lu_buf LU_BUF_NULL = {
        .lb_buf = NULL,
        .lb_len = 0
};
EXPORT_SYMBOL(LU_BUF_NULL);
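
/*
 * Illustrative use (hypothetical caller): LU_BUF_NULL gives callers a
 * well-defined empty buffer to start from or reset to, e.g.
 *
 *      struct lu_buf buf = LU_BUF_NULL;
 *
 * before pointing lb_buf/lb_len at real storage.
 */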

static __u32 ls_stats_read(struct lprocfs_stats *stats, int idx)
{
#ifdef LPROCFS
        struct lprocfs_counter ret;

        lprocfs_stats_collect(stats, idx, &ret);
        return (__u32)ret.lc_count;
#else
        return 0;
#endif
}

/**
 * Output site statistical counters into a buffer. Suitable for
 * lprocfs_rd_*()-style functions.
 */
int lu_site_stats_print(const struct lu_site *s, char *page, int count)
{
        lu_site_stats_t stats;

        memset(&stats, 0, sizeof(stats));
        lu_site_stats_get(s->ls_obj_hash, &stats, 1);

        return snprintf(page, count, "%d/%d %d/%d %d %d %d %d %d %d %d\n",
                        stats.lss_busy,
                        stats.lss_total,
                        stats.lss_populated,
                        CFS_HASH_NHLIST(s->ls_obj_hash),
                        stats.lss_max_search,
                        ls_stats_read(s->ls_stats, LU_SS_CREATED),
                        ls_stats_read(s->ls_stats, LU_SS_CACHE_HIT),
                        ls_stats_read(s->ls_stats, LU_SS_CACHE_MISS),
                        ls_stats_read(s->ls_stats, LU_SS_CACHE_RACE),
                        ls_stats_read(s->ls_stats, LU_SS_CACHE_DEATH_RACE),
                        ls_stats_read(s->ls_stats, LU_SS_LRU_PURGED));
}
EXPORT_SYMBOL(lu_site_stats_print);
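
/*
 * For illustration (the numbers are made up), a line produced by
 * lu_site_stats_print() reads:
 *
 *      476/1024 221/256 8 12345 11234 1098 2 1 987
 *
 * that is: busy/total objects, populated/total hash chains, maximum chain
 * search depth, followed by the created, cache-hit, cache-miss, cache-race,
 * death-race and LRU-purged counters.
 */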

const char *lu_time_names[LU_TIME_NR] = {
        [LU_TIME_FIND_LOOKUP] = "find_lookup",
        [LU_TIME_FIND_ALLOC]  = "find_alloc",
        [LU_TIME_FIND_INSERT] = "find_insert"
};
EXPORT_SYMBOL(lu_time_names);

/**
 * Helper function to initialize a number of kmem slab caches at once.
 */
int lu_kmem_init(struct lu_kmem_descr *caches)
{
        int result;

        for (result = 0; caches->ckd_cache != NULL; ++caches) {
                *caches->ckd_cache = cfs_mem_cache_create(caches->ckd_name,
                                                          caches->ckd_size,
                                                          0, 0);
                if (*caches->ckd_cache == NULL) {
                        result = -ENOMEM;
                        break;
                }
        }
        return result;
}
EXPORT_SYMBOL(lu_kmem_init);
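
/*
 * Usage sketch (illustrative; the "foo" names are hypothetical): callers
 * describe their caches in a NULL-terminated array and pass it to
 * lu_kmem_init()/lu_kmem_fini() as a pair.
 *
 *      static cfs_mem_cache_t *foo_object_kmem;
 *
 *      static struct lu_kmem_descr foo_caches[] = {
 *              {
 *                      .ckd_cache = &foo_object_kmem,
 *                      .ckd_name  = "foo_object_kmem",
 *                      .ckd_size  = sizeof(struct foo_object)
 *              },
 *              {
 *                      .ckd_cache = NULL
 *              }
 *      };
 *
 *      rc = lu_kmem_init(foo_caches);
 *      ...
 *      lu_kmem_fini(foo_caches);
 */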

/**
 * Helper function to finalize a number of kmem slab caches at once. Dual to
 * lu_kmem_init().
 */
void lu_kmem_fini(struct lu_kmem_descr *caches)
{
        int rc;

        for (; caches->ckd_cache != NULL; ++caches) {
                if (*caches->ckd_cache != NULL) {
                        rc = cfs_mem_cache_destroy(*caches->ckd_cache);
                        LASSERTF(rc == 0, "couldn't destroy %s slab\n",
                                 caches->ckd_name);
                        *caches->ckd_cache = NULL;
                }
        }
}
EXPORT_SYMBOL(lu_kmem_fini);