1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2  * vim:expandtab:shiftwidth=8:tabstop=8:
3  *
4  * GPL HEADER START
5  *
6  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
7  *
8  * This program is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License version 2 only,
10  * as published by the Free Software Foundation.
11  *
12  * This program is distributed in the hope that it will be useful, but
13  * WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * General Public License version 2 for more details (a copy is included
16  * in the LICENSE file that accompanied this code).
17  *
18  * You should have received a copy of the GNU General Public License
19  * version 2 along with this program; If not, see
20  * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
21  *
22  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
23  * CA 95054 USA or visit www.sun.com if you need additional information or
24  * have any questions.
25  *
26  * GPL HEADER END
27  */
28 /*
29  * Copyright  2008 Sun Microsystems, Inc. All rights reserved
30  * Use is subject to license terms.
31  */
32 /*
33  * This file is part of Lustre, http://www.lustre.org/
34  * Lustre is a trademark of Sun Microsystems, Inc.
35  *
36  * lustre/obdclass/lu_object.c
37  *
38  * Lustre Object.
39  * These are the only exported functions; they provide generic
40  * infrastructure for managing object devices.
41  *
42  *   Author: Nikita Danilov <nikita.danilov@sun.com>
43  */
44
45 #define DEBUG_SUBSYSTEM S_CLASS
46 #ifndef EXPORT_SYMTAB
47 # define EXPORT_SYMTAB
48 #endif
49
50 #include <libcfs/libcfs.h>
51
52 #ifdef __KERNEL__
53 # include <linux/module.h>
54 #endif
55
56 /* hash_long() */
57 #include <libcfs/libcfs_hash.h>
58 #include <obd_class.h>
59 #include <obd_support.h>
60 #include <lustre_disk.h>
61 #include <lustre_fid.h>
62 #include <lu_object.h>
63 #include <libcfs/list.h>
64 /* lu_time_global_{init,fini}() */
65 #include <lu_time.h>
66
67 static void lu_object_free(const struct lu_env *env, struct lu_object *o);
68
69 /**
70  * Decrease the reference counter on an object. If the last reference is
71  * released, return the object to the cache, unless lu_object_is_dying(o)
72  * holds; in the latter case, free the object immediately.
73  */
74 void lu_object_put(const struct lu_env *env, struct lu_object *o)
75 {
76         struct lu_object_header *top;
77         struct lu_site          *site;
78         struct lu_object        *orig;
79         int                      kill_it;
80
81         top = o->lo_header;
82         site = o->lo_dev->ld_site;
83         orig = o;
84         kill_it = 0;
85         write_lock(&site->ls_guard);
86         if (atomic_dec_and_test(&top->loh_ref)) {
87                 /*
88                  * When last reference is released, iterate over object
89                  * layers, and notify them that object is no longer busy.
90                  */
91                 list_for_each_entry_reverse(o, &top->loh_layers, lo_linkage) {
92                         if (o->lo_ops->loo_object_release != NULL)
93                                 o->lo_ops->loo_object_release(env, o);
94                 }
95                 -- site->ls_busy;
96                 if (lu_object_is_dying(top)) {
97                         /*
98                          * If the object is dying (will not be cached), remove
99                          * it from the hash table and LRU.
100                          *
101                          * This is done with the hash table and LRU lists
102                          * locked. As the only ways to acquire the first
103                          * reference to a previously unreferenced object are
104                          * hash-table lookup (lu_object_find()) and LRU
105                          * scanning (lu_site_purge()), both done under the
106                          * hash-table and LRU lock, no race with a concurrent
107                          * lookup is possible and we can safely destroy the
108                          * object below.
109                          */
110                         hlist_del_init(&top->loh_hash);
111                         list_del_init(&top->loh_lru);
112                         -- site->ls_total;
113                         kill_it = 1;
114                 }
115         }
116         write_unlock(&site->ls_guard);
117         if (kill_it)
118                 /*
119                  * Object was already removed from hash and lru above, can
120                  * kill it.
121                  */
122                 lu_object_free(env, orig);
123 }
124 EXPORT_SYMBOL(lu_object_put);
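
/*
 * A minimal sketch of the reference life-cycle ("env", "dev" and "fid" are
 * assumed to be supplied by the caller; see lu_object_find() below for how
 * the reference is obtained):
 *
 *      struct lu_object *obj;
 *
 *      obj = lu_object_find(env, dev, fid, NULL);
 *      if (IS_ERR(obj))
 *              return PTR_ERR(obj);
 *      ...
 *      lu_object_put(env, obj);
 */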
125
126 /**
127  * Allocate new object.
128  *
129  * This follows object creation protocol, described in the comment within
130  * struct lu_device_operations definition.
131  */
132 static struct lu_object *lu_object_alloc(const struct lu_env *env,
133                                          struct lu_device *dev,
134                                          const struct lu_fid *f,
135                                          const struct lu_object_conf *conf)
136 {
137         struct lu_object *scan;
138         struct lu_object *top;
139         struct list_head *layers;
140         int clean;
141         int result;
142         ENTRY;
143
144         /*
145          * Create top-level object slice. This will also create
146          * lu_object_header.
147          */
148         top = dev->ld_ops->ldo_object_alloc(env, NULL, dev);
149         if (top == NULL)
150                 RETURN(ERR_PTR(-ENOMEM));
151         /*
152          * This is the only place where object fid is assigned. It's constant
153          * after this point.
154          */
155         LASSERT(fid_is_igif(f) || fid_ver(f) == 0);
156         top->lo_header->loh_fid  = *f;
157         layers = &top->lo_header->loh_layers;
158         do {
159                 /*
160                  * Call ->loo_object_init() repeatedly, until no more new
161                  * object slices are created.
162                  */
163                 clean = 1;
164                 list_for_each_entry(scan, layers, lo_linkage) {
165                         if (scan->lo_flags & LU_OBJECT_ALLOCATED)
166                                 continue;
167                         clean = 0;
168                         scan->lo_header = top->lo_header;
169                         result = scan->lo_ops->loo_object_init(env, scan, conf);
170                         if (result != 0) {
171                                 lu_object_free(env, top);
172                                 RETURN(ERR_PTR(result));
173                         }
174                         scan->lo_flags |= LU_OBJECT_ALLOCATED;
175                 }
176         } while (!clean);
177
178         list_for_each_entry_reverse(scan, layers, lo_linkage) {
179                 if (scan->lo_ops->loo_object_start != NULL) {
180                         result = scan->lo_ops->loo_object_start(env, scan);
181                         if (result != 0) {
182                                 lu_object_free(env, top);
183                                 RETURN(ERR_PTR(result));
184                         }
185                 }
186         }
187
188         dev->ld_site->ls_stats.s_created ++;
189         RETURN(top);
190 }
191
192 /**
193  * Free an object.
194  */
195 static void lu_object_free(const struct lu_env *env, struct lu_object *o)
196 {
197         struct list_head splice;
198         struct lu_object *scan;
199         struct lu_site          *site;
200         struct list_head        *layers;
201
202         site   = o->lo_dev->ld_site;
203         layers = &o->lo_header->loh_layers;
204         /*
205          * First call ->loo_object_delete() method to release all resources.
206          */
207         list_for_each_entry_reverse(scan, layers, lo_linkage) {
208                 if (scan->lo_ops->loo_object_delete != NULL)
209                         scan->lo_ops->loo_object_delete(env, scan);
210         }
211
212         /*
213          * Then, splice object layers into a stand-alone list, and call
214          * ->loo_object_free() on all layers to free memory. Splicing is
215          * necessary because lu_object_header is freed together with the
216          * top-level slice.
217          */
218         CFS_INIT_LIST_HEAD(&splice);
219         list_splice_init(layers, &splice);
220         while (!list_empty(&splice)) {
221                 /*
222                  * Free layers in bottom-to-top order, so that object header
223                  * lives as long as possible and ->loo_object_free() methods
224                  * can look at its contents.
225                  */
226                 o = container_of0(splice.prev, struct lu_object, lo_linkage);
227                 list_del_init(&o->lo_linkage);
228                 LASSERT(o->lo_ops->loo_object_free != NULL);
229                 o->lo_ops->loo_object_free(env, o);
230         }
231         cfs_waitq_broadcast(&site->ls_marche_funebre);
232 }
233
234 /**
235  * Free \a nr objects from the cold end of the site LRU list.
236  */
237 int lu_site_purge(const struct lu_env *env, struct lu_site *s, int nr)
238 {
239         struct list_head         dispose;
240         struct lu_object_header *h;
241         struct lu_object_header *temp;
242
243         CFS_INIT_LIST_HEAD(&dispose);
244         /*
245          * Under LRU list lock, scan LRU list and move unreferenced objects to
246          * the dispose list, removing them from LRU and hash table.
247          */
248         write_lock(&s->ls_guard);
249         list_for_each_entry_safe(h, temp, &s->ls_lru, loh_lru) {
250                 /*
251                  * Objects are sorted in LRU order, and "busy" objects (ones
252                  * with h->loh_ref > 0) naturally tend to live near the hot end
253                  * that we scan last. Unfortunately, sites usually have a small
254                  * (less than ten) number of busy yet rarely accessed objects
255                  * (some global objects, accessed directly through pointers,
256                  * bypassing the hash table). Currently the algorithm scans them
257                  * over and over again. Probably we should move busy objects
258                  * out of the LRU, or we can live with that.
259                  */
260                 if (nr-- == 0)
261                         break;
262                 if (atomic_read(&h->loh_ref) > 0)
263                         continue;
264                 hlist_del_init(&h->loh_hash);
265                 list_move(&h->loh_lru, &dispose);
266                 s->ls_total --;
267         }
268         write_unlock(&s->ls_guard);
269         /*
270          * Free everything on the dispose list. This is safe against races due
271          * to the reasons described in lu_object_put().
272          */
273         while (!list_empty(&dispose)) {
274                 h = container_of0(dispose.next,
275                                  struct lu_object_header, loh_lru);
276                 list_del_init(&h->loh_lru);
277                 lu_object_free(env, lu_object_top(h));
278                 s->ls_stats.s_lru_purged ++;
279         }
280         return nr;
281 }
282 EXPORT_SYMBOL(lu_site_purge);
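
/*
 * A minimal sketch of a caller shrinking a site under memory pressure (the
 * "env" and "site" variables and the batch size of 128 are assumptions made
 * for the example; compare lu_cache_shrink() below):
 *
 *      int remain;
 *
 *      remain = lu_site_purge(env, site, 128);
 *
 * The return value is the remaining scan budget, which lu_cache_shrink()
 * uses to spread purging fairly across all registered sites.
 */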
283
284 /*
285  * Object printing.
286  *
287  * The code below has to jump through certain hoops to output an object
288  * description into the libcfs_debug_msg-based log. The problem is that
289  * lu_object_print() composes the object description from strings that are
290  * parts of _lines_ of output (i.e., strings that are not terminated by a
291  * newline). This doesn't fit very well into the libcfs_debug_msg() interface,
292  * which assumes that each message supplied to it is a self-contained output
293  * line.
294  *
295  * To work around this, strings are collected in a temporary buffer
296  * (implemented as a value of the lu_global_key key), until a terminating
297  * newline character is detected.
298  */
299
300 enum {
301         /**
302          * Maximal line size.
303          *
304          * XXX overflow is not handled correctly.
305          */
306         LU_CDEBUG_LINE = 256
307 };
308
309 struct lu_cdebug_data {
310         /**
311          * Temporary buffer.
312          */
313         char lck_area[LU_CDEBUG_LINE];
314 };
315
316 /* context key constructor/destructor: lu_global_key_init, lu_global_key_fini */
317 LU_KEY_INIT_FINI(lu_global, struct lu_cdebug_data);
318
319 /**
320  * Key holding the temporary buffer. This key is registered very early by
321  * lu_global_init().
322  */
323 struct lu_context_key lu_global_key = {
324         .lct_tags = LCT_MD_THREAD|LCT_DT_THREAD|LCT_CL_THREAD,
325         .lct_init = lu_global_key_init,
326         .lct_fini = lu_global_key_fini
327 };
328
329 /**
330  * Printer function emitting messages through libcfs_debug_msg().
331  */
332 int lu_cdebug_printer(const struct lu_env *env,
333                       void *cookie, const char *format, ...)
334 {
335         struct lu_cdebug_print_info *info = cookie;
336         struct lu_cdebug_data       *key;
337         int used;
338         int complete;
339         va_list args;
340
341         va_start(args, format);
342
343         key = lu_context_key_get(&env->le_ctx, &lu_global_key);
344         LASSERT(key != NULL);
345
346         used = strlen(key->lck_area);
347         complete = format[strlen(format) - 1] == '\n';
348         /*
349          * Append new chunk to the buffer.
350          */
351         vsnprintf(key->lck_area + used,
352                   ARRAY_SIZE(key->lck_area) - used, format, args);
353         if (complete) {
354                 libcfs_debug_msg(NULL, info->lpi_subsys, info->lpi_mask,
355                                  (char *)info->lpi_file, info->lpi_fn,
356                                  info->lpi_line, "%s", key->lck_area);
357                 key->lck_area[0] = 0;
358         }
359         va_end(args);
360         return 0;
361 }
362 EXPORT_SYMBOL(lu_cdebug_printer);
363
364 /*
365  * Print object header.
366  */
367 static void lu_object_header_print(const struct lu_env *env,
368                                    void *cookie, lu_printer_t printer,
369                                    const struct lu_object_header *hdr)
370 {
371         (*printer)(env, cookie, "header@%p[%#lx, %d, "DFID"%s%s%s]",
372                    hdr, hdr->loh_flags, atomic_read(&hdr->loh_ref),
373                    PFID(&hdr->loh_fid),
374                    hlist_unhashed(&hdr->loh_hash) ? "" : " hash",
375                    list_empty(&hdr->loh_lru) ? "" : " lru",
376                    hdr->loh_attr & LOHA_EXISTS ? " exist":"");
377 }
378
379 /*
380  * Print a human-readable representation of \a o to \a printer.
381  */
382 void lu_object_print(const struct lu_env *env, void *cookie,
383                      lu_printer_t printer, const struct lu_object *o)
384 {
385         static const char ruler[] = "........................................";
386         struct lu_object_header *top;
387         int depth;
388
389         top = o->lo_header;
390         lu_object_header_print(env, cookie, printer, top);
391         (*printer)(env, cookie, "\n");
392         list_for_each_entry(o, &top->loh_layers, lo_linkage) {
393                 depth = o->lo_depth + 4;
394                 LASSERT(o->lo_ops->loo_object_print != NULL);
395                 /*
396                  * print `.' @depth times.
397                  */
398                 (*printer)(env, cookie, "%*.*s", depth, depth, ruler);
399                 o->lo_ops->loo_object_print(env, cookie, printer, o);
400                 (*printer)(env, cookie, "\n");
401         }
402 }
403 EXPORT_SYMBOL(lu_object_print);
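
/*
 * A minimal sketch of dumping a single object into the debug log using the
 * printer above (the same cookie/printer pairing is used for a whole site in
 * lu_stack_fini() below); "env" and "obj" are assumed to be provided:
 *
 *      static DECLARE_LU_CDEBUG_PRINT_INFO(cookie, D_INFO);
 *
 *      lu_object_print(env, &cookie, lu_cdebug_printer, obj);
 */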
404
405 /*
406  * Check object consistency.
407  */
408 int lu_object_invariant(const struct lu_object *o)
409 {
410         struct lu_object_header *top;
411
412         top = o->lo_header;
413         list_for_each_entry(o, &top->loh_layers, lo_linkage) {
414                 if (o->lo_ops->loo_object_invariant != NULL &&
415                     !o->lo_ops->loo_object_invariant(o))
416                         return 0;
417         }
418         return 1;
419 }
420 EXPORT_SYMBOL(lu_object_invariant);
421
422 static struct lu_object *htable_lookup(struct lu_site *s,
423                                        const struct hlist_head *bucket,
424                                        const struct lu_fid *f,
425                                        cfs_waitlink_t *waiter)
426 {
427         struct lu_object_header *h;
428         struct hlist_node *scan;
429
430         hlist_for_each_entry(h, scan, bucket, loh_hash) {
431                 s->ls_stats.s_cache_check ++;
432                 if (likely(lu_fid_eq(&h->loh_fid, f))) {
433                         if (unlikely(lu_object_is_dying(h))) {
434                                 /*
435                                  * Lookup found an object being destroyed;
436                                  * this object cannot be returned (to ensure
437                                  * that references to dying objects are
438                                  * eventually drained), and moreover, the
439                                  * lookup has to wait until the object is freed.
440                                  */
441                                 cfs_waitlink_init(waiter);
442                                 cfs_waitq_add(&s->ls_marche_funebre, waiter);
443                                 set_current_state(CFS_TASK_UNINT);
444                                 s->ls_stats.s_cache_death_race ++;
445                                 return ERR_PTR(-EAGAIN);
446                         }
447                         /* bump reference count... */
448                         if (atomic_add_return(1, &h->loh_ref) == 1)
449                                 ++ s->ls_busy;
450                         /* and move to the head of the LRU */
451                         /*
452                          * XXX temporary disable this to measure effects of
453                          * read-write locking.
454                          */
455                         /* list_move_tail(&h->loh_lru, &s->ls_lru); */
456                         s->ls_stats.s_cache_hit ++;
457                         return lu_object_top(h);
458                 }
459         }
460         s->ls_stats.s_cache_miss ++;
461         return NULL;
462 }
463
464 static __u32 fid_hash(const struct lu_fid *f, int bits)
465 {
466         /* all objects with the same id and different versions will belong to
467          * the same collision list. */
468         return hash_long(fid_flatten(f), bits);
469 }
470
471 /**
472  * Search the cache for an object with the fid \a f. If such an object is
473  * found, return it. Otherwise, create a new object, insert it into the cache
474  * and return it. In either case, the returned object carries an extra reference.
475  */
476 struct lu_object *lu_object_find(const struct lu_env *env,
477                                  struct lu_device *dev, const struct lu_fid *f,
478                                  const struct lu_object_conf *conf)
479 {
480         return lu_object_find_at(env, dev->ld_site->ls_top_dev, f, conf);
481 }
482 EXPORT_SYMBOL(lu_object_find);
483
484 /**
485  * Core logic of lu_object_find*() functions.
486  */
487 static struct lu_object *lu_object_find_try(const struct lu_env *env,
488                                             struct lu_device *dev,
489                                             const struct lu_fid *f,
490                                             const struct lu_object_conf *conf,
491                                             cfs_waitlink_t *waiter)
492 {
493         struct lu_site    *s;
494         struct lu_object     *o;
495         struct lu_object     *shadow;
496         struct hlist_head *bucket;
497
498         /*
499          * This uses standard index maintenance protocol:
500          *
501          *     - search index under lock, and return object if found;
502          *     - otherwise, unlock index, allocate new object;
503          *     - lock index and search again;
504          *     - if nothing is found (usual case), insert newly created
505          *       object into index;
506          *     - otherwise (race: other thread inserted object), free
507          *       object just allocated.
508          *     - unlock index;
509          *     - return object.
510          *
511          * If dying object is found during index search, add @waiter to the
512          * site wait-queue and return ERR_PTR(-EAGAIN).
513          */
514
515         s = dev->ld_site;
516         bucket = s->ls_hash + fid_hash(f, s->ls_hash_bits);
517
518         read_lock(&s->ls_guard);
519         o = htable_lookup(s, bucket, f, waiter);
520         read_unlock(&s->ls_guard);
521
522         if (o != NULL)
523                 return o;
524
525         /*
526          * Allocate new object. This may result in rather complicated
527          * operations, including fld queries, inode loading, etc.
528          */
529         o = lu_object_alloc(env, dev, f, conf);
530         if (unlikely(IS_ERR(o)))
531                 return o;
532
533         LASSERT(lu_fid_eq(lu_object_fid(o), f));
534
535         write_lock(&s->ls_guard);
536         shadow = htable_lookup(s, bucket, f, waiter);
537         if (likely(shadow == NULL)) {
538                 hlist_add_head(&o->lo_header->loh_hash, bucket);
539                 list_add_tail(&o->lo_header->loh_lru, &s->ls_lru);
540                 ++ s->ls_busy;
541                 ++ s->ls_total;
542                 shadow = o;
543                 o = NULL;
544         } else
545                 s->ls_stats.s_cache_race ++;
546         write_unlock(&s->ls_guard);
547         if (o != NULL)
548                 lu_object_free(env, o);
549         return shadow;
550 }
551
552 /**
553  * Much like lu_object_find(), but the top-level device of the object is
554  * \a dev rather than the top-level device of the site. This interface
555  * allows objects with different "stacking" to be created within the same site.
556  */
557 struct lu_object *lu_object_find_at(const struct lu_env *env,
558                                     struct lu_device *dev,
559                                     const struct lu_fid *f,
560                                     const struct lu_object_conf *conf)
561 {
562         struct lu_object *obj;
563         cfs_waitlink_t    wait;
564
565         while (1) {
566                 obj = lu_object_find_try(env, dev, f, conf, &wait);
567                 if (obj == ERR_PTR(-EAGAIN)) {
568                         /*
569                          * lu_object_find_try() already added waiter into the
570                          * wait queue.
571                          */
572                         cfs_waitq_wait(&wait, CFS_TASK_UNINT);
573                         cfs_waitq_del(&dev->ld_site->ls_marche_funebre, &wait);
574                 } else
575                         break;
576         }
577         return obj;
578 }
579 EXPORT_SYMBOL(lu_object_find_at);
580
581 /**
582  * Find object with given fid, and return its slice belonging to given device.
583  */
584 struct lu_object *lu_object_find_slice(const struct lu_env *env,
585                                        struct lu_device *dev,
586                                        const struct lu_fid *f,
587                                        const struct lu_object_conf *conf)
588 {
589         struct lu_object *top;
590         struct lu_object *obj;
591
592         top = lu_object_find(env, dev, f, conf);
593         if (!IS_ERR(top)) {
594                 obj = lu_object_locate(top->lo_header, dev->ld_type);
595                 if (obj == NULL)
596                         lu_object_put(env, top);
597         } else
598                 obj = top;
599         return obj;
600 }
601 EXPORT_SYMBOL(lu_object_find_slice);
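
/*
 * A minimal usage sketch ("env", "dev" and "fid" are assumed to be supplied
 * by the caller). The reference is held on the compound object, so the slice
 * returned here can be released with lu_object_put() like any other slice:
 *
 *      struct lu_object *slice;
 *
 *      slice = lu_object_find_slice(env, dev, fid, NULL);
 *      if (IS_ERR(slice) || slice == NULL)
 *              goto out;
 *      ...
 *      lu_object_put(env, slice);
 */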
602
603 /**
604  * Global list of all device types.
605  */
606 static CFS_LIST_HEAD(lu_device_types);
607
608 int lu_device_type_init(struct lu_device_type *ldt)
609 {
610         int result;
611
612         CFS_INIT_LIST_HEAD(&ldt->ldt_linkage);
613         result = ldt->ldt_ops->ldto_init(ldt);
614         if (result == 0)
615                 list_add(&ldt->ldt_linkage, &lu_device_types);
616         return result;
617 }
618 EXPORT_SYMBOL(lu_device_type_init);
619
620 void lu_device_type_fini(struct lu_device_type *ldt)
621 {
622         list_del_init(&ldt->ldt_linkage);
623         ldt->ldt_ops->ldto_fini(ldt);
624 }
625 EXPORT_SYMBOL(lu_device_type_fini);
626
627 void lu_types_stop(void)
628 {
629         struct lu_device_type *ldt;
630
631         list_for_each_entry(ldt, &lu_device_types, ldt_linkage) {
632                 if (ldt->ldt_device_nr == 0)
633                         ldt->ldt_ops->ldto_stop(ldt);
634         }
635 }
636 EXPORT_SYMBOL(lu_types_stop);
637
638 /**
639  * Global list of all sites on this node
640  */
641 static CFS_LIST_HEAD(lu_sites);
642 static DECLARE_MUTEX(lu_sites_guard);
643
644 /**
645  * Global environment used by site shrinker.
646  */
647 static struct lu_env lu_shrink_env;
648
649 /**
650  * Print all objects in \a s.
651  */
652 void lu_site_print(const struct lu_env *env, struct lu_site *s, void *cookie,
653                    lu_printer_t printer)
654 {
655         int i;
656
657         for (i = 0; i < s->ls_hash_size; ++i) {
658                 struct lu_object_header *h;
659                 struct hlist_node       *scan;
660
661                 read_lock(&s->ls_guard);
662                 hlist_for_each_entry(h, scan, &s->ls_hash[i], loh_hash) {
663
664                         if (!list_empty(&h->loh_layers)) {
665                                 const struct lu_object *obj;
666
667                                 obj = lu_object_top(h);
668                                 lu_object_print(env, cookie, printer, obj);
669                         } else
670                                 lu_object_header_print(env, cookie, printer, h);
671                 }
672                 read_unlock(&s->ls_guard);
673         }
674 }
675 EXPORT_SYMBOL(lu_site_print);
676
677 enum {
678         LU_CACHE_PERCENT   = 20,
679 };
680
681 /**
682  * Return desired hash table order.
683  */
684 static int lu_htable_order(void)
685 {
686         unsigned long cache_size;
687         int bits;
688
689         /*
690          * Calculate hash table size, assuming that we want reasonable
691          * performance when 20% of total memory is occupied by cache of
692          * lu_objects.
693          *
694          * Size of an lu_object is (arbitrarily) taken as 1K (together with inode).
695          */
696         cache_size = num_physpages;
697
698 #if BITS_PER_LONG == 32
699         /* limit hashtable size for lowmem systems to low RAM */
700         if (cache_size > 1 << (30 - CFS_PAGE_SHIFT))
701                 cache_size = (1 << (30 - CFS_PAGE_SHIFT)) * 3 / 4;
702 #endif
703
704         cache_size = cache_size / 100 * LU_CACHE_PERCENT *
705                 (CFS_PAGE_SIZE / 1024);
706
707         for (bits = 1; (1 << bits) < cache_size; ++bits) {
708                 ;
709         }
710         return bits;
711 }
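
/*
 * A worked example of the calculation above, assuming 4GB of RAM with 4KB
 * pages (num_physpages = 2^20) on a 64-bit machine:
 *
 *      cache_size = 2^20 / 100 * 20 * (4096 / 1024)
 *                 = 10485 * 20 * 4 = 838800 objects,
 *
 * and the smallest power of two not below that is 2^20, so the function
 * returns 20 bits.
 */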
712
713 static struct lock_class_key lu_site_guard_class;
714
715 /**
716  * Initialize site \a s, with \a top as the top-level device.
717  */
718 int lu_site_init(struct lu_site *s, struct lu_device *top)
719 {
720         int bits;
721         int size;
722         int i;
723         ENTRY;
724
725         memset(s, 0, sizeof *s);
726         rwlock_init(&s->ls_guard);
727         lockdep_set_class(&s->ls_guard, &lu_site_guard_class);
728         CFS_INIT_LIST_HEAD(&s->ls_lru);
729         CFS_INIT_LIST_HEAD(&s->ls_linkage);
730         cfs_waitq_init(&s->ls_marche_funebre);
731         s->ls_top_dev = top;
732         top->ld_site = s;
733         lu_device_get(top);
734         lu_ref_add(&top->ld_reference, "site-top", s);
735
736         for (bits = lu_htable_order(), size = 1 << bits;
737              (s->ls_hash =
738               cfs_alloc_large(size * sizeof s->ls_hash[0])) == NULL;
739              --bits, size >>= 1) {
740                 /*
741                  * Scale hash table down, until allocation succeeds.
742                  */
743                 ;
744         }
745
746         s->ls_hash_size = size;
747         s->ls_hash_bits = bits;
748         s->ls_hash_mask = size - 1;
749
750         for (i = 0; i < size; i++)
751                 INIT_HLIST_HEAD(&s->ls_hash[i]);
752
753         RETURN(0);
754 }
755 EXPORT_SYMBOL(lu_site_init);
756
757 /**
758  * Finalize \a s and release its resources.
759  */
760 void lu_site_fini(struct lu_site *s)
761 {
762         LASSERT(list_empty(&s->ls_lru));
763         LASSERT(s->ls_total == 0);
764
765         down(&lu_sites_guard);
766         list_del_init(&s->ls_linkage);
767         up(&lu_sites_guard);
768
769         if (s->ls_hash != NULL) {
770                 int i;
771                 for (i = 0; i < s->ls_hash_size; i++)
772                         LASSERT(hlist_empty(&s->ls_hash[i]));
773                 cfs_free_large(s->ls_hash);
774                 s->ls_hash = NULL;
775         }
776         if (s->ls_top_dev != NULL) {
777                 s->ls_top_dev->ld_site = NULL;
778                 lu_ref_del(&s->ls_top_dev->ld_reference, "site-top", s);
779                 lu_device_put(s->ls_top_dev);
780                 s->ls_top_dev = NULL;
781         }
782 }
783 EXPORT_SYMBOL(lu_site_fini);
784
785 /**
786  * Called when initialization of stack for this site is completed.
787  */
788 int lu_site_init_finish(struct lu_site *s)
789 {
790         int result;
791         down(&lu_sites_guard);
792         result = lu_context_refill(&lu_shrink_env.le_ctx);
793         if (result == 0)
794                 list_add(&s->ls_linkage, &lu_sites);
795         up(&lu_sites_guard);
796         return result;
797 }
798 EXPORT_SYMBOL(lu_site_init_finish);
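
/*
 * A sketch of the expected site life-cycle (the top-level device "top", the
 * environment and the embedding of the site are all assumptions; real stacks
 * are torn down through lu_stack_fini() below):
 *
 *      struct lu_site site;
 *      int rc;
 *
 *      rc = lu_site_init(&site, top);
 *      if (rc == 0)
 *              rc = lu_site_init_finish(&site);
 *      ... create and use objects ...
 *      lu_site_purge(env, &site, ~0);
 *      lu_site_fini(&site);
 */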
799
800 /**
801  * Acquire additional reference on device \a d
802  */
803 void lu_device_get(struct lu_device *d)
804 {
805         atomic_inc(&d->ld_ref);
806 }
807 EXPORT_SYMBOL(lu_device_get);
808
809 /**
810  * Release reference on device \a d.
811  */
812 void lu_device_put(struct lu_device *d)
813 {
814         LASSERT(atomic_read(&d->ld_ref) > 0);
815         atomic_dec(&d->ld_ref);
816 }
817 EXPORT_SYMBOL(lu_device_put);
818
819 /**
820  * Initialize device \a d of type \a t.
821  */
822 int lu_device_init(struct lu_device *d, struct lu_device_type *t)
823 {
824         if (t->ldt_device_nr++ == 0 && t->ldt_ops->ldto_start != NULL)
825                 t->ldt_ops->ldto_start(t);
826         memset(d, 0, sizeof *d);
827         atomic_set(&d->ld_ref, 0);
828         d->ld_type = t;
829         lu_ref_init(&d->ld_reference);
830         return 0;
831 }
832 EXPORT_SYMBOL(lu_device_init);
833
834 /**
835  * Finalize device \a d.
836  */
837 void lu_device_fini(struct lu_device *d)
838 {
839         struct lu_device_type *t;
840
841         t = d->ld_type;
842         if (d->ld_obd != NULL)
843                 /* finish lprocfs */
844                 lprocfs_obd_cleanup(d->ld_obd);
845
846         lu_ref_fini(&d->ld_reference);
847         LASSERTF(atomic_read(&d->ld_ref) == 0,
848                  "Refcount is %u\n", atomic_read(&d->ld_ref));
849         LASSERT(t->ldt_device_nr > 0);
850         if (--t->ldt_device_nr == 0 && t->ldt_ops->ldto_stop != NULL)
851                 t->ldt_ops->ldto_stop(t);
852 }
853 EXPORT_SYMBOL(lu_device_fini);
854
855 /**
856  * Initialize object \a o that is part of compound object \a h and was created
857  * by device \a d.
858  */
859 int lu_object_init(struct lu_object *o,
860                    struct lu_object_header *h, struct lu_device *d)
861 {
862         memset(o, 0, sizeof *o);
863         o->lo_header = h;
864         o->lo_dev    = d;
865         lu_device_get(d);
866         o->lo_dev_ref = lu_ref_add(&d->ld_reference, "lu_object", o);
867         CFS_INIT_LIST_HEAD(&o->lo_linkage);
868         return 0;
869 }
870 EXPORT_SYMBOL(lu_object_init);
871
872 /**
873  * Finalize object and release its resources.
874  */
875 void lu_object_fini(struct lu_object *o)
876 {
877         struct lu_device *dev = o->lo_dev;
878
879         LASSERT(list_empty(&o->lo_linkage));
880
881         if (dev != NULL) {
882                 lu_ref_del_at(&dev->ld_reference,
883                               o->lo_dev_ref, "lu_object", o);
884                 lu_device_put(dev);
885                 o->lo_dev = NULL;
886         }
887 }
888 EXPORT_SYMBOL(lu_object_fini);
889
890 /**
891  * Add object \a o as first layer of compound object \a h
892  *
893  * This is typically called by the ->ldo_object_alloc() method of top-level
894  * device.
895  */
896 void lu_object_add_top(struct lu_object_header *h, struct lu_object *o)
897 {
898         list_move(&o->lo_linkage, &h->loh_layers);
899 }
900 EXPORT_SYMBOL(lu_object_add_top);
901
902 /**
903  * Add object \a o as a layer of compound object, going after \a before.
904  *
905  * This is typically called by the ->ldo_object_alloc() method of \a
906  * before->lo_dev.
907  */
908 void lu_object_add(struct lu_object *before, struct lu_object *o)
909 {
910         list_move(&o->lo_linkage, &before->lo_linkage);
911 }
912 EXPORT_SYMBOL(lu_object_add);
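
/*
 * A sketch of how the two helpers above are used during allocation (see
 * lu_object_alloc()); "obj", "dev", "under" and "below" are hypothetical.
 * A top-level device builds the header and its own slice in its
 * ->ldo_object_alloc() method:
 *
 *      lu_object_header_init(&obj->header);
 *      lu_object_init(&obj->slice, &obj->header, dev);
 *      lu_object_add_top(&obj->header, &obj->slice);
 *
 * while an intermediate layer's ->loo_object_init() allocates the slice of
 * the device below it and stacks it with lu_object_add():
 *
 *      below = under->ld_ops->ldo_object_alloc(env, o->lo_header, under);
 *      if (below == NULL)
 *              return -ENOMEM;
 *      lu_object_add(o, below);
 */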
913
914 /**
915  * Initialize compound object.
916  */
917 int lu_object_header_init(struct lu_object_header *h)
918 {
919         memset(h, 0, sizeof *h);
920         atomic_set(&h->loh_ref, 1);
921         INIT_HLIST_NODE(&h->loh_hash);
922         CFS_INIT_LIST_HEAD(&h->loh_lru);
923         CFS_INIT_LIST_HEAD(&h->loh_layers);
924         lu_ref_init(&h->loh_reference);
925         return 0;
926 }
927 EXPORT_SYMBOL(lu_object_header_init);
928
929 /**
930  * Finalize compound object.
931  */
932 void lu_object_header_fini(struct lu_object_header *h)
933 {
934         LASSERT(list_empty(&h->loh_layers));
935         LASSERT(list_empty(&h->loh_lru));
936         LASSERT(hlist_unhashed(&h->loh_hash));
937         lu_ref_fini(&h->loh_reference);
938 }
939 EXPORT_SYMBOL(lu_object_header_fini);
940
941 /**
942  * Given a compound object, find its slice, corresponding to the device type
943  * \a dtype.
944  */
945 struct lu_object *lu_object_locate(struct lu_object_header *h,
946                                    const struct lu_device_type *dtype)
947 {
948         struct lu_object *o;
949
950         list_for_each_entry(o, &h->loh_layers, lo_linkage) {
951                 if (o->lo_dev->ld_type == dtype)
952                         return o;
953         }
954         return NULL;
955 }
956 EXPORT_SYMBOL(lu_object_locate);
957
958
959
960 /**
961  * Finalize and free devices in the device stack.
962  * 
963  * Finalize device stack by purging object cache, and calling
964  * lu_device_type_operations::ldto_device_fini() and
965  * lu_device_type_operations::ldto_device_free() on all devices in the stack.
966  */
967 void lu_stack_fini(const struct lu_env *env, struct lu_device *top)
968 {
969         struct lu_site   *site = top->ld_site;
970         struct lu_device *scan;
971         struct lu_device *next;
972
973         lu_site_purge(env, site, ~0);
974         for (scan = top; scan != NULL; scan = next) {
975                 next = scan->ld_type->ldt_ops->ldto_device_fini(env, scan);
976                 lu_ref_del(&scan->ld_reference, "lu-stack", &lu_site_init);
977                 lu_device_put(scan);
978         }
979
980         /* purge again. */
981         lu_site_purge(env, site, ~0);
982
983         if (!list_empty(&site->ls_lru) || site->ls_total != 0) {
984                 /*
985                  * Uh-oh, objects still exist.
986                  */
987                 static DECLARE_LU_CDEBUG_PRINT_INFO(cookie, D_ERROR);
988
989                 lu_site_print(env, site, &cookie, lu_cdebug_printer);
990         }
991
992         for (scan = top; scan != NULL; scan = next) {
993                 const struct lu_device_type *ldt = scan->ld_type;
994                 struct obd_type             *type;
995
996                 next = ldt->ldt_ops->ldto_device_free(env, scan);
997                 type = ldt->ldt_obd_type;
998                 if (type != NULL) {
999                         type->typ_refcnt--;
1000                         class_put_type(type);
1001                 }
1002         }
1003 }
1004 EXPORT_SYMBOL(lu_stack_fini);
1005
1006 enum {
1007         /**
1008          * Maximal number of tld slots.
1009          */
1010         LU_CONTEXT_KEY_NR = 32
1011 };
1012
1013 static struct lu_context_key *lu_keys[LU_CONTEXT_KEY_NR] = { NULL, };
1014
1015 static spinlock_t lu_keys_guard = SPIN_LOCK_UNLOCKED;
1016
1017 /**
1018  * Global counter incremented whenever key is registered, unregistered,
1019  * Global counter incremented whenever a key is registered, unregistered,
1020  * revived or quiesced. This is used to avoid unnecessary calls to
1021  * are supposed to be externally serialized.
1022  */
1023 static unsigned key_set_version = 0;
1024
1025 /**
1026  * Register new key.
1027  */
1028 int lu_context_key_register(struct lu_context_key *key)
1029 {
1030         int result;
1031         int i;
1032
1033         LASSERT(key->lct_init != NULL);
1034         LASSERT(key->lct_fini != NULL);
1035         LASSERT(key->lct_tags != 0);
1036         LASSERT(key->lct_owner != NULL);
1037
1038         result = -ENFILE;
1039         spin_lock(&lu_keys_guard);
1040         for (i = 0; i < ARRAY_SIZE(lu_keys); ++i) {
1041                 if (lu_keys[i] == NULL) {
1042                         key->lct_index = i;
1043                         atomic_set(&key->lct_used, 1);
1044                         lu_keys[i] = key;
1045                         lu_ref_init(&key->lct_reference);
1046                         result = 0;
1047                         ++key_set_version;
1048                         break;
1049                 }
1050         }
1051         spin_unlock(&lu_keys_guard);
1052         return result;
1053 }
1054 EXPORT_SYMBOL(lu_context_key_register);
1055
1056 static void key_fini(struct lu_context *ctx, int index)
1057 {
1058         if (ctx->lc_value[index] != NULL) {
1059                 struct lu_context_key *key;
1060
1061                 key = lu_keys[index];
1062                 LASSERT(key != NULL);
1063                 LASSERT(key->lct_fini != NULL);
1064                 LASSERT(atomic_read(&key->lct_used) > 1);
1065
1066                 key->lct_fini(ctx, key, ctx->lc_value[index]);
1067                 lu_ref_del(&key->lct_reference, "ctx", ctx);
1068                 atomic_dec(&key->lct_used);
1069                 LASSERT(key->lct_owner != NULL);
1070                 if (!(ctx->lc_tags & LCT_NOREF)) {
1071                         LASSERT(module_refcount(key->lct_owner) > 0);
1072                         module_put(key->lct_owner);
1073                 }
1074                 ctx->lc_value[index] = NULL;
1075         }
1076 }
1077
1078 /**
1079  * Deregister key.
1080  */
1081 void lu_context_key_degister(struct lu_context_key *key)
1082 {
1083         LASSERT(atomic_read(&key->lct_used) >= 1);
1084         LINVRNT(0 <= key->lct_index && key->lct_index < ARRAY_SIZE(lu_keys));
1085
1086         ++key_set_version;
1087         key_fini(&lu_shrink_env.le_ctx, key->lct_index);
1088
1089         if (atomic_read(&key->lct_used) > 1)
1090                 CERROR("key has instances.\n");
1091         spin_lock(&lu_keys_guard);
1092         lu_keys[key->lct_index] = NULL;
1093         spin_unlock(&lu_keys_guard);
1094 }
1095 EXPORT_SYMBOL(lu_context_key_degister);
1096
1097 /**
1098  * Register a number of keys. This has to be called after all keys have been
1099  * initialized by a call to LU_CONTEXT_KEY_INIT().
1100  */
1101 int lu_context_key_register_many(struct lu_context_key *k, ...)
1102 {
1103         struct lu_context_key *key = k;
1104         va_list args;
1105         int result;
1106
1107         va_start(args, k);
1108         do {
1109                 result = lu_context_key_register(key);
1110                 if (result)
1111                         break;
1112                 key = va_arg(args, struct lu_context_key *);
1113         } while (key != NULL);
1114         va_end(args);
1115
1116         if (result != 0) {
1117                 va_start(args, k);
1118                 while (k != key) {
1119                         lu_context_key_degister(k);
1120                         k = va_arg(args, struct lu_context_key *);
1121                 }
1122                 va_end(args);
1123         }
1124
1125         return result;
1126 }
1127 EXPORT_SYMBOL(lu_context_key_register_many);
1128
1129 /**
1130  * De-register a number of keys. This is a dual to
1131  * lu_context_key_register_many().
1132  */
1133 void lu_context_key_degister_many(struct lu_context_key *k, ...)
1134 {
1135         va_list args;
1136
1137         va_start(args, k);
1138         do {
1139                 lu_context_key_degister(k);
1140                 k = va_arg(args, struct lu_context_key*);
1141         } while (k != NULL);
1142         va_end(args);
1143 }
1144 EXPORT_SYMBOL(lu_context_key_degister_many);
1145
1146 /**
1147  * Revive a number of keys.
1148  */
1149 void lu_context_key_revive_many(struct lu_context_key *k, ...)
1150 {
1151         va_list args;
1152
1153         va_start(args, k);
1154         do {
1155                 lu_context_key_revive(k);
1156                 k = va_arg(args, struct lu_context_key*);
1157         } while (k != NULL);
1158         va_end(args);
1159 }
1160 EXPORT_SYMBOL(lu_context_key_revive_many);
1161
1162 /**
1163  * Quiesce a number of keys.
1164  */
1165 void lu_context_key_quiesce_many(struct lu_context_key *k, ...)
1166 {
1167         va_list args;
1168
1169         va_start(args, k);
1170         do {
1171                 lu_context_key_quiesce(k);
1172                 k = va_arg(args, struct lu_context_key*);
1173         } while (k != NULL);
1174         va_end(args);
1175 }
1176 EXPORT_SYMBOL(lu_context_key_quiesce_many);
1177
1178 /**
1179  * Return value associated with key \a key in context \a ctx.
1180  */
1181 void *lu_context_key_get(const struct lu_context *ctx,
1182                          const struct lu_context_key *key)
1183 {
1184         LINVRNT(ctx->lc_state == LCS_ENTERED);
1185         LINVRNT(0 <= key->lct_index && key->lct_index < ARRAY_SIZE(lu_keys));
1186         return ctx->lc_value[key->lct_index];
1187 }
1188 EXPORT_SYMBOL(lu_context_key_get);
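
/*
 * A sketch of defining and using a context key, following the pattern of
 * lu_global_key above ("foo" and struct foo_thread_info are hypothetical):
 *
 *      LU_KEY_INIT_FINI(foo, struct foo_thread_info);
 *
 *      struct lu_context_key foo_thread_key = {
 *              .lct_tags = LCT_MD_THREAD,
 *              .lct_init = foo_key_init,
 *              .lct_fini = foo_key_fini
 *      };
 *
 * At module setup, LU_CONTEXT_KEY_INIT(&foo_thread_key) fills in the owner
 * and lu_context_key_register(&foo_thread_key) publishes the key; afterwards
 * any entered context with a matching tag can retrieve its value:
 *
 *      info = lu_context_key_get(&env->le_ctx, &foo_thread_key);
 */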
1189
1190 /**
1191  * List of remembered contexts. XXX document me.
1192  */
1193 static CFS_LIST_HEAD(lu_context_remembered);
1194
1195 /**
1196  * Destroy \a key in all remembered contexts. This is used to destroy key
1197  * values in "shared" contexts (like service threads), when a module owning
1198  * the key is about to be unloaded.
1199  */
1200 void lu_context_key_quiesce(struct lu_context_key *key)
1201 {
1202         struct lu_context *ctx;
1203
1204         if (!(key->lct_tags & LCT_QUIESCENT)) {
1205                 key->lct_tags |= LCT_QUIESCENT;
1206                 /*
1207                  * XXX memory barrier has to go here.
1208                  */
1209                 spin_lock(&lu_keys_guard);
1210                 list_for_each_entry(ctx, &lu_context_remembered, lc_remember)
1211                         key_fini(ctx, key->lct_index);
1212                 spin_unlock(&lu_keys_guard);
1213                 ++key_set_version;
1214         }
1215 }
1216 EXPORT_SYMBOL(lu_context_key_quiesce);
1217
1218 void lu_context_key_revive(struct lu_context_key *key)
1219 {
1220         key->lct_tags &= ~LCT_QUIESCENT;
1221         ++key_set_version;
1222 }
1223 EXPORT_SYMBOL(lu_context_key_revive);
1224
1225 static void keys_fini(struct lu_context *ctx)
1226 {
1227         int i;
1228
1229         if (ctx->lc_value != NULL) {
1230                 for (i = 0; i < ARRAY_SIZE(lu_keys); ++i)
1231                         key_fini(ctx, i);
1232                 OBD_FREE(ctx->lc_value,
1233                          ARRAY_SIZE(lu_keys) * sizeof ctx->lc_value[0]);
1234                 ctx->lc_value = NULL;
1235         }
1236 }
1237
1238 static int keys_fill(struct lu_context *ctx)
1239 {
1240         int i;
1241
1242         for (i = 0; i < ARRAY_SIZE(lu_keys); ++i) {
1243                 struct lu_context_key *key;
1244
1245                 key = lu_keys[i];
1246                 if (ctx->lc_value[i] == NULL && key != NULL &&
1247                     (key->lct_tags & ctx->lc_tags) &&
1248                     /*
1249                      * Don't create values for a LCT_QUIESCENT key, as this
1250                      * will pin module owning a key.
1251                      */
1252                     !(key->lct_tags & LCT_QUIESCENT)) {
1253                         void *value;
1254
1255                         LINVRNT(key->lct_init != NULL);
1256                         LINVRNT(key->lct_index == i);
1257
1258                         value = key->lct_init(ctx, key);
1259                         if (unlikely(IS_ERR(value)))
1260                                 return PTR_ERR(value);
1261                         LASSERT(key->lct_owner != NULL);
1262                         if (!(ctx->lc_tags & LCT_NOREF))
1263                                 try_module_get(key->lct_owner);
1264                         lu_ref_add_atomic(&key->lct_reference, "ctx", ctx);
1265                         atomic_inc(&key->lct_used);
1266                         /*
1267                          * This is the only place in the code, where an
1268                          * element of ctx->lc_value[] array is set to non-NULL
1269                          * value.
1270                          */
1271                         ctx->lc_value[i] = value;
1272                         if (key->lct_exit != NULL)
1273                                 ctx->lc_tags |= LCT_HAS_EXIT;
1274                 }
1275                 ctx->lc_version = key_set_version;
1276         }
1277         return 0;
1278 }
1279
1280 static int keys_init(struct lu_context *ctx)
1281 {
1282         int result;
1283
1284         OBD_ALLOC(ctx->lc_value, ARRAY_SIZE(lu_keys) * sizeof ctx->lc_value[0]);
1285         if (likely(ctx->lc_value != NULL))
1286                 result = keys_fill(ctx);
1287         else
1288                 result = -ENOMEM;
1289
1290         if (result != 0)
1291                 keys_fini(ctx);
1292         return result;
1293 }
1294
1295 /**
1296  * Initialize context data-structure. Create values for all keys.
1297  */
1298 int lu_context_init(struct lu_context *ctx, __u32 tags)
1299 {
1300         memset(ctx, 0, sizeof *ctx);
1301         ctx->lc_state = LCS_INITIALIZED;
1302         ctx->lc_tags = tags;
1303         if (tags & LCT_REMEMBER) {
1304                 spin_lock(&lu_keys_guard);
1305                 list_add(&ctx->lc_remember, &lu_context_remembered);
1306                 spin_unlock(&lu_keys_guard);
1307         } else
1308                 CFS_INIT_LIST_HEAD(&ctx->lc_remember);
1309         return keys_init(ctx);
1310 }
1311 EXPORT_SYMBOL(lu_context_init);
1312
1313 /**
1314  * Finalize context data-structure. Destroy key values.
1315  */
1316 void lu_context_fini(struct lu_context *ctx)
1317 {
1318         LINVRNT(ctx->lc_state == LCS_INITIALIZED || ctx->lc_state == LCS_LEFT);
1319         ctx->lc_state = LCS_FINALIZED;
1320         keys_fini(ctx);
1321         spin_lock(&lu_keys_guard);
1322         list_del_init(&ctx->lc_remember);
1323         spin_unlock(&lu_keys_guard);
1324 }
1325 EXPORT_SYMBOL(lu_context_fini);
1326
1327 /**
1328  * Called before entering context.
1329  */
1330 void lu_context_enter(struct lu_context *ctx)
1331 {
1332         LINVRNT(ctx->lc_state == LCS_INITIALIZED || ctx->lc_state == LCS_LEFT);
1333         ctx->lc_state = LCS_ENTERED;
1334 }
1335 EXPORT_SYMBOL(lu_context_enter);
1336
1337 /**
1338  * Called after exiting from \a ctx
1339  */
1340 void lu_context_exit(struct lu_context *ctx)
1341 {
1342         int i;
1343
1344         LINVRNT(ctx->lc_state == LCS_ENTERED);
1345         ctx->lc_state = LCS_LEFT;
1346         if (ctx->lc_tags & LCT_HAS_EXIT && ctx->lc_value != NULL) {
1347                 for (i = 0; i < ARRAY_SIZE(lu_keys); ++i) {
1348                         if (ctx->lc_value[i] != NULL) {
1349                                 struct lu_context_key *key;
1350
1351                                 key = lu_keys[i];
1352                                 LASSERT(key != NULL);
1353                                 if (key->lct_exit != NULL)
1354                                         key->lct_exit(ctx,
1355                                                       key, ctx->lc_value[i]);
1356                         }
1357                 }
1358         }
1359 }
1360 EXPORT_SYMBOL(lu_context_exit);
1361
1362 /**
1363  * Allocate for context all missing keys that were registered after context
1364  * creation.
1365  */
1366 int lu_context_refill(struct lu_context *ctx)
1367 {
1368         LINVRNT(ctx->lc_value != NULL);
1369         return ctx->lc_version == key_set_version ? 0 : keys_fill(ctx);
1370 }
1371 EXPORT_SYMBOL(lu_context_refill);
1372
1373 static int lu_env_setup(struct lu_env *env, struct lu_context *ses,
1374                         __u32 tags, int noref)
1375 {
1376         int result;
1377
1378         LINVRNT(ergo(!noref, !(tags & LCT_NOREF)));
1379
1380         env->le_ses = ses;
1381         result = lu_context_init(&env->le_ctx, tags);
1382         if (likely(result == 0))
1383                 lu_context_enter(&env->le_ctx);
1384         return result;
1385 }
1386
1387 static int lu_env_init_noref(struct lu_env *env, struct lu_context *ses,
1388                              __u32 tags)
1389 {
1390         return lu_env_setup(env, ses, tags, 1);
1391 }
1392
1393 int lu_env_init(struct lu_env *env, struct lu_context *ses, __u32 tags)
1394 {
1395         return lu_env_setup(env, ses, tags, 0);
1396 }
1397 EXPORT_SYMBOL(lu_env_init);
1398
1399 void lu_env_fini(struct lu_env *env)
1400 {
1401         lu_context_exit(&env->le_ctx);
1402         lu_context_fini(&env->le_ctx);
1403         env->le_ses = NULL;
1404 }
1405 EXPORT_SYMBOL(lu_env_fini);
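
/*
 * A minimal sketch of the environment life-cycle for a service thread (the
 * LCT_DT_THREAD tag is just an example; callers pass whatever LCT_* tags the
 * devices they use require). lu_env_init() creates the key values and enters
 * the context; lu_env_fini() exits and finalizes it:
 *
 *      struct lu_env env;
 *      int rc;
 *
 *      rc = lu_env_init(&env, NULL, LCT_DT_THREAD);
 *      if (rc != 0)
 *              return rc;
 *      ... lu_object_find(&env, ...), etc. ...
 *      lu_env_fini(&env);
 */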
1406
1407 int lu_env_refill(struct lu_env *env)
1408 {
1409         int result;
1410
1411         result = lu_context_refill(&env->le_ctx);
1412         if (result == 0 && env->le_ses != NULL)
1413                 result = lu_context_refill(env->le_ses);
1414         return result;
1415 }
1416 EXPORT_SYMBOL(lu_env_refill);
1417
1418 static struct shrinker *lu_site_shrinker = NULL;
1419
1420 #ifdef __KERNEL__
1421 static int lu_cache_shrink(int nr, unsigned int gfp_mask)
1422 {
1423         struct lu_site *s;
1424         struct lu_site *tmp;
1425         int cached = 0;
1426         int remain = nr;
1427         CFS_LIST_HEAD(splice);
1428
1429         if (nr != 0 && !(gfp_mask & __GFP_FS))
1430                 return -1;
1431
1432         down(&lu_sites_guard);
1433         list_for_each_entry_safe(s, tmp, &lu_sites, ls_linkage) {
1434                 if (nr != 0) {
1435                         remain = lu_site_purge(&lu_shrink_env, s, remain);
1436                         /*
1437                          * Move just shrunk site to the tail of site list to
1438                          * assure shrinking fairness.
1439                          */
1440                         list_move_tail(&s->ls_linkage, &splice);
1441                 }
1442                 read_lock(&s->ls_guard);
1443                 cached += s->ls_total - s->ls_busy;
1444                 read_unlock(&s->ls_guard);
1445                 if (remain <= 0)
1446                         break;
1447         }
1448         list_splice(&splice, lu_sites.prev);
1449         up(&lu_sites_guard);
1450         return cached;
1451 }
1452
1453 #else  /* !__KERNEL__ */
1454 static int lu_cache_shrink(int nr, unsigned int gfp_mask)
1455 {
1456         return 0;
1457 }
1458 #endif /* __KERNEL__ */
1459
1460 int  lu_ref_global_init(void);
1461 void lu_ref_global_fini(void);
1462
1463 /**
1464  * Initialization of global lu_* data.
1465  */
1466 int lu_global_init(void)
1467 {
1468         int result;
1469
1470         CDEBUG(D_CONSOLE, "Lustre LU module (%p).\n", &lu_keys);
1471
1472         LU_CONTEXT_KEY_INIT(&lu_global_key);
1473         result = lu_context_key_register(&lu_global_key);
1474         if (result != 0)
1475                 return result;
1476         /*
1477          * At this level, we don't know what tags are needed, so allocate them
1478          * conservatively. This should not be too bad, because this
1479          * environment is global.
1480          */
1481         down(&lu_sites_guard);
1482         result = lu_env_init_noref(&lu_shrink_env, NULL, LCT_SHRINKER);
1483         up(&lu_sites_guard);
1484         if (result != 0)
1485                 return result;
1486
1487         result = lu_ref_global_init();
1488         if (result != 0)
1489                 return result;
1490         /*
1491          * Seeks estimation: 3 seeks to read a record from the OI, one to read
1492          * the inode, one for the EA. Unfortunately setting this high value
1493          * results in the lu_object/inode cache consuming all the memory.
1494          */
1495         lu_site_shrinker = set_shrinker(DEFAULT_SEEKS, lu_cache_shrink);
1496         if (lu_site_shrinker == NULL)
1497                 return -ENOMEM;
1498
1499         result = lu_time_global_init();
1500         return result;
1501 }
1502
1503 /**
1504  * Dual to lu_global_init().
1505  */
1506 void lu_global_fini(void)
1507 {
1508         lu_time_global_fini();
1509         if (lu_site_shrinker != NULL) {
1510                 remove_shrinker(lu_site_shrinker);
1511                 lu_site_shrinker = NULL;
1512         }
1513
1514         lu_context_key_degister(&lu_global_key);
1515
1516         /*
1517          * Tear shrinker environment down _after_ de-registering
1518          * lu_global_key, because the latter has a value in the former.
1519          */
1520         down(&lu_sites_guard);
1521         lu_env_fini(&lu_shrink_env);
1522         up(&lu_sites_guard);
1523
1524         lu_ref_global_fini();
1525 }
1526
1527 struct lu_buf LU_BUF_NULL = {
1528         .lb_buf = NULL,
1529         .lb_len = 0
1530 };
1531 EXPORT_SYMBOL(LU_BUF_NULL);
1532
1533 /*
1534  * XXX: Functions below logically belong to fid module, but they are used by
1535  * dt_store_open(). Put them here until better place is found.
1536  */
1537
1538 void fid_pack(struct lu_fid_pack *pack, const struct lu_fid *fid,
1539               struct lu_fid *befider)
1540 {
1541         int recsize;
1542         __u64 seq;
1543         __u32 oid;
1544
1545         seq = fid_seq(fid);
1546         oid = fid_oid(fid);
1547
1548         /*
1549          * Two cases: a compact 6-byte representation for the common case, and
1550          * a full 17-byte representation for an "unusual" fid.
1551          */
1552
1553         /*
1554          * Check that usual case is really usual.
1555          */
1556         CLASSERT(LUSTRE_SEQ_MAX_WIDTH < 0xffffull);
1557
1558         if (fid_is_igif(fid) ||
1559             seq > 0xffffffull || oid > 0xffff || fid_ver(fid) != 0) {
1560                 fid_cpu_to_be(befider, fid);
1561                 recsize = sizeof *befider;
1562         } else {
1563                 unsigned char *small_befider;
1564
1565                 small_befider = (unsigned char *)befider;
1566
1567                 small_befider[0] = seq >> 16;
1568                 small_befider[1] = seq >> 8;
1569                 small_befider[2] = seq;
1570
1571                 small_befider[3] = oid >> 8;
1572                 small_befider[4] = oid;
1573
1574                 recsize = 5;
1575         }
1576         memcpy(pack->fp_area, befider, recsize);
1577         pack->fp_len = recsize + 1;
1578 }
1579 EXPORT_SYMBOL(fid_pack);
1580
1581 int fid_unpack(const struct lu_fid_pack *pack, struct lu_fid *fid)
1582 {
1583         int result;
1584
1585         result = 0;
1586         switch (pack->fp_len) {
1587         case sizeof *fid + 1:
1588                 memcpy(fid, pack->fp_area, sizeof *fid);
1589                 fid_be_to_cpu(fid, fid);
1590                 break;
1591         case 6: {
1592                 const unsigned char *area;
1593
1594                 area = pack->fp_area;
1595                 fid->f_seq = (area[0] << 16) | (area[1] << 8) | area[2];
1596                 fid->f_oid = (area[3] << 8) | area[4];
1597                 fid->f_ver = 0;
1598                 break;
1599         }
1600         default:
1601                 CERROR("Unexpected packed fid size: %d\n", pack->fp_len);
1602                 result = -EIO;
1603         }
1604         return result;
1605 }
1606 EXPORT_SYMBOL(fid_unpack);
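
/*
 * A worked example of the compact encoding above: for a hypothetical fid with
 * seq = 0x123456, oid = 0xabcd and ver = 0, fid_pack() produces
 *
 *      fp_area = { 0x12, 0x34, 0x56, 0xab, 0xcd }      (recsize = 5)
 *      fp_len  = 6
 *
 * and fid_unpack() reassembles seq and oid from those five big-endian bytes.
 * Any fid that does not fit (an igif, seq > 0xffffff, oid > 0xffff, or a
 * non-zero version) falls back to the full 17-byte record.
 */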
1607
1608 const char *lu_time_names[LU_TIME_NR] = {
1609         [LU_TIME_FIND_LOOKUP] = "find_lookup",
1610         [LU_TIME_FIND_ALLOC]  = "find_alloc",
1611         [LU_TIME_FIND_INSERT] = "find_insert"
1612 };
1613 EXPORT_SYMBOL(lu_time_names);
1614
1615 /**
1616  * Helper function to initialize a number of kmem slab caches at once.
1617  */
1618 int lu_kmem_init(struct lu_kmem_descr *caches)
1619 {
1620         int result;
1621
1622         for (result = 0; caches->ckd_cache != NULL; ++caches) {
1623                 *caches->ckd_cache = cfs_mem_cache_create(caches->ckd_name,
1624                                                           caches->ckd_size,
1625                                                           0, 0);
1626                 if (*caches->ckd_cache == NULL) {
1627                         result = -ENOMEM;
1628                         break;
1629                 }
1630         }
1631         return result;
1632 }
1633 EXPORT_SYMBOL(lu_kmem_init);
1634
1635 /**
1636  * Helper function to finalize a number of kmem slab caches at once. Dual to
1637  * lu_kmem_init().
1638  */
1639 void lu_kmem_fini(struct lu_kmem_descr *caches)
1640 {
1641         int rc;
1642
1643         for (; caches->ckd_cache != NULL; ++caches) {
1644                 if (*caches->ckd_cache != NULL) {
1645                         rc = cfs_mem_cache_destroy(*caches->ckd_cache);
1646                         LASSERTF(rc == 0, "couldn't destroy %s slab\n",
1647                                  caches->ckd_name);
1648                         *caches->ckd_cache = NULL;
1649                 }
1650         }
1651 }
1652 EXPORT_SYMBOL(lu_kmem_fini);
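
/*
 * A sketch of the descriptor array that lu_kmem_init()/lu_kmem_fini() expect;
 * the cache variable, name and object type are hypothetical, and the array is
 * terminated by an entry whose ckd_cache is NULL:
 *
 *      static cfs_mem_cache_t *foo_object_kmem;
 *
 *      static struct lu_kmem_descr foo_caches[] = {
 *              {
 *                      .ckd_cache = &foo_object_kmem,
 *                      .ckd_name  = "foo_object_kmem",
 *                      .ckd_size  = sizeof(struct foo_object)
 *              },
 *              {
 *                      .ckd_cache = NULL
 *              }
 *      };
 *
 *      rc = lu_kmem_init(foo_caches);
 *      ...
 *      lu_kmem_fini(foo_caches);
 */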
1653