b=17167 libcfs: ensure all libcfs exported symbols to have cfs_ prefix
[fs/lustre-release.git] lustre/obdclass/lu_object.c
1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2  * vim:expandtab:shiftwidth=8:tabstop=8:
3  *
4  * GPL HEADER START
5  *
6  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
7  *
8  * This program is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License version 2 only,
10  * as published by the Free Software Foundation.
11  *
12  * This program is distributed in the hope that it will be useful, but
13  * WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * General Public License version 2 for more details (a copy is included
16  * in the LICENSE file that accompanied this code).
17  *
18  * You should have received a copy of the GNU General Public License
19  * version 2 along with this program; If not, see
20  * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
21  *
22  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
23  * CA 95054 USA or visit www.sun.com if you need additional information or
24  * have any questions.
25  *
26  * GPL HEADER END
27  */
28 /*
29  * Copyright 2008 Sun Microsystems, Inc. All rights reserved
30  * Use is subject to license terms.
31  */
32 /*
33  * This file is part of Lustre, http://www.lustre.org/
34  * Lustre is a trademark of Sun Microsystems, Inc.
35  *
36  * lustre/obdclass/lu_object.c
37  *
38  * Lustre Object.
39  * These are the only exported functions, they provide some generic
40  * These are the only exported functions; they provide some generic
41  * infrastructure for managing object devices.
42  *   Author: Nikita Danilov <nikita.danilov@sun.com>
43  */
44
45 #define DEBUG_SUBSYSTEM S_CLASS
46 #ifndef EXPORT_SYMTAB
47 # define EXPORT_SYMTAB
48 #endif
49
50 #include <libcfs/libcfs.h>
51
52 #ifdef __KERNEL__
53 # include <linux/module.h>
54 #endif
55
56 /* hash_long() */
57 #include <libcfs/libcfs_hash.h>
58 #include <obd_class.h>
59 #include <obd_support.h>
60 #include <lustre_disk.h>
61 #include <lustre_fid.h>
62 #include <lu_object.h>
63 #include <libcfs/list.h>
64 /* lu_time_global_{init,fini}() */
65 #include <lu_time.h>
66
67 static void lu_object_free(const struct lu_env *env, struct lu_object *o);
68
69 /**
70  * Decrease the reference counter on an object. If the last reference is
71  * released, return the object to the cache, unless lu_object_is_dying(o)
72  * holds. In the latter case, free the object immediately.
73  */
74 void lu_object_put(const struct lu_env *env, struct lu_object *o)
75 {
76         struct lu_object_header *top;
77         struct lu_site          *site;
78         struct lu_object        *orig;
79         int                      kill_it;
80
81         top = o->lo_header;
82         site = o->lo_dev->ld_site;
83         orig = o;
84         kill_it = 0;
85         cfs_write_lock(&site->ls_guard);
86         if (cfs_atomic_dec_and_test(&top->loh_ref)) {
87                 /*
88                  * When last reference is released, iterate over object
89                  * layers, and notify them that object is no longer busy.
90                  */
91                 cfs_list_for_each_entry_reverse(o, &top->loh_layers,
92                                                 lo_linkage) {
93                         if (o->lo_ops->loo_object_release != NULL)
94                                 o->lo_ops->loo_object_release(env, o);
95                 }
96                 -- site->ls_busy;
97                 if (lu_object_is_dying(top)) {
98                         /*
99                          * If object is dying (will not be cached), remove it
100                          * from the hash table and LRU.
101                          *
102                          * This is done with the hash table and LRU lists
103                          * locked. As the only ways to acquire the first
104                          * reference to a previously unreferenced object are
105                          * hash-table lookup (lu_object_find()) and LRU
106                          * scanning (lu_site_purge()), both of which are done
107                          * under the hash-table and LRU lock, no race with a
108                          * concurrent lookup is possible and we can safely
109                          * destroy the object below.
110                          */
111                         cfs_hlist_del_init(&top->loh_hash);
112                         cfs_list_del_init(&top->loh_lru);
113                         -- site->ls_total;
114                         kill_it = 1;
115                 }
116         }
117         cfs_write_unlock(&site->ls_guard);
118         if (kill_it)
119                 /*
120                  * The object was already removed from the hash table and
121                  * LRU above, so we can kill it now.
122                  */
123                 lu_object_free(env, orig);
124 }
125 EXPORT_SYMBOL(lu_object_put);
126
127 /**
128  * Allocate new object.
129  *
130  * This follows object creation protocol, described in the comment within
131  * struct lu_device_operations definition.
132  */
133 static struct lu_object *lu_object_alloc(const struct lu_env *env,
134                                          struct lu_device *dev,
135                                          const struct lu_fid *f,
136                                          const struct lu_object_conf *conf)
137 {
138         struct lu_object *scan;
139         struct lu_object *top;
140         cfs_list_t *layers;
141         int clean;
142         int result;
143         ENTRY;
144
145         /*
146          * Create top-level object slice. This will also create
147          * lu_object_header.
148          */
149         top = dev->ld_ops->ldo_object_alloc(env, NULL, dev);
150         if (top == NULL)
151                 RETURN(ERR_PTR(-ENOMEM));
152         /*
153          * This is the only place where object fid is assigned. It's constant
154          * after this point.
155          */
156         LASSERT(fid_is_igif(f) || fid_ver(f) == 0);
157         top->lo_header->loh_fid  = *f;
158         layers = &top->lo_header->loh_layers;
159         do {
160                 /*
161                  * Call ->loo_object_init() repeatedly, until no more new
162                  * object slices are created.
163                  */
164                 clean = 1;
165                 cfs_list_for_each_entry(scan, layers, lo_linkage) {
166                         if (scan->lo_flags & LU_OBJECT_ALLOCATED)
167                                 continue;
168                         clean = 0;
169                         scan->lo_header = top->lo_header;
170                         result = scan->lo_ops->loo_object_init(env, scan, conf);
171                         if (result != 0) {
172                                 lu_object_free(env, top);
173                                 RETURN(ERR_PTR(result));
174                         }
175                         scan->lo_flags |= LU_OBJECT_ALLOCATED;
176                 }
177         } while (!clean);
178
179         cfs_list_for_each_entry_reverse(scan, layers, lo_linkage) {
180                 if (scan->lo_ops->loo_object_start != NULL) {
181                         result = scan->lo_ops->loo_object_start(env, scan);
182                         if (result != 0) {
183                                 lu_object_free(env, top);
184                                 RETURN(ERR_PTR(result));
185                         }
186                 }
187         }
188
189         dev->ld_site->ls_stats.s_created ++;
190         RETURN(top);
191 }
192
193 /**
194  * Free an object.
195  */
196 static void lu_object_free(const struct lu_env *env, struct lu_object *o)
197 {
198         cfs_list_t            splice;
199         struct lu_object     *scan;
200         struct lu_site       *site;
201         cfs_list_t           *layers;
202
203         site   = o->lo_dev->ld_site;
204         layers = &o->lo_header->loh_layers;
205         /*
206          * First call ->loo_object_delete() method to release all resources.
207          */
208         cfs_list_for_each_entry_reverse(scan, layers, lo_linkage) {
209                 if (scan->lo_ops->loo_object_delete != NULL)
210                         scan->lo_ops->loo_object_delete(env, scan);
211         }
212
213         /*
214          * Then, splice object layers into stand-alone list, and call
215          * ->loo_object_free() on all layers to free memory. Splice is
216          * necessary, because lu_object_header is freed together with the
217          * top-level slice.
218          */
219         CFS_INIT_LIST_HEAD(&splice);
220         cfs_list_splice_init(layers, &splice);
221         while (!cfs_list_empty(&splice)) {
222                 /*
223                  * Free layers in bottom-to-top order, so that object header
224                  * lives as long as possible and ->loo_object_free() methods
225                  * can look at its contents.
226                  */
227                 o = container_of0(splice.prev, struct lu_object, lo_linkage);
228                 cfs_list_del_init(&o->lo_linkage);
229                 LASSERT(o->lo_ops->loo_object_free != NULL);
230                 o->lo_ops->loo_object_free(env, o);
231         }
232         cfs_waitq_broadcast(&site->ls_marche_funebre);
233 }
234
235 /**
236  * Free \a nr objects from the cold end of the site LRU list.
237  */
238 int lu_site_purge(const struct lu_env *env, struct lu_site *s, int nr)
239 {
240         cfs_list_t               dispose;
241         struct lu_object_header *h;
242         struct lu_object_header *temp;
243
244         CFS_INIT_LIST_HEAD(&dispose);
245         /*
246          * Under LRU list lock, scan LRU list and move unreferenced objects to
247          * the dispose list, removing them from LRU and hash table.
248          */
249         cfs_write_lock(&s->ls_guard);
250         cfs_list_for_each_entry_safe(h, temp, &s->ls_lru, loh_lru) {
251                 /*
252                  * Objects are sorted in LRU order, and "busy" objects (ones
253                  * with h->loh_ref > 0) naturally tend to live near the hot
254                  * end that we scan last. Unfortunately, sites usually have a
255                  * small (fewer than ten) number of busy yet rarely accessed
256                  * objects (some global objects, accessed directly through
257                  * pointers, bypassing the hash table). Currently the algorithm
258                  * scans them over and over again. Probably we should move
259                  * busy objects out of the LRU, or we can live with that.
260                  */
261                 if (nr-- == 0)
262                         break;
263                 if (cfs_atomic_read(&h->loh_ref) > 0)
264                         continue;
265                 cfs_hlist_del_init(&h->loh_hash);
266                 cfs_list_move(&h->loh_lru, &dispose);
267                 s->ls_total --;
268         }
269         cfs_write_unlock(&s->ls_guard);
270         /*
271          * Free everything on the dispose list. This is safe against races due
272          * to the reasons described in lu_object_put().
273          */
274         while (!cfs_list_empty(&dispose)) {
275                 h = container_of0(dispose.next,
276                                  struct lu_object_header, loh_lru);
277                 cfs_list_del_init(&h->loh_lru);
278                 lu_object_free(env, lu_object_top(h));
279                 s->ls_stats.s_lru_purged ++;
280         }
281         return nr;
282 }
283 EXPORT_SYMBOL(lu_site_purge);
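/*
 * Usage sketch (illustrative, not from the original file): lu_stack_fini()
 * below drains the whole cache by passing ~0 for \a nr, while a
 * shrinker-style caller would pass a finite count and look at the
 * returned remainder:
 *
 *      lu_site_purge(env, site, ~0);          // drop every unreferenced object
 *      left = lu_site_purge(env, site, 128);  // free at most 128 objects
 */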
284
285 /*
286  * Object printing.
287  *
288  * The code below has to jump through certain hoops to output an object
289  * description into the libcfs_debug_msg()-based log. The problem is that
290  * lu_object_print() composes the object description from strings that are
291  * parts of _lines_ of output (i.e., strings not terminated by a newline).
292  * This doesn't fit very well into the libcfs_debug_msg() interface, which
293  * assumes that each message supplied to it is a self-contained output line.
294  *
295  * To work around this, strings are collected in a temporary buffer
296  * (implemented as a value of the lu_global_key context key), until a
297  * terminating newline character is detected.
298  *
299  */
300
301 enum {
302         /**
303          * Maximal line size.
304          *
305          * XXX overflow is not handled correctly.
306          */
307         LU_CDEBUG_LINE = 256
308 };
309
310 struct lu_cdebug_data {
311         /**
312          * Temporary buffer.
313          */
314         char lck_area[LU_CDEBUG_LINE];
315 };
316
317 /* context key constructor/destructor: lu_global_key_init, lu_global_key_fini */
318 LU_KEY_INIT_FINI(lu_global, struct lu_cdebug_data);
319
320 /**
321  * Key, holding temporary buffer. This key is registered very early by
322  * lu_global_init().
323  */
324 struct lu_context_key lu_global_key = {
325         .lct_tags = LCT_MD_THREAD|LCT_DT_THREAD|LCT_CL_THREAD,
326         .lct_init = lu_global_key_init,
327         .lct_fini = lu_global_key_fini
328 };
329
330 /**
331  * Printer function emitting messages through libcfs_debug_msg().
332  */
333 int lu_cdebug_printer(const struct lu_env *env,
334                       void *cookie, const char *format, ...)
335 {
336         struct lu_cdebug_print_info *info = cookie;
337         struct lu_cdebug_data       *key;
338         int used;
339         int complete;
340         va_list args;
341
342         va_start(args, format);
343
344         key = lu_context_key_get(&env->le_ctx, &lu_global_key);
345         LASSERT(key != NULL);
346
347         used = strlen(key->lck_area);
348         complete = format[strlen(format) - 1] == '\n';
349         /*
350          * Append new chunk to the buffer.
351          */
352         vsnprintf(key->lck_area + used,
353                   ARRAY_SIZE(key->lck_area) - used, format, args);
354         if (complete) {
355                 if (cfs_cdebug_show(info->lpi_mask, info->lpi_subsys))
356                         libcfs_debug_msg(NULL, info->lpi_subsys, info->lpi_mask,
357                                          (char *)info->lpi_file, info->lpi_fn,
358                                          info->lpi_line, "%s", key->lck_area);
359                 key->lck_area[0] = 0;
360         }
361         va_end(args);
362         return 0;
363 }
364 EXPORT_SYMBOL(lu_cdebug_printer);
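/*
 * Usage sketch (illustrative; the debug mask is up to the caller): dumping
 * an object description through the debug log with this printer.
 * DECLARE_LU_CDEBUG_PRINT_INFO() builds the cookie consumed above;
 * lu_stack_fini() below uses the same pattern with lu_site_print().
 *
 *      static DECLARE_LU_CDEBUG_PRINT_INFO(cookie, D_INFO);
 *
 *      lu_object_print(env, &cookie, lu_cdebug_printer, obj);
 */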
365
366 /**
367  * Print object header.
368  */
369 void lu_object_header_print(const struct lu_env *env, void *cookie,
370                             lu_printer_t printer,
371                             const struct lu_object_header *hdr)
372 {
373         (*printer)(env, cookie, "header@%p[%#lx, %d, "DFID"%s%s%s]",
374                    hdr, hdr->loh_flags, cfs_atomic_read(&hdr->loh_ref),
375                    PFID(&hdr->loh_fid),
376                    cfs_hlist_unhashed(&hdr->loh_hash) ? "" : " hash",
377                    cfs_list_empty((cfs_list_t *)&hdr->loh_lru) ?
378                    "" : " lru",
379                    hdr->loh_attr & LOHA_EXISTS ? " exist":"");
380 }
381 EXPORT_SYMBOL(lu_object_header_print);
382
383 /**
384  * Print human readable representation of the \a o to the \a printer.
385  */
386 void lu_object_print(const struct lu_env *env, void *cookie,
387                      lu_printer_t printer, const struct lu_object *o)
388 {
389         static const char ruler[] = "........................................";
390         struct lu_object_header *top;
391         int depth;
392
393         top = o->lo_header;
394         lu_object_header_print(env, cookie, printer, top);
395         (*printer)(env, cookie, "{ \n");
396         cfs_list_for_each_entry(o, &top->loh_layers, lo_linkage) {
397                 depth = o->lo_depth + 4;
398
399                 /*
400                  * print `.' \a depth times followed by type name and address
401                  */
402                 (*printer)(env, cookie, "%*.*s%s@%p", depth, depth, ruler,
403                            o->lo_dev->ld_type->ldt_name, o);
404                 if (o->lo_ops->loo_object_print != NULL)
405                         o->lo_ops->loo_object_print(env, cookie, printer, o);
406                 (*printer)(env, cookie, "\n");
407         }
408         (*printer)(env, cookie, "} header@%p\n", top);
409 }
410 EXPORT_SYMBOL(lu_object_print);
411
412 /**
413  * Check object consistency.
414  */
415 int lu_object_invariant(const struct lu_object *o)
416 {
417         struct lu_object_header *top;
418
419         top = o->lo_header;
420         cfs_list_for_each_entry(o, &top->loh_layers, lo_linkage) {
421                 if (o->lo_ops->loo_object_invariant != NULL &&
422                     !o->lo_ops->loo_object_invariant(o))
423                         return 0;
424         }
425         return 1;
426 }
427 EXPORT_SYMBOL(lu_object_invariant);
428
429 static struct lu_object *htable_lookup(struct lu_site *s,
430                                        const cfs_hlist_head_t *bucket,
431                                        const struct lu_fid *f,
432                                        cfs_waitlink_t *waiter)
433 {
434         struct lu_object_header *h;
435         cfs_hlist_node_t *scan;
436
437         cfs_hlist_for_each_entry(h, scan, bucket, loh_hash) {
438                 s->ls_stats.s_cache_check ++;
439                 if (likely(lu_fid_eq(&h->loh_fid, f))) {
440                         if (unlikely(lu_object_is_dying(h))) {
441                                 /*
442                                  * Lookup found an object being destroyed;
443                                  * this object cannot be returned (to ensure
444                                  * that references to dying objects are
445                                  * eventually drained); moreover, the lookup
446                                  * has to wait until the object is freed.
447                                  */
448                                 cfs_waitlink_init(waiter);
449                                 cfs_waitq_add(&s->ls_marche_funebre, waiter);
450                                 cfs_set_current_state(CFS_TASK_UNINT);
451                                 s->ls_stats.s_cache_death_race ++;
452                                 return ERR_PTR(-EAGAIN);
453                         }
454                         /* bump reference count... */
455                         if (cfs_atomic_add_return(1, &h->loh_ref) == 1)
456                                 ++ s->ls_busy;
457                         /* and move to the head of the LRU */
458                         /*
459                          * XXX temporarily disabled to measure the effects of
460                          * read-write locking.
461                          */
462                         /* list_move_tail(&h->loh_lru, &s->ls_lru); */
463                         s->ls_stats.s_cache_hit ++;
464                         return lu_object_top(h);
465                 }
466         }
467         s->ls_stats.s_cache_miss ++;
468         return NULL;
469 }
470
471 static __u32 fid_hash(const struct lu_fid *f, int bits)
472 {
473         /* all objects with the same id and different versions will belong
474          * to the same collision list. */
475         return cfs_hash_long(fid_flatten(f), bits);
476 }
477
478 /**
479  * Search cache for an object with the fid \a f. If such object is found,
480  * return it. Otherwise, create new object, insert it into cache and return
481  * it. In any case, additional reference is acquired on the returned object.
482  */
483 struct lu_object *lu_object_find(const struct lu_env *env,
484                                  struct lu_device *dev, const struct lu_fid *f,
485                                  const struct lu_object_conf *conf)
486 {
487         return lu_object_find_at(env, dev->ld_site->ls_top_dev, f, conf);
488 }
489 EXPORT_SYMBOL(lu_object_find);
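/*
 * Usage sketch (illustrative; "dev" and "fid" are whatever the caller has
 * at hand, and a NULL \a conf is assumed to be acceptable to the layers
 * involved): every successful lookup returns the object with an extra
 * reference held, so it must be balanced by lu_object_put().
 *
 *      struct lu_object *o;
 *
 *      o = lu_object_find(env, dev, fid, NULL);
 *      if (IS_ERR(o))
 *              return PTR_ERR(o);
 *      // ... use the object ...
 *      lu_object_put(env, o);
 */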
490
491 /**
492  * Core logic of lu_object_find*() functions.
493  */
494 static struct lu_object *lu_object_find_try(const struct lu_env *env,
495                                             struct lu_device *dev,
496                                             const struct lu_fid *f,
497                                             const struct lu_object_conf *conf,
498                                             cfs_waitlink_t *waiter)
499 {
500         struct lu_site        *s;
501         struct lu_object      *o;
502         struct lu_object      *shadow;
503         cfs_hlist_head_t      *bucket;
504
505         /*
506          * This uses standard index maintenance protocol:
507          *
508          *     - search index under lock, and return object if found;
509          *     - otherwise, unlock index, allocate new object;
510          *     - lock index and search again;
511          *     - if nothing is found (usual case), insert newly created
512          *       object into index;
513          *     - otherwise (race: other thread inserted object), free
514          *       object just allocated.
515          *     - unlock index;
516          *     - return object.
517          *
518          * If dying object is found during index search, add @waiter to the
519          * site wait-queue and return ERR_PTR(-EAGAIN).
520          */
521
522         s = dev->ld_site;
523         bucket = s->ls_hash + fid_hash(f, s->ls_hash_bits);
524
525         cfs_read_lock(&s->ls_guard);
526         o = htable_lookup(s, bucket, f, waiter);
527         cfs_read_unlock(&s->ls_guard);
528
529         if (o != NULL)
530                 return o;
531
532         /*
533          * Allocate new object. This may result in rather complicated
534          * operations, including fld queries, inode loading, etc.
535          */
536         o = lu_object_alloc(env, dev, f, conf);
537         if (unlikely(IS_ERR(o)))
538                 return o;
539
540         LASSERT(lu_fid_eq(lu_object_fid(o), f));
541
542         cfs_write_lock(&s->ls_guard);
543         shadow = htable_lookup(s, bucket, f, waiter);
544         if (likely(shadow == NULL)) {
545                 cfs_hlist_add_head(&o->lo_header->loh_hash, bucket);
546                 cfs_list_add_tail(&o->lo_header->loh_lru, &s->ls_lru);
547                 ++ s->ls_busy;
548                 ++ s->ls_total;
549                 shadow = o;
550                 o = NULL;
551         } else
552                 s->ls_stats.s_cache_race ++;
553         cfs_write_unlock(&s->ls_guard);
554         if (o != NULL)
555                 lu_object_free(env, o);
556         return shadow;
557 }
558
559 /**
560  * Much like lu_object_find(), but the top level device of the object is
561  * \a dev rather than the top level device of the site. This interface
562  * allows objects of different "stackings" to be created within the same site.
563  */
564 struct lu_object *lu_object_find_at(const struct lu_env *env,
565                                     struct lu_device *dev,
566                                     const struct lu_fid *f,
567                                     const struct lu_object_conf *conf)
568 {
569         struct lu_object *obj;
570         cfs_waitlink_t    wait;
571
572         while (1) {
573                 obj = lu_object_find_try(env, dev, f, conf, &wait);
574                 if (obj == ERR_PTR(-EAGAIN)) {
575                         /*
576                          * lu_object_find_try() already added waiter into the
577                          * wait queue.
578                          */
579                         cfs_waitq_wait(&wait, CFS_TASK_UNINT);
580                         cfs_waitq_del(&dev->ld_site->ls_marche_funebre, &wait);
581                 } else
582                         break;
583         }
584         return obj;
585 }
586 EXPORT_SYMBOL(lu_object_find_at);
587
588 /**
589  * Find object with given fid, and return its slice belonging to given device.
590  */
591 struct lu_object *lu_object_find_slice(const struct lu_env *env,
592                                        struct lu_device *dev,
593                                        const struct lu_fid *f,
594                                        const struct lu_object_conf *conf)
595 {
596         struct lu_object *top;
597         struct lu_object *obj;
598
599         top = lu_object_find(env, dev, f, conf);
600         if (!IS_ERR(top)) {
601                 obj = lu_object_locate(top->lo_header, dev->ld_type);
602                 if (obj == NULL)
603                         lu_object_put(env, top);
604         } else
605                 obj = top;
606         return obj;
607 }
608 EXPORT_SYMBOL(lu_object_find_slice);
609
610 /**
611  * Global list of all device types.
612  */
613 static CFS_LIST_HEAD(lu_device_types);
614
615 int lu_device_type_init(struct lu_device_type *ldt)
616 {
617         int result;
618
619         CFS_INIT_LIST_HEAD(&ldt->ldt_linkage);
620         result = ldt->ldt_ops->ldto_init(ldt);
621         if (result == 0)
622                 cfs_list_add(&ldt->ldt_linkage, &lu_device_types);
623         return result;
624 }
625 EXPORT_SYMBOL(lu_device_type_init);
626
627 void lu_device_type_fini(struct lu_device_type *ldt)
628 {
629         cfs_list_del_init(&ldt->ldt_linkage);
630         ldt->ldt_ops->ldto_fini(ldt);
631 }
632 EXPORT_SYMBOL(lu_device_type_fini);
633
634 void lu_types_stop(void)
635 {
636         struct lu_device_type *ldt;
637
638         cfs_list_for_each_entry(ldt, &lu_device_types, ldt_linkage) {
639                 if (ldt->ldt_device_nr == 0)
640                         ldt->ldt_ops->ldto_stop(ldt);
641         }
642 }
643 EXPORT_SYMBOL(lu_types_stop);
644
645 /**
646  * Global list of all sites on this node
647  */
648 static CFS_LIST_HEAD(lu_sites);
649 static CFS_DECLARE_MUTEX(lu_sites_guard);
650
651 /**
652  * Global environment used by site shrinker.
653  */
654 static struct lu_env lu_shrink_env;
655
656 /**
657  * Print all objects in \a s.
658  */
659 void lu_site_print(const struct lu_env *env, struct lu_site *s, void *cookie,
660                    lu_printer_t printer)
661 {
662         int i;
663
664         for (i = 0; i < s->ls_hash_size; ++i) {
665                 struct lu_object_header *h;
666                 cfs_hlist_node_t        *scan;
667
668                 cfs_read_lock(&s->ls_guard);
669                 cfs_hlist_for_each_entry(h, scan, &s->ls_hash[i], loh_hash) {
670
671                         if (!cfs_list_empty(&h->loh_layers)) {
672                                 const struct lu_object *obj;
673
674                                 obj = lu_object_top(h);
675                                 lu_object_print(env, cookie, printer, obj);
676                         } else
677                                 lu_object_header_print(env, cookie, printer, h);
678                 }
679                 cfs_read_unlock(&s->ls_guard);
680         }
681 }
682 EXPORT_SYMBOL(lu_site_print);
683
684 enum {
685         LU_CACHE_PERCENT   = 20,
686 };
687
688 /**
689  * Return desired hash table order.
690  */
691 static int lu_htable_order(void)
692 {
693         unsigned long cache_size;
694         int bits;
695
696         /*
697          * Calculate hash table size, assuming that we want reasonable
698          * performance when 20% of total memory is occupied by cache of
699          * lu_objects.
700          *
701          * Size of lu_object is (arbitrarily) taken as 1K (together with inode).
702          */
703         cache_size = cfs_num_physpages;
704
705 #if BITS_PER_LONG == 32
706         /* limit hashtable size for lowmem systems to low RAM */
707         if (cache_size > 1 << (30 - CFS_PAGE_SHIFT))
708                 cache_size = (1 << (30 - CFS_PAGE_SHIFT)) * 3 / 4;
709 #endif
710
711         cache_size = cache_size / 100 * LU_CACHE_PERCENT *
712                 (CFS_PAGE_SIZE / 1024);
713
714         for (bits = 1; (1 << bits) < cache_size; ++bits) {
715                 ;
716         }
717         return bits;
718 }
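/*
 * Worked example (assuming a 64-bit node with 4 GiB of RAM and 4 KiB
 * pages): cfs_num_physpages = 1048576, so cache_size becomes
 * 1048576 / 100 * 20 * (4096 / 1024) = 838800 entries, and the loop above
 * returns bits = 20, i.e. a hash table with 2^20 buckets.
 */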
719
720 static cfs_lock_class_key_t lu_site_guard_class;
721
722 /**
723  * Initialize site \a s, with \a d as the top level device.
724  */
725 int lu_site_init(struct lu_site *s, struct lu_device *top)
726 {
727         int bits;
728         int size;
729         int i;
730         ENTRY;
731
732         memset(s, 0, sizeof *s);
733         cfs_rwlock_init(&s->ls_guard);
734         cfs_lockdep_set_class(&s->ls_guard, &lu_site_guard_class);
735         CFS_INIT_LIST_HEAD(&s->ls_lru);
736         CFS_INIT_LIST_HEAD(&s->ls_linkage);
737         cfs_waitq_init(&s->ls_marche_funebre);
738         s->ls_top_dev = top;
739         top->ld_site = s;
740         lu_device_get(top);
741         lu_ref_add(&top->ld_reference, "site-top", s);
742
743         for (bits = lu_htable_order(), size = 1 << bits;
744              (s->ls_hash =
745               cfs_alloc_large(size * sizeof s->ls_hash[0])) == NULL;
746              --bits, size >>= 1) {
747                 /*
748                  * Scale hash table down, until allocation succeeds.
749                  */
750                 ;
751         }
752
753         s->ls_hash_size = size;
754         s->ls_hash_bits = bits;
755         s->ls_hash_mask = size - 1;
756
757         for (i = 0; i < size; i++)
758                 CFS_INIT_HLIST_HEAD(&s->ls_hash[i]);
759
760         RETURN(0);
761 }
762 EXPORT_SYMBOL(lu_site_init);
763
764 /**
765  * Finalize \a s and release its resources.
766  */
767 void lu_site_fini(struct lu_site *s)
768 {
769         LASSERT(cfs_list_empty(&s->ls_lru));
770         LASSERT(s->ls_total == 0);
771
772         cfs_down(&lu_sites_guard);
773         cfs_list_del_init(&s->ls_linkage);
774         cfs_up(&lu_sites_guard);
775
776         if (s->ls_hash != NULL) {
777                 int i;
778                 for (i = 0; i < s->ls_hash_size; i++)
779                         LASSERT(cfs_hlist_empty(&s->ls_hash[i]));
780                 cfs_free_large(s->ls_hash);
781                 s->ls_hash = NULL;
782         }
783         if (s->ls_top_dev != NULL) {
784                 s->ls_top_dev->ld_site = NULL;
785                 lu_ref_del(&s->ls_top_dev->ld_reference, "site-top", s);
786                 lu_device_put(s->ls_top_dev);
787                 s->ls_top_dev = NULL;
788         }
789 }
790 EXPORT_SYMBOL(lu_site_fini);
791
792 /**
793  * Called when initialization of stack for this site is completed.
794  */
795 int lu_site_init_finish(struct lu_site *s)
796 {
797         int result;
798         cfs_down(&lu_sites_guard);
799         result = lu_context_refill(&lu_shrink_env.le_ctx);
800         if (result == 0)
801                 cfs_list_add(&s->ls_linkage, &lu_sites);
802         cfs_up(&lu_sites_guard);
803         return result;
804 }
805 EXPORT_SYMBOL(lu_site_init_finish);
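/*
 * Lifecycle sketch (illustrative; "site" and "top_dev" are hypothetical
 * names): a server stack sets a site up in two steps and tears it down
 * with lu_site_fini() once the device stack is gone.
 *
 *      rc = lu_site_init(site, top_dev);
 *      if (rc == 0)
 *              rc = lu_site_init_finish(site);
 *      ...
 *      lu_site_fini(site);
 */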
806
807 /**
808  * Acquire additional reference on device \a d
809  */
810 void lu_device_get(struct lu_device *d)
811 {
812         cfs_atomic_inc(&d->ld_ref);
813 }
814 EXPORT_SYMBOL(lu_device_get);
815
816 /**
817  * Release reference on device \a d.
818  */
819 void lu_device_put(struct lu_device *d)
820 {
821         LASSERT(cfs_atomic_read(&d->ld_ref) > 0);
822         cfs_atomic_dec(&d->ld_ref);
823 }
824 EXPORT_SYMBOL(lu_device_put);
825
826 /**
827  * Initialize device \a d of type \a t.
828  */
829 int lu_device_init(struct lu_device *d, struct lu_device_type *t)
830 {
831         if (t->ldt_device_nr++ == 0 && t->ldt_ops->ldto_start != NULL)
832                 t->ldt_ops->ldto_start(t);
833         memset(d, 0, sizeof *d);
834         cfs_atomic_set(&d->ld_ref, 0);
835         d->ld_type = t;
836         lu_ref_init(&d->ld_reference);
837         return 0;
838 }
839 EXPORT_SYMBOL(lu_device_init);
840
841 /**
842  * Finalize device \a d.
843  */
844 void lu_device_fini(struct lu_device *d)
845 {
846         struct lu_device_type *t;
847
848         t = d->ld_type;
849         if (d->ld_obd != NULL) {
850                 d->ld_obd->obd_lu_dev = NULL;
851                 d->ld_obd = NULL;
852         }
853
854         lu_ref_fini(&d->ld_reference);
855         LASSERTF(cfs_atomic_read(&d->ld_ref) == 0,
856                  "Refcount is %u\n", cfs_atomic_read(&d->ld_ref));
857         LASSERT(t->ldt_device_nr > 0);
858         if (--t->ldt_device_nr == 0 && t->ldt_ops->ldto_stop != NULL)
859                 t->ldt_ops->ldto_stop(t);
860 }
861 EXPORT_SYMBOL(lu_device_fini);
862
863 /**
864  * Initialize object \a o that is part of compound object \a h and was created
865  * by device \a d.
866  */
867 int lu_object_init(struct lu_object *o,
868                    struct lu_object_header *h, struct lu_device *d)
869 {
870         memset(o, 0, sizeof *o);
871         o->lo_header = h;
872         o->lo_dev    = d;
873         lu_device_get(d);
874         o->lo_dev_ref = lu_ref_add(&d->ld_reference, "lu_object", o);
875         CFS_INIT_LIST_HEAD(&o->lo_linkage);
876         return 0;
877 }
878 EXPORT_SYMBOL(lu_object_init);
879
880 /**
881  * Finalize object and release its resources.
882  */
883 void lu_object_fini(struct lu_object *o)
884 {
885         struct lu_device *dev = o->lo_dev;
886
887         LASSERT(cfs_list_empty(&o->lo_linkage));
888
889         if (dev != NULL) {
890                 lu_ref_del_at(&dev->ld_reference,
891                               o->lo_dev_ref , "lu_object", o);
892                 lu_device_put(dev);
893                 o->lo_dev = NULL;
894         }
895 }
896 EXPORT_SYMBOL(lu_object_fini);
897
898 /**
899  * Add object \a o as first layer of compound object \a h
900  *
901  * This is typically called by the ->ldo_object_alloc() method of top-level
902  * device.
903  */
904 void lu_object_add_top(struct lu_object_header *h, struct lu_object *o)
905 {
906         cfs_list_move(&o->lo_linkage, &h->loh_layers);
907 }
908 EXPORT_SYMBOL(lu_object_add_top);
909
910 /**
911  * Add object \a o as a layer of compound object, going after \a before.
912  *
913  * This is typically called by the ->ldo_object_alloc() method of \a
914  * before->lo_dev.
915  */
916 void lu_object_add(struct lu_object *before, struct lu_object *o)
917 {
918         cfs_list_move(&o->lo_linkage, &before->lo_linkage);
919 }
920 EXPORT_SYMBOL(lu_object_add);
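/*
 * Sketch of the allocation protocol from a layer's point of view
 * (hypothetical "foo" layer; foo_next_device() stands in for however the
 * layer finds the device below it): ->loo_object_init() allocates the
 * slice underneath and links it in with lu_object_add(), which is what
 * makes lu_object_alloc() loop until no new slices appear.
 *
 *      static int foo_object_init(const struct lu_env *env,
 *                                 struct lu_object *o,
 *                                 const struct lu_object_conf *conf)
 *      {
 *              struct lu_device *under = foo_next_device(o->lo_dev);
 *              struct lu_object *below;
 *
 *              below = under->ld_ops->ldo_object_alloc(env, o->lo_header,
 *                                                      under);
 *              if (below == NULL)
 *                      return -ENOMEM;
 *              lu_object_add(o, below);
 *              return 0;
 *      }
 */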
921
922 /**
923  * Initialize compound object.
924  */
925 int lu_object_header_init(struct lu_object_header *h)
926 {
927         memset(h, 0, sizeof *h);
928         cfs_atomic_set(&h->loh_ref, 1);
929         CFS_INIT_HLIST_NODE(&h->loh_hash);
930         CFS_INIT_LIST_HEAD(&h->loh_lru);
931         CFS_INIT_LIST_HEAD(&h->loh_layers);
932         lu_ref_init(&h->loh_reference);
933         return 0;
934 }
935 EXPORT_SYMBOL(lu_object_header_init);
936
937 /**
938  * Finalize compound object.
939  */
940 void lu_object_header_fini(struct lu_object_header *h)
941 {
942         LASSERT(cfs_list_empty(&h->loh_layers));
943         LASSERT(cfs_list_empty(&h->loh_lru));
944         LASSERT(cfs_hlist_unhashed(&h->loh_hash));
945         lu_ref_fini(&h->loh_reference);
946 }
947 EXPORT_SYMBOL(lu_object_header_fini);
948
949 /**
950  * Given a compound object, find its slice, corresponding to the device type
951  * \a dtype.
952  */
953 struct lu_object *lu_object_locate(struct lu_object_header *h,
954                                    const struct lu_device_type *dtype)
955 {
956         struct lu_object *o;
957
958         cfs_list_for_each_entry(o, &h->loh_layers, lo_linkage) {
959                 if (o->lo_dev->ld_type == dtype)
960                         return o;
961         }
962         return NULL;
963 }
964 EXPORT_SYMBOL(lu_object_locate);
965
966
967
968 /**
969  * Finalize and free devices in the device stack.
970  *
971  * Finalize device stack by purging object cache, and calling
972  * lu_device_type_operations::ldto_device_fini() and
973  * lu_device_type_operations::ldto_device_free() on all devices in the stack.
974  */
975 void lu_stack_fini(const struct lu_env *env, struct lu_device *top)
976 {
977         struct lu_site   *site = top->ld_site;
978         struct lu_device *scan;
979         struct lu_device *next;
980
981         lu_site_purge(env, site, ~0);
982         for (scan = top; scan != NULL; scan = next) {
983                 next = scan->ld_type->ldt_ops->ldto_device_fini(env, scan);
984                 lu_ref_del(&scan->ld_reference, "lu-stack", &lu_site_init);
985                 lu_device_put(scan);
986         }
987
988         /* purge again. */
989         lu_site_purge(env, site, ~0);
990
991         if (!cfs_list_empty(&site->ls_lru) || site->ls_total != 0) {
992                 /*
993                  * Uh-oh, objects still exist.
994                  */
995                 static DECLARE_LU_CDEBUG_PRINT_INFO(cookie, D_ERROR);
996
997                 lu_site_print(env, site, &cookie, lu_cdebug_printer);
998         }
999
1000         for (scan = top; scan != NULL; scan = next) {
1001                 const struct lu_device_type *ldt = scan->ld_type;
1002                 struct obd_type             *type;
1003
1004                 next = ldt->ldt_ops->ldto_device_free(env, scan);
1005                 type = ldt->ldt_obd_type;
1006                 if (type != NULL) {
1007                         type->typ_refcnt--;
1008                         class_put_type(type);
1009                 }
1010         }
1011 }
1012 EXPORT_SYMBOL(lu_stack_fini);
1013
1014 enum {
1015         /**
1016          * Maximal number of tld slots.
1017          */
1018         LU_CONTEXT_KEY_NR = 32
1019 };
1020
1021 static struct lu_context_key *lu_keys[LU_CONTEXT_KEY_NR] = { NULL, };
1022
1023 static cfs_spinlock_t lu_keys_guard = CFS_SPIN_LOCK_UNLOCKED;
1024
1025 /**
1026  * Global counter incremented whenever key is registered, unregistered,
1027  * revived or quiesced. This is used to avoid unnecessary calls to
1028  * lu_context_refill(). No locking is provided, as initialization and shutdown
1029  * are supposed to be externally serialized.
1030  */
1031 static unsigned key_set_version = 0;
1032
1033 /**
1034  * Register new key.
1035  */
1036 int lu_context_key_register(struct lu_context_key *key)
1037 {
1038         int result;
1039         int i;
1040
1041         LASSERT(key->lct_init != NULL);
1042         LASSERT(key->lct_fini != NULL);
1043         LASSERT(key->lct_tags != 0);
1044         LASSERT(key->lct_owner != NULL);
1045
1046         result = -ENFILE;
1047         cfs_spin_lock(&lu_keys_guard);
1048         for (i = 0; i < ARRAY_SIZE(lu_keys); ++i) {
1049                 if (lu_keys[i] == NULL) {
1050                         key->lct_index = i;
1051                         cfs_atomic_set(&key->lct_used, 1);
1052                         lu_keys[i] = key;
1053                         lu_ref_init(&key->lct_reference);
1054                         result = 0;
1055                         ++key_set_version;
1056                         break;
1057                 }
1058         }
1059         cfs_spin_unlock(&lu_keys_guard);
1060         return result;
1061 }
1062 EXPORT_SYMBOL(lu_context_key_register);
1063
1064 static void key_fini(struct lu_context *ctx, int index)
1065 {
1066         if (ctx->lc_value != NULL && ctx->lc_value[index] != NULL) {
1067                 struct lu_context_key *key;
1068
1069                 key = lu_keys[index];
1070                 LASSERT(key != NULL);
1071                 LASSERT(key->lct_fini != NULL);
1072                 LASSERT(cfs_atomic_read(&key->lct_used) > 1);
1073
1074                 key->lct_fini(ctx, key, ctx->lc_value[index]);
1075                 lu_ref_del(&key->lct_reference, "ctx", ctx);
1076                 cfs_atomic_dec(&key->lct_used);
1077                 LASSERT(key->lct_owner != NULL);
1078                 if (!(ctx->lc_tags & LCT_NOREF)) {
1079                         LASSERT(cfs_module_refcount(key->lct_owner) > 0);
1080                         cfs_module_put(key->lct_owner);
1081                 }
1082                 ctx->lc_value[index] = NULL;
1083         }
1084 }
1085
1086 /**
1087  * Deregister key.
1088  */
1089 void lu_context_key_degister(struct lu_context_key *key)
1090 {
1091         LASSERT(cfs_atomic_read(&key->lct_used) >= 1);
1092         LINVRNT(0 <= key->lct_index && key->lct_index < ARRAY_SIZE(lu_keys));
1093
1094         lu_context_key_quiesce(key);
1095
1096         ++key_set_version;
1097         cfs_spin_lock(&lu_keys_guard);
1098         key_fini(&lu_shrink_env.le_ctx, key->lct_index);
1099         if (lu_keys[key->lct_index]) {
1100                 lu_keys[key->lct_index] = NULL;
1101                 lu_ref_fini(&key->lct_reference);
1102         }
1103         cfs_spin_unlock(&lu_keys_guard);
1104
1105         LASSERTF(cfs_atomic_read(&key->lct_used) == 1,
1106                  "key has instances: %d\n",
1107                  cfs_atomic_read(&key->lct_used));
1108 }
1109 EXPORT_SYMBOL(lu_context_key_degister);
1110
1111 /**
1112  * Register a number of keys. This has to be called after all keys have been
1113  * initialized by a call to LU_CONTEXT_KEY_INIT().
1114  */
1115 int lu_context_key_register_many(struct lu_context_key *k, ...)
1116 {
1117         struct lu_context_key *key = k;
1118         va_list args;
1119         int result;
1120
1121         va_start(args, k);
1122         do {
1123                 result = lu_context_key_register(key);
1124                 if (result)
1125                         break;
1126                 key = va_arg(args, struct lu_context_key *);
1127         } while (key != NULL);
1128         va_end(args);
1129
1130         if (result != 0) {
1131                 va_start(args, k);
1132                 while (k != key) {
1133                         lu_context_key_degister(k);
1134                         k = va_arg(args, struct lu_context_key *);
1135                 }
1136                 va_end(args);
1137         }
1138
1139         return result;
1140 }
1141 EXPORT_SYMBOL(lu_context_key_register_many);
1142
1143 /**
1144  * De-register a number of keys. This is a dual to
1145  * lu_context_key_register_many().
1146  */
1147 void lu_context_key_degister_many(struct lu_context_key *k, ...)
1148 {
1149         va_list args;
1150
1151         va_start(args, k);
1152         do {
1153                 lu_context_key_degister(k);
1154                 k = va_arg(args, struct lu_context_key*);
1155         } while (k != NULL);
1156         va_end(args);
1157 }
1158 EXPORT_SYMBOL(lu_context_key_degister_many);
1159
1160 /**
1161  * Revive a number of keys.
1162  */
1163 void lu_context_key_revive_many(struct lu_context_key *k, ...)
1164 {
1165         va_list args;
1166
1167         va_start(args, k);
1168         do {
1169                 lu_context_key_revive(k);
1170                 k = va_arg(args, struct lu_context_key*);
1171         } while (k != NULL);
1172         va_end(args);
1173 }
1174 EXPORT_SYMBOL(lu_context_key_revive_many);
1175
1176 /**
1177  * Quiesce a number of keys.
1178  */
1179 void lu_context_key_quiesce_many(struct lu_context_key *k, ...)
1180 {
1181         va_list args;
1182
1183         va_start(args, k);
1184         do {
1185                 lu_context_key_quiesce(k);
1186                 k = va_arg(args, struct lu_context_key*);
1187         } while (k != NULL);
1188         va_end(args);
1189 }
1190 EXPORT_SYMBOL(lu_context_key_quiesce_many);
1191
1192 /**
1193  * Return value associated with key \a key in context \a ctx.
1194  */
1195 void *lu_context_key_get(const struct lu_context *ctx,
1196                          const struct lu_context_key *key)
1197 {
1198         LINVRNT(ctx->lc_state == LCS_ENTERED);
1199         LINVRNT(0 <= key->lct_index && key->lct_index < ARRAY_SIZE(lu_keys));
1200         LASSERT(lu_keys[key->lct_index] == key);
1201         return ctx->lc_value[key->lct_index];
1202 }
1203 EXPORT_SYMBOL(lu_context_key_get);
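/*
 * Usage sketch (hypothetical "bar" module), mirroring lu_global_key above:
 * declare per-context data, generate the init/fini pair with
 * LU_KEY_INIT_FINI(), register the key once at module load, and fetch the
 * value from a live context with lu_context_key_get().
 *
 *      struct bar_thread_info { ... };
 *
 *      LU_KEY_INIT_FINI(bar, struct bar_thread_info);
 *
 *      struct lu_context_key bar_thread_key = {
 *              .lct_tags = LCT_MD_THREAD,
 *              .lct_init = bar_key_init,
 *              .lct_fini = bar_key_fini
 *      };
 *
 *      LU_CONTEXT_KEY_INIT(&bar_thread_key);
 *      rc = lu_context_key_register(&bar_thread_key);
 *      ...
 *      info = lu_context_key_get(&env->le_ctx, &bar_thread_key);
 */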
1204
1205 /**
1206  * List of remembered contexts. XXX document me.
1207  */
1208 static CFS_LIST_HEAD(lu_context_remembered);
1209
1210 /**
1211  * Destroy \a key in all remembered contexts. This is used to destroy key
1212  * values in "shared" contexts (like service threads), when a module owning
1213  * the key is about to be unloaded.
1214  */
1215 void lu_context_key_quiesce(struct lu_context_key *key)
1216 {
1217         struct lu_context *ctx;
1218         extern unsigned cl_env_cache_purge(unsigned nr);
1219
1220         if (!(key->lct_tags & LCT_QUIESCENT)) {
1221                 /*
1222                  * XXX layering violation.
1223                  */
1224                 cl_env_cache_purge(~0);
1225                 key->lct_tags |= LCT_QUIESCENT;
1226                 /*
1227                  * XXX memory barrier has to go here.
1228                  */
1229                 cfs_spin_lock(&lu_keys_guard);
1230                 cfs_list_for_each_entry(ctx, &lu_context_remembered,
1231                                         lc_remember)
1232                         key_fini(ctx, key->lct_index);
1233                 cfs_spin_unlock(&lu_keys_guard);
1234                 ++key_set_version;
1235         }
1236 }
1237 EXPORT_SYMBOL(lu_context_key_quiesce);
1238
1239 void lu_context_key_revive(struct lu_context_key *key)
1240 {
1241         key->lct_tags &= ~LCT_QUIESCENT;
1242         ++key_set_version;
1243 }
1244 EXPORT_SYMBOL(lu_context_key_revive);
1245
1246 static void keys_fini(struct lu_context *ctx)
1247 {
1248         int i;
1249
1250         cfs_spin_lock(&lu_keys_guard);
1251         if (ctx->lc_value != NULL) {
1252                 for (i = 0; i < ARRAY_SIZE(lu_keys); ++i)
1253                         key_fini(ctx, i);
1254                 OBD_FREE(ctx->lc_value,
1255                          ARRAY_SIZE(lu_keys) * sizeof ctx->lc_value[0]);
1256                 ctx->lc_value = NULL;
1257         }
1258         cfs_spin_unlock(&lu_keys_guard);
1259 }
1260
1261 static int keys_fill(struct lu_context *ctx)
1262 {
1263         int i;
1264
1265         for (i = 0; i < ARRAY_SIZE(lu_keys); ++i) {
1266                 struct lu_context_key *key;
1267
1268                 key = lu_keys[i];
1269                 if (ctx->lc_value[i] == NULL && key != NULL &&
1270                     (key->lct_tags & ctx->lc_tags) &&
1271                     /*
1272                      * Don't create values for a LCT_QUIESCENT key, as this
1273                      * will pin module owning a key.
1274                      */
1275                     !(key->lct_tags & LCT_QUIESCENT)) {
1276                         void *value;
1277
1278                         LINVRNT(key->lct_init != NULL);
1279                         LINVRNT(key->lct_index == i);
1280
1281                         value = key->lct_init(ctx, key);
1282                         if (unlikely(IS_ERR(value)))
1283                                 return PTR_ERR(value);
1284
1285                         LASSERT(key->lct_owner != NULL);
1286                         if (!(ctx->lc_tags & LCT_NOREF))
1287                                 cfs_try_module_get(key->lct_owner);
1288                         lu_ref_add_atomic(&key->lct_reference, "ctx", ctx);
1289                         cfs_atomic_inc(&key->lct_used);
1290                         /*
1291                          * This is the only place in the code, where an
1292                          * element of ctx->lc_value[] array is set to non-NULL
1293                          * value.
1294                          */
1295                         ctx->lc_value[i] = value;
1296                         if (key->lct_exit != NULL)
1297                                 ctx->lc_tags |= LCT_HAS_EXIT;
1298                 }
1299                 ctx->lc_version = key_set_version;
1300         }
1301         return 0;
1302 }
1303
1304 static int keys_init(struct lu_context *ctx)
1305 {
1306         int result;
1307
1308         OBD_ALLOC(ctx->lc_value, ARRAY_SIZE(lu_keys) * sizeof ctx->lc_value[0]);
1309         if (likely(ctx->lc_value != NULL))
1310                 result = keys_fill(ctx);
1311         else
1312                 result = -ENOMEM;
1313
1314         if (result != 0)
1315                 keys_fini(ctx);
1316         return result;
1317 }
1318
1319 /**
1320  * Initialize context data-structure. Create values for all keys.
1321  */
1322 int lu_context_init(struct lu_context *ctx, __u32 tags)
1323 {
1324         memset(ctx, 0, sizeof *ctx);
1325         ctx->lc_state = LCS_INITIALIZED;
1326         ctx->lc_tags = tags;
1327         if (tags & LCT_REMEMBER) {
1328                 cfs_spin_lock(&lu_keys_guard);
1329                 cfs_list_add(&ctx->lc_remember, &lu_context_remembered);
1330                 cfs_spin_unlock(&lu_keys_guard);
1331         } else
1332                 CFS_INIT_LIST_HEAD(&ctx->lc_remember);
1333         return keys_init(ctx);
1334 }
1335 EXPORT_SYMBOL(lu_context_init);
1336
1337 /**
1338  * Finalize context data-structure. Destroy key values.
1339  */
1340 void lu_context_fini(struct lu_context *ctx)
1341 {
1342         LINVRNT(ctx->lc_state == LCS_INITIALIZED || ctx->lc_state == LCS_LEFT);
1343         ctx->lc_state = LCS_FINALIZED;
1344         keys_fini(ctx);
1345         cfs_spin_lock(&lu_keys_guard);
1346         cfs_list_del_init(&ctx->lc_remember);
1347         cfs_spin_unlock(&lu_keys_guard);
1348 }
1349 EXPORT_SYMBOL(lu_context_fini);
1350
1351 /**
1352  * Called before entering context.
1353  */
1354 void lu_context_enter(struct lu_context *ctx)
1355 {
1356         LINVRNT(ctx->lc_state == LCS_INITIALIZED || ctx->lc_state == LCS_LEFT);
1357         ctx->lc_state = LCS_ENTERED;
1358 }
1359 EXPORT_SYMBOL(lu_context_enter);
1360
1361 /**
1362  * Called after exiting from \a ctx
1363  */
1364 void lu_context_exit(struct lu_context *ctx)
1365 {
1366         int i;
1367
1368         LINVRNT(ctx->lc_state == LCS_ENTERED);
1369         ctx->lc_state = LCS_LEFT;
1370         if (ctx->lc_tags & LCT_HAS_EXIT && ctx->lc_value != NULL) {
1371                 for (i = 0; i < ARRAY_SIZE(lu_keys); ++i) {
1372                         if (ctx->lc_value[i] != NULL) {
1373                                 struct lu_context_key *key;
1374
1375                                 key = lu_keys[i];
1376                                 LASSERT(key != NULL);
1377                                 if (key->lct_exit != NULL)
1378                                         key->lct_exit(ctx,
1379                                                       key, ctx->lc_value[i]);
1380                         }
1381                 }
1382         }
1383 }
1384 EXPORT_SYMBOL(lu_context_exit);
1385
1386 /**
1387  * Allocate for context all missing keys that were registered after context
1388  * creation.
1389  */
1390 int lu_context_refill(struct lu_context *ctx)
1391 {
1392         LINVRNT(ctx->lc_value != NULL);
1393         return ctx->lc_version == key_set_version ? 0 : keys_fill(ctx);
1394 }
1395 EXPORT_SYMBOL(lu_context_refill);
1396
1397 int lu_env_init(struct lu_env *env, __u32 tags)
1398 {
1399         int result;
1400
1401         env->le_ses = NULL;
1402         result = lu_context_init(&env->le_ctx, tags);
1403         if (likely(result == 0))
1404                 lu_context_enter(&env->le_ctx);
1405         return result;
1406 }
1407 EXPORT_SYMBOL(lu_env_init);
1408
1409 void lu_env_fini(struct lu_env *env)
1410 {
1411         lu_context_exit(&env->le_ctx);
1412         lu_context_fini(&env->le_ctx);
1413         env->le_ses = NULL;
1414 }
1415 EXPORT_SYMBOL(lu_env_fini);
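/*
 * Usage sketch (illustrative; the tag is whatever the caller's thread type
 * requires): a short-lived environment around code that needs one.
 * lu_env_init() enters the context, lu_env_fini() exits and finalizes it.
 *
 *      struct lu_env env;
 *      int rc;
 *
 *      rc = lu_env_init(&env, LCT_DT_THREAD);
 *      if (rc != 0)
 *              return rc;
 *      // ... code requiring an initialized, entered context ...
 *      lu_env_fini(&env);
 */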
1416
1417 int lu_env_refill(struct lu_env *env)
1418 {
1419         int result;
1420
1421         result = lu_context_refill(&env->le_ctx);
1422         if (result == 0 && env->le_ses != NULL)
1423                 result = lu_context_refill(env->le_ses);
1424         return result;
1425 }
1426 EXPORT_SYMBOL(lu_env_refill);
1427
1428 static struct cfs_shrinker *lu_site_shrinker = NULL;
1429
1430 #ifdef __KERNEL__
1431 static int lu_cache_shrink(int nr, unsigned int gfp_mask)
1432 {
1433         struct lu_site *s;
1434         struct lu_site *tmp;
1435         int cached = 0;
1436         int remain = nr;
1437         CFS_LIST_HEAD(splice);
1438
1439         if (nr != 0) {
1440                 if (!(gfp_mask & __GFP_FS))
1441                         return -1;
1442                 CDEBUG(D_INODE, "Shrink %d objects\n", nr);
1443         }
1444
1445         cfs_down(&lu_sites_guard);
1446         cfs_list_for_each_entry_safe(s, tmp, &lu_sites, ls_linkage) {
1447                 if (nr != 0) {
1448                         remain = lu_site_purge(&lu_shrink_env, s, remain);
1449                         /*
1450                          * Move the just-shrunk site to the tail of the site
1451                          * list to ensure shrinking fairness.
1452                          */
1453                         cfs_list_move_tail(&s->ls_linkage, &splice);
1454                 }
1455                 cfs_read_lock(&s->ls_guard);
1456                 cached += s->ls_total - s->ls_busy;
1457                 cfs_read_unlock(&s->ls_guard);
1458                 if (nr && remain <= 0)
1459                         break;
1460         }
1461         cfs_list_splice(&splice, lu_sites.prev);
1462         cfs_up(&lu_sites_guard);
1463
1464         cached = (cached / 100) * sysctl_vfs_cache_pressure;
1465         if (nr == 0)
1466                 CDEBUG(D_INODE, "%d objects cached\n", cached);
1467         return cached;
1468 }
1469
1470 /*
1471  * Debugging stuff.
1472  */
1473
1474 /**
1475  * Environment to be used in debugger, contains all tags.
1476  */
1477 struct lu_env lu_debugging_env;
1478
1479 /**
1480  * Debugging printer function using printk().
1481  */
1482 int lu_printk_printer(const struct lu_env *env,
1483                       void *unused, const char *format, ...)
1484 {
1485         va_list args;
1486
1487         va_start(args, format);
1488         vprintk(format, args);
1489         va_end(args);
1490         return 0;
1491 }
1492
1493 void lu_debugging_setup(void)
1494 {
1495         lu_env_init(&lu_debugging_env, ~0);
1496 }
1497
1498 void lu_context_keys_dump(void)
1499 {
1500         int i;
1501
1502         for (i = 0; i < ARRAY_SIZE(lu_keys); ++i) {
1503                 struct lu_context_key *key;
1504
1505                 key = lu_keys[i];
1506                 if (key != NULL) {
1507                         CERROR("[%i]: %p %x (%p,%p,%p) %i %i \"%s\"@%p\n",
1508                                i, key, key->lct_tags,
1509                                key->lct_init, key->lct_fini, key->lct_exit,
1510                                key->lct_index, cfs_atomic_read(&key->lct_used),
1511                                key->lct_owner ? key->lct_owner->name : "",
1512                                key->lct_owner);
1513                         lu_ref_print(&key->lct_reference);
1514                 }
1515         }
1516 }
1517 EXPORT_SYMBOL(lu_context_keys_dump);
1518 #else  /* !__KERNEL__ */
1519 static int lu_cache_shrink(int nr, unsigned int gfp_mask)
1520 {
1521         return 0;
1522 }
1523 #endif /* __KERNEL__ */
1524
1525 int  cl_global_init(void);
1526 void cl_global_fini(void);
1527 int  lu_ref_global_init(void);
1528 void lu_ref_global_fini(void);
1529
1530 int dt_global_init(void);
1531 void dt_global_fini(void);
1532
1533 int llo_global_init(void);
1534 void llo_global_fini(void);
1535
1536 /**
1537  * Initialization of global lu_* data.
1538  */
1539 int lu_global_init(void)
1540 {
1541         int result;
1542
1543         CDEBUG(D_CONSOLE, "Lustre LU module (%p).\n", &lu_keys);
1544
1545         result = lu_ref_global_init();
1546         if (result != 0)
1547                 return result;
1548
1549         LU_CONTEXT_KEY_INIT(&lu_global_key);
1550         result = lu_context_key_register(&lu_global_key);
1551         if (result != 0)
1552                 return result;
1553         /*
1554          * At this level, we don't know what tags are needed, so allocate them
1555          * conservatively. This should not be too bad, because this
1556          * environment is global.
1557          */
1558         cfs_down(&lu_sites_guard);
1559         result = lu_env_init(&lu_shrink_env, LCT_SHRINKER);
1560         cfs_up(&lu_sites_guard);
1561         if (result != 0)
1562                 return result;
1563
1564         /*
1565          * Seeks estimation: 3 seeks to read a record from the OI, one to read
1566          * the inode, one for the EA. Unfortunately, setting such a high value
1567          * results in the lu_object/inode cache consuming all the memory.
1568          */
1569         lu_site_shrinker = cfs_set_shrinker(CFS_DEFAULT_SEEKS, lu_cache_shrink);
1570         if (lu_site_shrinker == NULL)
1571                 return -ENOMEM;
1572
1573         result = lu_time_global_init();
1574         if (result)
1575                 GOTO(out, result);
1576
1577 #ifdef __KERNEL__
1578         result = dt_global_init();
1579         if (result)
1580                 GOTO(out, result);
1581
1582         result = llo_global_init();
1583         if (result)
1584                 GOTO(out, result);
1585 #endif
1586         result = cl_global_init();
1587 out:
1588
1589         return result;
1590 }
1591
1592 /**
1593  * Dual to lu_global_init().
1594  */
1595 void lu_global_fini(void)
1596 {
1597         cl_global_fini();
1598 #ifdef __KERNEL__
1599         llo_global_fini();
1600         dt_global_fini();
1601 #endif
1602         lu_time_global_fini();
1603         if (lu_site_shrinker != NULL) {
1604                 cfs_remove_shrinker(lu_site_shrinker);
1605                 lu_site_shrinker = NULL;
1606         }
1607
1608         lu_context_key_degister(&lu_global_key);
1609
1610         /*
1611          * Tear shrinker environment down _after_ de-registering
1612          * lu_global_key, because the latter has a value in the former.
1613          */
1614         cfs_down(&lu_sites_guard);
1615         lu_env_fini(&lu_shrink_env);
1616         cfs_up(&lu_sites_guard);
1617
1618         lu_ref_global_fini();
1619 }
1620
1621 struct lu_buf LU_BUF_NULL = {
1622         .lb_buf = NULL,
1623         .lb_len = 0
1624 };
1625 EXPORT_SYMBOL(LU_BUF_NULL);
1626
1627 /**
1628  * Output site statistical counters into a buffer. Suitable for
1629  * lprocfs_rd_*()-style functions.
1630  */
1631 int lu_site_stats_print(const struct lu_site *s, char *page, int count)
1632 {
1633         int i;
1634         int populated;
1635
1636         /*
1637          * How many hash buckets are non-empty? Don't bother with locks: it's
1638          * an estimation anyway.
1639          */
1640         for (i = 0, populated = 0; i < s->ls_hash_size; i++)
1641                 populated += !cfs_hlist_empty(&s->ls_hash[i]);
1642
1643         return snprintf(page, count, "%d %d %d/%d %d %d %d %d %d %d %d\n",
1644                         s->ls_total,
1645                         s->ls_busy,
1646                         populated,
1647                         s->ls_hash_size,
1648                         s->ls_stats.s_created,
1649                         s->ls_stats.s_cache_hit,
1650                         s->ls_stats.s_cache_miss,
1651                         s->ls_stats.s_cache_check,
1652                         s->ls_stats.s_cache_race,
1653                         s->ls_stats.s_cache_death_race,
1654                         s->ls_stats.s_lru_purged);
1655 }
1656 EXPORT_SYMBOL(lu_site_stats_print);
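/*
 * The columns emitted above are, in order: ls_total, ls_busy,
 * populated/ls_hash_size, s_created, s_cache_hit, s_cache_miss,
 * s_cache_check, s_cache_race, s_cache_death_race and s_lru_purged.
 */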
1657
1658 const char *lu_time_names[LU_TIME_NR] = {
1659         [LU_TIME_FIND_LOOKUP] = "find_lookup",
1660         [LU_TIME_FIND_ALLOC]  = "find_alloc",
1661         [LU_TIME_FIND_INSERT] = "find_insert"
1662 };
1663 EXPORT_SYMBOL(lu_time_names);
1664
1665 /**
1666  * Helper function to initialize a number of kmem slab caches at once.
1667  */
1668 int lu_kmem_init(struct lu_kmem_descr *caches)
1669 {
1670         int result;
1671
1672         for (result = 0; caches->ckd_cache != NULL; ++caches) {
1673                 *caches->ckd_cache = cfs_mem_cache_create(caches->ckd_name,
1674                                                           caches->ckd_size,
1675                                                           0, 0);
1676                 if (*caches->ckd_cache == NULL) {
1677                         result = -ENOMEM;
1678                         break;
1679                 }
1680         }
1681         return result;
1682 }
1683 EXPORT_SYMBOL(lu_kmem_init);
1684
1685 /**
1686  * Helper function to finalize a number of kmem slab caches at once. Dual to
1687  * lu_kmem_init().
1688  */
1689 void lu_kmem_fini(struct lu_kmem_descr *caches)
1690 {
1691         int rc;
1692
1693         for (; caches->ckd_cache != NULL; ++caches) {
1694                 if (*caches->ckd_cache != NULL) {
1695                         rc = cfs_mem_cache_destroy(*caches->ckd_cache);
1696                         LASSERTF(rc == 0, "couldn't destroy %s slab\n",
1697                                  caches->ckd_name);
1698                         *caches->ckd_cache = NULL;
1699                 }
1700         }
1701 }
1702 EXPORT_SYMBOL(lu_kmem_fini);
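/*
 * Usage sketch (hypothetical "foo" module): caches are described in an
 * array terminated by an entry with a NULL ckd_cache, created together
 * with lu_kmem_init() and released together with lu_kmem_fini().
 *
 *      static struct lu_kmem_descr foo_caches[] = {
 *              {
 *                      .ckd_cache = &foo_object_kmem,
 *                      .ckd_name  = "foo_object_kmem",
 *                      .ckd_size  = sizeof(struct foo_object)
 *              },
 *              {
 *                      .ckd_cache = NULL
 *              }
 *      };
 *
 *      rc = lu_kmem_init(foo_caches);
 *      ...
 *      lu_kmem_fini(foo_caches);
 */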