/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lustre/obdclass/lu_object.c
 *
 * Lustre Object.
 * These are the only exported functions; they provide the generic
 * infrastructure for managing object devices.
 *
 *   Author: Nikita Danilov <nikita.danilov@sun.com>
 */

#define DEBUG_SUBSYSTEM S_CLASS
#ifndef EXPORT_SYMTAB
# define EXPORT_SYMTAB
#endif

#include <libcfs/libcfs.h>

#ifdef __KERNEL__
# include <linux/module.h>
#endif

/* hash_long() */
#include <libcfs/libcfs_hash.h>
#include <obd_class.h>
#include <obd_support.h>
#include <lustre_disk.h>
#include <lustre_fid.h>
#include <lu_object.h>
#include <libcfs/list.h>
/* lu_time_global_{init,fini}() */
#include <lu_time.h>

static void lu_object_free(const struct lu_env *env, struct lu_object *o);

/**
 * Decrease the reference counter on an object. If this is the last
 * reference, return the object to the cache, unless lu_object_is_dying(o)
 * holds; in that case, free the object immediately.
 */
void lu_object_put(const struct lu_env *env, struct lu_object *o)
{
        struct lu_site_bkt_data *bkt;
        struct lu_object_header *top;
        struct lu_site          *site;
        struct lu_object        *orig;
        cfs_hash_bd_t            bd;

        top  = o->lo_header;
        site = o->lo_dev->ld_site;
        orig = o;

        cfs_hash_bd_get(site->ls_obj_hash, &top->loh_fid, &bd);
        bkt = cfs_hash_bd_extra_get(site->ls_obj_hash, &bd);

        if (!cfs_hash_bd_dec_and_lock(site->ls_obj_hash, &bd, &top->loh_ref)) {
                if (lu_object_is_dying(top)) {

                        /*
                         * somebody may be waiting for this, currently only
                         * used for cl_object, see cl_object_put_last().
                         */
                        cfs_waitq_broadcast(&bkt->lsb_marche_funebre);
                }
                return;
        }

        LASSERT(bkt->lsb_busy > 0);
        bkt->lsb_busy--;
        /*
         * When last reference is released, iterate over object
         * layers, and notify them that object is no longer busy.
         */
        cfs_list_for_each_entry_reverse(o, &top->loh_layers, lo_linkage) {
                if (o->lo_ops->loo_object_release != NULL)
                        o->lo_ops->loo_object_release(env, o);
        }

        if (!lu_object_is_dying(top)) {
                cfs_hash_bd_unlock(site->ls_obj_hash, &bd, 1);
                return;
        }

        /*
         * If the object is dying (will not be cached), remove it
         * from the hash table and the LRU.
         *
         * This is done with the hash table and LRU lists locked. As the only
         * way to acquire the first reference to a previously unreferenced
         * object is through hash-table lookup (lu_object_find()) or LRU
         * scanning (lu_site_purge()), both of which are done under the
         * hash-table and LRU locks, no race with a concurrent lookup is
         * possible and we can safely destroy the object below.
         */
        cfs_hash_bd_del_locked(site->ls_obj_hash, &bd, &top->loh_hash);
        cfs_list_del_init(&top->loh_lru);
        cfs_hash_bd_unlock(site->ls_obj_hash, &bd, 1);
        /*
         * The object was removed from the hash table and LRU above, so it
         * can now be freed.
         */
        lu_object_free(env, orig);
}
EXPORT_SYMBOL(lu_object_put);
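
/*
 * Usage sketch (illustrative, not part of the original file): the usual
 * pairing of lu_object_find() and lu_object_put(); "env", "dev" and "fid"
 * are assumed to be supplied by the caller:
 *
 *      struct lu_object *o;
 *
 *      o = lu_object_find(env, dev, fid, NULL);
 *      if (IS_ERR(o))
 *              return PTR_ERR(o);
 *      ... use the object; the reference keeps it pinned ...
 *      lu_object_put(env, o);
 *
 * lu_object_put() either returns the object to the cache or, when
 * lu_object_is_dying() holds, frees it immediately, as implemented above.
 */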

/**
 * Allocate new object.
 *
 * This follows object creation protocol, described in the comment within
 * struct lu_device_operations definition.
 */
static struct lu_object *lu_object_alloc(const struct lu_env *env,
                                         struct lu_device *dev,
                                         const struct lu_fid *f,
                                         const struct lu_object_conf *conf)
{
        struct lu_object *scan;
        struct lu_object *top;
        cfs_list_t *layers;
        int clean;
        int result;
        ENTRY;

        /*
         * Create top-level object slice. This will also create
         * lu_object_header.
         */
        top = dev->ld_ops->ldo_object_alloc(env, NULL, dev);
        if (top == NULL)
                RETURN(ERR_PTR(-ENOMEM));
        /*
         * This is the only place where object fid is assigned. It's constant
         * after this point.
         */
        LASSERT(fid_is_igif(f) || fid_ver(f) == 0);
        top->lo_header->loh_fid = *f;
        layers = &top->lo_header->loh_layers;
        do {
                /*
                 * Call ->loo_object_init() repeatedly, until no more new
                 * object slices are created.
                 */
                clean = 1;
                cfs_list_for_each_entry(scan, layers, lo_linkage) {
                        if (scan->lo_flags & LU_OBJECT_ALLOCATED)
                                continue;
                        clean = 0;
                        scan->lo_header = top->lo_header;
                        result = scan->lo_ops->loo_object_init(env, scan, conf);
                        if (result != 0) {
                                lu_object_free(env, top);
                                RETURN(ERR_PTR(result));
                        }
                        scan->lo_flags |= LU_OBJECT_ALLOCATED;
                }
        } while (!clean);

        cfs_list_for_each_entry_reverse(scan, layers, lo_linkage) {
                if (scan->lo_ops->loo_object_start != NULL) {
                        result = scan->lo_ops->loo_object_start(env, scan);
                        if (result != 0) {
                                lu_object_free(env, top);
                                RETURN(ERR_PTR(result));
                        }
                }
        }

        lprocfs_counter_incr(dev->ld_site->ls_stats, LU_SS_CREATED);
        RETURN(top);
}

/**
 * Free an object.
 */
static void lu_object_free(const struct lu_env *env, struct lu_object *o)
{
        struct lu_site_bkt_data *bkt;
        struct lu_site          *site;
        struct lu_object        *scan;
        cfs_list_t              *layers;
        cfs_list_t               splice;

        site   = o->lo_dev->ld_site;
        layers = &o->lo_header->loh_layers;
        bkt    = lu_site_bkt_from_fid(site, &o->lo_header->loh_fid);
        /*
         * First call ->loo_object_delete() method to release all resources.
         */
        cfs_list_for_each_entry_reverse(scan, layers, lo_linkage) {
                if (scan->lo_ops->loo_object_delete != NULL)
                        scan->lo_ops->loo_object_delete(env, scan);
        }

        /*
         * Then, splice object layers into stand-alone list, and call
         * ->loo_object_free() on all layers to free memory. Splice is
         * necessary, because lu_object_header is freed together with the
         * top-level slice.
         */
        CFS_INIT_LIST_HEAD(&splice);
        cfs_list_splice_init(layers, &splice);
        while (!cfs_list_empty(&splice)) {
                /*
                 * Free layers in bottom-to-top order, so that object header
                 * lives as long as possible and ->loo_object_free() methods
                 * can look at its contents.
                 */
                o = container_of0(splice.prev, struct lu_object, lo_linkage);
                cfs_list_del_init(&o->lo_linkage);
                LASSERT(o->lo_ops->loo_object_free != NULL);
                o->lo_ops->loo_object_free(env, o);
        }

        if (cfs_waitq_active(&bkt->lsb_marche_funebre))
                cfs_waitq_broadcast(&bkt->lsb_marche_funebre);
}

/**
 * Free \a nr objects from the cold end of the site LRU list.
 */
int lu_site_purge(const struct lu_env *env, struct lu_site *s, int nr)
{
        struct lu_object_header *h;
        struct lu_object_header *temp;
        struct lu_site_bkt_data *bkt;
        cfs_hash_bd_t            bd;
        cfs_hash_bd_t            bd2;
        cfs_list_t               dispose;
        int                      did_sth;
        int                      start;
        int                      count;
        int                      bnr;
        int                      i;

        CFS_INIT_LIST_HEAD(&dispose);
        /*
         * Under LRU list lock, scan LRU list and move unreferenced objects to
         * the dispose list, removing them from LRU and hash table.
         */
        start = s->ls_purge_start;
        bnr = (nr == ~0) ? -1 : nr / CFS_HASH_NBKT(s->ls_obj_hash) + 1;
 again:
        did_sth = 0;
        cfs_hash_for_each_bucket(s->ls_obj_hash, &bd, i) {
                if (i < start)
                        continue;
                count = bnr;
                cfs_hash_bd_lock(s->ls_obj_hash, &bd, 1);
                bkt = cfs_hash_bd_extra_get(s->ls_obj_hash, &bd);

                cfs_list_for_each_entry_safe(h, temp, &bkt->lsb_lru, loh_lru) {
                        /*
                         * Objects are sorted in LRU order, and "busy"
                         * objects (ones with h->loh_ref > 0) naturally tend
                         * to live near the hot end that we scan last.
                         * Unfortunately, sites usually have a small number
                         * (fewer than ten) of busy yet rarely accessed
                         * objects (some global objects, accessed directly
                         * through pointers, bypassing the hash table).
                         * Currently the algorithm scans them over and over
                         * again. Probably we should move busy objects out of
                         * the LRU, or we can live with that.
                         */
                        if (cfs_atomic_read(&h->loh_ref) > 0)
                                continue;

                        cfs_hash_bd_get(s->ls_obj_hash, &h->loh_fid, &bd2);
                        LASSERT(bd.bd_bucket == bd2.bd_bucket);

                        cfs_hash_bd_del_locked(s->ls_obj_hash,
                                               &bd2, &h->loh_hash);
                        cfs_list_move(&h->loh_lru, &dispose);
                        if (did_sth == 0)
                                did_sth = 1;

                        if (nr != ~0 && --nr == 0)
                                break;

                        if (count > 0 && --count == 0)
                                break;

                }
                cfs_hash_bd_unlock(s->ls_obj_hash, &bd, 1);
                cfs_cond_resched();
                /*
                 * Free everything on the dispose list. This is safe against
                 * races due to the reasons described in lu_object_put().
                 */
                while (!cfs_list_empty(&dispose)) {
                        h = container_of0(dispose.next,
                                          struct lu_object_header, loh_lru);
                        cfs_list_del_init(&h->loh_lru);
                        lu_object_free(env, lu_object_top(h));
                        lprocfs_counter_incr(s->ls_stats, LU_SS_LRU_PURGED);
                }

                if (nr == 0)
                        break;
        }

        if (nr != 0 && did_sth && start != 0) {
                start = 0; /* restart from the first bucket */
                goto again;
        }
        /* race on s->ls_purge_start, but nobody cares */
        s->ls_purge_start = i % CFS_HASH_NBKT(s->ls_obj_hash);

        return nr;
}
EXPORT_SYMBOL(lu_site_purge);
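
/*
 * Usage sketch (illustrative): lu_site_purge() is used both to drain a
 * site completely, as lu_stack_fini() below does with nr == ~0, and to
 * free a bounded number of objects from a shrinker, which can use the
 * returned remainder to account for objects it failed to free:
 *
 *      lu_site_purge(env, site, ~0);            drain everything
 *      remain = lu_site_purge(env, site, nr);   free at most nr objects
 */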

/*
 * Object printing.
 *
 * The code below has to jump through certain hoops to output an object
 * description into a libcfs_debug_msg-based log. The problem is that
 * lu_object_print() composes the object description from strings that are
 * parts of _lines_ of output (i.e., strings that are not terminated by a
 * newline). This doesn't fit very well into the libcfs_debug_msg()
 * interface, which assumes that each message supplied to it is a
 * self-contained output line.
 *
 * To work around this, strings are collected in a temporary buffer
 * (implemented as a value of the lu_cdebug_key key), until a terminating
 * newline character is detected.
 */

enum {
        /**
         * Maximal line size.
         *
         * XXX overflow is not handled correctly.
         */
        LU_CDEBUG_LINE = 256
};

struct lu_cdebug_data {
        /**
         * Temporary buffer.
         */
        char lck_area[LU_CDEBUG_LINE];
};

/* context key constructor/destructor: lu_global_key_init, lu_global_key_fini */
LU_KEY_INIT_FINI(lu_global, struct lu_cdebug_data);

/**
 * Key, holding temporary buffer. This key is registered very early by
 * lu_global_init().
 */
struct lu_context_key lu_global_key = {
        .lct_tags = LCT_MD_THREAD|LCT_DT_THREAD|LCT_CL_THREAD,
        .lct_init = lu_global_key_init,
        .lct_fini = lu_global_key_fini
};

/**
 * Printer function emitting messages through libcfs_debug_msg().
 */
int lu_cdebug_printer(const struct lu_env *env,
                      void *cookie, const char *format, ...)
{
        struct lu_cdebug_print_info *info = cookie;
        struct lu_cdebug_data       *key;
        int used;
        int complete;
        va_list args;

        va_start(args, format);

        key = lu_context_key_get(&env->le_ctx, &lu_global_key);
        LASSERT(key != NULL);

        used = strlen(key->lck_area);
        complete = format[strlen(format) - 1] == '\n';
        /*
         * Append new chunk to the buffer.
         */
        vsnprintf(key->lck_area + used,
                  ARRAY_SIZE(key->lck_area) - used, format, args);
        if (complete) {
                if (cfs_cdebug_show(info->lpi_mask, info->lpi_subsys))
                        libcfs_debug_msg(NULL, info->lpi_subsys, info->lpi_mask,
                                         (char *)info->lpi_file, info->lpi_fn,
                                         info->lpi_line, "%s", key->lck_area);
                key->lck_area[0] = 0;
        }
        va_end(args);
        return 0;
}
EXPORT_SYMBOL(lu_cdebug_printer);
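
/*
 * Usage sketch (illustrative): dumping every object of a site into the
 * debug log, mirroring the use in lu_stack_fini() below. The print-info
 * cookie supplies the debug mask and file/line information consumed by
 * libcfs_debug_msg():
 *
 *      static DECLARE_LU_CDEBUG_PRINT_INFO(cookie, D_ERROR);
 *
 *      lu_site_print(env, site, &cookie, lu_cdebug_printer);
 */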

/**
 * Print object header.
 */
void lu_object_header_print(const struct lu_env *env, void *cookie,
                            lu_printer_t printer,
                            const struct lu_object_header *hdr)
{
        (*printer)(env, cookie, "header@%p[%#lx, %d, "DFID"%s%s%s]",
                   hdr, hdr->loh_flags, cfs_atomic_read(&hdr->loh_ref),
                   PFID(&hdr->loh_fid),
                   cfs_hlist_unhashed(&hdr->loh_hash) ? "" : " hash",
                   cfs_list_empty((cfs_list_t *)&hdr->loh_lru) ?
                   "" : " lru",
                   hdr->loh_attr & LOHA_EXISTS ? " exist":"");
}
EXPORT_SYMBOL(lu_object_header_print);

/**
 * Print human readable representation of the \a o to the \a printer.
 */
void lu_object_print(const struct lu_env *env, void *cookie,
                     lu_printer_t printer, const struct lu_object *o)
{
        static const char ruler[] = "........................................";
        struct lu_object_header *top;
        int depth;

        top = o->lo_header;
        lu_object_header_print(env, cookie, printer, top);
        (*printer)(env, cookie, "{ \n");
        cfs_list_for_each_entry(o, &top->loh_layers, lo_linkage) {
                depth = o->lo_depth + 4;

                /*
                 * print `.' \a depth times followed by type name and address
                 */
                (*printer)(env, cookie, "%*.*s%s@%p", depth, depth, ruler,
                           o->lo_dev->ld_type->ldt_name, o);
                if (o->lo_ops->loo_object_print != NULL)
                        o->lo_ops->loo_object_print(env, cookie, printer, o);
                (*printer)(env, cookie, "\n");
        }
        (*printer)(env, cookie, "} header@%p\n", top);
}
EXPORT_SYMBOL(lu_object_print);

/**
 * Check object consistency.
 */
int lu_object_invariant(const struct lu_object *o)
{
        struct lu_object_header *top;

        top = o->lo_header;
        cfs_list_for_each_entry(o, &top->loh_layers, lo_linkage) {
                if (o->lo_ops->loo_object_invariant != NULL &&
                    !o->lo_ops->loo_object_invariant(o))
                        return 0;
        }
        return 1;
}
EXPORT_SYMBOL(lu_object_invariant);

static struct lu_object *htable_lookup(struct lu_site *s,
                                       cfs_hash_bd_t *bd,
                                       const struct lu_fid *f,
                                       cfs_waitlink_t *waiter,
                                       __u64 *version)
{
        struct lu_site_bkt_data *bkt;
        struct lu_object_header *h;
        cfs_hlist_node_t        *hnode;
        __u64  ver = cfs_hash_bd_version_get(bd);

        if (*version == ver)
                return NULL;

        *version = ver;
        bkt = cfs_hash_bd_extra_get(s->ls_obj_hash, bd);
        /* cfs_hash_bd_lookup_intent is a somewhat "internal" function
         * of cfs_hash, but we don't want a refcount on the object right now */
        hnode = cfs_hash_bd_lookup_locked(s->ls_obj_hash, bd, (void *)f);
        if (hnode == NULL) {
                lprocfs_counter_incr(s->ls_stats, LU_SS_CACHE_MISS);
                return NULL;
        }

        h = container_of0(hnode, struct lu_object_header, loh_hash);
        if (likely(!lu_object_is_dying(h))) {
                lprocfs_counter_incr(s->ls_stats, LU_SS_CACHE_HIT);
                return lu_object_top(h);
        }
        /*
         * Lookup found an object being destroyed; this object cannot be
         * returned (to assure that references to dying objects are eventually
         * drained), and moreover, lookup has to wait until the object is
         * freed.
         */
        cfs_atomic_dec(&h->loh_ref);

        cfs_waitlink_init(waiter);
        cfs_waitq_add(&bkt->lsb_marche_funebre, waiter);
        cfs_set_current_state(CFS_TASK_UNINT);
        lprocfs_counter_incr(s->ls_stats, LU_SS_CACHE_DEATH_RACE);
        return ERR_PTR(-EAGAIN);
}

/**
 * Search cache for an object with the fid \a f. If such object is found,
 * return it. Otherwise, create new object, insert it into cache and return
 * it. In any case, additional reference is acquired on the returned object.
 */
struct lu_object *lu_object_find(const struct lu_env *env,
                                 struct lu_device *dev, const struct lu_fid *f,
                                 const struct lu_object_conf *conf)
{
        return lu_object_find_at(env, dev->ld_site->ls_top_dev, f, conf);
}
EXPORT_SYMBOL(lu_object_find);

/**
 * Core logic of lu_object_find*() functions.
 */
static struct lu_object *lu_object_find_try(const struct lu_env *env,
                                            struct lu_device *dev,
                                            const struct lu_fid *f,
                                            const struct lu_object_conf *conf,
                                            cfs_waitlink_t *waiter)
{
        struct lu_object      *o;
        struct lu_object      *shadow;
        struct lu_site        *s;
        cfs_hash_t            *hs;
        cfs_hash_bd_t          bd;
        __u64                  version = 0;

        /*
         * This uses standard index maintenance protocol:
         *
         *     - search index under lock, and return object if found;
         *     - otherwise, unlock index, allocate new object;
         *     - lock index and search again;
         *     - if nothing is found (usual case), insert newly created
         *       object into index;
         *     - otherwise (race: other thread inserted object), free
         *       object just allocated.
         *     - unlock index;
         *     - return object.
         *
         * If dying object is found during index search, add @waiter to the
         * site wait-queue and return ERR_PTR(-EAGAIN).
         */
        s  = dev->ld_site;
        hs = s->ls_obj_hash;
        cfs_hash_bd_get_and_lock(hs, (void *)f, &bd, 1);
        o = htable_lookup(s, &bd, f, waiter, &version);
        cfs_hash_bd_unlock(hs, &bd, 1);
        if (o != NULL)
                return o;

        /*
         * Allocate new object. This may result in rather complicated
         * operations, including fld queries, inode loading, etc.
         */
        o = lu_object_alloc(env, dev, f, conf);
        if (unlikely(IS_ERR(o)))
                return o;

        LASSERT(lu_fid_eq(lu_object_fid(o), f));

        cfs_hash_bd_lock(hs, &bd, 1);

        shadow = htable_lookup(s, &bd, f, waiter, &version);
        if (likely(shadow == NULL)) {
                struct lu_site_bkt_data *bkt;

                bkt = cfs_hash_bd_extra_get(hs, &bd);
                cfs_hash_bd_add_locked(hs, &bd, &o->lo_header->loh_hash);
                cfs_list_add_tail(&o->lo_header->loh_lru, &bkt->lsb_lru);
                bkt->lsb_busy++;
                cfs_hash_bd_unlock(hs, &bd, 1);
                return o;
        }

        lprocfs_counter_incr(s->ls_stats, LU_SS_CACHE_RACE);
        cfs_hash_bd_unlock(hs, &bd, 1);
        lu_object_free(env, o);
        return shadow;
}

/**
 * Much like lu_object_find(), but top level device of object is specifically
 * \a dev rather than top level device of the site. This interface allows
 * objects of different "stacking" to be created within the same site.
 */
struct lu_object *lu_object_find_at(const struct lu_env *env,
                                    struct lu_device *dev,
                                    const struct lu_fid *f,
                                    const struct lu_object_conf *conf)
{
        struct lu_site_bkt_data *bkt;
        struct lu_object        *obj;
        cfs_waitlink_t           wait;

        while (1) {
                obj = lu_object_find_try(env, dev, f, conf, &wait);
                if (obj != ERR_PTR(-EAGAIN))
                        return obj;
                /*
                 * lu_object_find_try() already added waiter into the
                 * wait queue.
                 */
                cfs_waitq_wait(&wait, CFS_TASK_UNINT);
                bkt = lu_site_bkt_from_fid(dev->ld_site, (void *)f);
                cfs_waitq_del(&bkt->lsb_marche_funebre, &wait);
        }
}
EXPORT_SYMBOL(lu_object_find_at);

/**
 * Find object with given fid, and return its slice belonging to given device.
 */
struct lu_object *lu_object_find_slice(const struct lu_env *env,
                                       struct lu_device *dev,
                                       const struct lu_fid *f,
                                       const struct lu_object_conf *conf)
{
        struct lu_object *top;
        struct lu_object *obj;

        top = lu_object_find(env, dev, f, conf);
        if (!IS_ERR(top)) {
                obj = lu_object_locate(top->lo_header, dev->ld_type);
                if (obj == NULL)
                        lu_object_put(env, top);
        } else
                obj = top;
        return obj;
}
EXPORT_SYMBOL(lu_object_find_slice);

/**
 * Global list of all device types.
 */
static CFS_LIST_HEAD(lu_device_types);

int lu_device_type_init(struct lu_device_type *ldt)
{
        int result;

        CFS_INIT_LIST_HEAD(&ldt->ldt_linkage);
        result = ldt->ldt_ops->ldto_init(ldt);
        if (result == 0)
                cfs_list_add(&ldt->ldt_linkage, &lu_device_types);
        return result;
}
EXPORT_SYMBOL(lu_device_type_init);

void lu_device_type_fini(struct lu_device_type *ldt)
{
        cfs_list_del_init(&ldt->ldt_linkage);
        ldt->ldt_ops->ldto_fini(ldt);
}
EXPORT_SYMBOL(lu_device_type_fini);

void lu_types_stop(void)
{
        struct lu_device_type *ldt;

        cfs_list_for_each_entry(ldt, &lu_device_types, ldt_linkage) {
                if (ldt->ldt_device_nr == 0)
                        ldt->ldt_ops->ldto_stop(ldt);
        }
}
EXPORT_SYMBOL(lu_types_stop);

/**
 * Global list of all sites on this node
 */
static CFS_LIST_HEAD(lu_sites);
static CFS_DECLARE_MUTEX(lu_sites_guard);

/**
 * Global environment used by site shrinker.
 */
static struct lu_env lu_shrink_env;

struct lu_site_print_arg {
        struct lu_env   *lsp_env;
        void            *lsp_cookie;
        lu_printer_t     lsp_printer;
};

static int
lu_site_obj_print(cfs_hash_t *hs, cfs_hash_bd_t *bd,
                  cfs_hlist_node_t *hnode, void *data)
{
        struct lu_site_print_arg *arg = (struct lu_site_print_arg *)data;
        struct lu_object_header  *h;

        h = cfs_hlist_entry(hnode, struct lu_object_header, loh_hash);
        if (!cfs_list_empty(&h->loh_layers)) {
                const struct lu_object *o;

                o = lu_object_top(h);
                lu_object_print(arg->lsp_env, arg->lsp_cookie,
                                arg->lsp_printer, o);
        } else {
                lu_object_header_print(arg->lsp_env, arg->lsp_cookie,
                                       arg->lsp_printer, h);
        }
        return 0;
}

/**
 * Print all objects in \a s.
 */
void lu_site_print(const struct lu_env *env, struct lu_site *s, void *cookie,
                   lu_printer_t printer)
{
        struct lu_site_print_arg arg = {
                .lsp_env     = (struct lu_env *)env,
                .lsp_cookie  = cookie,
                .lsp_printer = printer,
        };

        cfs_hash_for_each(s->ls_obj_hash, lu_site_obj_print, &arg);
}
EXPORT_SYMBOL(lu_site_print);

enum {
        LU_CACHE_PERCENT   = 20,
};

/**
 * Return desired hash table order.
 */
static int lu_htable_order(void)
{
        unsigned long cache_size;
        int bits;

        /*
         * Calculate hash table size, assuming that we want reasonable
         * performance when 20% of total memory is occupied by cache of
         * lu_objects.
         *
         * Size of lu_object is (arbitrarily) taken as 1K (together with
         * inode).
         */
        cache_size = cfs_num_physpages;

#if BITS_PER_LONG == 32
        /* limit hashtable size for lowmem systems to low RAM */
        if (cache_size > 1 << (30 - CFS_PAGE_SHIFT))
                cache_size = (1 << (30 - CFS_PAGE_SHIFT)) * 3 / 4;
#endif

        cache_size = cache_size / 100 * LU_CACHE_PERCENT *
                (CFS_PAGE_SIZE / 1024);

        for (bits = 1; (1 << bits) < cache_size; ++bits) {
                ;
        }
        return bits;
}
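
/*
 * Worked example (illustrative) of the sizing above: on a 64-bit node
 * with 4 GiB of RAM and 4 KiB pages (CFS_PAGE_SHIFT == 12):
 *
 *      cfs_num_physpages = 2^20
 *      cache_size = 2^20 / 100 * 20 * (4096 / 1024) = 838800
 *      bits = 20, since 2^19 < 838800 <= 2^20
 *
 * lu_site_init() below clamps the result to the range
 * [LU_SITE_BITS_MIN, LU_SITE_BITS_MAX].
 */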

static unsigned lu_obj_hop_hash(cfs_hash_t *hs, void *key, unsigned mask)
{
        struct lu_fid  *fid = (struct lu_fid *)key;
        unsigned        hash;

        hash = (fid_seq(fid) + fid_oid(fid)) & (CFS_HASH_NBKT(hs) - 1);
        hash += fid_hash(fid, hs->hs_bkt_bits) << hs->hs_bkt_bits;
        return hash & mask;
}

static void *lu_obj_hop_object(cfs_hlist_node_t *hnode)
{
        return cfs_hlist_entry(hnode, struct lu_object_header, loh_hash);
}

static void *lu_obj_hop_key(cfs_hlist_node_t *hnode)
{
        struct lu_object_header *h;

        h = cfs_hlist_entry(hnode, struct lu_object_header, loh_hash);
        return &h->loh_fid;
}

static int lu_obj_hop_keycmp(void *key, cfs_hlist_node_t *hnode)
{
        struct lu_object_header *h;

        h = cfs_hlist_entry(hnode, struct lu_object_header, loh_hash);
        return lu_fid_eq(&h->loh_fid, (struct lu_fid *)key);
}

static void lu_obj_hop_get(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
{
        struct lu_object_header *h;

        h = cfs_hlist_entry(hnode, struct lu_object_header, loh_hash);
        if (cfs_atomic_add_return(1, &h->loh_ref) == 1) {
                struct lu_site_bkt_data *bkt;
                cfs_hash_bd_t            bd;

                cfs_hash_bd_get(hs, &h->loh_fid, &bd);
                bkt = cfs_hash_bd_extra_get(hs, &bd);
                bkt->lsb_busy++;
        }
}

static void lu_obj_hop_put_locked(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
{
        LBUG(); /* we should never call this */
}

cfs_hash_ops_t lu_site_hash_ops = {
        .hs_hash        = lu_obj_hop_hash,
        .hs_key         = lu_obj_hop_key,
        .hs_keycmp      = lu_obj_hop_keycmp,
        .hs_object      = lu_obj_hop_object,
        .hs_get         = lu_obj_hop_get,
        .hs_put_locked  = lu_obj_hop_put_locked,
};

/**
 * Initialize site \a s, with \a top as the top-level device.
 */
#define LU_SITE_BITS_MIN    12
#define LU_SITE_BITS_MAX    23
/**
 * 128 buckets in total; we don't want too many buckets because they:
 * - consume too much memory
 * - lead to unbalanced LRU lists
 */
#define LU_SITE_BKT_BITS    7

int lu_site_init(struct lu_site *s, struct lu_device *top)
{
        struct lu_site_bkt_data *bkt;
        cfs_hash_bd_t bd;
        int bits;
        int i;
        ENTRY;

        memset(s, 0, sizeof *s);
        bits = lu_htable_order();
        for (bits = min(max(LU_SITE_BITS_MIN, bits), LU_SITE_BITS_MAX);
             bits >= LU_SITE_BITS_MIN; bits--) {
                s->ls_obj_hash = cfs_hash_create("lu_site", bits, bits,
                                                 bits - LU_SITE_BKT_BITS,
                                                 sizeof(*bkt), 0, 0,
                                                 &lu_site_hash_ops,
                                                 CFS_HASH_SPIN_BKTLOCK |
                                                 CFS_HASH_NO_ITEMREF |
                                                 CFS_HASH_DEPTH |
                                                 CFS_HASH_ASSERT_EMPTY);
                if (s->ls_obj_hash != NULL)
                        break;
        }

        if (s->ls_obj_hash == NULL) {
                CERROR("failed to create lu_site hash with bits: %d\n", bits);
                return -ENOMEM;
        }

        cfs_hash_for_each_bucket(s->ls_obj_hash, &bd, i) {
                bkt = cfs_hash_bd_extra_get(s->ls_obj_hash, &bd);
                CFS_INIT_LIST_HEAD(&bkt->lsb_lru);
                cfs_waitq_init(&bkt->lsb_marche_funebre);
        }

        s->ls_stats = lprocfs_alloc_stats(LU_SS_LAST_STAT, 0);
        if (s->ls_stats == NULL) {
                cfs_hash_putref(s->ls_obj_hash);
                s->ls_obj_hash = NULL;
                return -ENOMEM;
        }

        lprocfs_counter_init(s->ls_stats, LU_SS_CREATED,
                             0, "created", "created");
        lprocfs_counter_init(s->ls_stats, LU_SS_CACHE_HIT,
                             0, "cache_hit", "cache_hit");
        lprocfs_counter_init(s->ls_stats, LU_SS_CACHE_MISS,
                             0, "cache_miss", "cache_miss");
        lprocfs_counter_init(s->ls_stats, LU_SS_CACHE_RACE,
                             0, "cache_race", "cache_race");
        lprocfs_counter_init(s->ls_stats, LU_SS_CACHE_DEATH_RACE,
                             0, "cache_death_race", "cache_death_race");
        lprocfs_counter_init(s->ls_stats, LU_SS_LRU_PURGED,
                             0, "lru_purged", "lru_purged");

        CFS_INIT_LIST_HEAD(&s->ls_linkage);
        s->ls_top_dev = top;
        top->ld_site = s;
        lu_device_get(top);
        lu_ref_add(&top->ld_reference, "site-top", s);

        RETURN(0);
}
EXPORT_SYMBOL(lu_site_init);

/**
 * Finalize \a s and release its resources.
 */
void lu_site_fini(struct lu_site *s)
{
        cfs_down(&lu_sites_guard);
        cfs_list_del_init(&s->ls_linkage);
        cfs_up(&lu_sites_guard);

        if (s->ls_obj_hash != NULL) {
                cfs_hash_putref(s->ls_obj_hash);
                s->ls_obj_hash = NULL;
        }

        if (s->ls_top_dev != NULL) {
                s->ls_top_dev->ld_site = NULL;
                lu_ref_del(&s->ls_top_dev->ld_reference, "site-top", s);
                lu_device_put(s->ls_top_dev);
                s->ls_top_dev = NULL;
        }

        if (s->ls_stats != NULL)
                lprocfs_free_stats(&s->ls_stats);
}
EXPORT_SYMBOL(lu_site_fini);

/**
 * Called when initialization of stack for this site is completed.
 */
int lu_site_init_finish(struct lu_site *s)
{
        int result;
        cfs_down(&lu_sites_guard);
        result = lu_context_refill(&lu_shrink_env.le_ctx);
        if (result == 0)
                cfs_list_add(&s->ls_linkage, &lu_sites);
        cfs_up(&lu_sites_guard);
        return result;
}
EXPORT_SYMBOL(lu_site_init_finish);
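
/*
 * Usage sketch (illustrative): a site is set up in two steps, first
 * lu_site_init() with the top-level device, then lu_site_init_finish()
 * once the device stack is complete; "site" and "top" are assumed to be
 * allocated by the caller:
 *
 *      rc = lu_site_init(site, top);
 *      if (rc == 0) {
 *              rc = lu_site_init_finish(site);
 *              if (rc != 0)
 *                      lu_site_fini(site);
 *      }
 */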

/**
 * Acquire additional reference on device \a d
 */
void lu_device_get(struct lu_device *d)
{
        cfs_atomic_inc(&d->ld_ref);
}
EXPORT_SYMBOL(lu_device_get);

/**
 * Release reference on device \a d.
 */
void lu_device_put(struct lu_device *d)
{
        LASSERT(cfs_atomic_read(&d->ld_ref) > 0);
        cfs_atomic_dec(&d->ld_ref);
}
EXPORT_SYMBOL(lu_device_put);

/**
 * Initialize device \a d of type \a t.
 */
int lu_device_init(struct lu_device *d, struct lu_device_type *t)
{
        if (t->ldt_device_nr++ == 0 && t->ldt_ops->ldto_start != NULL)
                t->ldt_ops->ldto_start(t);
        memset(d, 0, sizeof *d);
        cfs_atomic_set(&d->ld_ref, 0);
        d->ld_type = t;
        lu_ref_init(&d->ld_reference);
        return 0;
}
EXPORT_SYMBOL(lu_device_init);

/**
 * Finalize device \a d.
 */
void lu_device_fini(struct lu_device *d)
{
        struct lu_device_type *t;

        t = d->ld_type;
        if (d->ld_obd != NULL) {
                d->ld_obd->obd_lu_dev = NULL;
                d->ld_obd = NULL;
        }

        lu_ref_fini(&d->ld_reference);
        LASSERTF(cfs_atomic_read(&d->ld_ref) == 0,
                 "Refcount is %u\n", cfs_atomic_read(&d->ld_ref));
        LASSERT(t->ldt_device_nr > 0);
        if (--t->ldt_device_nr == 0 && t->ldt_ops->ldto_stop != NULL)
                t->ldt_ops->ldto_stop(t);
}
EXPORT_SYMBOL(lu_device_fini);

/**
 * Initialize object \a o that is part of compound object \a h and was created
 * by device \a d.
 */
int lu_object_init(struct lu_object *o,
                   struct lu_object_header *h, struct lu_device *d)
{
        memset(o, 0, sizeof *o);
        o->lo_header = h;
        o->lo_dev    = d;
        lu_device_get(d);
        o->lo_dev_ref = lu_ref_add(&d->ld_reference, "lu_object", o);
        CFS_INIT_LIST_HEAD(&o->lo_linkage);
        return 0;
}
EXPORT_SYMBOL(lu_object_init);

/**
 * Finalize object and release its resources.
 */
void lu_object_fini(struct lu_object *o)
{
        struct lu_device *dev = o->lo_dev;

        LASSERT(cfs_list_empty(&o->lo_linkage));

        if (dev != NULL) {
                lu_ref_del_at(&dev->ld_reference,
                              o->lo_dev_ref, "lu_object", o);
                lu_device_put(dev);
                o->lo_dev = NULL;
        }
}
EXPORT_SYMBOL(lu_object_fini);

/**
 * Add object \a o as first layer of compound object \a h
 *
 * This is typically called by the ->ldo_object_alloc() method of top-level
 * device.
 */
void lu_object_add_top(struct lu_object_header *h, struct lu_object *o)
{
        cfs_list_move(&o->lo_linkage, &h->loh_layers);
}
EXPORT_SYMBOL(lu_object_add_top);

/**
 * Add object \a o as a layer of compound object, going after \a before.
 *
 * This is typically called by the ->ldo_object_alloc() method of \a
 * before->lo_dev.
 */
void lu_object_add(struct lu_object *before, struct lu_object *o)
{
        cfs_list_move(&o->lo_linkage, &before->lo_linkage);
}
EXPORT_SYMBOL(lu_object_add);
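
/*
 * Usage sketch (illustrative): composing a compound object from slices.
 * A device's ->ldo_object_alloc() allocates its slice, initializes it with
 * lu_object_init(), and chains it into the header: the top-level device
 * with lu_object_add_top(), a lower layer with lu_object_add() after the
 * slice above it. The slice type "my_object" with member "mo_lu" is made
 * up for this sketch:
 *
 *      struct my_object *obj = ...allocate slice...;
 *
 *      lu_object_init(&obj->mo_lu, h, d);
 *      lu_object_add_top(h, &obj->mo_lu);         top-level slice
 *
 *      lu_object_add(above, &obj->mo_lu);         lower slice, after "above"
 *
 * lu_object_alloc() above then drives ->loo_object_init() on each new
 * slice until the whole stack is built.
 */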

/**
 * Initialize compound object.
 */
int lu_object_header_init(struct lu_object_header *h)
{
        memset(h, 0, sizeof *h);
        cfs_atomic_set(&h->loh_ref, 1);
        CFS_INIT_HLIST_NODE(&h->loh_hash);
        CFS_INIT_LIST_HEAD(&h->loh_lru);
        CFS_INIT_LIST_HEAD(&h->loh_layers);
        lu_ref_init(&h->loh_reference);
        return 0;
}
EXPORT_SYMBOL(lu_object_header_init);

/**
 * Finalize compound object.
 */
void lu_object_header_fini(struct lu_object_header *h)
{
        LASSERT(cfs_list_empty(&h->loh_layers));
        LASSERT(cfs_list_empty(&h->loh_lru));
        LASSERT(cfs_hlist_unhashed(&h->loh_hash));
        lu_ref_fini(&h->loh_reference);
}
EXPORT_SYMBOL(lu_object_header_fini);

/**
 * Given a compound object, find its slice, corresponding to the device type
 * \a dtype.
 */
struct lu_object *lu_object_locate(struct lu_object_header *h,
                                   const struct lu_device_type *dtype)
{
        struct lu_object *o;

        cfs_list_for_each_entry(o, &h->loh_layers, lo_linkage) {
                if (o->lo_dev->ld_type == dtype)
                        return o;
        }
        return NULL;
}
EXPORT_SYMBOL(lu_object_locate);

/**
 * Finalize and free devices in the device stack.
 *
 * Finalize device stack by purging object cache, and calling
 * lu_device_type_operations::ldto_device_fini() and
 * lu_device_type_operations::ldto_device_free() on all devices in the stack.
 */
void lu_stack_fini(const struct lu_env *env, struct lu_device *top)
{
        struct lu_site   *site = top->ld_site;
        struct lu_device *scan;
        struct lu_device *next;

        lu_site_purge(env, site, ~0);
        for (scan = top; scan != NULL; scan = next) {
                next = scan->ld_type->ldt_ops->ldto_device_fini(env, scan);
                lu_ref_del(&scan->ld_reference, "lu-stack", &lu_site_init);
                lu_device_put(scan);
        }

        /* purge again. */
        lu_site_purge(env, site, ~0);

        if (!cfs_hash_is_empty(site->ls_obj_hash)) {
                /*
                 * Uh-oh, objects still exist.
                 */
                static DECLARE_LU_CDEBUG_PRINT_INFO(cookie, D_ERROR);

                lu_site_print(env, site, &cookie, lu_cdebug_printer);
        }

        for (scan = top; scan != NULL; scan = next) {
                const struct lu_device_type *ldt = scan->ld_type;
                struct obd_type             *type;

                next = ldt->ldt_ops->ldto_device_free(env, scan);
                type = ldt->ldt_obd_type;
                if (type != NULL) {
                        type->typ_refcnt--;
                        class_put_type(type);
                }
        }
}
EXPORT_SYMBOL(lu_stack_fini);

enum {
        /**
         * Maximal number of thread-local data (TLD) slots.
         */
        LU_CONTEXT_KEY_NR = 32
};

static struct lu_context_key *lu_keys[LU_CONTEXT_KEY_NR] = { NULL, };

static cfs_spinlock_t lu_keys_guard = CFS_SPIN_LOCK_UNLOCKED;

/**
 * Global counter incremented whenever a key is registered, unregistered,
 * revived or quiesced. This is used to avoid unnecessary calls to
 * lu_context_refill(). No locking is provided, as initialization and shutdown
 * are supposed to be externally serialized.
 */
static unsigned key_set_version = 0;

/**
 * Register new key.
 */
int lu_context_key_register(struct lu_context_key *key)
{
        int result;
        int i;

        LASSERT(key->lct_init != NULL);
        LASSERT(key->lct_fini != NULL);
        LASSERT(key->lct_tags != 0);
        LASSERT(key->lct_owner != NULL);

        result = -ENFILE;
        cfs_spin_lock(&lu_keys_guard);
        for (i = 0; i < ARRAY_SIZE(lu_keys); ++i) {
                if (lu_keys[i] == NULL) {
                        key->lct_index = i;
                        cfs_atomic_set(&key->lct_used, 1);
                        lu_keys[i] = key;
                        lu_ref_init(&key->lct_reference);
                        result = 0;
                        ++key_set_version;
                        break;
                }
        }
        cfs_spin_unlock(&lu_keys_guard);
        return result;
}
EXPORT_SYMBOL(lu_context_key_register);

static void key_fini(struct lu_context *ctx, int index)
{
        if (ctx->lc_value != NULL && ctx->lc_value[index] != NULL) {
                struct lu_context_key *key;

                key = lu_keys[index];
                LASSERT(key != NULL);
                LASSERT(key->lct_fini != NULL);
                LASSERT(cfs_atomic_read(&key->lct_used) > 1);

                key->lct_fini(ctx, key, ctx->lc_value[index]);
                lu_ref_del(&key->lct_reference, "ctx", ctx);
                cfs_atomic_dec(&key->lct_used);
                LASSERT(key->lct_owner != NULL);
                if (!(ctx->lc_tags & LCT_NOREF)) {
                        LASSERT(cfs_module_refcount(key->lct_owner) > 0);
                        cfs_module_put(key->lct_owner);
                }
                ctx->lc_value[index] = NULL;
        }
}

/**
 * Deregister key.
 */
void lu_context_key_degister(struct lu_context_key *key)
{
        LASSERT(cfs_atomic_read(&key->lct_used) >= 1);
        LINVRNT(0 <= key->lct_index && key->lct_index < ARRAY_SIZE(lu_keys));

        lu_context_key_quiesce(key);

        ++key_set_version;
        cfs_spin_lock(&lu_keys_guard);
        key_fini(&lu_shrink_env.le_ctx, key->lct_index);
        if (lu_keys[key->lct_index]) {
                lu_keys[key->lct_index] = NULL;
                lu_ref_fini(&key->lct_reference);
        }
        cfs_spin_unlock(&lu_keys_guard);

        LASSERTF(cfs_atomic_read(&key->lct_used) == 1,
                 "key has instances: %d\n",
                 cfs_atomic_read(&key->lct_used));
}
EXPORT_SYMBOL(lu_context_key_degister);
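
/*
 * Usage sketch (illustrative): the life-cycle of a context key for a
 * hypothetical module. LU_KEY_INIT_FINI() (as used for lu_global_key
 * above) generates the my_key_init()/my_key_fini() callbacks, and
 * LU_CONTEXT_KEY_INIT() fills in ->lct_owner before registration:
 *
 *      LU_KEY_INIT_FINI(my, struct my_thread_info);
 *
 *      struct lu_context_key my_thread_key = {
 *              .lct_tags = LCT_MD_THREAD,
 *              .lct_init = my_key_init,
 *              .lct_fini = my_key_fini
 *      };
 *
 *      LU_CONTEXT_KEY_INIT(&my_thread_key);
 *      rc = lu_context_key_register(&my_thread_key);
 *      ...
 *      lu_context_key_degister(&my_thread_key);
 */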

/**
 * Register a number of keys. This has to be called after all keys have been
 * initialized by a call to LU_CONTEXT_KEY_INIT().
 */
int lu_context_key_register_many(struct lu_context_key *k, ...)
{
        struct lu_context_key *key = k;
        va_list args;
        int result;

        va_start(args, k);
        do {
                result = lu_context_key_register(key);
                if (result)
                        break;
                key = va_arg(args, struct lu_context_key *);
        } while (key != NULL);
        va_end(args);

        if (result != 0) {
                va_start(args, k);
                while (k != key) {
                        lu_context_key_degister(k);
                        k = va_arg(args, struct lu_context_key *);
                }
                va_end(args);
        }

        return result;
}
EXPORT_SYMBOL(lu_context_key_register_many);

/**
 * De-register a number of keys. This is a dual to
 * lu_context_key_register_many().
 */
void lu_context_key_degister_many(struct lu_context_key *k, ...)
{
        va_list args;

        va_start(args, k);
        do {
                lu_context_key_degister(k);
                k = va_arg(args, struct lu_context_key *);
        } while (k != NULL);
        va_end(args);
}
EXPORT_SYMBOL(lu_context_key_degister_many);

/**
 * Revive a number of keys.
 */
void lu_context_key_revive_many(struct lu_context_key *k, ...)
{
        va_list args;

        va_start(args, k);
        do {
                lu_context_key_revive(k);
                k = va_arg(args, struct lu_context_key *);
        } while (k != NULL);
        va_end(args);
}
EXPORT_SYMBOL(lu_context_key_revive_many);

/**
 * Quiesce a number of keys.
 */
1342 void lu_context_key_quiesce_many(struct lu_context_key *k, ...)
1343 {
1344         va_list args;
1345
1346         va_start(args, k);
1347         do {
1348                 lu_context_key_quiesce(k);
1349                 k = va_arg(args, struct lu_context_key*);
1350         } while (k != NULL);
1351         va_end(args);
1352 }
1353 EXPORT_SYMBOL(lu_context_key_quiesce_many);
1354
1355 /**
1356  * Return value associated with key \a key in context \a ctx.
1357  */
1358 void *lu_context_key_get(const struct lu_context *ctx,
1359                          const struct lu_context_key *key)
1360 {
1361         LINVRNT(ctx->lc_state == LCS_ENTERED);
1362         LINVRNT(0 <= key->lct_index && key->lct_index < ARRAY_SIZE(lu_keys));
1363         LASSERT(lu_keys[key->lct_index] == key);
1364         return ctx->lc_value[key->lct_index];
1365 }
1366 EXPORT_SYMBOL(lu_context_key_get);
1367
1368 /**
1369  * List of remembered contexts. XXX document me.
1370  */
1371 static CFS_LIST_HEAD(lu_context_remembered);
1372
1373 /**
1374  * Destroy \a key in all remembered contexts. This is used to destroy key
1375  * values in "shared" contexts (like service threads), when a module owning
1376  * the key is about to be unloaded.
1377  */
1378 void lu_context_key_quiesce(struct lu_context_key *key)
1379 {
1380         struct lu_context *ctx;
1381         extern unsigned cl_env_cache_purge(unsigned nr);
1382
1383         if (!(key->lct_tags & LCT_QUIESCENT)) {
1384                 /*
1385                  * XXX layering violation.
1386                  */
1387                 cl_env_cache_purge(~0);
1388                 key->lct_tags |= LCT_QUIESCENT;
1389                 /*
1390                  * XXX memory barrier has to go here.
1391                  */
1392                 cfs_spin_lock(&lu_keys_guard);
1393                 cfs_list_for_each_entry(ctx, &lu_context_remembered,
1394                                         lc_remember)
1395                         key_fini(ctx, key->lct_index);
1396                 cfs_spin_unlock(&lu_keys_guard);
1397                 ++key_set_version;
1398         }
1399 }
1400 EXPORT_SYMBOL(lu_context_key_quiesce);
1401
1402 void lu_context_key_revive(struct lu_context_key *key)
1403 {
1404         key->lct_tags &= ~LCT_QUIESCENT;
1405         ++key_set_version;
1406 }
1407 EXPORT_SYMBOL(lu_context_key_revive);
1408
1409 static void keys_fini(struct lu_context *ctx)
1410 {
1411         int i;
1412
1413         cfs_spin_lock(&lu_keys_guard);
1414         if (ctx->lc_value != NULL) {
1415                 for (i = 0; i < ARRAY_SIZE(lu_keys); ++i)
1416                         key_fini(ctx, i);
1417                 OBD_FREE(ctx->lc_value,
1418                          ARRAY_SIZE(lu_keys) * sizeof ctx->lc_value[0]);
1419                 ctx->lc_value = NULL;
1420         }
1421         cfs_spin_unlock(&lu_keys_guard);
1422 }
1423
1424 static int keys_fill(struct lu_context *ctx)
1425 {
1426         int i;
1427
1428         for (i = 0; i < ARRAY_SIZE(lu_keys); ++i) {
1429                 struct lu_context_key *key;
1430
1431                 key = lu_keys[i];
1432                 if (ctx->lc_value[i] == NULL && key != NULL &&
1433                     (key->lct_tags & ctx->lc_tags) &&
1434                     /*
1435                      * Don't create values for a LCT_QUIESCENT key, as this
1436                      * will pin module owning a key.
1437                      */
1438                     !(key->lct_tags & LCT_QUIESCENT)) {
1439                         void *value;
1440
1441                         LINVRNT(key->lct_init != NULL);
1442                         LINVRNT(key->lct_index == i);
1443
1444                         value = key->lct_init(ctx, key);
1445                         if (unlikely(IS_ERR(value)))
1446                                 return PTR_ERR(value);
1447
1448                         LASSERT(key->lct_owner != NULL);
1449                         if (!(ctx->lc_tags & LCT_NOREF))
1450                                 cfs_try_module_get(key->lct_owner);
1451                         lu_ref_add_atomic(&key->lct_reference, "ctx", ctx);
1452                         cfs_atomic_inc(&key->lct_used);
1453                         /*
1454                          * This is the only place in the code, where an
1455                          * element of ctx->lc_value[] array is set to non-NULL
1456                          * value.
1457                          */
1458                         ctx->lc_value[i] = value;
1459                         if (key->lct_exit != NULL)
1460                                 ctx->lc_tags |= LCT_HAS_EXIT;
1461                 }
1462                 ctx->lc_version = key_set_version;
1463         }
1464         return 0;
1465 }
1466
1467 static int keys_init(struct lu_context *ctx)
1468 {
1469         int result;
1470
1471         OBD_ALLOC(ctx->lc_value, ARRAY_SIZE(lu_keys) * sizeof ctx->lc_value[0]);
1472         if (likely(ctx->lc_value != NULL))
1473                 result = keys_fill(ctx);
1474         else
1475                 result = -ENOMEM;
1476
1477         if (result != 0)
1478                 keys_fini(ctx);
1479         return result;
1480 }
1481
1482 /**
1483  * Initialize context data-structure. Create values for all keys.
1484  */
1485 int lu_context_init(struct lu_context *ctx, __u32 tags)
1486 {
1487         memset(ctx, 0, sizeof *ctx);
1488         ctx->lc_state = LCS_INITIALIZED;
1489         ctx->lc_tags = tags;
1490         if (tags & LCT_REMEMBER) {
1491                 cfs_spin_lock(&lu_keys_guard);
1492                 cfs_list_add(&ctx->lc_remember, &lu_context_remembered);
1493                 cfs_spin_unlock(&lu_keys_guard);
1494         } else
1495                 CFS_INIT_LIST_HEAD(&ctx->lc_remember);
1496         return keys_init(ctx);
1497 }
1498 EXPORT_SYMBOL(lu_context_init);
1499
1500 /**
1501  * Finalize context data-structure. Destroy key values.
1502  */
1503 void lu_context_fini(struct lu_context *ctx)
1504 {
1505         LINVRNT(ctx->lc_state == LCS_INITIALIZED || ctx->lc_state == LCS_LEFT);
1506         ctx->lc_state = LCS_FINALIZED;
1507         keys_fini(ctx);
1508         cfs_spin_lock(&lu_keys_guard);
1509         cfs_list_del_init(&ctx->lc_remember);
1510         cfs_spin_unlock(&lu_keys_guard);
1511 }
1512 EXPORT_SYMBOL(lu_context_fini);
1513
1514 /**
1515  * Called before entering context.
1516  */
1517 void lu_context_enter(struct lu_context *ctx)
1518 {
1519         LINVRNT(ctx->lc_state == LCS_INITIALIZED || ctx->lc_state == LCS_LEFT);
1520         ctx->lc_state = LCS_ENTERED;
1521 }
1522 EXPORT_SYMBOL(lu_context_enter);
1523
1524 /**
1525  * Called after exiting from \a ctx
1526  */
1527 void lu_context_exit(struct lu_context *ctx)
1528 {
1529         int i;
1530
1531         LINVRNT(ctx->lc_state == LCS_ENTERED);
1532         ctx->lc_state = LCS_LEFT;
1533         if (ctx->lc_tags & LCT_HAS_EXIT && ctx->lc_value != NULL) {
1534                 for (i = 0; i < ARRAY_SIZE(lu_keys); ++i) {
1535                         if (ctx->lc_value[i] != NULL) {
1536                                 struct lu_context_key *key;
1537
1538                                 key = lu_keys[i];
1539                                 LASSERT(key != NULL);
1540                                 if (key->lct_exit != NULL)
1541                                         key->lct_exit(ctx,
1542                                                       key, ctx->lc_value[i]);
1543                         }
1544                 }
1545         }
1546 }
1547 EXPORT_SYMBOL(lu_context_exit);
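
/*
 * Taken together, lu_context_init(), lu_context_enter(), lu_context_exit()
 * and lu_context_fini() form the context lifecycle. A sketch of a typical
 * caller follows; the tag choice and the key name are illustrative only:
 *
 *      struct lu_context ctx;
 *      int rc;
 *
 *      rc = lu_context_init(&ctx, LCT_MD_THREAD);
 *      if (rc != 0)
 *              return rc;
 *      lu_context_enter(&ctx);
 *      ... use per-key values, e.g. lu_context_key_get(&ctx, &some_key) ...
 *      lu_context_exit(&ctx);
 *      lu_context_fini(&ctx);
 */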
1548
1549 /**
1550  * Allocate values in the context for any keys that were registered after
1551  * the context was created.
1552  */
1553 int lu_context_refill(struct lu_context *ctx)
1554 {
1555         LINVRNT(ctx->lc_value != NULL);
1556         return ctx->lc_version == key_set_version ? 0 : keys_fill(ctx);
1557 }
1558 EXPORT_SYMBOL(lu_context_refill);
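
/*
 * For instance, when a module registers a new key while long-lived contexts
 * already exist, each such context can be brought up to date before its
 * next use; my_key below is a hypothetical key:
 *
 *      rc = lu_context_key_register(&my_key);
 *      ...
 *      rc = lu_context_refill(ctx);    (allocates the value for my_key)
 */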
1559
1560 int lu_env_init(struct lu_env *env, __u32 tags)
1561 {
1562         int result;
1563
1564         env->le_ses = NULL;
1565         result = lu_context_init(&env->le_ctx, tags);
1566         if (likely(result == 0))
1567                 lu_context_enter(&env->le_ctx);
1568         return result;
1569 }
1570 EXPORT_SYMBOL(lu_env_init);
1571
1572 void lu_env_fini(struct lu_env *env)
1573 {
1574         lu_context_exit(&env->le_ctx);
1575         lu_context_fini(&env->le_ctx);
1576         env->le_ses = NULL;
1577 }
1578 EXPORT_SYMBOL(lu_env_fini);
1579
1580 int lu_env_refill(struct lu_env *env)
1581 {
1582         int result;
1583
1584         result = lu_context_refill(&env->le_ctx);
1585         if (result == 0 && env->le_ses != NULL)
1586                 result = lu_context_refill(env->le_ses);
1587         return result;
1588 }
1589 EXPORT_SYMBOL(lu_env_refill);
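
/*
 * These wrappers allow an environment to be set up on the stack. A minimal
 * sketch, with an illustrative tag choice:
 *
 *      struct lu_env env;
 *      int rc;
 *
 *      rc = lu_env_init(&env, LCT_DT_THREAD);
 *      if (rc != 0)
 *              return rc;
 *      ... pass &env to interfaces taking a struct lu_env ...
 *      lu_env_fini(&env);
 */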
1590
1591 static struct cfs_shrinker *lu_site_shrinker = NULL;
1592
1593 typedef struct lu_site_stats {
1594         unsigned        lss_populated;
1595         unsigned        lss_max_search;
1596         unsigned        lss_total;
1597         unsigned        lss_busy;
1598 } lu_site_stats_t;
1599
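/*
 * Collect aggregate statistics for \a hs into \a stats. When \a populated
 * is non-zero, also count the number of non-empty hash chains.
 */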
1600 static void lu_site_stats_get(cfs_hash_t *hs,
1601                               lu_site_stats_t *stats, int populated)
1602 {
1603         cfs_hash_bd_t bd;
1604         int           i;
1605
1606         cfs_hash_for_each_bucket(hs, &bd, i) {
1607                 struct lu_site_bkt_data *bkt = cfs_hash_bd_extra_get(hs, &bd);
1608                 cfs_hlist_head_t        *hhead;
1609
1610                 cfs_hash_bd_lock(hs, &bd, 1);
1611                 stats->lss_busy  += bkt->lsb_busy;
1612                 stats->lss_total += cfs_hash_bd_count_get(&bd);
1613                 stats->lss_max_search = max((int)stats->lss_max_search,
1614                                             cfs_hash_bd_depmax_get(&bd));
1615                 if (!populated) {
1616                         cfs_hash_bd_unlock(hs, &bd, 1);
1617                         continue;
1618                 }
1619
1620                 cfs_hash_bd_for_each_hlist(hs, &bd, hhead) {
1621                         if (!cfs_hlist_empty(hhead))
1622                                 stats->lss_populated++;
1623                 }
1624                 cfs_hash_bd_unlock(hs, &bd, 1);
1625         }
1626 }
1627
1628 #ifdef __KERNEL__
1629 static int KERN_SHRINKER(lu_cache_shrink)
1630 {
1631         lu_site_stats_t stats;
1632         struct lu_site *s;
1633         struct lu_site *tmp;
1634         int cached = 0;
1635         int remain = nr_to_scan;
1636         CFS_LIST_HEAD(splice);
1637
1638         if (nr_to_scan != 0) {
1639                 if (!(gfp_mask & __GFP_FS))
1640                         return -1;
1641                 CDEBUG(D_INODE, "Shrink %d objects\n", nr_to_scan);
1642         }
1643
1644         cfs_down(&lu_sites_guard);
1645         cfs_list_for_each_entry_safe(s, tmp, &lu_sites, ls_linkage) {
1646                 if (nr_to_scan != 0) {
1647                         remain = lu_site_purge(&lu_shrink_env, s, remain);
1648                         /*
1649                          * Move the just-shrunk site to the tail of the site
1650                          * list to ensure shrinking fairness.
1651                          */
1652                         cfs_list_move_tail(&s->ls_linkage, &splice);
1653                 }
1654
1655                 memset(&stats, 0, sizeof(stats));
1656                 lu_site_stats_get(s->ls_obj_hash, &stats, 0);
1657                 cached += stats.lss_total - stats.lss_busy;
1658                 if (nr_to_scan && remain <= 0)
1659                         break;
1660         }
1661         cfs_list_splice(&splice, lu_sites.prev);
1662         cfs_up(&lu_sites_guard);
1663
1664         cached = (cached / 100) * sysctl_vfs_cache_pressure;
1665         if (nr_to_scan == 0)
1666                 CDEBUG(D_INODE, "%d objects cached\n", cached);
1667         return cached;
1668 }
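
/*
 * The shrinker above is called in two modes: with nr_to_scan == 0 the VM
 * only queries the cache size, so the function reports the number of
 * freeable (cached but not busy) objects, scaled by
 * sysctl_vfs_cache_pressure; with nr_to_scan > 0 it first purges up to
 * that many objects through lu_site_purge(). Ignoring the per-kernel glue
 * hidden by KERN_SHRINKER(), the two modes look roughly like:
 *
 *      cached = lu_cache_shrink(0, GFP_KERNEL);        (query only)
 *      cached = lu_cache_shrink(128, GFP_KERNEL);      (purge up to 128)
 */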
1669
1670 /*
1671  * Debugging helpers.
1672  */
1673
1674 /**
1675  * Environment to be used in debugger, contains all tags.
1676  */
1677 struct lu_env lu_debugging_env;
1678
1679 /**
1680  * Debugging printer function using printk().
1681  */
1682 int lu_printk_printer(const struct lu_env *env,
1683                       void *unused, const char *format, ...)
1684 {
1685         va_list args;
1686
1687         va_start(args, format);
1688         vprintk(format, args);
1689         va_end(args);
1690         return 0;
1691 }
1692
1693 void lu_debugging_setup(void)
1694 {
1695         lu_env_init(&lu_debugging_env, ~0);
1696 }
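
/*
 * From a debugger, objects can then be printed through this environment,
 * for example (assuming obj points to a live lu_object):
 *
 *      lu_debugging_setup();
 *      lu_object_print(&lu_debugging_env, NULL, lu_printk_printer, obj);
 */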
1697
1698 void lu_context_keys_dump(void)
1699 {
1700         int i;
1701
1702         for (i = 0; i < ARRAY_SIZE(lu_keys); ++i) {
1703                 struct lu_context_key *key;
1704
1705                 key = lu_keys[i];
1706                 if (key != NULL) {
1707                         CERROR("[%d]: %p %x (%p,%p,%p) %d %d \"%s\"@%p\n",
1708                                i, key, key->lct_tags,
1709                                key->lct_init, key->lct_fini, key->lct_exit,
1710                                key->lct_index, cfs_atomic_read(&key->lct_used),
1711                                key->lct_owner ? key->lct_owner->name : "",
1712                                key->lct_owner);
1713                         lu_ref_print(&key->lct_reference);
1714                 }
1715         }
1716 }
1717 EXPORT_SYMBOL(lu_context_keys_dump);
1718 #else  /* !__KERNEL__ */
1719 static int lu_cache_shrink(int nr, unsigned int gfp_mask)
1720 {
1721         return 0;
1722 }
1723 #endif /* __KERNEL__ */
1724
1725 int  cl_global_init(void);
1726 void cl_global_fini(void);
1727 int  lu_ref_global_init(void);
1728 void lu_ref_global_fini(void);
1729
1730 int dt_global_init(void);
1731 void dt_global_fini(void);
1732
1733 int llo_global_init(void);
1734 void llo_global_fini(void);
1735
1736 /**
1737  * Initialization of global lu_* data.
1738  */
1739 int lu_global_init(void)
1740 {
1741         int result;
1742
1743         CDEBUG(D_CONSOLE, "Lustre LU module (%p).\n", &lu_keys);
1744
1745         result = lu_ref_global_init();
1746         if (result != 0)
1747                 return result;
1748
1749         LU_CONTEXT_KEY_INIT(&lu_global_key);
1750         result = lu_context_key_register(&lu_global_key);
1751         if (result != 0)
1752                 return result;
1753         /*
1754          * At this level we do not know which tags are needed, so initialize
1755          * the environment conservatively. This should not be too costly,
1756          * because this environment is global.
1757          */
1758         cfs_down(&lu_sites_guard);
1759         result = lu_env_init(&lu_shrink_env, LCT_SHRINKER);
1760         cfs_up(&lu_sites_guard);
1761         if (result != 0)
1762                 return result;
1763
1764         /*
1765          * Seek estimation: 3 seeks to read a record from the OI, one to read
1766          * the inode, one for the EA. Unfortunately, setting such a high value
1767          * results in the lu_object/inode cache consuming all of the memory.
1768          */
1769         lu_site_shrinker = cfs_set_shrinker(CFS_DEFAULT_SEEKS, lu_cache_shrink);
1770         if (lu_site_shrinker == NULL)
1771                 return -ENOMEM;
1772
1773         result = lu_time_global_init();
1774         if (result)
1775                 GOTO(out, result);
1776
1777 #ifdef __KERNEL__
1778         result = dt_global_init();
1779         if (result)
1780                 GOTO(out, result);
1781
1782         result = llo_global_init();
1783         if (result)
1784                 GOTO(out, result);
1785 #endif
1786         result = cl_global_init();
1787 out:
1788
1789         return result;
1790 }
1791
1792 /**
1793  * Dual to lu_global_init().
1794  */
1795 void lu_global_fini(void)
1796 {
1797         cl_global_fini();
1798 #ifdef __KERNEL__
1799         llo_global_fini();
1800         dt_global_fini();
1801 #endif
1802         lu_time_global_fini();
1803         if (lu_site_shrinker != NULL) {
1804                 cfs_remove_shrinker(lu_site_shrinker);
1805                 lu_site_shrinker = NULL;
1806         }
1807
1808         lu_context_key_degister(&lu_global_key);
1809
1810         /*
1811          * Tear shrinker environment down _after_ de-registering
1812          * lu_global_key, because the latter has a value in the former.
1813          */
1814         cfs_down(&lu_sites_guard);
1815         lu_env_fini(&lu_shrink_env);
1816         cfs_up(&lu_sites_guard);
1817
1818         lu_ref_global_fini();
1819 }
1820
1821 struct lu_buf LU_BUF_NULL = {
1822         .lb_buf = NULL,
1823         .lb_len = 0
1824 };
1825 EXPORT_SYMBOL(LU_BUF_NULL);
1826
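/*
 * Read a single lprocfs counter; returns 0 when lprocfs support is not
 * compiled in.
 */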
1827 static __u32 ls_stats_read(struct lprocfs_stats *stats, int idx)
1828 {
1829 #ifdef LPROCFS
1830         struct lprocfs_counter ret;
1831
1832         lprocfs_stats_collect(stats, idx, &ret);
1833         return (__u32)ret.lc_count;
1834 #else
1835         return 0;
1836 #endif
1837 }
1838
1839 /**
1840  * Output site statistical counters into a buffer. Suitable for
1841  * lprocfs_rd_*()-style functions.
1842  */
1843 int lu_site_stats_print(const struct lu_site *s, char *page, int count)
1844 {
1845         lu_site_stats_t stats;
1846
1847         memset(&stats, 0, sizeof(stats));
1848         lu_site_stats_get(s->ls_obj_hash, &stats, 1);
1849
1850         return snprintf(page, count, "%d/%d %d/%d %d %d %d %d %d %d %d\n",
1851                         stats.lss_busy,
1852                         stats.lss_total,
1853                         stats.lss_populated,
1854                         CFS_HASH_NHLIST(s->ls_obj_hash),
1855                         stats.lss_max_search,
1856                         ls_stats_read(s->ls_stats, LU_SS_CREATED),
1857                         ls_stats_read(s->ls_stats, LU_SS_CACHE_HIT),
1858                         ls_stats_read(s->ls_stats, LU_SS_CACHE_MISS),
1859                         ls_stats_read(s->ls_stats, LU_SS_CACHE_RACE),
1860                         ls_stats_read(s->ls_stats, LU_SS_CACHE_DEATH_RACE),
1861                         ls_stats_read(s->ls_stats, LU_SS_LRU_PURGED));
1862 }
1863 EXPORT_SYMBOL(lu_site_stats_print);
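
/*
 * For reference, the columns printed above are, in order: busy/total
 * objects, populated/total hash chains, maximum search depth, and the
 * created, cache_hit, cache_miss, cache_race, cache_death_race and
 * lru_purged counters.
 */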
1864
1865 const char *lu_time_names[LU_TIME_NR] = {
1866         [LU_TIME_FIND_LOOKUP] = "find_lookup",
1867         [LU_TIME_FIND_ALLOC]  = "find_alloc",
1868         [LU_TIME_FIND_INSERT] = "find_insert"
1869 };
1870 EXPORT_SYMBOL(lu_time_names);
1871
1872 /**
1873  * Helper function to initialize a number of kmem slab caches at once.
1874  */
1875 int lu_kmem_init(struct lu_kmem_descr *caches)
1876 {
1877         int result;
1878
1879         for (result = 0; caches->ckd_cache != NULL; ++caches) {
1880                 *caches->ckd_cache = cfs_mem_cache_create(caches->ckd_name,
1881                                                           caches->ckd_size,
1882                                                           0, 0);
1883                 if (*caches->ckd_cache == NULL) {
1884                         result = -ENOMEM;
1885                         break;
1886                 }
1887         }
1888         return result;
1889 }
1890 EXPORT_SYMBOL(lu_kmem_init);
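
/*
 * A caller typically describes its caches in a NULL-terminated table and
 * passes it to lu_kmem_init()/lu_kmem_fini(); all names below are
 * hypothetical:
 *
 *      static cfs_mem_cache_t *foo_kmem;
 *
 *      static struct lu_kmem_descr foo_caches[] = {
 *              {
 *                      .ckd_cache = &foo_kmem,
 *                      .ckd_name  = "foo_kmem",
 *                      .ckd_size  = sizeof(struct foo)
 *              },
 *              {
 *                      .ckd_cache = NULL
 *              }
 *      };
 *
 *      rc = lu_kmem_init(foo_caches);
 *      ...
 *      lu_kmem_fini(foo_caches);
 */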
1891
1892 /**
1893  * Helper function to finalize a number of kmem slab caches at once. Dual to
1894  * lu_kmem_init().
1895  */
1896 void lu_kmem_fini(struct lu_kmem_descr *caches)
1897 {
1898         int rc;
1899
1900         for (; caches->ckd_cache != NULL; ++caches) {
1901                 if (*caches->ckd_cache != NULL) {
1902                         rc = cfs_mem_cache_destroy(*caches->ckd_cache);
1903                         LASSERTF(rc == 0, "couldn't destroy %s slab\n",
1904                                  caches->ckd_name);
1905                         *caches->ckd_cache = NULL;
1906                 }
1907         }
1908 }
1909 EXPORT_SYMBOL(lu_kmem_fini);