1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2  * vim:expandtab:shiftwidth=8:tabstop=8:
3  *
4  * GPL HEADER START
5  *
6  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
7  *
8  * This program is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License version 2 only,
10  * as published by the Free Software Foundation.
11  *
12  * This program is distributed in the hope that it will be useful, but
13  * WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * General Public License version 2 for more details (a copy is included
16  * in the LICENSE file that accompanied this code).
17  *
18  * You should have received a copy of the GNU General Public License
19  * version 2 along with this program; If not, see
20  * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
21  *
22  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
23  * CA 95054 USA or visit www.sun.com if you need additional information or
24  * have any questions.
25  *
26  * GPL HEADER END
27  */
28 /*
29  * Copyright (c) 2008 Sun Microsystems, Inc. All rights reserved
30  * Use is subject to license terms.
31  */
32 /*
33  * This file is part of Lustre, http://www.lustre.org/
34  * Lustre is a trademark of Sun Microsystems, Inc.
35  *
36  * lustre/obdclass/lu_object.c
37  *
38  * Lustre Object.
39  * These are the only exported functions; they provide some generic
40  * infrastructure for managing object devices.
41  *
42  *   Author: Nikita Danilov <nikita.danilov@sun.com>
43  */
44
45 #define DEBUG_SUBSYSTEM S_CLASS
46 #ifndef EXPORT_SYMTAB
47 # define EXPORT_SYMTAB
48 #endif
49
50 #include <libcfs/libcfs.h>
51
52 #ifdef __KERNEL__
53 # include <linux/module.h>
54 #endif
55
56 /* hash_long() */
57 #include <libcfs/libcfs_hash.h>
58 #include <obd_class.h>
59 #include <obd_support.h>
60 #include <lustre_disk.h>
61 #include <lustre_fid.h>
62 #include <lu_object.h>
63 #include <libcfs/list.h>
64 /* lu_time_global_{init,fini}() */
65 #include <lu_time.h>
66
67 static void lu_object_free(const struct lu_env *env, struct lu_object *o);
68
69 /**
70  * Decrease reference counter on object. If last reference is freed, return
71  * object to the cache, unless lu_object_is_dying(o) holds. In the latter
72  * case, free object immediately.
73  */
74 void lu_object_put(const struct lu_env *env, struct lu_object *o)
75 {
76         struct lu_object_header *top;
77         struct lu_site          *site;
78         struct lu_object        *orig;
79         int                      kill_it;
80
81         top = o->lo_header;
82         site = o->lo_dev->ld_site;
83         orig = o;
84         kill_it = 0;
85         write_lock(&site->ls_guard);
86         if (atomic_dec_and_test(&top->loh_ref)) {
87                 /*
88                  * When last reference is released, iterate over object
89                  * layers, and notify them that object is no longer busy.
90                  */
91                 list_for_each_entry_reverse(o, &top->loh_layers, lo_linkage) {
92                         if (o->lo_ops->loo_object_release != NULL)
93                                 o->lo_ops->loo_object_release(env, o);
94                 }
95                 -- site->ls_busy;
96                 if (lu_object_is_dying(top)) {
97                         /*
98                          * If object is dying (will not be cached), remove it
99                          * from the hash table and the LRU.
100                          *
101                          * This is done with the hash table and LRU lists
102                          * locked. As the only way to acquire a first reference
103                          * to a previously unreferenced object is through
104                          * hash-table lookup (lu_object_find()) or LRU
105                          * scanning (lu_site_purge()), both of which are done
106                          * under the hash-table and LRU lock, no race with a
107                          * concurrent lookup is possible and we can safely
108                          * destroy the object below.
109                          */
110                         hlist_del_init(&top->loh_hash);
111                         list_del_init(&top->loh_lru);
112                         -- site->ls_total;
113                         kill_it = 1;
114                 }
115         }
116         write_unlock(&site->ls_guard);
117         if (kill_it)
118                 /*
119                  * The object was already removed from the hash and LRU above,
120                  * so it can be freed now.
121                  */
122                 lu_object_free(env, orig);
123 }
124 EXPORT_SYMBOL(lu_object_put);
125
126 /**
127  * Allocate new object.
128  *
129  * This follows the object creation protocol described in the comment within
130  * the struct lu_device_operations definition.
131  */
132 static struct lu_object *lu_object_alloc(const struct lu_env *env,
133                                          struct lu_device *dev,
134                                          const struct lu_fid *f,
135                                          const struct lu_object_conf *conf)
136 {
137         struct lu_object *scan;
138         struct lu_object *top;
139         struct list_head *layers;
140         int clean;
141         int result;
142         ENTRY;
143
144         /*
145          * Create top-level object slice. This will also create
146          * lu_object_header.
147          */
148         top = dev->ld_ops->ldo_object_alloc(env, NULL, dev);
149         if (top == NULL)
150                 RETURN(ERR_PTR(-ENOMEM));
151         /*
152          * This is the only place where object fid is assigned. It's constant
153          * after this point.
154          */
155         LASSERT(fid_is_igif(f) || fid_ver(f) == 0);
156         top->lo_header->loh_fid  = *f;
157         layers = &top->lo_header->loh_layers;
158         do {
159                 /*
160                  * Call ->loo_object_init() repeatedly, until no more new
161                  * object slices are created.
162                  */
163                 clean = 1;
164                 list_for_each_entry(scan, layers, lo_linkage) {
165                         if (scan->lo_flags & LU_OBJECT_ALLOCATED)
166                                 continue;
167                         clean = 0;
168                         scan->lo_header = top->lo_header;
169                         result = scan->lo_ops->loo_object_init(env, scan, conf);
170                         if (result != 0) {
171                                 lu_object_free(env, top);
172                                 RETURN(ERR_PTR(result));
173                         }
174                         scan->lo_flags |= LU_OBJECT_ALLOCATED;
175                 }
176         } while (!clean);
177
178         list_for_each_entry_reverse(scan, layers, lo_linkage) {
179                 if (scan->lo_ops->loo_object_start != NULL) {
180                         result = scan->lo_ops->loo_object_start(env, scan);
181                         if (result != 0) {
182                                 lu_object_free(env, top);
183                                 RETURN(ERR_PTR(result));
184                         }
185                 }
186         }
187
188         dev->ld_site->ls_stats.s_created ++;
189         RETURN(top);
190 }
191
192 /**
193  * Free an object.
194  */
195 static void lu_object_free(const struct lu_env *env, struct lu_object *o)
196 {
197         struct list_head splice;
198         struct lu_object *scan;
199         struct lu_site          *site;
200         struct list_head        *layers;
201
202         site   = o->lo_dev->ld_site;
203         layers = &o->lo_header->loh_layers;
204         /*
205          * First call ->loo_object_delete() method to release all resources.
206          */
207         list_for_each_entry_reverse(scan, layers, lo_linkage) {
208                 if (scan->lo_ops->loo_object_delete != NULL)
209                         scan->lo_ops->loo_object_delete(env, scan);
210         }
211
212         /*
213          * Then splice object layers into a stand-alone list, and call
214          * ->loo_object_free() on all layers to free memory. Splicing is
215          * necessary, because lu_object_header is freed together with the
216          * top-level slice.
217          */
218         CFS_INIT_LIST_HEAD(&splice);
219         list_splice_init(layers, &splice);
220         while (!list_empty(&splice)) {
221                 /*
222                  * Free layers in bottom-to-top order, so that object header
223                  * lives as long as possible and ->loo_object_free() methods
224                  * can look at its contents.
225                  */
226                 o = container_of0(splice.prev, struct lu_object, lo_linkage);
227                 list_del_init(&o->lo_linkage);
228                 LASSERT(o->lo_ops->loo_object_free != NULL);
229                 o->lo_ops->loo_object_free(env, o);
230         }
231         cfs_waitq_broadcast(&site->ls_marche_funebre);
232 }
233
234 /**
235  * Free \a nr objects from the cold end of the site LRU list.
236  */
237 int lu_site_purge(const struct lu_env *env, struct lu_site *s, int nr)
238 {
239         struct list_head         dispose;
240         struct lu_object_header *h;
241         struct lu_object_header *temp;
242
243         CFS_INIT_LIST_HEAD(&dispose);
244         /*
245          * Under LRU list lock, scan LRU list and move unreferenced objects to
246          * the dispose list, removing them from LRU and hash table.
247          */
248         write_lock(&s->ls_guard);
249         list_for_each_entry_safe(h, temp, &s->ls_lru, loh_lru) {
250                 /*
251                  * Objects are sorted in LRU order, and "busy" objects (ones
252                  * with h->loh_ref > 0) naturally tend to live near the hot
253                  * end that we scan last. Unfortunately, sites usually have a
254                  * small (fewer than ten) number of busy yet rarely accessed
255                  * objects (some global objects, accessed directly through
256                  * pointers, bypassing the hash table). Currently the algorithm
257                  * scans them over and over again. We should probably move busy
258                  * objects out of the LRU, or simply live with that.
259                  */
260                 if (nr-- == 0)
261                         break;
262                 if (atomic_read(&h->loh_ref) > 0)
263                         continue;
264                 hlist_del_init(&h->loh_hash);
265                 list_move(&h->loh_lru, &dispose);
266                 s->ls_total --;
267         }
268         write_unlock(&s->ls_guard);
269         /*
270          * Free everything on the dispose list. This is safe against races due
271          * to the reasons described in lu_object_put().
272          */
273         while (!list_empty(&dispose)) {
274                 h = container_of0(dispose.next,
275                                  struct lu_object_header, loh_lru);
276                 list_del_init(&h->loh_lru);
277                 lu_object_free(env, lu_object_top(h));
278                 s->ls_stats.s_lru_purged ++;
279         }
280         return nr;
281 }
282 EXPORT_SYMBOL(lu_site_purge);
283
284 /*
285  * Object printing.
286  *
287  * The code below has to jump through certain hoops to output an object
288  * description into the libcfs_debug_msg()-based log. The problem is that
289  * lu_object_print() composes the object description from strings that are
290  * parts of _lines_ of output (i.e., strings not terminated by a newline).
291  * This doesn't fit very well into the libcfs_debug_msg() interface, which
292  * assumes that each message supplied to it is a self-contained output line.
293  *
294  * To work around this, strings are collected in a temporary buffer
295  * (implemented as the value of lu_global_key), until a terminating newline
296  * character is detected.
297  *
298  */
299
300 enum {
301         /**
302          * Maximal line size.
303          *
304          * XXX overflow is not handled correctly.
305          */
306         LU_CDEBUG_LINE = 256
307 };
308
309 struct lu_cdebug_data {
310         /**
311          * Temporary buffer.
312          */
313         char lck_area[LU_CDEBUG_LINE];
314 };
315
316 /* context key constructor/destructor: lu_global_key_init, lu_global_key_fini */
317 LU_KEY_INIT_FINI(lu_global, struct lu_cdebug_data);
318
319 /**
320  * Key, holding temporary buffer. This key is registered very early by
321  * lu_global_init().
322  */
323 struct lu_context_key lu_global_key = {
324         .lct_tags = LCT_MD_THREAD|LCT_DT_THREAD|LCT_CL_THREAD,
325         .lct_init = lu_global_key_init,
326         .lct_fini = lu_global_key_fini
327 };
328
329 /**
330  * Printer function emitting messages through libcfs_debug_msg().
331  */
332 int lu_cdebug_printer(const struct lu_env *env,
333                       void *cookie, const char *format, ...)
334 {
335         struct lu_cdebug_print_info *info = cookie;
336         struct lu_cdebug_data       *key;
337         int used;
338         int complete;
339         va_list args;
340
341         va_start(args, format);
342
343         key = lu_context_key_get(&env->le_ctx, &lu_global_key);
344         LASSERT(key != NULL);
345
346         used = strlen(key->lck_area);
347         complete = format[strlen(format) - 1] == '\n';
348         /*
349          * Append new chunk to the buffer.
350          */
351         vsnprintf(key->lck_area + used,
352                   ARRAY_SIZE(key->lck_area) - used, format, args);
353         if (complete) {
354                 libcfs_debug_msg(NULL, info->lpi_subsys, info->lpi_mask,
355                                  (char *)info->lpi_file, info->lpi_fn,
356                                  info->lpi_line, "%s", key->lck_area);
357                 key->lck_area[0] = 0;
358         }
359         va_end(args);
360         return 0;
361 }
362 EXPORT_SYMBOL(lu_cdebug_printer);
363
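/*
 * Illustrative note (not part of the original file): because lu_cdebug_printer()
 * buffers output until it sees a terminating '\n', a sequence of calls such as
 *
 *   lu_cdebug_printer(env, &info, "header@%p", hdr);
 *   lu_cdebug_printer(env, &info, " [%d refs]", refs);
 *   lu_cdebug_printer(env, &info, "\n");
 *
 * produces a single libcfs_debug_msg() line rather than three partial ones.
 * The format strings and variable names above are made up for illustration.
 */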
364 /*
365  * Print object header.
366  */
367 static void lu_object_header_print(const struct lu_env *env,
368                                    void *cookie, lu_printer_t printer,
369                                    const struct lu_object_header *hdr)
370 {
371         (*printer)(env, cookie, "header@%p[%#lx, %d, "DFID"%s%s%s]",
372                    hdr, hdr->loh_flags, atomic_read(&hdr->loh_ref),
373                    PFID(&hdr->loh_fid),
374                    hlist_unhashed(&hdr->loh_hash) ? "" : " hash",
375                    list_empty(&hdr->loh_lru) ? "" : " lru",
376                    hdr->loh_attr & LOHA_EXISTS ? " exist":"");
377 }
378
379 /*
380  * Print a human-readable representation of @o through @printer.
381  */
382 void lu_object_print(const struct lu_env *env, void *cookie,
383                      lu_printer_t printer, const struct lu_object *o)
384 {
385         static const char ruler[] = "........................................";
386         struct lu_object_header *top;
387         int depth;
388
389         top = o->lo_header;
390         lu_object_header_print(env, cookie, printer, top);
391         (*printer)(env, cookie, "\n");
392         list_for_each_entry(o, &top->loh_layers, lo_linkage) {
393                 depth = o->lo_depth + 4;
394                 LASSERT(o->lo_ops->loo_object_print != NULL);
395                 /*
396                  * print `.' @depth times.
397                  */
398                 (*printer)(env, cookie, "%*.*s", depth, depth, ruler);
399                 o->lo_ops->loo_object_print(env, cookie, printer, o);
400                 (*printer)(env, cookie, "\n");
401         }
402 }
403 EXPORT_SYMBOL(lu_object_print);
404
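/*
 * Illustrative note (not part of the original file): for a stacked object the
 * output produced by lu_object_print() looks roughly like
 *
 *   header@ffff81002f3a1e00[0, 1, [0x200000003:0x1:0x0] hash lru exist]
 *   ....slice-0-description
 *   ....slice-1-description
 *
 * where the first line comes from lu_object_header_print() and each
 * dot-prefixed line from a slice's ->loo_object_print() method. The address,
 * fid and slice descriptions are made up.
 */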
405 /*
406  * Check object consistency.
407  */
408 int lu_object_invariant(const struct lu_object *o)
409 {
410         struct lu_object_header *top;
411
412         top = o->lo_header;
413         list_for_each_entry(o, &top->loh_layers, lo_linkage) {
414                 if (o->lo_ops->loo_object_invariant != NULL &&
415                     !o->lo_ops->loo_object_invariant(o))
416                         return 0;
417         }
418         return 1;
419 }
420 EXPORT_SYMBOL(lu_object_invariant);
421
422 static struct lu_object *htable_lookup(struct lu_site *s,
423                                        const struct hlist_head *bucket,
424                                        const struct lu_fid *f)
425 {
426         struct lu_object_header *h;
427         struct hlist_node *scan;
428
429         hlist_for_each_entry(h, scan, bucket, loh_hash) {
430                 s->ls_stats.s_cache_check ++;
431                 if (likely(lu_fid_eq(&h->loh_fid, f) &&
432                            !lu_object_is_dying(h))) {
433                         /* bump reference count... */
434                         if (atomic_add_return(1, &h->loh_ref) == 1)
435                                 ++ s->ls_busy;
436                         /* and move to the head of the LRU */
437                         /*
438                          * XXX temporarily disabled to measure the effects of
439                          * read-write locking.
440                          */
441                         /* list_move_tail(&h->loh_lru, &s->ls_lru); */
442                         s->ls_stats.s_cache_hit ++;
443                         return lu_object_top(h);
444                 }
445         }
446         s->ls_stats.s_cache_miss ++;
447         return NULL;
448 }
449
450 static __u32 fid_hash(const struct lu_fid *f, int bits)
451 {
452         /* All objects with the same id and different versions will belong to
453          * the same collision list. */
454         return hash_long(fid_flatten(f), bits);
455 }
456
457 /*
458  * Search the cache for an object with fid @f. If such an object is found,
459  * return it. Otherwise, create a new object, insert it into the cache and
460  * return it. Either way, an additional reference is acquired on the result.
461  */
462 struct lu_object *lu_object_find(const struct lu_env *env,
463                                  struct lu_site *s, const struct lu_fid *f)
464 {
465         struct lu_object     *o;
466         struct lu_object     *shadow;
467         struct hlist_head *bucket;
468
469         /*
470          * This uses standard index maintenance protocol:
471          *
472          *     - search index under lock, and return object if found;
473          *     - otherwise, unlock index, allocate new object;
474          *     - lock index and search again;
475          *     - if nothing is found (usual case), insert newly created
476          *       object into index;
477          *     - otherwise (race: other thread inserted object), free
478          *       object just allocated.
479          *     - unlock index;
480          *     - return object.
481          */
482
483         bucket = s->ls_hash + fid_hash(f, s->ls_hash_bits);
484
485         read_lock(&s->ls_guard);
486         o = htable_lookup(s, bucket, f);
487         read_unlock(&s->ls_guard);
488
489         if (o != NULL)
490                 return o;
491
492         /*
493          * Allocate new object. This may result in rather complicated
494          * operations, including fld queries, inode loading, etc.
495          */
496         o = lu_object_alloc(env, s->ls_top_dev, f, NULL);
497         if (unlikely(IS_ERR(o)))
498                 return o;
499
500         LASSERT(lu_fid_eq(lu_object_fid(o), f));
501
502         write_lock(&s->ls_guard);
503         shadow = htable_lookup(s, bucket, f);
504         if (likely(shadow == NULL)) {
505                 hlist_add_head(&o->lo_header->loh_hash, bucket);
506                 list_add_tail(&o->lo_header->loh_lru, &s->ls_lru);
507                 ++ s->ls_busy;
508                 ++ s->ls_total;
509                 shadow = o;
510                 o = NULL;
511         } else
512                 s->ls_stats.s_cache_race ++;
513         write_unlock(&s->ls_guard);
514         if (o != NULL)
515                 lu_object_free(env, o);
516         return shadow;
517 }
518 EXPORT_SYMBOL(lu_object_find);
519
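/*
 * Illustrative sketch (not part of the original file): the typical caller
 * pattern for the cache interface above: look an object up by fid, check for
 * an allocation error, use the returned top-level slice, then drop the
 * reference with lu_object_put(). The function and variable names are
 * hypothetical; only lu_object_find() and lu_object_put() come from this
 * file. Kept under "#if 0" so it is never compiled.
 */
#if 0
static int example_use_object(const struct lu_env *env, struct lu_site *s,
                              const struct lu_fid *f)
{
        struct lu_object *o;

        o = lu_object_find(env, s, f);
        if (IS_ERR(o))
                return PTR_ERR(o);
        /* ... inspect or modify the object through its slices ... */
        lu_object_put(env, o);
        return 0;
}
#endif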
520 /*
521  * Global list of all sites on this node
522  */
523 static CFS_LIST_HEAD(lu_sites);
524 static DECLARE_MUTEX(lu_sites_guard);
525
526 /*
527  * Global environment used by site shrinker.
528  */
529 static struct lu_env lu_shrink_env;
530
531 /*
532  * Print all objects in @s.
533  */
534 void lu_site_print(const struct lu_env *env, struct lu_site *s, void *cookie,
535                    lu_printer_t printer)
536 {
537         int i;
538
539         for (i = 0; i < s->ls_hash_size; ++i) {
540                 struct lu_object_header *h;
541                 struct hlist_node       *scan;
542
543                 read_lock(&s->ls_guard);
544                 hlist_for_each_entry(h, scan, &s->ls_hash[i], loh_hash) {
545
546                         if (!list_empty(&h->loh_layers)) {
547                                 const struct lu_object *obj;
548
549                                 obj = lu_object_top(h);
550                                 lu_object_print(env, cookie, printer, obj);
551                         } else
552                                 lu_object_header_print(env, cookie, printer, h);
553                 }
554                 read_unlock(&s->ls_guard);
555         }
556 }
557 EXPORT_SYMBOL(lu_site_print);
558
559 enum {
560         LU_CACHE_PERCENT   = 20,
561 };
562
563 /*
564  * Return desired hash table order.
565  */
566 static int lu_htable_order(void)
567 {
568         unsigned long cache_size;
569         int bits;
570
571         /*
572          * Calculate the hash table size, assuming that we want reasonable
573          * performance when LU_CACHE_PERCENT (20%) of total memory is
574          * occupied by the cache of lu_objects.
575          *
576          * The size of an lu_object is (arbitrarily) taken as 1K (inode included).
577          */
578         cache_size = num_physpages;
579
580 #if BITS_PER_LONG == 32
581         /* limit hashtable size for lowmem systems to low RAM */
582         if (cache_size > 1 << (30 - CFS_PAGE_SHIFT))
583                 cache_size = (1 << (30 - CFS_PAGE_SHIFT)) * 3 / 4;
584 #endif
585
586         cache_size = cache_size / 100 * LU_CACHE_PERCENT *
587                 (CFS_PAGE_SIZE / 1024);
588
589         for (bits = 1; (1 << bits) < cache_size; ++bits) {
590                 ;
591         }
592         return bits;
593 }
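/*
 * Worked example (illustrative, not from the original file): on a machine
 * with 1 GiB of RAM and 4 KiB pages, num_physpages is 262144. With
 * LU_CACHE_PERCENT = 20, cache_size becomes 262144 / 100 * 20 * (4096 / 1024)
 * = 2621 * 20 * 4 = 209680 one-kilobyte objects, and the loop above returns
 * the smallest order covering that: bits = 18 (2^18 = 262144 >= 209680).
 * Exact numbers depend on integer truncation and the page size.
 */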
594
595 /*
596  * Initialize site @s, with @top as the top-level device.
597  */
598 int lu_site_init(struct lu_site *s, struct lu_device *top)
599 {
600         int bits;
601         int size;
602         int i;
603         ENTRY;
604
605         memset(s, 0, sizeof *s);
606         rwlock_init(&s->ls_guard);
607         CFS_INIT_LIST_HEAD(&s->ls_lru);
608         CFS_INIT_LIST_HEAD(&s->ls_linkage);
609         s->ls_top_dev = top;
610         top->ld_site = s;
611         lu_device_get(top);
612
613         for (bits = lu_htable_order(), size = 1 << bits;
614              (s->ls_hash =
615               cfs_alloc_large(size * sizeof s->ls_hash[0])) == NULL;
616              --bits, size >>= 1) {
617                 /*
618                  * Scale hash table down, until allocation succeeds.
619                  */
620                 ;
621         }
622
623         s->ls_hash_size = size;
624         s->ls_hash_bits = bits;
625         s->ls_hash_mask = size - 1;
626
627         for (i = 0; i < size; i++)
628                 INIT_HLIST_HEAD(&s->ls_hash[i]);
629
630         RETURN(0);
631 }
632 EXPORT_SYMBOL(lu_site_init);
633
634 /*
635  * Finalize @s and release its resources.
636  */
637 void lu_site_fini(struct lu_site *s)
638 {
639         LASSERT(list_empty(&s->ls_lru));
640         LASSERT(s->ls_total == 0);
641
642         down(&lu_sites_guard);
643         list_del_init(&s->ls_linkage);
644         up(&lu_sites_guard);
645
646         if (s->ls_hash != NULL) {
647                 int i;
648                 for (i = 0; i < s->ls_hash_size; i++)
649                         LASSERT(hlist_empty(&s->ls_hash[i]));
650                 cfs_free_large(s->ls_hash);
651                 s->ls_hash = NULL;
652         }
653         if (s->ls_top_dev != NULL) {
654                 s->ls_top_dev->ld_site = NULL;
655                 lu_device_put(s->ls_top_dev);
656                 s->ls_top_dev = NULL;
657         }
658 }
659 EXPORT_SYMBOL(lu_site_fini);
660
661 /*
662  * Called when initialization of stack for this site is completed.
663  */
664 int lu_site_init_finish(struct lu_site *s)
665 {
666         int result;
667         down(&lu_sites_guard);
668         result = lu_context_refill(&lu_shrink_env.le_ctx);
669         if (result == 0)
670                 list_add(&s->ls_linkage, &lu_sites);
671         up(&lu_sites_guard);
672         return result;
673 }
674 EXPORT_SYMBOL(lu_site_init_finish);
675
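/*
 * Illustrative sketch (not part of the original file): the order in which the
 * site helpers above are meant to be used when bringing a site up. The
 * top-level device is assumed to have been produced elsewhere (for example by
 * a device type's ->ldto_device_alloc() method). Kept under "#if 0" so it is
 * never compiled.
 */
#if 0
static int example_site_setup(struct lu_site *site, struct lu_device *top)
{
        int rc;

        rc = lu_site_init(site, top);           /* hash table, LRU, guard lock */
        if (rc == 0) {
                rc = lu_site_init_finish(site); /* link into global site list */
                if (rc != 0)
                        lu_site_fini(site);
        }
        return rc;
}
#endif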
676 /*
677  * Acquire additional reference on device @d
678  */
679 void lu_device_get(struct lu_device *d)
680 {
681         atomic_inc(&d->ld_ref);
682 }
683 EXPORT_SYMBOL(lu_device_get);
684
685 /*
686  * Release reference on device @d.
687  */
688 void lu_device_put(struct lu_device *d)
689 {
690         atomic_dec(&d->ld_ref);
691 }
692 EXPORT_SYMBOL(lu_device_put);
693
694 /*
695  * Initialize device @d of type @t.
696  */
697 int lu_device_init(struct lu_device *d, struct lu_device_type *t)
698 {
699         memset(d, 0, sizeof *d);
700         atomic_set(&d->ld_ref, 0);
701         d->ld_type = t;
702         return 0;
703 }
704 EXPORT_SYMBOL(lu_device_init);
705
706 /*
707  * Finalize device @d.
708  */
709 void lu_device_fini(struct lu_device *d)
710 {
711         if (d->ld_obd != NULL)
712                 /* finish lprocfs */
713                 lprocfs_obd_cleanup(d->ld_obd);
714
715         LASSERTF(atomic_read(&d->ld_ref) == 0,
716                  "Refcount is %u\n", atomic_read(&d->ld_ref));
717 }
718 EXPORT_SYMBOL(lu_device_fini);
719
720 /*
721  * Initialize object @o that is part of compound object @h and was created by
722  * device @d.
723  */
724 int lu_object_init(struct lu_object *o,
725                    struct lu_object_header *h, struct lu_device *d)
726 {
727         memset(o, 0, sizeof *o);
728         o->lo_header = h;
729         o->lo_dev    = d;
730         lu_device_get(d);
731         CFS_INIT_LIST_HEAD(&o->lo_linkage);
732         return 0;
733 }
734 EXPORT_SYMBOL(lu_object_init);
735
736 /*
737  * Finalize object and release its resources.
738  */
739 void lu_object_fini(struct lu_object *o)
740 {
741         LASSERT(list_empty(&o->lo_linkage));
742
743         if (o->lo_dev != NULL) {
744                 lu_device_put(o->lo_dev);
745                 o->lo_dev = NULL;
746         }
747 }
748 EXPORT_SYMBOL(lu_object_fini);
749
750 /*
751  * Add object @o as the first layer of compound object @h.
752  *
753  * This is typically called by the ->ldo_object_alloc() method of the
754  * top-level device.
755  */
756 void lu_object_add_top(struct lu_object_header *h, struct lu_object *o)
757 {
758         list_move(&o->lo_linkage, &h->loh_layers);
759 }
760 EXPORT_SYMBOL(lu_object_add_top);
761
762 /**
763  * Add object \a o as a layer of compound object, going after \a before.
764  *
765  * This is typically called by the ->ldo_object_alloc() method of \a
766  * before->lo_dev.
767  */
768 void lu_object_add(struct lu_object *before, struct lu_object *o)
769 {
770         list_move(&o->lo_linkage, &before->lo_linkage);
771 }
772 EXPORT_SYMBOL(lu_object_add);
773
774 /**
775  * Initialize compound object.
776  */
777 int lu_object_header_init(struct lu_object_header *h)
778 {
779         memset(h, 0, sizeof *h);
780         atomic_set(&h->loh_ref, 1);
781         INIT_HLIST_NODE(&h->loh_hash);
782         CFS_INIT_LIST_HEAD(&h->loh_lru);
783         CFS_INIT_LIST_HEAD(&h->loh_layers);
784         return 0;
785 }
786 EXPORT_SYMBOL(lu_object_header_init);
787
788 /*
789  * Finalize compound object.
790  */
791 void lu_object_header_fini(struct lu_object_header *h)
792 {
793         LASSERT(list_empty(&h->loh_layers));
794         LASSERT(list_empty(&h->loh_lru));
795         LASSERT(hlist_unhashed(&h->loh_hash));
796 }
797 EXPORT_SYMBOL(lu_object_header_fini);
798
799 /*
800  * Given a compound object, find its slice, corresponding to the device type
801  * @dtype.
802  */
803 struct lu_object *lu_object_locate(struct lu_object_header *h,
804                                    struct lu_device_type *dtype)
805 {
806         struct lu_object *o;
807
808         list_for_each_entry(o, &h->loh_layers, lo_linkage) {
809                 if (o->lo_dev->ld_type == dtype)
810                         return o;
811         }
812         return NULL;
813 }
814 EXPORT_SYMBOL(lu_object_locate);
815
816
817
818 /*
819  * Finalize and free devices in the device stack.
820  * 
821  * Finalize the device stack by purging the object cache, and by calling
822  * lu_device_type_operations::ldto_device_fini() and
823  * lu_device_type_operations::ldto_device_free() on all devices in the stack.
824  */
825 void lu_stack_fini(const struct lu_env *env, struct lu_device *top)
826 {
827         struct lu_site   *site = top->ld_site;
828         struct lu_device *scan;
829         struct lu_device *next;
830
831         lu_site_purge(env, site, ~0);
832         for (scan = top; scan != NULL; scan = next) {
833                 next = scan->ld_type->ldt_ops->ldto_device_fini(env, scan);
834                 lu_device_put(scan);
835         }
836
837         /* purge again. */
838         lu_site_purge(env, site, ~0);
839
840         if (!list_empty(&site->ls_lru) || site->ls_total != 0) {
841                 /*
842                  * Uh-oh, objects still exist.
843                  */
844                 static DECLARE_LU_CDEBUG_PRINT_INFO(cookie, D_ERROR);
845
846                 lu_site_print(env, site, &cookie, lu_cdebug_printer);
847         }
848
849         for (scan = top; scan != NULL; scan = next) {
850                 const struct lu_device_type *ldt = scan->ld_type;
851                 struct obd_type             *type;
852
853                 next = ldt->ldt_ops->ldto_device_free(env, scan);
854                 type = ldt->ldt_obd_type;
855                 type->typ_refcnt--;
856                 class_put_type(type);
857         }
858 }
859 EXPORT_SYMBOL(lu_stack_fini);
860
861 enum {
862         /*
863          * Maximal number of tld slots.
864          */
865         LU_CONTEXT_KEY_NR = 16
866 };
867
868 static struct lu_context_key *lu_keys[LU_CONTEXT_KEY_NR] = { NULL, };
869
870 static spinlock_t lu_keys_guard = SPIN_LOCK_UNLOCKED;
871
872 /*
873  * Register new key.
874  */
875 int lu_context_key_register(struct lu_context_key *key)
876 {
877         int result;
878         int i;
879
880         LASSERT(key->lct_init != NULL);
881         LASSERT(key->lct_fini != NULL);
882         LASSERT(key->lct_tags != 0);
883         LASSERT(key->lct_owner != NULL);
884
885         result = -ENFILE;
886         spin_lock(&lu_keys_guard);
887         for (i = 0; i < ARRAY_SIZE(lu_keys); ++i) {
888                 if (lu_keys[i] == NULL) {
889                         key->lct_index = i;
890                         atomic_set(&key->lct_used, 1);
891                         lu_keys[i] = key;
892                         result = 0;
893                         break;
894                 }
895         }
896         spin_unlock(&lu_keys_guard);
897         return result;
898 }
899 EXPORT_SYMBOL(lu_context_key_register);
900
901 static void key_fini(struct lu_context *ctx, int index)
902 {
903         if (ctx->lc_value[index] != NULL) {
904                 struct lu_context_key *key;
905
906                 key = lu_keys[index];
907                 LASSERT(key != NULL);
908                 LASSERT(key->lct_fini != NULL);
909                 LASSERT(atomic_read(&key->lct_used) > 1);
910
911                 key->lct_fini(ctx, key, ctx->lc_value[index]);
912                 atomic_dec(&key->lct_used);
913                 LASSERT(key->lct_owner != NULL);
914                 if (!(ctx->lc_tags & LCT_NOREF)) {
915                         LASSERT(module_refcount(key->lct_owner) > 0);
916                         module_put(key->lct_owner);
917                 }
918                 ctx->lc_value[index] = NULL;
919         }
920 }
921
922 /*
923  * Deregister key.
924  */
925 void lu_context_key_degister(struct lu_context_key *key)
926 {
927         LASSERT(atomic_read(&key->lct_used) >= 1);
928         LASSERT(0 <= key->lct_index && key->lct_index < ARRAY_SIZE(lu_keys));
929
930         key_fini(&lu_shrink_env.le_ctx, key->lct_index);
931
932         if (atomic_read(&key->lct_used) > 1)
933                 CERROR("key has instances.\n");
934         spin_lock(&lu_keys_guard);
935         lu_keys[key->lct_index] = NULL;
936         spin_unlock(&lu_keys_guard);
937 }
938 EXPORT_SYMBOL(lu_context_key_degister);
939
940 /*
941  * Return value associated with key @key in context @ctx.
942  */
943 void *lu_context_key_get(const struct lu_context *ctx,
944                          struct lu_context_key *key)
945 {
946         LASSERT(ctx->lc_state == LCS_ENTERED);
947         LASSERT(0 <= key->lct_index && key->lct_index < ARRAY_SIZE(lu_keys));
948         return ctx->lc_value[key->lct_index];
949 }
950 EXPORT_SYMBOL(lu_context_key_get);
951
952 static void keys_fini(struct lu_context *ctx)
953 {
954         int i;
955
956         if (ctx->lc_value != NULL) {
957                 for (i = 0; i < ARRAY_SIZE(lu_keys); ++i)
958                         key_fini(ctx, i);
959                 OBD_FREE(ctx->lc_value,
960                          ARRAY_SIZE(lu_keys) * sizeof ctx->lc_value[0]);
961                 ctx->lc_value = NULL;
962         }
963 }
964
965 static int keys_fill(const struct lu_context *ctx)
966 {
967         int i;
968
969         for (i = 0; i < ARRAY_SIZE(lu_keys); ++i) {
970                 struct lu_context_key *key;
971
972                 key = lu_keys[i];
973                 if (ctx->lc_value[i] == NULL &&
974                     key != NULL && key->lct_tags & ctx->lc_tags) {
975                         void *value;
976
977                         LASSERT(key->lct_init != NULL);
978                         LASSERT(key->lct_index == i);
979
980                         value = key->lct_init(ctx, key);
981                         if (unlikely(IS_ERR(value)))
982                                 return PTR_ERR(value);
983                         LASSERT(key->lct_owner != NULL);
984                         if (!(ctx->lc_tags & LCT_NOREF))
985                                 try_module_get(key->lct_owner);
986                         atomic_inc(&key->lct_used);
987                         ctx->lc_value[i] = value;
988                 }
989         }
990         return 0;
991 }
992
993 static int keys_init(struct lu_context *ctx)
994 {
995         int result;
996
997         OBD_ALLOC(ctx->lc_value, ARRAY_SIZE(lu_keys) * sizeof ctx->lc_value[0]);
998         if (likely(ctx->lc_value != NULL))
999                 result = keys_fill(ctx);
1000         else
1001                 result = -ENOMEM;
1002
1003         if (result != 0)
1004                 keys_fini(ctx);
1005         return result;
1006 }
1007
1008 /*
1009  * Initialize context data-structure. Create values for all keys.
1010  */
1011 int lu_context_init(struct lu_context *ctx, __u32 tags)
1012 {
1013         memset(ctx, 0, sizeof *ctx);
1014         ctx->lc_state = LCS_INITIALIZED;
1015         ctx->lc_tags = tags;
1016         return keys_init(ctx);
1017 }
1018 EXPORT_SYMBOL(lu_context_init);
1019
1020 /*
1021  * Finalize context data-structure. Destroy key values.
1022  */
1023 void lu_context_fini(struct lu_context *ctx)
1024 {
1025         LASSERT(ctx->lc_state == LCS_INITIALIZED || ctx->lc_state == LCS_LEFT);
1026         ctx->lc_state = LCS_FINALIZED;
1027         keys_fini(ctx);
1028 }
1029 EXPORT_SYMBOL(lu_context_fini);
1030
1031 /*
1032  * Called before entering context.
1033  */
1034 void lu_context_enter(struct lu_context *ctx)
1035 {
1036         LASSERT(ctx->lc_state == LCS_INITIALIZED || ctx->lc_state == LCS_LEFT);
1037         ctx->lc_state = LCS_ENTERED;
1038 }
1039 EXPORT_SYMBOL(lu_context_enter);
1040
1041 /*
1042  * Called after exiting from @ctx
1043  */
1044 void lu_context_exit(struct lu_context *ctx)
1045 {
1046         int i;
1047
1048         LASSERT(ctx->lc_state == LCS_ENTERED);
1049         ctx->lc_state = LCS_LEFT;
1050         if (ctx->lc_value != NULL) {
1051                 for (i = 0; i < ARRAY_SIZE(lu_keys); ++i) {
1052                         if (ctx->lc_value[i] != NULL) {
1053                                 struct lu_context_key *key;
1054
1055                                 key = lu_keys[i];
1056                                 LASSERT(key != NULL);
1057                                 if (key->lct_exit != NULL)
1058                                         key->lct_exit(ctx,
1059                                                       key, ctx->lc_value[i]);
1060                         }
1061                 }
1062         }
1063 }
1064 EXPORT_SYMBOL(lu_context_exit);
1065
1066 /*
1067  * Allocate values for any missing keys that were registered after the
1068  * context was created.
1069  */
1070 int lu_context_refill(const struct lu_context *ctx)
1071 {
1072         LASSERT(ctx->lc_value != NULL);
1073         return keys_fill(ctx);
1074 }
1075 EXPORT_SYMBOL(lu_context_refill);
1076
1077 static int lu_env_setup(struct lu_env *env, struct lu_context *ses,
1078                         __u32 tags, int noref)
1079 {
1080         int result;
1081
1082         LASSERT(ergo(!noref, !(tags & LCT_NOREF)));
1083
1084         env->le_ses = ses;
1085         result = lu_context_init(&env->le_ctx, tags);
1086         if (likely(result == 0))
1087                 lu_context_enter(&env->le_ctx);
1088         return result;
1089 }
1090
1091 static int lu_env_init_noref(struct lu_env *env, struct lu_context *ses,
1092                              __u32 tags)
1093 {
1094         return lu_env_setup(env, ses, tags, 1);
1095 }
1096
1097 int lu_env_init(struct lu_env *env, struct lu_context *ses, __u32 tags)
1098 {
1099         return lu_env_setup(env, ses, tags, 0);
1100 }
1101 EXPORT_SYMBOL(lu_env_init);
1102
1103 void lu_env_fini(struct lu_env *env)
1104 {
1105         lu_context_exit(&env->le_ctx);
1106         lu_context_fini(&env->le_ctx);
1107         env->le_ses = NULL;
1108 }
1109 EXPORT_SYMBOL(lu_env_fini);
1110
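/*
 * Illustrative sketch (not part of the original file): the expected lifecycle
 * of a private environment built from the helpers above: initialize, use,
 * finalize. The tag value and the work done in the middle are hypothetical;
 * only lu_env_init() and lu_env_fini() come from this file. Kept under
 * "#if 0" so it is never compiled.
 */
#if 0
static int example_with_env(void)
{
        struct lu_env env;
        int rc;

        rc = lu_env_init(&env, NULL, LCT_MD_THREAD);
        if (rc != 0)
                return rc;
        /* ... call code that expects an entered context in env.le_ctx ... */
        lu_env_fini(&env);
        return 0;
}
#endif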
1111 static int lu_cache_shrink(int nr, unsigned int gfp_mask)
1112 {
1113         struct lu_site *s;
1114         struct lu_site *tmp;
1115         int cached = 0;
1116         int remain = nr;
1117         CFS_LIST_HEAD(splice);
1118
1119         if (nr != 0 && !(gfp_mask & __GFP_FS))
1120                 return -1;
1121
1122         down(&lu_sites_guard);
1123         list_for_each_entry_safe(s, tmp, &lu_sites, ls_linkage) {
1124                 if (nr != 0) {
1125                         remain = lu_site_purge(&lu_shrink_env, s, remain);
1126                         /*
1127                          * Move the just-shrunk site to the tail of the site
1128                          * list to ensure shrinking fairness.
1129                          */
1130                         list_move_tail(&s->ls_linkage, &splice);
1131                 }
1132                 read_lock(&s->ls_guard);
1133                 cached += s->ls_total - s->ls_busy;
1134                 read_unlock(&s->ls_guard);
1135                 if (remain <= 0)
1136                         break;
1137         }
1138         list_splice(&splice, lu_sites.prev);
1139         up(&lu_sites_guard);
1140         return cached;
1141 }
1142
1143 static struct shrinker *lu_site_shrinker = NULL;
1144
1145 /*
1146  * Initialization of global lu_* data.
1147  */
1148 int lu_global_init(void)
1149 {
1150         int result;
1151
1152         LU_CONTEXT_KEY_INIT(&lu_global_key);
1153         result = lu_context_key_register(&lu_global_key);
1154         if (result == 0) {
1155                 /*
1156                  * At this level, we don't know what tags are needed, so
1157                  * allocate them conservatively. This should not be too bad,
1158                  * because this environment is global.
1159                  */
1160                 down(&lu_sites_guard);
1161                 result = lu_env_init_noref(&lu_shrink_env, NULL, LCT_SHRINKER);
1162                 up(&lu_sites_guard);
1163                 if (result == 0) {
1164                         /*
1165                          * Seeks estimation: 3 seeks to read a record from
1166                          * the OI, one to read the inode, one for the EA.
1167                          * Unfortunately, setting this high a value results in
1168                          * the lu_object/inode cache consuming all the memory.
1169                          */
1170                         lu_site_shrinker = set_shrinker(DEFAULT_SEEKS,
1171                                                         lu_cache_shrink);
1172                         if (result == 0)
1173                                 result = lu_time_global_init();
1174                 }
1175         }
1176         return result;
1177 }
1178
1179 /*
1180  * Dual to lu_global_init().
1181  */
1182 void lu_global_fini(void)
1183 {
1184         lu_time_global_fini();
1185         if (lu_site_shrinker != NULL) {
1186                 remove_shrinker(lu_site_shrinker);
1187                 lu_site_shrinker = NULL;
1188         }
1189
1190         lu_context_key_degister(&lu_global_key);
1191
1192         /*
1193          * Tear shrinker environment down _after_ de-registering
1194          * lu_global_key, because the latter has a value in the former.
1195          */
1196         down(&lu_sites_guard);
1197         lu_env_fini(&lu_shrink_env);
1198         up(&lu_sites_guard);
1199 }
1200
1201 struct lu_buf LU_BUF_NULL = {
1202         .lb_buf = NULL,
1203         .lb_len = 0
1204 };
1205 EXPORT_SYMBOL(LU_BUF_NULL);
1206
1207 /*
1208  * XXX: The functions below logically belong to the fid module, but they are
1209  * used by dt_store_open(). Keep them here until a better place is found.
1210  */
1211
1212 void fid_pack(struct lu_fid_pack *pack, const struct lu_fid *fid,
1213               struct lu_fid *befider)
1214 {
1215         int recsize;
1216         __u64 seq;
1217         __u32 oid;
1218
1219         seq = fid_seq(fid);
1220         oid = fid_oid(fid);
1221
1222         /*
1223          * Two cases: a compact 6-byte representation for the common case, and
1224          * a full 17-byte representation for an "unusual" fid.
1225          */
1226
1227         /*
1228          * Check that usual case is really usual.
1229          */
1230         CLASSERT(LUSTRE_SEQ_MAX_WIDTH < 0xffffull);
1231
1232         if (fid_is_igif(fid) ||
1233             seq > 0xffffffull || oid > 0xffff || fid_ver(fid) != 0) {
1234                 fid_cpu_to_be(befider, fid);
1235                 recsize = sizeof *befider;
1236         } else {
1237                 unsigned char *small_befider;
1238
1239                 small_befider = (unsigned char *)befider;
1240
1241                 small_befider[0] = seq >> 16;
1242                 small_befider[1] = seq >> 8;
1243                 small_befider[2] = seq;
1244
1245                 small_befider[3] = oid >> 8;
1246                 small_befider[4] = oid;
1247
1248                 recsize = 5;
1249         }
1250         memcpy(pack->fp_area, befider, recsize);
1251         pack->fp_len = recsize + 1;
1252 }
1253 EXPORT_SYMBOL(fid_pack);
1254
1255 int fid_unpack(const struct lu_fid_pack *pack, struct lu_fid *fid)
1256 {
1257         int result;
1258
1259         result = 0;
1260         switch (pack->fp_len) {
1261         case sizeof *fid + 1:
1262                 memcpy(fid, pack->fp_area, sizeof *fid);
1263                 fid_be_to_cpu(fid, fid);
1264                 break;
1265         case 6: {
1266                 const unsigned char *area;
1267
1268                 area = pack->fp_area;
1269                 fid->f_seq = (area[0] << 16) | (area[1] << 8) | area[2];
1270                 fid->f_oid = (area[3] << 8) | area[4];
1271                 fid->f_ver = 0;
1272                 break;
1273         }
1274         default:
1275                 CERROR("Unexpected packed fid size: %d\n", pack->fp_len);
1276                 result = -EIO;
1277         }
1278         return result;
1279 }
1280 EXPORT_SYMBOL(fid_unpack);
1281
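/*
 * Illustrative sketch (not part of the original file): a pack/unpack round
 * trip using the helpers above. A fid with a small sequence/oid and version 0
 * takes the compact path (fp_len == 6); anything else is stored as a full
 * byte-swapped fid (fp_len == 17); either way unpacking recovers the original
 * fid. The concrete fid values are made up. Kept under "#if 0" so it is never
 * compiled.
 */
#if 0
static void example_fid_roundtrip(void)
{
        struct lu_fid      fid = { .f_seq = 0x400000001ULL, .f_oid = 2, .f_ver = 0 };
        struct lu_fid      scratch;
        struct lu_fid      unpacked;
        struct lu_fid_pack pack;

        fid_pack(&pack, &fid, &scratch);
        LASSERT(fid_unpack(&pack, &unpacked) == 0);
        LASSERT(lu_fid_eq(&fid, &unpacked));
}
#endif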
1282 const char *lu_time_names[LU_TIME_NR] = {
1283         [LU_TIME_FIND_LOOKUP] = "find_lookup",
1284         [LU_TIME_FIND_ALLOC]  = "find_alloc",
1285         [LU_TIME_FIND_INSERT] = "find_insert"
1286 };
1287 EXPORT_SYMBOL(lu_time_names);