1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.gnu.org/licenses/gpl-2.0.html
19  *
20  * GPL HEADER END
21  */
22 /*
23  * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
24  * Use is subject to license terms.
25  *
26  * Copyright (c) 2011, 2017, Intel Corporation.
27  */
28 /*
29  * This file is part of Lustre, http://www.lustre.org/
30  * Lustre is a trademark of Sun Microsystems, Inc.
31  *
32  * lustre/obdclass/lu_object.c
33  *
34  * Lustre Object.
35  * These are the only exported functions; they provide some generic
36  * infrastructure for managing object devices.
37  *
38  *   Author: Nikita Danilov <nikita.danilov@sun.com>
39  */
40
41 #define DEBUG_SUBSYSTEM S_CLASS
42
43 #include <linux/module.h>
44 #include <linux/list.h>
45 #include <libcfs/libcfs.h>
46 #include <libcfs/libcfs_hash.h> /* hash_long() */
47 #include <libcfs/linux/linux-mem.h>
48 #include <libcfs/linux/linux-hash.h>
49 #include <obd_class.h>
50 #include <obd_support.h>
51 #include <lustre_disk.h>
52 #include <lustre_fid.h>
53 #include <lu_object.h>
54 #include <lu_ref.h>
55
56 struct lu_site_bkt_data {
57         /**
58          * LRU list, updated on each access to object. Protected by
59          * bucket lock of lu_site::ls_obj_hash.
60          *
61          * "Cold" end of LRU is lu_site::ls_lru.next. Accessed objects are
62          * moved to the lu_site::ls_lru.prev (this is due to the non-existence
63          * of list_for_each_entry_safe_reverse()).
64          */
65         struct list_head                lsb_lru;
66         /**
67          * Wait-queue signaled when an object in this site is ultimately
68          * destroyed (lu_object_free()) or initialized (lu_object_start()).
69          * It is used by lu_object_find() to wait before retrying when an
70          * object in the process of destruction is found in the hash table,
71          * or to wait for an object to be initialized by the allocator.
72          *
73          * \see htable_lookup().
74          */
75         wait_queue_head_t               lsb_waitq;
76 };
77
78 enum {
79         LU_CACHE_PERCENT_MAX     = 50,
80         LU_CACHE_PERCENT_DEFAULT = 20
81 };
82
83 #define LU_CACHE_NR_MAX_ADJUST          512
84 #define LU_CACHE_NR_UNLIMITED           -1
85 #define LU_CACHE_NR_DEFAULT             LU_CACHE_NR_UNLIMITED
86 #define LU_CACHE_NR_LDISKFS_LIMIT       LU_CACHE_NR_UNLIMITED
87 /** This is set to roughly (20 * OSS_NTHRS_MAX) to prevent thrashing */
88 #define LU_CACHE_NR_ZFS_LIMIT           10240
89
90 #define LU_SITE_BITS_MIN    12
91 #define LU_SITE_BITS_MAX    24
92 #define LU_SITE_BITS_MAX_CL 19
93 /**
94  * 256 buckets in total; we don't want too many buckets because they:
95  * - consume too much memory
96  * - lead to unbalanced LRU lists
97  */
98 #define LU_SITE_BKT_BITS    8
99
100
101 static unsigned int lu_cache_percent = LU_CACHE_PERCENT_DEFAULT;
102 module_param(lu_cache_percent, int, 0644);
103 MODULE_PARM_DESC(lu_cache_percent, "Percentage of memory to be used as lu_object cache");
104
105 static long lu_cache_nr = LU_CACHE_NR_DEFAULT;
106 module_param(lu_cache_nr, long, 0644);
107 MODULE_PARM_DESC(lu_cache_nr, "Maximum number of objects in lu_object cache");
108
109 static void lu_object_free(const struct lu_env *env, struct lu_object *o);
110 static __u32 ls_stats_read(struct lprocfs_stats *stats, int idx);
111
112 wait_queue_head_t *
113 lu_site_wq_from_fid(struct lu_site *site, struct lu_fid *fid)
114 {
115         struct cfs_hash_bd bd;
116         struct lu_site_bkt_data *bkt;
117
118         cfs_hash_bd_get(site->ls_obj_hash, fid, &bd);
119         bkt = cfs_hash_bd_extra_get(site->ls_obj_hash, &bd);
120         return &bkt->lsb_waitq;
121 }
122 EXPORT_SYMBOL(lu_site_wq_from_fid);
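
/*
 * Illustrative sketch (not part of the original source): one way a caller
 * might use lu_site_wq_from_fid() to sleep until an object identified by a
 * FID is either fully initialized or being torn down, mirroring the pattern
 * in lu_object_find_at() below. The helper name example_wait_for_object() is
 * an assumption made up for this sketch.
 */
#if 0
static void example_wait_for_object(struct lu_site *site,
				    struct lu_object_header *h)
{
	wait_queue_head_t *wq = lu_site_wq_from_fid(site, &h->loh_fid);

	/* this per-bucket queue is woken on initialization and destruction */
	wait_event(*wq, lu_object_is_inited(h) || lu_object_is_dying(h));
}
#endif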
123
124 /**
125  * Decrease reference counter on object. If last reference is freed, return
126  * object to the cache, unless lu_object_is_dying(o) holds. In the latter
127  * case, free object immediately.
128  */
129 void lu_object_put(const struct lu_env *env, struct lu_object *o)
130 {
131         struct lu_site_bkt_data *bkt;
132         struct lu_object_header *top = o->lo_header;
133         struct lu_site *site = o->lo_dev->ld_site;
134         struct lu_object *orig = o;
135         struct cfs_hash_bd bd;
136         const struct lu_fid *fid = lu_object_fid(o);
137         bool is_dying;
138
139         /*
140          * Until full fids-on-OST is implemented, anonymous objects are
141          * possible in OSP. Such an object isn't listed in the site,
142          * so we should not remove it from the site.
143          */
144         if (fid_is_zero(fid)) {
145                 LASSERT(top->loh_hash.next == NULL
146                         && top->loh_hash.pprev == NULL);
147                 LASSERT(list_empty(&top->loh_lru));
148                 if (!atomic_dec_and_test(&top->loh_ref))
149                         return;
150                 list_for_each_entry_reverse(o, &top->loh_layers, lo_linkage) {
151                         if (o->lo_ops->loo_object_release != NULL)
152                                 o->lo_ops->loo_object_release(env, o);
153                 }
154                 lu_object_free(env, orig);
155                 return;
156         }
157
158         cfs_hash_bd_get(site->ls_obj_hash, &top->loh_fid, &bd);
159         bkt = cfs_hash_bd_extra_get(site->ls_obj_hash, &bd);
160
161         is_dying = lu_object_is_dying(top);
162         if (!cfs_hash_bd_dec_and_lock(site->ls_obj_hash, &bd, &top->loh_ref)) {
163                 /* At this point the object reference has been dropped and
164                  * the lock is not held, so the lu_object must not be touched
165                  * because it can be freed by a concurrent thread. Use the
166                  * local variable for the check.
167                  */
168                 if (is_dying) {
169                         /*
170                          * somebody may be waiting for this, currently only
171                          * used for cl_object, see cl_object_put_last().
172                          */
173                         wake_up_all(&bkt->lsb_waitq);
174                 }
175                 return;
176         }
177
178         /*
179          * When last reference is released, iterate over object
180          * layers, and notify them that object is no longer busy.
181          */
182         list_for_each_entry_reverse(o, &top->loh_layers, lo_linkage) {
183                 if (o->lo_ops->loo_object_release != NULL)
184                         o->lo_ops->loo_object_release(env, o);
185         }
186
187         /* Don't use the local 'is_dying' here because it was taken without
188          * the lock; here we need the latest actual value, so check the
189          * lu_object directly.
190          */
191         if (!lu_object_is_dying(top) &&
192             (lu_object_exists(orig) || lu_object_is_cl(orig))) {
193                 LASSERT(list_empty(&top->loh_lru));
194                 list_add_tail(&top->loh_lru, &bkt->lsb_lru);
195                 percpu_counter_inc(&site->ls_lru_len_counter);
196                 CDEBUG(D_INODE, "Add %p/%p to site lru. hash: %p, bkt: %p\n",
197                        orig, top, site->ls_obj_hash, bkt);
198                 cfs_hash_bd_unlock(site->ls_obj_hash, &bd, 1);
199                 return;
200         }
201
202         /*
203          * If object is dying (will not be cached) then remove it
204          * from hash table and LRU.
205          *
206          * This is done with the hash table and LRU lists locked. As the only
207          * way to acquire the first reference to a previously unreferenced
208          * object is through hash-table lookup (lu_object_find())
209          * or LRU scanning (lu_site_purge()), both of which are done under the
210          * hash-table and LRU lock, no race with a concurrent object lookup is
211          * possible and we can safely destroy the object below.
212          */
213         if (!test_and_set_bit(LU_OBJECT_UNHASHED, &top->loh_flags))
214                 cfs_hash_bd_del_locked(site->ls_obj_hash, &bd, &top->loh_hash);
215         cfs_hash_bd_unlock(site->ls_obj_hash, &bd, 1);
216         /*
217          * Object was already removed from hash and lru above, can
218          * kill it.
219          */
220         lu_object_free(env, orig);
221 }
222 EXPORT_SYMBOL(lu_object_put);
223
224 /**
225  * Put an object and do not keep it in the cache. This is a temporary
226  * solution for multi-site objects whose layering is not constant.
227  */
228 void lu_object_put_nocache(const struct lu_env *env, struct lu_object *o)
229 {
230         set_bit(LU_OBJECT_HEARD_BANSHEE, &o->lo_header->loh_flags);
231         return lu_object_put(env, o);
232 }
233 EXPORT_SYMBOL(lu_object_put_nocache);
234
235 /**
236  * Kill the object and take it out of LRU cache.
237  * Currently used by client code for layout change.
238  */
239 void lu_object_unhash(const struct lu_env *env, struct lu_object *o)
240 {
241         struct lu_object_header *top;
242
243         top = o->lo_header;
244         set_bit(LU_OBJECT_HEARD_BANSHEE, &top->loh_flags);
245         if (!test_and_set_bit(LU_OBJECT_UNHASHED, &top->loh_flags)) {
246                 struct lu_site *site = o->lo_dev->ld_site;
247                 struct cfs_hash *obj_hash = site->ls_obj_hash;
248                 struct cfs_hash_bd bd;
249
250                 cfs_hash_bd_get_and_lock(obj_hash, &top->loh_fid, &bd, 1);
251                 if (!list_empty(&top->loh_lru)) {
252                         struct lu_site_bkt_data *bkt;
253
254                         list_del_init(&top->loh_lru);
255                         bkt = cfs_hash_bd_extra_get(obj_hash, &bd);
256                         percpu_counter_dec(&site->ls_lru_len_counter);
257                 }
258                 cfs_hash_bd_del_locked(obj_hash, &bd, &top->loh_hash);
259                 cfs_hash_bd_unlock(obj_hash, &bd, 1);
260         }
261 }
262 EXPORT_SYMBOL(lu_object_unhash);
263
264 /**
265  * Allocate new object.
266  *
267  * This follows object creation protocol, described in the comment within
268  * struct lu_device_operations definition.
269  */
270 static struct lu_object *lu_object_alloc(const struct lu_env *env,
271                                          struct lu_device *dev,
272                                          const struct lu_fid *f)
273 {
274         struct lu_object *top;
275
276         /*
277          * Create top-level object slice. This will also create
278          * lu_object_header.
279          */
280         top = dev->ld_ops->ldo_object_alloc(env, NULL, dev);
281         if (top == NULL)
282                 return ERR_PTR(-ENOMEM);
283         if (IS_ERR(top))
284                 return top;
285         /*
286          * This is the only place where object fid is assigned. It's constant
287          * after this point.
288          */
289         top->lo_header->loh_fid = *f;
290
291         return top;
292 }
293
294 /**
295  * Initialize object.
296  *
297  * This is called after object hash insertion to avoid returning an object with
298  * stale attributes.
299  */
300 static int lu_object_start(const struct lu_env *env, struct lu_device *dev,
301                            struct lu_object *top,
302                            const struct lu_object_conf *conf)
303 {
304         struct lu_object *scan;
305         struct list_head *layers;
306         unsigned int init_mask = 0;
307         unsigned int init_flag;
308         int clean;
309         int result;
310
311         layers = &top->lo_header->loh_layers;
312
313         do {
314                 /*
315                  * Call ->loo_object_init() repeatedly, until no more new
316                  * object slices are created.
317                  */
318                 clean = 1;
319                 init_flag = 1;
320                 list_for_each_entry(scan, layers, lo_linkage) {
321                         if (init_mask & init_flag)
322                                 goto next;
323                         clean = 0;
324                         scan->lo_header = top->lo_header;
325                         result = scan->lo_ops->loo_object_init(env, scan, conf);
326                         if (result)
327                                 return result;
328
329                         init_mask |= init_flag;
330 next:
331                         init_flag <<= 1;
332                 }
333         } while (!clean);
334
335         list_for_each_entry_reverse(scan, layers, lo_linkage) {
336                 if (scan->lo_ops->loo_object_start != NULL) {
337                         result = scan->lo_ops->loo_object_start(env, scan);
338                         if (result)
339                                 return result;
340                 }
341         }
342
343         lprocfs_counter_incr(dev->ld_site->ls_stats, LU_SS_CREATED);
344
345         set_bit(LU_OBJECT_INITED, &top->lo_header->loh_flags);
346
347         return 0;
348 }
349
350 /**
351  * Free an object.
352  */
353 static void lu_object_free(const struct lu_env *env, struct lu_object *o)
354 {
355         wait_queue_head_t *wq;
356         struct lu_site          *site;
357         struct lu_object        *scan;
358         struct list_head        *layers;
359         struct list_head         splice;
360
361         site = o->lo_dev->ld_site;
362         layers = &o->lo_header->loh_layers;
363         wq = lu_site_wq_from_fid(site, &o->lo_header->loh_fid);
364         /*
365          * First call ->loo_object_delete() method to release all resources.
366          */
367         list_for_each_entry_reverse(scan, layers, lo_linkage) {
368                 if (scan->lo_ops->loo_object_delete != NULL)
369                         scan->lo_ops->loo_object_delete(env, scan);
370         }
371
372         /*
373          * Then, splice object layers into stand-alone list, and call
374          * ->loo_object_free() on all layers to free memory. Splice is
375          * necessary, because lu_object_header is freed together with the
376          * top-level slice.
377          */
378         INIT_LIST_HEAD(&splice);
379         list_splice_init(layers, &splice);
380         while (!list_empty(&splice)) {
381                 /*
382                  * Free layers in bottom-to-top order, so that object header
383                  * lives as long as possible and ->loo_object_free() methods
384                  * can look at its contents.
385                  */
386                 o = container_of0(splice.prev, struct lu_object, lo_linkage);
387                 list_del_init(&o->lo_linkage);
388                 LASSERT(o->lo_ops->loo_object_free != NULL);
389                 o->lo_ops->loo_object_free(env, o);
390         }
391
392         if (waitqueue_active(wq))
393                 wake_up_all(wq);
394 }
395
396 /**
397  * Free \a nr objects from the cold end of the site LRU list.
398  * If \a canblock is 0, do not block waiting for another
399  * instance of lu_site_purge() to complete.
400  */
401 int lu_site_purge_objects(const struct lu_env *env, struct lu_site *s,
402                           int nr, int canblock)
403 {
404         struct lu_object_header *h;
405         struct lu_object_header *temp;
406         struct lu_site_bkt_data *bkt;
407         struct cfs_hash_bd            bd;
408         struct cfs_hash_bd            bd2;
409         struct list_head         dispose;
410         int                      did_sth;
411         unsigned int             start = 0;
412         int                      count;
413         int                      bnr;
414         unsigned int             i;
415
416         if (OBD_FAIL_CHECK(OBD_FAIL_OBD_NO_LRU))
417                 RETURN(0);
418
419         INIT_LIST_HEAD(&dispose);
420         /*
421          * Under LRU list lock, scan LRU list and move unreferenced objects to
422          * the dispose list, removing them from LRU and hash table.
423          */
424         if (nr != ~0)
425                 start = s->ls_purge_start;
426         bnr = (nr == ~0) ? -1 : nr / (int)CFS_HASH_NBKT(s->ls_obj_hash) + 1;
427  again:
428         /*
429          * Running purge threads in parallel makes no sense; it can only
430          * cause trouble. See LU-5331.
431          */
432         if (canblock != 0)
433                 mutex_lock(&s->ls_purge_mutex);
434         else if (mutex_trylock(&s->ls_purge_mutex) == 0)
435                 goto out;
436
437         did_sth = 0;
438         cfs_hash_for_each_bucket(s->ls_obj_hash, &bd, i) {
439                 if (i < start)
440                         continue;
441                 count = bnr;
442                 cfs_hash_bd_lock(s->ls_obj_hash, &bd, 1);
443                 bkt = cfs_hash_bd_extra_get(s->ls_obj_hash, &bd);
444
445                 list_for_each_entry_safe(h, temp, &bkt->lsb_lru, loh_lru) {
446                         LASSERT(atomic_read(&h->loh_ref) == 0);
447
448                         cfs_hash_bd_get(s->ls_obj_hash, &h->loh_fid, &bd2);
449                         LASSERT(bd.bd_bucket == bd2.bd_bucket);
450
451                         cfs_hash_bd_del_locked(s->ls_obj_hash,
452                                                &bd2, &h->loh_hash);
453                         list_move(&h->loh_lru, &dispose);
454                         percpu_counter_dec(&s->ls_lru_len_counter);
455                         if (did_sth == 0)
456                                 did_sth = 1;
457
458                         if (nr != ~0 && --nr == 0)
459                                 break;
460
461                         if (count > 0 && --count == 0)
462                                 break;
463
464                 }
465                 cfs_hash_bd_unlock(s->ls_obj_hash, &bd, 1);
466                 cond_resched();
467                 /*
468                  * Free everything on the dispose list. This is safe against
469                  * races due to the reasons described in lu_object_put().
470                  */
471                 while (!list_empty(&dispose)) {
472                         h = container_of0(dispose.next,
473                                           struct lu_object_header, loh_lru);
474                         list_del_init(&h->loh_lru);
475                         lu_object_free(env, lu_object_top(h));
476                         lprocfs_counter_incr(s->ls_stats, LU_SS_LRU_PURGED);
477                 }
478
479                 if (nr == 0)
480                         break;
481         }
482         mutex_unlock(&s->ls_purge_mutex);
483
484         if (nr != 0 && did_sth && start != 0) {
485                 start = 0; /* restart from the first bucket */
486                 goto again;
487         }
488         /* race on s->ls_purge_start, but nobody cares */
489         s->ls_purge_start = i % CFS_HASH_NBKT(s->ls_obj_hash);
490
491 out:
492         return nr;
493 }
494 EXPORT_SYMBOL(lu_site_purge_objects);
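
/*
 * Illustrative sketch (not part of the original source): a typical
 * non-blocking cache trim built on lu_site_purge_objects(). The helper name
 * example_trim_site_cache() and the count of 128 are assumptions for
 * illustration only.
 */
#if 0
static void example_trim_site_cache(const struct lu_env *env,
				    struct lu_site *site)
{
	/* Drop up to 128 unreferenced objects from the cold end of the LRU;
	 * canblock=0 means give up immediately if another thread already
	 * holds ls_purge_mutex. */
	lu_site_purge_objects(env, site, 128, 0);
}
#endif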
495
496 /*
497  * Object printing.
498  *
499  * The code below has to jump through certain hoops to output an object
500  * description into the libcfs_debug_msg-based log. The problem is that
501  * lu_object_print() composes the object description from strings that are
502  * parts of _lines_ of output (i.e., strings that are not terminated by a
503  * newline). This does not fit very well into the libcfs_debug_msg()
504  * interface, which assumes that each message supplied to it is a
505  * self-contained output line.
506  *
507  * To work around this, strings are collected in a temporary buffer
508  * (implemented as a value of the lu_global_key key), until a terminating
509  * newline character is detected.
510  */
511
512 enum {
513         /**
514          * Maximal line size.
515          *
516          * XXX overflow is not handled correctly.
517          */
518         LU_CDEBUG_LINE = 512
519 };
520
521 struct lu_cdebug_data {
522         /**
523          * Temporary buffer.
524          */
525         char lck_area[LU_CDEBUG_LINE];
526 };
527
528 /* context key constructor/destructor: lu_global_key_init, lu_global_key_fini */
529 LU_KEY_INIT_FINI(lu_global, struct lu_cdebug_data);
530
531 /**
532  * Key, holding temporary buffer. This key is registered very early by
533  * lu_global_init().
534  */
535 static struct lu_context_key lu_global_key = {
536         .lct_tags = LCT_MD_THREAD | LCT_DT_THREAD |
537                     LCT_MG_THREAD | LCT_CL_THREAD | LCT_LOCAL,
538         .lct_init = lu_global_key_init,
539         .lct_fini = lu_global_key_fini
540 };
541
542 /**
543  * Printer function emitting messages through libcfs_debug_msg().
544  */
545 int lu_cdebug_printer(const struct lu_env *env,
546                       void *cookie, const char *format, ...)
547 {
548         struct libcfs_debug_msg_data *msgdata = cookie;
549         struct lu_cdebug_data        *key;
550         int used;
551         int complete;
552         va_list args;
553
554         va_start(args, format);
555
556         key = lu_context_key_get(&env->le_ctx, &lu_global_key);
557         LASSERT(key != NULL);
558
559         used = strlen(key->lck_area);
560         complete = format[strlen(format) - 1] == '\n';
561         /*
562          * Append new chunk to the buffer.
563          */
564         vsnprintf(key->lck_area + used,
565                   ARRAY_SIZE(key->lck_area) - used, format, args);
566         if (complete) {
567                 if (cfs_cdebug_show(msgdata->msg_mask, msgdata->msg_subsys))
568                         libcfs_debug_msg(msgdata, "%s\n", key->lck_area);
569                 key->lck_area[0] = 0;
570         }
571         va_end(args);
572         return 0;
573 }
574 EXPORT_SYMBOL(lu_cdebug_printer);
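
/*
 * Illustrative sketch (not part of the original source): how the chunked
 * printing described above behaves. Calls without a trailing newline only
 * append to the per-context buffer; the call that ends in '\n' flushes the
 * accumulated line to the debug log. The helper name example_cdebug_chunks()
 * is an assumption made up for this sketch.
 */
#if 0
static void example_cdebug_chunks(const struct lu_env *env,
				  struct libcfs_debug_msg_data *msgdata)
{
	lu_cdebug_printer(env, msgdata, "object ");        /* buffered */
	lu_cdebug_printer(env, msgdata, "state: ");        /* buffered */
	lu_cdebug_printer(env, msgdata, "%s\n", "exists"); /* emits one line */
}
#endif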
575
576 /**
577  * Print object header.
578  */
579 void lu_object_header_print(const struct lu_env *env, void *cookie,
580                             lu_printer_t printer,
581                             const struct lu_object_header *hdr)
582 {
583         (*printer)(env, cookie, "header@%p[%#lx, %d, "DFID"%s%s%s]",
584                    hdr, hdr->loh_flags, atomic_read(&hdr->loh_ref),
585                    PFID(&hdr->loh_fid),
586                    hlist_unhashed(&hdr->loh_hash) ? "" : " hash",
587                    list_empty((struct list_head *)&hdr->loh_lru) ?
588                    "" : " lru",
589                    hdr->loh_attr & LOHA_EXISTS ? " exist" : "");
590 }
591 EXPORT_SYMBOL(lu_object_header_print);
592
593 /**
594  * Print human readable representation of the \a o to the \a printer.
595  */
596 void lu_object_print(const struct lu_env *env, void *cookie,
597                      lu_printer_t printer, const struct lu_object *o)
598 {
599         static const char ruler[] = "........................................";
600         struct lu_object_header *top;
601         int depth = 4;
602
603         top = o->lo_header;
604         lu_object_header_print(env, cookie, printer, top);
605         (*printer)(env, cookie, "{\n");
606
607         list_for_each_entry(o, &top->loh_layers, lo_linkage) {
608                 /*
609                  * print `.' \a depth times followed by type name and address
610                  */
611                 (*printer)(env, cookie, "%*.*s%s@%p", depth, depth, ruler,
612                            o->lo_dev->ld_type->ldt_name, o);
613
614                 if (o->lo_ops->loo_object_print != NULL)
615                         (*o->lo_ops->loo_object_print)(env, cookie, printer, o);
616
617                 (*printer)(env, cookie, "\n");
618         }
619
620         (*printer)(env, cookie, "} header@%p\n", top);
621 }
622 EXPORT_SYMBOL(lu_object_print);
623
624 /**
625  * Check object consistency.
626  */
627 int lu_object_invariant(const struct lu_object *o)
628 {
629         struct lu_object_header *top;
630
631         top = o->lo_header;
632         list_for_each_entry(o, &top->loh_layers, lo_linkage) {
633                 if (o->lo_ops->loo_object_invariant != NULL &&
634                     !o->lo_ops->loo_object_invariant(o))
635                         return 0;
636         }
637         return 1;
638 }
639
640 static struct lu_object *htable_lookup(struct lu_site *s,
641                                        struct cfs_hash_bd *bd,
642                                        const struct lu_fid *f,
643                                        __u64 *version)
644 {
645         struct lu_object_header *h;
646         struct hlist_node *hnode;
647         __u64 ver = cfs_hash_bd_version_get(bd);
648
649         if (*version == ver)
650                 return ERR_PTR(-ENOENT);
651
652         *version = ver;
653         /* cfs_hash_bd_peek_locked() is somewhat of an "internal" function
654          * of cfs_hash; it does not take a reference on the object. */
655         hnode = cfs_hash_bd_peek_locked(s->ls_obj_hash, bd, (void *)f);
656         if (!hnode) {
657                 lprocfs_counter_incr(s->ls_stats, LU_SS_CACHE_MISS);
658                 return ERR_PTR(-ENOENT);
659         }
660
661         h = container_of0(hnode, struct lu_object_header, loh_hash);
662         cfs_hash_get(s->ls_obj_hash, hnode);
663         lprocfs_counter_incr(s->ls_stats, LU_SS_CACHE_HIT);
664         if (!list_empty(&h->loh_lru)) {
665                 list_del_init(&h->loh_lru);
666                 percpu_counter_dec(&s->ls_lru_len_counter);
667         }
668         return lu_object_top(h);
669 }
670
671 /**
672  * Search the cache for an object with the fid \a f. If such an object is
673  * found, return it. Otherwise, create a new object, insert it into the cache
674  * and return it. In any case, an additional reference is acquired on the returned object.
675  */
676 struct lu_object *lu_object_find(const struct lu_env *env,
677                                  struct lu_device *dev, const struct lu_fid *f,
678                                  const struct lu_object_conf *conf)
679 {
680         return lu_object_find_at(env, dev->ld_site->ls_top_dev, f, conf);
681 }
682 EXPORT_SYMBOL(lu_object_find);
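
/*
 * Illustrative sketch (not part of the original source): the basic
 * lookup/release pattern built on lu_object_find() and lu_object_put().
 * The helper name example_lookup_and_release() is an assumption made up for
 * this sketch; a NULL conf requests the default lookup behaviour.
 */
#if 0
static int example_lookup_and_release(const struct lu_env *env,
				      struct lu_device *dev,
				      const struct lu_fid *fid)
{
	struct lu_object *o;

	/* Returns the cached object or creates a new one; in either case a
	 * reference is held for the caller. */
	o = lu_object_find(env, dev, fid, NULL);
	if (IS_ERR(o))
		return PTR_ERR(o);

	/* ... use the object ... */

	/* Drop the reference; a live object normally goes back onto the LRU */
	lu_object_put(env, o);
	return 0;
}
#endif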
683
684 /*
685  * Limit the lu_object cache to a maximum of lu_cache_nr objects.  Because
686  * the calculation for the number of objects to reclaim is not covered by
687  * a lock, the maximum number of objects is capped by LU_CACHE_NR_MAX_ADJUST.
688  * This ensures that many concurrent threads will not accidentally purge
689  * the entire cache.
690  */
691 static void lu_object_limit(const struct lu_env *env,
692                             struct lu_device *dev)
693 {
694         __u64 size, nr;
695
696         if (lu_cache_nr == LU_CACHE_NR_UNLIMITED)
697                 return;
698
699         size = cfs_hash_size_get(dev->ld_site->ls_obj_hash);
700         nr = (__u64)lu_cache_nr;
701         if (size <= nr)
702                 return;
703
704         lu_site_purge_objects(env, dev->ld_site,
705                               MIN(size - nr, LU_CACHE_NR_MAX_ADJUST), 0);
706 }
707
708 /**
709  * Core logic of lu_object_find*() functions.
710  *
711  * Much like lu_object_find(), but top level device of object is specifically
712  * \a dev rather than top level device of the site. This interface allows
713  * objects of different "stacking" to be created within the same site.
714  */
715 struct lu_object *lu_object_find_at(const struct lu_env *env,
716                                     struct lu_device *dev,
717                                     const struct lu_fid *f,
718                                     const struct lu_object_conf *conf)
719 {
720         struct lu_object *o;
721         struct lu_object *shadow;
722         struct lu_site *s;
723         struct cfs_hash *hs;
724         struct cfs_hash_bd bd;
725         struct lu_site_bkt_data *bkt;
726         struct l_wait_info lwi = { 0 };
727         __u64 version = 0;
728         int rc;
729
730         ENTRY;
731
732         /*
733          * This uses standard index maintenance protocol:
734          *
735          *     - search index under lock, and return object if found;
736          *     - otherwise, unlock index, allocate new object;
737          *     - lock index and search again;
738          *     - if nothing is found (usual case), insert newly created
739          *       object into index;
740          *     - otherwise (race: other thread inserted object), free
741          *       object just allocated.
742          *     - unlock index;
743          *     - return object.
744          *
745          * For the "LOC_F_NEW" case, we are sure the object is newly created.
746          * It is unnecessary to perform lookup-alloc-lookup-insert; instead,
747          * just allocate and insert directly.
748          *
749          */
750         s  = dev->ld_site;
751         hs = s->ls_obj_hash;
752
753         if (unlikely(OBD_FAIL_PRECHECK(OBD_FAIL_OBD_ZERO_NLINK_RACE)))
754                 lu_site_purge(env, s, -1);
755
756         cfs_hash_bd_get(hs, f, &bd);
757         bkt = cfs_hash_bd_extra_get(s->ls_obj_hash, &bd);
758         if (!(conf && conf->loc_flags & LOC_F_NEW)) {
759                 cfs_hash_bd_lock(hs, &bd, 1);
760                 o = htable_lookup(s, &bd, f, &version);
761                 cfs_hash_bd_unlock(hs, &bd, 1);
762
763                 if (!IS_ERR(o)) {
764                         if (likely(lu_object_is_inited(o->lo_header)))
765                                 RETURN(o);
766
767                         l_wait_event(bkt->lsb_waitq,
768                                      lu_object_is_inited(o->lo_header) ||
769                                      lu_object_is_dying(o->lo_header), &lwi);
770
771                         if (lu_object_is_dying(o->lo_header)) {
772                                 lu_object_put(env, o);
773
774                                 RETURN(ERR_PTR(-ENOENT));
775                         }
776
777                         RETURN(o);
778                 }
779
780                 if (PTR_ERR(o) != -ENOENT)
781                         RETURN(o);
782         }
783
784         /*
785          * Allocate the new object. NB: the object is left uninitialized here;
786          * if it were initialized now and then changed between allocation and
787          * hash insertion, an object with stale attributes would be returned.
788          */
789         o = lu_object_alloc(env, dev, f);
790         if (IS_ERR(o))
791                 RETURN(o);
792
793         LASSERT(lu_fid_eq(lu_object_fid(o), f));
794
795         CFS_RACE_WAIT(OBD_FAIL_OBD_ZERO_NLINK_RACE);
796
797         cfs_hash_bd_lock(hs, &bd, 1);
798
799         if (conf && conf->loc_flags & LOC_F_NEW)
800                 shadow = ERR_PTR(-ENOENT);
801         else
802                 shadow = htable_lookup(s, &bd, f, &version);
803         if (likely(PTR_ERR(shadow) == -ENOENT)) {
804                 cfs_hash_bd_add_locked(hs, &bd, &o->lo_header->loh_hash);
805                 cfs_hash_bd_unlock(hs, &bd, 1);
806
807                 /*
808                  * This may result in rather complicated operations, including
809                  * fld queries, inode loading, etc.
810                  */
811                 rc = lu_object_start(env, dev, o, conf);
812                 if (rc) {
813                         lu_object_put_nocache(env, o);
814                         RETURN(ERR_PTR(rc));
815                 }
816
817                 wake_up_all(&bkt->lsb_waitq);
818
819                 lu_object_limit(env, dev);
820
821                 RETURN(o);
822         }
823
824         lprocfs_counter_incr(s->ls_stats, LU_SS_CACHE_RACE);
825         cfs_hash_bd_unlock(hs, &bd, 1);
826         lu_object_free(env, o);
827
828         if (!(conf && conf->loc_flags & LOC_F_NEW) &&
829             !lu_object_is_inited(shadow->lo_header)) {
830                 l_wait_event(bkt->lsb_waitq,
831                              lu_object_is_inited(shadow->lo_header) ||
832                              lu_object_is_dying(shadow->lo_header), &lwi);
833
834                 if (lu_object_is_dying(shadow->lo_header)) {
835                         lu_object_put(env, shadow);
836
837                         RETURN(ERR_PTR(-ENOENT));
838                 }
839         }
840
841         RETURN(shadow);
842 }
843 EXPORT_SYMBOL(lu_object_find_at);
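
/*
 * Illustrative sketch (not part of the original source): using LOC_F_NEW with
 * lu_object_find_at() when the caller knows the object cannot already be
 * cached, so the initial hash lookup is skipped and the object is allocated
 * and inserted directly. The helper name example_create_new() is an
 * assumption made up for this sketch.
 */
#if 0
static struct lu_object *example_create_new(const struct lu_env *env,
					    struct lu_device *dev,
					    const struct lu_fid *fid)
{
	struct lu_object_conf conf = { .loc_flags = LOC_F_NEW };

	return lu_object_find_at(env, dev->ld_site->ls_top_dev, fid, &conf);
}
#endif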
844
845 /**
846  * Find object with given fid, and return its slice belonging to given device.
847  */
848 struct lu_object *lu_object_find_slice(const struct lu_env *env,
849                                        struct lu_device *dev,
850                                        const struct lu_fid *f,
851                                        const struct lu_object_conf *conf)
852 {
853         struct lu_object *top;
854         struct lu_object *obj;
855
856         top = lu_object_find(env, dev, f, conf);
857         if (IS_ERR(top))
858                 return top;
859
860         obj = lu_object_locate(top->lo_header, dev->ld_type);
861         if (unlikely(obj == NULL)) {
862                 lu_object_put(env, top);
863                 obj = ERR_PTR(-ENOENT);
864         }
865
866         return obj;
867 }
868 EXPORT_SYMBOL(lu_object_find_slice);
869
870 int lu_device_type_init(struct lu_device_type *ldt)
871 {
872         int result = 0;
873
874         atomic_set(&ldt->ldt_device_nr, 0);
875         if (ldt->ldt_ops->ldto_init)
876                 result = ldt->ldt_ops->ldto_init(ldt);
877
878         return result;
879 }
880 EXPORT_SYMBOL(lu_device_type_init);
881
882 void lu_device_type_fini(struct lu_device_type *ldt)
883 {
884         if (ldt->ldt_ops->ldto_fini)
885                 ldt->ldt_ops->ldto_fini(ldt);
886 }
887 EXPORT_SYMBOL(lu_device_type_fini);
888
889 /**
890  * Global list of all sites on this node
891  */
892 static LIST_HEAD(lu_sites);
893 static DECLARE_RWSEM(lu_sites_guard);
894
895 /**
896  * Global environment used by site shrinker.
897  */
898 static struct lu_env lu_shrink_env;
899
900 struct lu_site_print_arg {
901         struct lu_env   *lsp_env;
902         void            *lsp_cookie;
903         lu_printer_t     lsp_printer;
904 };
905
906 static int
907 lu_site_obj_print(struct cfs_hash *hs, struct cfs_hash_bd *bd,
908                   struct hlist_node *hnode, void *data)
909 {
910         struct lu_site_print_arg *arg = (struct lu_site_print_arg *)data;
911         struct lu_object_header  *h;
912
913         h = hlist_entry(hnode, struct lu_object_header, loh_hash);
914         if (!list_empty(&h->loh_layers)) {
915                 const struct lu_object *o;
916
917                 o = lu_object_top(h);
918                 lu_object_print(arg->lsp_env, arg->lsp_cookie,
919                                 arg->lsp_printer, o);
920         } else {
921                 lu_object_header_print(arg->lsp_env, arg->lsp_cookie,
922                                        arg->lsp_printer, h);
923         }
924         return 0;
925 }
926
927 /**
928  * Print all objects in \a s.
929  */
930 void lu_site_print(const struct lu_env *env, struct lu_site *s, void *cookie,
931                    lu_printer_t printer)
932 {
933         struct lu_site_print_arg arg = {
934                 .lsp_env     = (struct lu_env *)env,
935                 .lsp_cookie  = cookie,
936                 .lsp_printer = printer,
937         };
938
939         cfs_hash_for_each(s->ls_obj_hash, lu_site_obj_print, &arg);
940 }
941 EXPORT_SYMBOL(lu_site_print);
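
/*
 * Illustrative sketch (not part of the original source): dumping every cached
 * object in a site through the libcfs debug log by combining lu_site_print()
 * with lu_cdebug_printer() defined above. The helper name example_dump_site()
 * and the D_INFO mask are assumptions for illustration only.
 */
#if 0
static void example_dump_site(const struct lu_env *env, struct lu_site *s)
{
	LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, D_INFO, NULL);

	lu_site_print(env, s, &msgdata, lu_cdebug_printer);
}
#endif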
942
943 /**
944  * Return desired hash table order.
945  */
946 static unsigned long lu_htable_order(struct lu_device *top)
947 {
948         unsigned long cache_size;
949         unsigned long bits;
950         unsigned long bits_max = LU_SITE_BITS_MAX;
951
952         /*
953          * For ZFS based OSDs the cache should be disabled by default.  This
954          * allows the ZFS ARC maximum flexibility in determining what buffers
955  * to cache.  If Lustre has objects or buffers which it wants to ensure
956  * always stay cached, it must maintain a hold on them.
957          */
958         if (strcmp(top->ld_type->ldt_name, LUSTRE_OSD_ZFS_NAME) == 0) {
959                 lu_cache_percent = 1;
960                 lu_cache_nr = LU_CACHE_NR_ZFS_LIMIT;
961                 return LU_SITE_BITS_MIN;
962         }
963
964         if (strcmp(top->ld_type->ldt_name, LUSTRE_VVP_NAME) == 0)
965                 bits_max = LU_SITE_BITS_MAX_CL;
966
967         /*
968          * Calculate hash table size, assuming that we want reasonable
969          * performance when 20% of total memory is occupied by cache of
970          * lu_objects.
971          *
972          * Size of lu_object is (arbitrarily) taken as 1K (together with inode).
973          */
974         cache_size = totalram_pages;
975
976 #if BITS_PER_LONG == 32
977         /* limit hashtable size for lowmem systems to low RAM */
978         if (cache_size > 1 << (30 - PAGE_SHIFT))
979                 cache_size = (1 << (30 - PAGE_SHIFT)) * 3 / 4;
980 #endif
981
982         /* clear off unreasonable cache setting. */
983         if (lu_cache_percent == 0 || lu_cache_percent > LU_CACHE_PERCENT_MAX) {
984                 CWARN("obdclass: invalid lu_cache_percent: %u, it must be in"
985                       " the range of (0, %u]. Will use default value: %u.\n",
986                       lu_cache_percent, LU_CACHE_PERCENT_MAX,
987                       LU_CACHE_PERCENT_DEFAULT);
988
989                 lu_cache_percent = LU_CACHE_PERCENT_DEFAULT;
990         }
991         cache_size = cache_size / 100 * lu_cache_percent *
992                 (PAGE_SIZE / 1024);
993
994         for (bits = 1; (1 << bits) < cache_size; ++bits) {
995                 ;
996         }
997
998         return clamp_t(typeof(bits), bits, LU_SITE_BITS_MIN, bits_max);
999 }
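
/*
 * Worked example for the calculation above (illustrative; assumes 16 GiB of
 * RAM, 4 KiB pages and the default lu_cache_percent of 20):
 *
 *   totalram_pages = 4194304
 *   cache_size     = 4194304 / 100 * 20 * (4096 / 1024) = 3355440
 *
 * The smallest bits with (1 << bits) >= 3355440 is 22, which already lies in
 * [LU_SITE_BITS_MIN, LU_SITE_BITS_MAX], so the returned order is 22.
 */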
1000
1001 static unsigned lu_obj_hop_hash(struct cfs_hash *hs,
1002                                 const void *key, unsigned mask)
1003 {
1004         struct lu_fid  *fid = (struct lu_fid *)key;
1005         __u32           hash;
1006
1007         hash = fid_flatten32(fid);
1008         hash += (hash >> 4) + (hash << 12); /* mixing oid and seq */
1009         hash = hash_long(hash, hs->hs_bkt_bits);
1010
1011         /* give me another random factor */
1012         hash -= hash_long((unsigned long)hs, fid_oid(fid) % 11 + 3);
1013
1014         hash <<= hs->hs_cur_bits - hs->hs_bkt_bits;
1015         hash |= (fid_seq(fid) + fid_oid(fid)) & (CFS_HASH_NBKT(hs) - 1);
1016
1017         return hash & mask;
1018 }
1019
1020 static void *lu_obj_hop_object(struct hlist_node *hnode)
1021 {
1022         return hlist_entry(hnode, struct lu_object_header, loh_hash);
1023 }
1024
1025 static void *lu_obj_hop_key(struct hlist_node *hnode)
1026 {
1027         struct lu_object_header *h;
1028
1029         h = hlist_entry(hnode, struct lu_object_header, loh_hash);
1030         return &h->loh_fid;
1031 }
1032
1033 static int lu_obj_hop_keycmp(const void *key, struct hlist_node *hnode)
1034 {
1035         struct lu_object_header *h;
1036
1037         h = hlist_entry(hnode, struct lu_object_header, loh_hash);
1038         return lu_fid_eq(&h->loh_fid, (struct lu_fid *)key);
1039 }
1040
1041 static void lu_obj_hop_get(struct cfs_hash *hs, struct hlist_node *hnode)
1042 {
1043         struct lu_object_header *h;
1044
1045         h = hlist_entry(hnode, struct lu_object_header, loh_hash);
1046         atomic_inc(&h->loh_ref);
1047 }
1048
1049 static void lu_obj_hop_put_locked(struct cfs_hash *hs, struct hlist_node *hnode)
1050 {
1051         LBUG(); /* we should never be called here */
1052 }
1053
1054 static struct cfs_hash_ops lu_site_hash_ops = {
1055         .hs_hash        = lu_obj_hop_hash,
1056         .hs_key         = lu_obj_hop_key,
1057         .hs_keycmp      = lu_obj_hop_keycmp,
1058         .hs_object      = lu_obj_hop_object,
1059         .hs_get         = lu_obj_hop_get,
1060         .hs_put_locked  = lu_obj_hop_put_locked,
1061 };
1062
1063 void lu_dev_add_linkage(struct lu_site *s, struct lu_device *d)
1064 {
1065         spin_lock(&s->ls_ld_lock);
1066         if (list_empty(&d->ld_linkage))
1067                 list_add(&d->ld_linkage, &s->ls_ld_linkage);
1068         spin_unlock(&s->ls_ld_lock);
1069 }
1070 EXPORT_SYMBOL(lu_dev_add_linkage);
1071
1072 void lu_dev_del_linkage(struct lu_site *s, struct lu_device *d)
1073 {
1074         spin_lock(&s->ls_ld_lock);
1075         list_del_init(&d->ld_linkage);
1076         spin_unlock(&s->ls_ld_lock);
1077 }
1078 EXPORT_SYMBOL(lu_dev_del_linkage);
1079
1080 /**
1081  * Initialize site \a s, with \a top as the top level device.
1082  */
1083 int lu_site_init(struct lu_site *s, struct lu_device *top)
1084 {
1085         struct lu_site_bkt_data *bkt;
1086         struct cfs_hash_bd bd;
1087         char name[16];
1088         unsigned long bits;
1089         unsigned int i;
1090         int rc;
1091         ENTRY;
1092
1093         memset(s, 0, sizeof *s);
1094         mutex_init(&s->ls_purge_mutex);
1095
1096 #ifdef HAVE_PERCPU_COUNTER_INIT_GFP_FLAG
1097         rc = percpu_counter_init(&s->ls_lru_len_counter, 0, GFP_NOFS);
1098 #else
1099         rc = percpu_counter_init(&s->ls_lru_len_counter, 0);
1100 #endif
1101         if (rc)
1102                 return -ENOMEM;
1103
1104         snprintf(name, sizeof(name), "lu_site_%s", top->ld_type->ldt_name);
1105         for (bits = lu_htable_order(top);
1106              bits >= LU_SITE_BITS_MIN; bits--) {
1107                 s->ls_obj_hash = cfs_hash_create(name, bits, bits,
1108                                                  bits - LU_SITE_BKT_BITS,
1109                                                  sizeof(*bkt), 0, 0,
1110                                                  &lu_site_hash_ops,
1111                                                  CFS_HASH_SPIN_BKTLOCK |
1112                                                  CFS_HASH_NO_ITEMREF |
1113                                                  CFS_HASH_DEPTH |
1114                                                  CFS_HASH_ASSERT_EMPTY |
1115                                                  CFS_HASH_COUNTER);
1116                 if (s->ls_obj_hash != NULL)
1117                         break;
1118         }
1119
1120         if (s->ls_obj_hash == NULL) {
1121                 CERROR("failed to create lu_site hash with bits: %lu\n", bits);
1122                 return -ENOMEM;
1123         }
1124
1125         cfs_hash_for_each_bucket(s->ls_obj_hash, &bd, i) {
1126                 bkt = cfs_hash_bd_extra_get(s->ls_obj_hash, &bd);
1127                 INIT_LIST_HEAD(&bkt->lsb_lru);
1128                 init_waitqueue_head(&bkt->lsb_waitq);
1129         }
1130
1131         s->ls_stats = lprocfs_alloc_stats(LU_SS_LAST_STAT, 0);
1132         if (s->ls_stats == NULL) {
1133                 cfs_hash_putref(s->ls_obj_hash);
1134                 s->ls_obj_hash = NULL;
1135                 return -ENOMEM;
1136         }
1137
1138         lprocfs_counter_init(s->ls_stats, LU_SS_CREATED,
1139                              0, "created", "created");
1140         lprocfs_counter_init(s->ls_stats, LU_SS_CACHE_HIT,
1141                              0, "cache_hit", "cache_hit");
1142         lprocfs_counter_init(s->ls_stats, LU_SS_CACHE_MISS,
1143                              0, "cache_miss", "cache_miss");
1144         lprocfs_counter_init(s->ls_stats, LU_SS_CACHE_RACE,
1145                              0, "cache_race", "cache_race");
1146         lprocfs_counter_init(s->ls_stats, LU_SS_CACHE_DEATH_RACE,
1147                              0, "cache_death_race", "cache_death_race");
1148         lprocfs_counter_init(s->ls_stats, LU_SS_LRU_PURGED,
1149                              0, "lru_purged", "lru_purged");
1150
1151         INIT_LIST_HEAD(&s->ls_linkage);
1152         s->ls_top_dev = top;
1153         top->ld_site = s;
1154         lu_device_get(top);
1155         lu_ref_add(&top->ld_reference, "site-top", s);
1156
1157         INIT_LIST_HEAD(&s->ls_ld_linkage);
1158         spin_lock_init(&s->ls_ld_lock);
1159
1160         lu_dev_add_linkage(s, top);
1161
1162         RETURN(0);
1163 }
1164 EXPORT_SYMBOL(lu_site_init);
1165
1166 /**
1167  * Finalize \a s and release its resources.
1168  */
1169 void lu_site_fini(struct lu_site *s)
1170 {
1171         down_write(&lu_sites_guard);
1172         list_del_init(&s->ls_linkage);
1173         up_write(&lu_sites_guard);
1174
1175         percpu_counter_destroy(&s->ls_lru_len_counter);
1176
1177         if (s->ls_obj_hash != NULL) {
1178                 cfs_hash_putref(s->ls_obj_hash);
1179                 s->ls_obj_hash = NULL;
1180         }
1181
1182         if (s->ls_top_dev != NULL) {
1183                 s->ls_top_dev->ld_site = NULL;
1184                 lu_ref_del(&s->ls_top_dev->ld_reference, "site-top", s);
1185                 lu_device_put(s->ls_top_dev);
1186                 s->ls_top_dev = NULL;
1187         }
1188
1189         if (s->ls_stats != NULL)
1190                 lprocfs_free_stats(&s->ls_stats);
1191 }
1192 EXPORT_SYMBOL(lu_site_fini);
1193
1194 /**
1195  * Called when initialization of stack for this site is completed.
1196  */
1197 int lu_site_init_finish(struct lu_site *s)
1198 {
1199         int result;
1200         down_write(&lu_sites_guard);
1201         result = lu_context_refill(&lu_shrink_env.le_ctx);
1202         if (result == 0)
1203                 list_add(&s->ls_linkage, &lu_sites);
1204         up_write(&lu_sites_guard);
1205         return result;
1206 }
1207 EXPORT_SYMBOL(lu_site_init_finish);
1208
1209 /**
1210  * Acquire additional reference on device \a d
1211  */
1212 void lu_device_get(struct lu_device *d)
1213 {
1214         atomic_inc(&d->ld_ref);
1215 }
1216 EXPORT_SYMBOL(lu_device_get);
1217
1218 /**
1219  * Release reference on device \a d.
1220  */
1221 void lu_device_put(struct lu_device *d)
1222 {
1223         LASSERT(atomic_read(&d->ld_ref) > 0);
1224         atomic_dec(&d->ld_ref);
1225 }
1226 EXPORT_SYMBOL(lu_device_put);
1227
1228 /**
1229  * Initialize device \a d of type \a t.
1230  */
1231 int lu_device_init(struct lu_device *d, struct lu_device_type *t)
1232 {
1233         if (atomic_inc_return(&t->ldt_device_nr) == 1 &&
1234             t->ldt_ops->ldto_start != NULL)
1235                 t->ldt_ops->ldto_start(t);
1236
1237         memset(d, 0, sizeof *d);
1238         d->ld_type = t;
1239         lu_ref_init(&d->ld_reference);
1240         INIT_LIST_HEAD(&d->ld_linkage);
1241
1242         return 0;
1243 }
1244 EXPORT_SYMBOL(lu_device_init);
1245
1246 /**
1247  * Finalize device \a d.
1248  */
1249 void lu_device_fini(struct lu_device *d)
1250 {
1251         struct lu_device_type *t = d->ld_type;
1252
1253         if (d->ld_obd != NULL) {
1254                 d->ld_obd->obd_lu_dev = NULL;
1255                 d->ld_obd = NULL;
1256         }
1257
1258         lu_ref_fini(&d->ld_reference);
1259         LASSERTF(atomic_read(&d->ld_ref) == 0,
1260                  "Refcount is %u\n", atomic_read(&d->ld_ref));
1261         LASSERT(atomic_read(&t->ldt_device_nr) > 0);
1262
1263         if (atomic_dec_and_test(&t->ldt_device_nr) &&
1264             t->ldt_ops->ldto_stop != NULL)
1265                 t->ldt_ops->ldto_stop(t);
1266 }
1267 EXPORT_SYMBOL(lu_device_fini);
1268
1269 /**
1270  * Initialize object \a o that is part of compound object \a h and was created
1271  * by device \a d.
1272  */
1273 int lu_object_init(struct lu_object *o, struct lu_object_header *h,
1274                    struct lu_device *d)
1275 {
1276         memset(o, 0, sizeof(*o));
1277         o->lo_header = h;
1278         o->lo_dev = d;
1279         lu_device_get(d);
1280         lu_ref_add_at(&d->ld_reference, &o->lo_dev_ref, "lu_object", o);
1281         INIT_LIST_HEAD(&o->lo_linkage);
1282
1283         return 0;
1284 }
1285 EXPORT_SYMBOL(lu_object_init);
1286
1287 /**
1288  * Finalize object and release its resources.
1289  */
1290 void lu_object_fini(struct lu_object *o)
1291 {
1292         struct lu_device *dev = o->lo_dev;
1293
1294         LASSERT(list_empty(&o->lo_linkage));
1295
1296         if (dev != NULL) {
1297                 lu_ref_del_at(&dev->ld_reference, &o->lo_dev_ref,
1298                               "lu_object", o);
1299                 lu_device_put(dev);
1300                 o->lo_dev = NULL;
1301         }
1302 }
1303 EXPORT_SYMBOL(lu_object_fini);
1304
1305 /**
1306  * Add object \a o as first layer of compound object \a h
1307  *
1308  * This is typically called by the ->ldo_object_alloc() method of top-level
1309  * device.
1310  */
1311 void lu_object_add_top(struct lu_object_header *h, struct lu_object *o)
1312 {
1313         list_move(&o->lo_linkage, &h->loh_layers);
1314 }
1315 EXPORT_SYMBOL(lu_object_add_top);
1316
1317 /**
1318  * Add object \a o as a layer of compound object, going after \a before.
1319  *
1320  * This is typically called by the ->ldo_object_alloc() method of \a
1321  * before->lo_dev.
1322  */
1323 void lu_object_add(struct lu_object *before, struct lu_object *o)
1324 {
1325         list_move(&o->lo_linkage, &before->lo_linkage);
1326 }
1327 EXPORT_SYMBOL(lu_object_add);
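
/*
 * Illustrative sketch (not part of the original source): how a layer's
 * ->ldo_object_alloc() might initialize its slice with lu_object_init() and
 * hook it into the compound object with lu_object_add(). The names
 * example_object, eo_obj and example_layer_slice_alloc() are assumptions
 * made up for this sketch.
 */
#if 0
struct example_object {
	struct lu_object eo_obj;
};

static struct lu_object *example_layer_slice_alloc(const struct lu_env *env,
						   struct lu_object_header *h,
						   struct lu_device *d,
						   struct lu_object *parent)
{
	struct example_object *eo;

	OBD_ALLOC_PTR(eo);
	if (eo == NULL)
		return NULL;

	lu_object_init(&eo->eo_obj, h, d);
	/* place the new slice right after the parent slice */
	lu_object_add(parent, &eo->eo_obj);
	return &eo->eo_obj;
}
#endif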
1328
1329 /**
1330  * Initialize compound object.
1331  */
1332 int lu_object_header_init(struct lu_object_header *h)
1333 {
1334         memset(h, 0, sizeof *h);
1335         atomic_set(&h->loh_ref, 1);
1336         INIT_HLIST_NODE(&h->loh_hash);
1337         INIT_LIST_HEAD(&h->loh_lru);
1338         INIT_LIST_HEAD(&h->loh_layers);
1339         lu_ref_init(&h->loh_reference);
1340         return 0;
1341 }
1342 EXPORT_SYMBOL(lu_object_header_init);
1343
1344 /**
1345  * Finalize compound object.
1346  */
1347 void lu_object_header_fini(struct lu_object_header *h)
1348 {
1349         LASSERT(list_empty(&h->loh_layers));
1350         LASSERT(list_empty(&h->loh_lru));
1351         LASSERT(hlist_unhashed(&h->loh_hash));
1352         lu_ref_fini(&h->loh_reference);
1353 }
1354 EXPORT_SYMBOL(lu_object_header_fini);
1355
1356 /**
1357  * Given a compound object, find its slice, corresponding to the device type
1358  * \a dtype.
1359  */
1360 struct lu_object *lu_object_locate(struct lu_object_header *h,
1361                                    const struct lu_device_type *dtype)
1362 {
1363         struct lu_object *o;
1364
1365         list_for_each_entry(o, &h->loh_layers, lo_linkage) {
1366                 if (o->lo_dev->ld_type == dtype)
1367                         return o;
1368         }
1369         return NULL;
1370 }
1371 EXPORT_SYMBOL(lu_object_locate);
1372
1373 /**
1374  * Finalize and free devices in the device stack.
1375  *
1376  * Finalize device stack by purging object cache, and calling
1377  * lu_device_type_operations::ldto_device_fini() and
1378  * lu_device_type_operations::ldto_device_free() on all devices in the stack.
1379  */
1380 void lu_stack_fini(const struct lu_env *env, struct lu_device *top)
1381 {
1382         struct lu_site   *site = top->ld_site;
1383         struct lu_device *scan;
1384         struct lu_device *next;
1385
1386         lu_site_purge(env, site, ~0);
1387         for (scan = top; scan != NULL; scan = next) {
1388                 next = scan->ld_type->ldt_ops->ldto_device_fini(env, scan);
1389                 lu_ref_del(&scan->ld_reference, "lu-stack", &lu_site_init);
1390                 lu_device_put(scan);
1391         }
1392
1393         /* purge again. */
1394         lu_site_purge(env, site, ~0);
1395
1396         for (scan = top; scan != NULL; scan = next) {
1397                 const struct lu_device_type *ldt = scan->ld_type;
1398                 struct obd_type             *type;
1399
1400                 next = ldt->ldt_ops->ldto_device_free(env, scan);
1401                 type = ldt->ldt_obd_type;
1402                 if (type != NULL) {
1403                         type->typ_refcnt--;
1404                         class_put_type(type);
1405                 }
1406         }
1407 }
1408
1409 enum {
1410         /**
1411          * Maximal number of tld slots.
1412          */
1413         LU_CONTEXT_KEY_NR = 40
1414 };
1415
1416 static struct lu_context_key *lu_keys[LU_CONTEXT_KEY_NR] = { NULL, };
1417
1418 DEFINE_RWLOCK(lu_keys_guard);
1419 static atomic_t lu_key_initing_cnt = ATOMIC_INIT(0);
1420
1421 /**
1422  * Global counter incremented whenever key is registered, unregistered,
1423  * revived or quiesced. This is used to avoid unnecessary calls to
1424  * lu_context_refill(). No locking is provided, as initialization and shutdown
1425  * are supposed to be externally serialized.
1426  */
1427 static unsigned key_set_version = 0;
1428
1429 /**
1430  * Register new key.
1431  */
1432 int lu_context_key_register(struct lu_context_key *key)
1433 {
1434         int result;
1435         unsigned int i;
1436
1437         LASSERT(key->lct_init != NULL);
1438         LASSERT(key->lct_fini != NULL);
1439         LASSERT(key->lct_tags != 0);
1440         LASSERT(key->lct_owner != NULL);
1441
1442         result = -ENFILE;
1443         write_lock(&lu_keys_guard);
1444         for (i = 0; i < ARRAY_SIZE(lu_keys); ++i) {
1445                 if (lu_keys[i] == NULL) {
1446                         key->lct_index = i;
1447                         atomic_set(&key->lct_used, 1);
1448                         lu_keys[i] = key;
1449                         lu_ref_init(&key->lct_reference);
1450                         result = 0;
1451                         ++key_set_version;
1452                         break;
1453                 }
1454         }
1455         write_unlock(&lu_keys_guard);
1456         return result;
1457 }
1458 EXPORT_SYMBOL(lu_context_key_register);
1459
1460 static void key_fini(struct lu_context *ctx, int index)
1461 {
1462         if (ctx->lc_value != NULL && ctx->lc_value[index] != NULL) {
1463                 struct lu_context_key *key;
1464
1465                 key = lu_keys[index];
1466                 LASSERT(key != NULL);
1467                 LASSERT(key->lct_fini != NULL);
1468                 LASSERT(atomic_read(&key->lct_used) > 1);
1469
1470                 key->lct_fini(ctx, key, ctx->lc_value[index]);
1471                 lu_ref_del(&key->lct_reference, "ctx", ctx);
1472                 if (atomic_dec_and_test(&key->lct_used))
1473                         wake_up_var(&key->lct_used);
1474
1475                 LASSERT(key->lct_owner != NULL);
1476                 if ((ctx->lc_tags & LCT_NOREF) == 0) {
1477                         LINVRNT(module_refcount(key->lct_owner) > 0);
1478                         module_put(key->lct_owner);
1479                 }
1480                 ctx->lc_value[index] = NULL;
1481         }
1482 }
1483
1484 /**
1485  * Deregister key.
1486  */
1487 void lu_context_key_degister(struct lu_context_key *key)
1488 {
1489         LASSERT(atomic_read(&key->lct_used) >= 1);
1490         LINVRNT(0 <= key->lct_index && key->lct_index < ARRAY_SIZE(lu_keys));
1491
1492         lu_context_key_quiesce(key);
1493
1494         key_fini(&lu_shrink_env.le_ctx, key->lct_index);
1495
1496         /**
1497          * Wait until all transient contexts referencing this key have
1498          * run the lu_context_key::lct_fini() method.
1499          */
1500         atomic_dec(&key->lct_used);
1501         wait_var_event(&key->lct_used, atomic_read(&key->lct_used) == 0);
1502
1503         write_lock(&lu_keys_guard);
1504         if (lu_keys[key->lct_index]) {
1505                 lu_keys[key->lct_index] = NULL;
1506                 lu_ref_fini(&key->lct_reference);
1507         }
1508         write_unlock(&lu_keys_guard);
1509
1510         LASSERTF(atomic_read(&key->lct_used) == 0,
1511                  "key has instances: %d\n",
1512                  atomic_read(&key->lct_used));
1513 }
1514 EXPORT_SYMBOL(lu_context_key_degister);
1515
1516 /**
1517  * Register a number of keys. This has to be called after all keys have been
1518  * initialized by a call to LU_CONTEXT_KEY_INIT().
1519  */
1520 int lu_context_key_register_many(struct lu_context_key *k, ...)
1521 {
1522         struct lu_context_key *key = k;
1523         va_list args;
1524         int result;
1525
1526         va_start(args, k);
1527         do {
1528                 result = lu_context_key_register(key);
1529                 if (result)
1530                         break;
1531                 key = va_arg(args, struct lu_context_key *);
1532         } while (key != NULL);
1533         va_end(args);
1534
1535         if (result != 0) {
1536                 va_start(args, k);
1537                 while (k != key) {
1538                         lu_context_key_degister(k);
1539                         k = va_arg(args, struct lu_context_key *);
1540                 }
1541                 va_end(args);
1542         }
1543
1544         return result;
1545 }
1546 EXPORT_SYMBOL(lu_context_key_register_many);
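
/*
 * Illustrative usage sketch: the variadic list passed to
 * lu_context_key_register_many() must be NULL-terminated; if any
 * registration fails, the keys registered so far are degistered again.
 * The "my_*" key names are hypothetical.
 *
 *   rc = lu_context_key_register_many(&my_thread_key, &my_session_key,
 *                                     NULL);
 *   if (rc != 0)
 *           return rc;
 */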
1547
1548 /**
1549  * De-register a number of keys. This is a dual to
1550  * lu_context_key_register_many().
1551  */
1552 void lu_context_key_degister_many(struct lu_context_key *k, ...)
1553 {
1554         va_list args;
1555
1556         va_start(args, k);
1557         do {
1558                 lu_context_key_degister(k);
1559                         k = va_arg(args, struct lu_context_key *);
1560         } while (k != NULL);
1561         va_end(args);
1562 }
1563 EXPORT_SYMBOL(lu_context_key_degister_many);
1564
1565 /**
1566  * Revive a number of keys.
1567  */
1568 void lu_context_key_revive_many(struct lu_context_key *k, ...)
1569 {
1570         va_list args;
1571
1572         va_start(args, k);
1573         do {
1574                 lu_context_key_revive(k);
1575                 k = va_arg(args, struct lu_context_key *);
1576         } while (k != NULL);
1577         va_end(args);
1578 }
1579 EXPORT_SYMBOL(lu_context_key_revive_many);
1580
1581 /**
1582  * Quiesce a number of keys.
1583  */
1584 void lu_context_key_quiesce_many(struct lu_context_key *k, ...)
1585 {
1586         va_list args;
1587
1588         va_start(args, k);
1589         do {
1590                 lu_context_key_quiesce(k);
1591                 k = va_arg(args, struct lu_context_key *);
1592         } while (k != NULL);
1593         va_end(args);
1594 }
1595 EXPORT_SYMBOL(lu_context_key_quiesce_many);
1596
1597 /**
1598  * Return value associated with key \a key in context \a ctx.
1599  */
1600 void *lu_context_key_get(const struct lu_context *ctx,
1601                          const struct lu_context_key *key)
1602 {
1603         LINVRNT(ctx->lc_state == LCS_ENTERED);
1604         LINVRNT(0 <= key->lct_index && key->lct_index < ARRAY_SIZE(lu_keys));
1605         LASSERT(lu_keys[key->lct_index] == key);
1606         return ctx->lc_value[key->lct_index];
1607 }
1608 EXPORT_SYMBOL(lu_context_key_get);
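
/*
 * Illustrative usage sketch, reusing the hypothetical "my_*" names from the
 * registration sketch above; as asserted in lu_context_key_get(), the
 * context must be in LCS_ENTERED state when the value is fetched.
 *
 *   struct my_thread_info *info;
 *
 *   info = lu_context_key_get(&env->le_ctx, &my_thread_key);
 *   LASSERT(info != NULL);
 *   info->mti_scratch = 0;
 */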
1609
1610 /**
1611  * List of contexts tagged LCT_REMEMBER, walked by lu_context_key_quiesce().
1612  */
1613 static LIST_HEAD(lu_context_remembered);
1614
1615 /**
1616  * Destroy \a key in all remembered contexts. This is used to destroy key
1617  * values in "shared" contexts (like service threads), when a module owning
1618  * the key is about to be unloaded.
1619  */
1620 void lu_context_key_quiesce(struct lu_context_key *key)
1621 {
1622         struct lu_context *ctx;
1623
1624         if (!(key->lct_tags & LCT_QUIESCENT)) {
1625                 /*
1626                  * XXX memory barrier has to go here.
1627                  */
1628                 write_lock(&lu_keys_guard);
1629                 key->lct_tags |= LCT_QUIESCENT;
1630
1631                 /*
1632                  * Wait until all lu_context_key::lct_init() methods
1633                  * have completed.
1634                  */
1635                 while (atomic_read(&lu_key_initing_cnt) > 0) {
1636                         write_unlock(&lu_keys_guard);
1637                         CDEBUG(D_INFO, "lu_context_key_quiesce: \"%s\""
1638                                " %p, %d (%d)\n",
1639                                key->lct_owner ? key->lct_owner->name : "",
1640                                key, atomic_read(&key->lct_used),
1641                                atomic_read(&lu_key_initing_cnt));
1642                         schedule();
1643                         write_lock(&lu_keys_guard);
1644                 }
1645
1646                 list_for_each_entry(ctx, &lu_context_remembered,
1647                                     lc_remember)
1648                         key_fini(ctx, key->lct_index);
1649
1650                 ++key_set_version;
1651                 write_unlock(&lu_keys_guard);
1652         }
1653 }
1654
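/**
 * Re-activate \a key after lu_context_key_quiesce(): clear LCT_QUIESCENT so
 * that keys_fill() creates values for this key again when contexts are
 * refilled.
 */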
1655 void lu_context_key_revive(struct lu_context_key *key)
1656 {
1657         write_lock(&lu_keys_guard);
1658         key->lct_tags &= ~LCT_QUIESCENT;
1659         ++key_set_version;
1660         write_unlock(&lu_keys_guard);
1661 }
1662
1663 static void keys_fini(struct lu_context *ctx)
1664 {
1665         unsigned int i;
1666
1667         if (ctx->lc_value == NULL)
1668                 return;
1669
1670         for (i = 0; i < ARRAY_SIZE(lu_keys); ++i)
1671                 key_fini(ctx, i);
1672
1673         OBD_FREE(ctx->lc_value, ARRAY_SIZE(lu_keys) * sizeof ctx->lc_value[0]);
1674         ctx->lc_value = NULL;
1675 }
1676
1677 static int keys_fill(struct lu_context *ctx)
1678 {
1679         unsigned int i;
1680         unsigned pre_version;
1681
1682         /*
1683          * Serialisation with lu_context_key_quiesce() is needed, but some
1684          * "key->lct_init()" methods call kernel memory allocation routines
1685          * and cannot run while holding a spin_lock.
1686          * "lu_keys_guard" is held while incrementing "lu_key_initing_cnt"
1687          * to mark the start of the serialised section.
1688          * An atomic_t counter is used so that the lock does not have to be
1689          * reacquired when decrementing it.
1690          */
1691         read_lock(&lu_keys_guard);
1692         atomic_inc(&lu_key_initing_cnt);
1693         pre_version = key_set_version;
1694         read_unlock(&lu_keys_guard);
1695
1696 refill:
1697         LINVRNT(ctx->lc_value != NULL);
1698         for (i = 0; i < ARRAY_SIZE(lu_keys); ++i) {
1699                 struct lu_context_key *key;
1700
1701                 key = lu_keys[i];
1702                 if (ctx->lc_value[i] == NULL && key != NULL &&
1703                     (key->lct_tags & ctx->lc_tags) &&
1704                     /*
1705                      * Don't create values for a LCT_QUIESCENT key, as this
1706                      * would pin the module owning the key.
1707                      */
1708                     !(key->lct_tags & LCT_QUIESCENT)) {
1709                         void *value;
1710
1711                         LINVRNT(key->lct_init != NULL);
1712                         LINVRNT(key->lct_index == i);
1713
1714                         LASSERT(key->lct_owner != NULL);
1715                         if (!(ctx->lc_tags & LCT_NOREF) &&
1716                             try_module_get(key->lct_owner) == 0) {
1717                                 /* module is unloading, skip this key */
1718                                 continue;
1719                         }
1720
1721                         value = key->lct_init(ctx, key);
1722                         if (unlikely(IS_ERR(value))) {
1723                                 atomic_dec(&lu_key_initing_cnt);
1724                                 return PTR_ERR(value);
1725                         }
1726
1727                         lu_ref_add_atomic(&key->lct_reference, "ctx", ctx);
1728                         atomic_inc(&key->lct_used);
1729                         /*
1730                          * This is the only place in the code where an
1731                          * element of the ctx->lc_value[] array is set to
1732                          * a non-NULL value.
1733                          */
1734                         ctx->lc_value[i] = value;
1735                         if (key->lct_exit != NULL)
1736                                 ctx->lc_tags |= LCT_HAS_EXIT;
1737                 }
1738         }
1739
1740         read_lock(&lu_keys_guard);
1741         if (pre_version != key_set_version) {
1742                 pre_version = key_set_version;
1743                 read_unlock(&lu_keys_guard);
1744                 goto refill;
1745         }
1746
1747         ctx->lc_version = key_set_version;
1748
1749         atomic_dec(&lu_key_initing_cnt);
1750         read_unlock(&lu_keys_guard);
1751         return 0;
1752 }
1753
1754 static int keys_init(struct lu_context *ctx)
1755 {
1756         OBD_ALLOC(ctx->lc_value, ARRAY_SIZE(lu_keys) * sizeof ctx->lc_value[0]);
1757         if (likely(ctx->lc_value != NULL))
1758                 return keys_fill(ctx);
1759
1760         return -ENOMEM;
1761 }
1762
1763 /**
1764  * Initialize context data-structure. Create values for all keys.
1765  */
1766 int lu_context_init(struct lu_context *ctx, __u32 tags)
1767 {
1768         int     rc;
1769
1770         memset(ctx, 0, sizeof *ctx);
1771         ctx->lc_state = LCS_INITIALIZED;
1772         ctx->lc_tags = tags;
1773         if (tags & LCT_REMEMBER) {
1774                 write_lock(&lu_keys_guard);
1775                 list_add(&ctx->lc_remember, &lu_context_remembered);
1776                 write_unlock(&lu_keys_guard);
1777         } else {
1778                 INIT_LIST_HEAD(&ctx->lc_remember);
1779         }
1780
1781         rc = keys_init(ctx);
1782         if (rc != 0)
1783                 lu_context_fini(ctx);
1784
1785         return rc;
1786 }
1787 EXPORT_SYMBOL(lu_context_init);
1788
1789 /**
1790  * Finalize context data-structure. Destroy key values.
1791  */
1792 void lu_context_fini(struct lu_context *ctx)
1793 {
1794         LINVRNT(ctx->lc_state == LCS_INITIALIZED || ctx->lc_state == LCS_LEFT);
1795         ctx->lc_state = LCS_FINALIZED;
1796
1797         if ((ctx->lc_tags & LCT_REMEMBER) == 0) {
1798                 LASSERT(list_empty(&ctx->lc_remember));
1799                 keys_fini(ctx);
1800
1801         } else { /* could race with key degister */
1802                 write_lock(&lu_keys_guard);
1803                 keys_fini(ctx);
1804                 list_del_init(&ctx->lc_remember);
1805                 write_unlock(&lu_keys_guard);
1806         }
1807 }
1808 EXPORT_SYMBOL(lu_context_fini);
1809
1810 /**
1811  * Called before entering context.
1812  */
1813 void lu_context_enter(struct lu_context *ctx)
1814 {
1815         LINVRNT(ctx->lc_state == LCS_INITIALIZED || ctx->lc_state == LCS_LEFT);
1816         ctx->lc_state = LCS_ENTERED;
1817 }
1818 EXPORT_SYMBOL(lu_context_enter);
1819
1820 /**
1821  * Called after exiting from \a ctx
1822  */
1823 void lu_context_exit(struct lu_context *ctx)
1824 {
1825         unsigned int i;
1826
1827         LINVRNT(ctx->lc_state == LCS_ENTERED);
1828         ctx->lc_state = LCS_LEFT;
1829         if (ctx->lc_tags & LCT_HAS_EXIT && ctx->lc_value != NULL) {
1830                 /* could race with key quiescency */
1831                 if (ctx->lc_tags & LCT_REMEMBER)
1832                         read_lock(&lu_keys_guard);
1833
1834                 for (i = 0; i < ARRAY_SIZE(lu_keys); ++i) {
1835                         if (ctx->lc_value[i] != NULL) {
1836                                 struct lu_context_key *key;
1837
1838                                 key = lu_keys[i];
1839                                 LASSERT(key != NULL);
1840                                 if (key->lct_exit != NULL)
1841                                         key->lct_exit(ctx,
1842                                                       key, ctx->lc_value[i]);
1843                         }
1844                 }
1845
1846                 if (ctx->lc_tags & LCT_REMEMBER)
1847                         read_unlock(&lu_keys_guard);
1848         }
1849 }
1850 EXPORT_SYMBOL(lu_context_exit);
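
/*
 * Illustrative usage sketch: the usual life cycle of a stand-alone context,
 * initialized once, entered/exited around each period of use, and finalized
 * when done.  The tag is only an example.
 *
 *   struct lu_context ctx;
 *
 *   rc = lu_context_init(&ctx, LCT_MD_THREAD);
 *   if (rc == 0) {
 *           lu_context_enter(&ctx);
 *           ... use lu_context_key_get(&ctx, ...) ...
 *           lu_context_exit(&ctx);
 *           lu_context_fini(&ctx);
 *   }
 */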
1851
1852 /**
1853  * Allocate values in \a ctx for all missing keys that were registered
1854  * after the context was created. key_set_version only changes in the
1855  * rare cases when modules are loaded or removed.
1856  */
1857 int lu_context_refill(struct lu_context *ctx)
1858 {
1859         read_lock(&lu_keys_guard);
1860         if (likely(ctx->lc_version == key_set_version)) {
1861                 read_unlock(&lu_keys_guard);
1862                 return 0;
1863         }
1864
1865         read_unlock(&lu_keys_guard);
1866         return keys_fill(ctx);
1867 }
1868
1869 /**
1870  * lu_context_tags_default/lu_session_tags_default are updated when new
1871  * types of obd devices are added. Currently, this is only used on the
1872  * client side, specifically for the echo device client; for other stacks
1873  * (like ptlrpc threads), contexts are predefined when the lu_device types
1874  * are registered, during the module probe phase.
1875  */
1876 __u32 lu_context_tags_default = 0;
1877 __u32 lu_session_tags_default = 0;
1878
1879 void lu_context_tags_update(__u32 tags)
1880 {
1881         write_lock(&lu_keys_guard);
1882         lu_context_tags_default |= tags;
1883         key_set_version++;
1884         write_unlock(&lu_keys_guard);
1885 }
1886 EXPORT_SYMBOL(lu_context_tags_update);
1887
1888 void lu_context_tags_clear(__u32 tags)
1889 {
1890         write_lock(&lu_keys_guard);
1891         lu_context_tags_default &= ~tags;
1892         key_set_version++;
1893         write_unlock(&lu_keys_guard);
1894 }
1895 EXPORT_SYMBOL(lu_context_tags_clear);
1896
1897 void lu_session_tags_update(__u32 tags)
1898 {
1899         write_lock(&lu_keys_guard);
1900         lu_session_tags_default |= tags;
1901         key_set_version++;
1902         write_unlock(&lu_keys_guard);
1903 }
1904 EXPORT_SYMBOL(lu_session_tags_update);
1905
1906 void lu_session_tags_clear(__u32 tags)
1907 {
1908         write_lock(&lu_keys_guard);
1909         lu_session_tags_default &= ~tags;
1910         key_set_version++;
1911         write_unlock(&lu_keys_guard);
1912 }
1913 EXPORT_SYMBOL(lu_session_tags_clear);
1914
1915 int lu_env_init(struct lu_env *env, __u32 tags)
1916 {
1917         int result;
1918
1919         env->le_ses = NULL;
1920         result = lu_context_init(&env->le_ctx, tags);
1921         if (likely(result == 0))
1922                 lu_context_enter(&env->le_ctx);
1923         return result;
1924 }
1925 EXPORT_SYMBOL(lu_env_init);
1926
1927 void lu_env_fini(struct lu_env *env)
1928 {
1929         lu_context_exit(&env->le_ctx);
1930         lu_context_fini(&env->le_ctx);
1931         env->le_ses = NULL;
1932 }
1933 EXPORT_SYMBOL(lu_env_fini);
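
/*
 * Illustrative usage sketch: a temporary environment built with
 * lu_env_init()/lu_env_fini().  lu_env_init() already enters the embedded
 * context, so no explicit lu_context_enter() is needed; the tag is only an
 * example.
 *
 *   struct lu_env env;
 *
 *   rc = lu_env_init(&env, LCT_DT_THREAD);
 *   if (rc == 0) {
 *           ... use &env ...
 *           lu_env_fini(&env);
 *   }
 */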
1934
1935 int lu_env_refill(struct lu_env *env)
1936 {
1937         int result;
1938
1939         result = lu_context_refill(&env->le_ctx);
1940         if (result == 0 && env->le_ses != NULL)
1941                 result = lu_context_refill(env->le_ses);
1942         return result;
1943 }
1944 EXPORT_SYMBOL(lu_env_refill);
1945
1946 /**
1947  * Currently, this API is only used by the echo client.
1948  * Because the echo client and the normal Lustre client share the same
1949  * cl_env cache, the echo client needs to refresh the env context after
1950  * it gets one from the cache, especially when the normal client and the
1951  * echo client co-exist on the same client node.
1952  */
1953 int lu_env_refill_by_tags(struct lu_env *env, __u32 ctags,
1954                           __u32 stags)
1955 {
1956         int    result;
1957
1958         if ((env->le_ctx.lc_tags & ctags) != ctags) {
1959                 env->le_ctx.lc_version = 0;
1960                 env->le_ctx.lc_tags |= ctags;
1961         }
1962
1963         if (env->le_ses && (env->le_ses->lc_tags & stags) != stags) {
1964                 env->le_ses->lc_version = 0;
1965                 env->le_ses->lc_tags |= stags;
1966         }
1967
1968         result = lu_env_refill(env);
1969
1970         return result;
1971 }
1972 EXPORT_SYMBOL(lu_env_refill_by_tags);
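
/*
 * Illustrative usage sketch: widening a cached environment with extra
 * context and session tags before reuse, as described above for the echo
 * client.  The tag values are only examples.
 *
 *   rc = lu_env_refill_by_tags(env, LCT_DT_THREAD, LCT_SESSION);
 *   if (rc != 0)
 *           return rc;
 */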
1973
1974 #ifdef HAVE_SERVER_SUPPORT
1975 struct lu_env_item {
1976         struct task_struct *lei_task;   /* rhashtable key */
1977         struct rhash_head lei_linkage;
1978         struct lu_env *lei_env;
1979         struct rcu_head lei_rcu_head;
1980 };
1981
1982 static const struct rhashtable_params lu_env_rhash_params = {
1983         .key_len     = sizeof(struct task_struct *),
1984         .key_offset  = offsetof(struct lu_env_item, lei_task),
1985         .head_offset = offsetof(struct lu_env_item, lei_linkage),
1986 };
1987
1988 struct rhashtable lu_env_rhash;
1989
1990 struct lu_env_percpu {
1991         struct task_struct *lep_task;
1992         struct lu_env *lep_env ____cacheline_aligned_in_smp;
1993 };
1994
1995 static struct lu_env_percpu lu_env_percpu[NR_CPUS];
1996
1997 int lu_env_add(struct lu_env *env)
1998 {
1999         struct lu_env_item *lei, *old;
2000
2001         LASSERT(env);
2002
2003         OBD_ALLOC_PTR(lei);
2004         if (!lei)
2005                 return -ENOMEM;
2006
2007         lei->lei_task = current;
2008         lei->lei_env = env;
2009
2010         old = rhashtable_lookup_get_insert_fast(&lu_env_rhash,
2011                                                 &lei->lei_linkage,
2012                                                 lu_env_rhash_params);
2013         LASSERT(!old);
2014
2015         return 0;
2016 }
2017 EXPORT_SYMBOL(lu_env_add);
2018
2019 static void lu_env_item_free(struct rcu_head *head)
2020 {
2021         struct lu_env_item *lei;
2022
2023         lei = container_of(head, struct lu_env_item, lei_rcu_head);
2024         OBD_FREE_PTR(lei);
2025 }
2026
2027 void lu_env_remove(struct lu_env *env)
2028 {
2029         struct lu_env_item *lei;
2030         const void *task = current;
2031         int i;
2032
2033         for_each_possible_cpu(i) {
2034                 if (lu_env_percpu[i].lep_env == env) {
2035                         LASSERT(lu_env_percpu[i].lep_task == task);
2036                         lu_env_percpu[i].lep_task = NULL;
2037                         lu_env_percpu[i].lep_env = NULL;
2038                 }
2039         }
2040
2041         /* The rcu_lock is not taken in this case since the key
2042          * used is the actual task_struct. This implies that each
2043          * object is only removed by the owning thread, so there
2044          * can never be a race on a particular object.
2045          */
2046         lei = rhashtable_lookup_fast(&lu_env_rhash, &task,
2047                                      lu_env_rhash_params);
2048         if (lei && rhashtable_remove_fast(&lu_env_rhash, &lei->lei_linkage,
2049                                           lu_env_rhash_params) == 0)
2050                 call_rcu(&lei->lei_rcu_head, lu_env_item_free);
2051 }
2052 EXPORT_SYMBOL(lu_env_remove);
2053
2054 struct lu_env *lu_env_find(void)
2055 {
2056         struct lu_env *env = NULL;
2057         struct lu_env_item *lei;
2058         const void *task = current;
2059         int i = get_cpu();
2060
2061         if (lu_env_percpu[i].lep_task == current) {
2062                 env = lu_env_percpu[i].lep_env;
2063                 put_cpu();
2064                 LASSERT(env);
2065                 return env;
2066         }
2067
2068         lei = rhashtable_lookup_fast(&lu_env_rhash, &task,
2069                                      lu_env_rhash_params);
2070         if (lei) {
2071                 env = lei->lei_env;
2072                 lu_env_percpu[i].lep_task = current;
2073                 lu_env_percpu[i].lep_env = env;
2074         }
2075         put_cpu();
2076
2077         return env;
2078 }
2079 EXPORT_SYMBOL(lu_env_find);
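
/*
 * Illustrative usage sketch: the lu_env_add()/lu_env_find()/lu_env_remove()
 * pattern lets code deep in the current thread's call chain recover the
 * environment without passing it down explicitly.
 *
 *   rc = lu_env_add(env);
 *   if (rc != 0)
 *           return rc;
 *   ... nested code on this thread may call lu_env_find() to obtain env ...
 *   lu_env_remove(env);
 */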
2080 #define lu_env_rhash_init(rhash, params) rhashtable_init(rhash, params)
2081 #define lu_env_rhash_destroy(rhash)      rhashtable_destroy(rhash)
2082 #else
2083 #define lu_env_rhash_init(rhash, params) 0
2084 #define lu_env_rhash_destroy(rhash)      do {} while (0)
2085 #endif /* HAVE_SERVER_SUPPORT */
2086
2087 static struct shrinker *lu_site_shrinker;
2088
2089 typedef struct lu_site_stats {
2090         unsigned        lss_populated;
2091         unsigned        lss_max_search;
2092         unsigned        lss_total;
2093         unsigned        lss_busy;
2094 } lu_site_stats_t;
2095
2096 static void lu_site_stats_get(const struct lu_site *s,
2097                               lu_site_stats_t *stats, int populated)
2098 {
2099         struct cfs_hash *hs = s->ls_obj_hash;
2100         struct cfs_hash_bd bd;
2101         unsigned int i;
2102         /*
2103          * percpu_counter_sum_positive() won't accept a const pointer
2104          * as it does modify the struct by taking a spinlock
2105          */
2106         struct lu_site *s2 = (struct lu_site *)s;
2107
2108         stats->lss_busy += cfs_hash_size_get(hs) -
2109                 percpu_counter_sum_positive(&s2->ls_lru_len_counter);
2110         cfs_hash_for_each_bucket(hs, &bd, i) {
2111                 struct hlist_head *hhead;
2112
2113                 cfs_hash_bd_lock(hs, &bd, 1);
2114                 stats->lss_total += cfs_hash_bd_count_get(&bd);
2115                 stats->lss_max_search = max((int)stats->lss_max_search,
2116                                             cfs_hash_bd_depmax_get(&bd));
2117                 if (!populated) {
2118                         cfs_hash_bd_unlock(hs, &bd, 1);
2119                         continue;
2120                 }
2121
2122                 cfs_hash_bd_for_each_hlist(hs, &bd, hhead) {
2123                         if (!hlist_empty(hhead))
2124                                 stats->lss_populated++;
2125                 }
2126                 cfs_hash_bd_unlock(hs, &bd, 1);
2127         }
2128 }
2129
2130
2131 /*
2132  * lu_cache_shrink_count() returns an approximate number of cached objects
2133  * that can be freed by shrink_slab(). A counter, which tracks the
2134  * number of items in the site's lru, is maintained in a percpu_counter
2135  * for each site. The percpu values are incremented and decremented as
2136  * objects are added or removed from the lru. The percpu values are summed
2137  * and saved whenever a percpu value exceeds a threshold. Thus the saved,
2138  * summed value at any given time may not accurately reflect the current
2139  * lru length. But this value is sufficiently accurate for the needs of
2140  * a shrinker.
2141  *
2142  * Using a per cpu counter is a compromise solution to concurrent access:
2143  * lu_object_put() can update the counter without locking the site and
2144  * lu_cache_shrink_count can sum the counters without locking each
2145  * ls_obj_hash bucket.
2146  */
2147 static unsigned long lu_cache_shrink_count(struct shrinker *sk,
2148                                            struct shrink_control *sc)
2149 {
2150         struct lu_site *s;
2151         struct lu_site *tmp;
2152         unsigned long cached = 0;
2153
2154         if (!(sc->gfp_mask & __GFP_FS))
2155                 return 0;
2156
2157         down_read(&lu_sites_guard);
2158         list_for_each_entry_safe(s, tmp, &lu_sites, ls_linkage)
2159                 cached += percpu_counter_read_positive(&s->ls_lru_len_counter);
2160         up_read(&lu_sites_guard);
2161
2162         cached = (cached / 100) * sysctl_vfs_cache_pressure;
2163         CDEBUG(D_INODE, "%ld objects cached, cache pressure %d\n",
2164                cached, sysctl_vfs_cache_pressure);
2165
2166         return cached;
2167 }
2168
2169 static unsigned long lu_cache_shrink_scan(struct shrinker *sk,
2170                                           struct shrink_control *sc)
2171 {
2172         struct lu_site *s;
2173         struct lu_site *tmp;
2174         unsigned long remain = sc->nr_to_scan;
2175         LIST_HEAD(splice);
2176
2177         if (!(sc->gfp_mask & __GFP_FS))
2178                 /* We must not take the lu_sites_guard lock when
2179                  * __GFP_FS is *not* set because of the deadlock
2180                  * possibility detailed at lu_cache_shrink() below. Additionally,
2181                  * since we cannot determine the number of
2182                  * objects in the cache without taking this
2183                  * lock, we're in a particularly tough spot. As
2184                  * a result, we'll just lie and say our cache is
2185                  * empty. This _should_ be ok, as we can't
2186                  * reclaim objects when __GFP_FS is *not* set
2187                  * anyways.
2188                  */
2189                 return SHRINK_STOP;
2190
2191         down_write(&lu_sites_guard);
2192         list_for_each_entry_safe(s, tmp, &lu_sites, ls_linkage) {
2193                 remain = lu_site_purge(&lu_shrink_env, s, remain);
2194                 /*
2195                  * Move the just-shrunk site to the tail of the site list
2196                  * to ensure shrinking fairness.
2197                  */
2198                 list_move_tail(&s->ls_linkage, &splice);
2199         }
2200         list_splice(&splice, lu_sites.prev);
2201         up_write(&lu_sites_guard);
2202
2203         return sc->nr_to_scan - remain;
2204 }
2205
2206 #ifndef HAVE_SHRINKER_COUNT
2207 /*
2208  * There exists a potential lock inversion deadlock scenario when using
2209  * Lustre on top of ZFS. This occurs between one of ZFS's
2210  * buf_hash_table.ht_lock's, and Lustre's lu_sites_guard lock. Essentially,
2211  * thread A will take the lu_sites_guard lock and sleep on the ht_lock,
2212  * while thread B will take the ht_lock and sleep on the lu_sites_guard
2213  * lock. Obviously neither thread will wake and drop their respective hold
2214  * on their lock.
2215  *
2216  * To prevent this from happening we must ensure the lu_sites_guard lock is
2217  * not taken while down this code path. ZFS reliably does not set the
2218  * __GFP_FS bit in its code paths, so this can be used to determine if it
2219  * is safe to take the lu_sites_guard lock.
2220  *
2221  * Ideally we should accurately return the remaining number of cached
2222  * objects without taking the lu_sites_guard lock, but this is not
2223  * possible in the current implementation.
2224  */
2225 static int lu_cache_shrink(SHRINKER_ARGS(sc, nr_to_scan, gfp_mask))
2226 {
2227         int cached = 0;
2228         struct shrink_control scv = {
2229                  .nr_to_scan = shrink_param(sc, nr_to_scan),
2230                  .gfp_mask   = shrink_param(sc, gfp_mask)
2231         };
2232 #if !defined(HAVE_SHRINKER_WANT_SHRINK_PTR) && !defined(HAVE_SHRINK_CONTROL)
2233         struct shrinker *shrinker = NULL;
2234 #endif
2235
2236
2237         CDEBUG(D_INODE, "Shrink %lu objects\n", scv.nr_to_scan);
2238
2239         if (scv.nr_to_scan != 0)
2240                 lu_cache_shrink_scan(shrinker, &scv);
2241
2242         cached = lu_cache_shrink_count(shrinker, &scv);
2243         return cached;
2244 }
2245
2246 #endif /* HAVE_SHRINKER_COUNT */
2247
2248
2249 /*
2250  * Debugging stuff.
2251  */
2252
2253 /**
2254  * Environment to be used in debugger, contains all tags.
2255  */
2256 static struct lu_env lu_debugging_env;
2257
2258 /**
2259  * Debugging printer function using printk().
2260  */
2261 int lu_printk_printer(const struct lu_env *env,
2262                       void *unused, const char *format, ...)
2263 {
2264         va_list args;
2265
2266         va_start(args, format);
2267         vprintk(format, args);
2268         va_end(args);
2269         return 0;
2270 }
2271
2272 int lu_debugging_setup(void)
2273 {
2274         return lu_env_init(&lu_debugging_env, ~0);
2275 }
2276
2277 void lu_context_keys_dump(void)
2278 {
2279         unsigned int i;
2280
2281         for (i = 0; i < ARRAY_SIZE(lu_keys); ++i) {
2282                 struct lu_context_key *key;
2283
2284                 key = lu_keys[i];
2285                 if (key != NULL) {
2286                         CERROR("[%d]: %p %x (%p,%p,%p) %d %d \"%s\"@%p\n",
2287                                i, key, key->lct_tags,
2288                                key->lct_init, key->lct_fini, key->lct_exit,
2289                                key->lct_index, atomic_read(&key->lct_used),
2290                                key->lct_owner ? key->lct_owner->name : "",
2291                                key->lct_owner);
2292                         lu_ref_print(&key->lct_reference);
2293                 }
2294         }
2295 }
2296
2297 /**
2298  * Initialization of global lu_* data.
2299  */
2300 int lu_global_init(void)
2301 {
2302         int result;
2303         DEF_SHRINKER_VAR(shvar, lu_cache_shrink,
2304                          lu_cache_shrink_count, lu_cache_shrink_scan);
2305
2306         CDEBUG(D_INFO, "Lustre LU module (%p).\n", &lu_keys);
2307
2308         result = lu_ref_global_init();
2309         if (result != 0)
2310                 return result;
2311
2312         LU_CONTEXT_KEY_INIT(&lu_global_key);
2313         result = lu_context_key_register(&lu_global_key);
2314         if (result != 0)
2315                 return result;
2316
2317         /*
2318          * At this level, we don't know what tags are needed, so allocate them
2319          * conservatively. This should not be too bad, because this
2320          * environment is global.
2321          */
2322         down_write(&lu_sites_guard);
2323         result = lu_env_init(&lu_shrink_env, LCT_SHRINKER);
2324         up_write(&lu_sites_guard);
2325         if (result != 0)
2326                 return result;
2327
2328         /*
2329          * Seeks estimation: 3 seeks to read a record from the OI, one to
2330          * read the inode, one for the EA. Unfortunately, setting this to a
2331          * high value results in the lu_object/inode cache consuming all memory.
2332          */
2333         lu_site_shrinker = set_shrinker(DEFAULT_SEEKS, &shvar);
2334         if (lu_site_shrinker == NULL)
2335                 return -ENOMEM;
2336
2337         result = lu_env_rhash_init(&lu_env_rhash, &lu_env_rhash_params);
2338
2339         return result;
2340 }
2341
2342 /**
2343  * Dual to lu_global_init().
2344  */
2345 void lu_global_fini(void)
2346 {
2347         if (lu_site_shrinker != NULL) {
2348                 remove_shrinker(lu_site_shrinker);
2349                 lu_site_shrinker = NULL;
2350         }
2351
2352         lu_context_key_degister(&lu_global_key);
2353
2354         /*
2355          * Tear shrinker environment down _after_ de-registering
2356          * lu_global_key, because the latter has a value in the former.
2357          */
2358         down_write(&lu_sites_guard);
2359         lu_env_fini(&lu_shrink_env);
2360         up_write(&lu_sites_guard);
2361
2362         lu_env_rhash_destroy(&lu_env_rhash);
2363
2364         lu_ref_global_fini();
2365 }
2366
2367 static __u32 ls_stats_read(struct lprocfs_stats *stats, int idx)
2368 {
2369 #ifdef CONFIG_PROC_FS
2370         struct lprocfs_counter ret;
2371
2372         lprocfs_stats_collect(stats, idx, &ret);
2373         return (__u32)ret.lc_count;
2374 #else
2375         return 0;
2376 #endif
2377 }
2378
2379 /**
2380  * Output site statistical counters into a buffer. Suitable for
2381  * lprocfs_rd_*()-style functions.
2382  */
2383 int lu_site_stats_seq_print(const struct lu_site *s, struct seq_file *m)
2384 {
2385         lu_site_stats_t stats;
2386
2387         memset(&stats, 0, sizeof(stats));
2388         lu_site_stats_get(s, &stats, 1);
2389
2390         seq_printf(m, "%d/%d %d/%d %d %d %d %d %d %d %d\n",
2391                    stats.lss_busy,
2392                    stats.lss_total,
2393                    stats.lss_populated,
2394                    CFS_HASH_NHLIST(s->ls_obj_hash),
2395                    stats.lss_max_search,
2396                    ls_stats_read(s->ls_stats, LU_SS_CREATED),
2397                    ls_stats_read(s->ls_stats, LU_SS_CACHE_HIT),
2398                    ls_stats_read(s->ls_stats, LU_SS_CACHE_MISS),
2399                    ls_stats_read(s->ls_stats, LU_SS_CACHE_RACE),
2400                    ls_stats_read(s->ls_stats, LU_SS_CACHE_DEATH_RACE),
2401                    ls_stats_read(s->ls_stats, LU_SS_LRU_PURGED));
2402         return 0;
2403 }
2404 EXPORT_SYMBOL(lu_site_stats_seq_print);
2405
2406 /**
2407  * Helper function to initialize a number of kmem slab caches at once.
2408  */
2409 int lu_kmem_init(struct lu_kmem_descr *caches)
2410 {
2411         int result;
2412         struct lu_kmem_descr *iter = caches;
2413
2414         for (result = 0; iter->ckd_cache != NULL; ++iter) {
2415                 *iter->ckd_cache = kmem_cache_create(iter->ckd_name,
2416                                                      iter->ckd_size,
2417                                                      0, 0, NULL);
2418                 if (*iter->ckd_cache == NULL) {
2419                         result = -ENOMEM;
2420                         /* free all previously allocated caches */
2421                         lu_kmem_fini(caches);
2422                         break;
2423                 }
2424         }
2425         return result;
2426 }
2427 EXPORT_SYMBOL(lu_kmem_init);
2428
2429 /**
2430  * Helper function to finalize a number of kmem slab caches at once. Dual to
2431  * lu_kmem_init().
2432  */
2433 void lu_kmem_fini(struct lu_kmem_descr *caches)
2434 {
2435         for (; caches->ckd_cache != NULL; ++caches) {
2436                 if (*caches->ckd_cache != NULL) {
2437                         kmem_cache_destroy(*caches->ckd_cache);
2438                         *caches->ckd_cache = NULL;
2439                 }
2440         }
2441 }
2442 EXPORT_SYMBOL(lu_kmem_fini);
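
/*
 * Illustrative usage sketch: a lu_kmem_descr table is terminated by an entry
 * whose ckd_cache is NULL; lu_kmem_init() creates all of the caches and
 * lu_kmem_fini() destroys them again.  The cache and structure names are
 * hypothetical.
 *
 *   static struct kmem_cache *my_object_kmem;
 *
 *   static struct lu_kmem_descr my_caches[] = {
 *           {
 *                   .ckd_cache = &my_object_kmem,
 *                   .ckd_name  = "my_object_kmem",
 *                   .ckd_size  = sizeof(struct my_object)
 *           },
 *           {
 *                   .ckd_cache = NULL
 *           }
 *   };
 *
 *   rc = lu_kmem_init(my_caches);
 *   ...
 *   lu_kmem_fini(my_caches);
 */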
2443
2444 /**
2445  * Temporary solution to be able to assign fid in ->do_create()
2446  * till we have fully-functional OST fids
2447  */
2448 void lu_object_assign_fid(const struct lu_env *env, struct lu_object *o,
2449                           const struct lu_fid *fid)
2450 {
2451         struct lu_site          *s = o->lo_dev->ld_site;
2452         struct lu_fid           *old = &o->lo_header->loh_fid;
2453         struct cfs_hash         *hs;
2454         struct cfs_hash_bd       bd;
2455
2456         LASSERT(fid_is_zero(old));
2457
2458         /* supposed to be unique */
2459         hs = s->ls_obj_hash;
2460         cfs_hash_bd_get_and_lock(hs, (void *)fid, &bd, 1);
2461 #ifdef CONFIG_LUSTRE_DEBUG_EXPENSIVE_CHECK
2462         {
2463                 __u64 version = 0;
2464                 struct lu_object *shadow;
2465
2466                 shadow = htable_lookup(s, &bd, fid, &version);
2467                 /* supposed to be unique */
2468                 LASSERT(IS_ERR(shadow) && PTR_ERR(shadow) == -ENOENT);
2469         }
2470 #endif
2471         *old = *fid;
2472         cfs_hash_bd_add_locked(hs, &bd, &o->lo_header->loh_hash);
2473         cfs_hash_bd_unlock(hs, &bd, 1);
2474 }
2475 EXPORT_SYMBOL(lu_object_assign_fid);
2476
2477 /**
2478  * Allocates an object with a zero (non-assigned) fid.
2479  * XXX: temporary solution to be able to assign fid in ->do_create()
2480  *      till we have fully-functional OST fids
2481  */
2482 struct lu_object *lu_object_anon(const struct lu_env *env,
2483                                  struct lu_device *dev,
2484                                  const struct lu_object_conf *conf)
2485 {
2486         struct lu_fid fid;
2487         struct lu_object *o;
2488         int rc;
2489
2490         fid_zero(&fid);
2491         o = lu_object_alloc(env, dev, &fid);
2492         if (!IS_ERR(o)) {
2493                 rc = lu_object_start(env, dev, o, conf);
2494                 if (rc) {
2495                         lu_object_free(env, o);
2496                         return ERR_PTR(rc);
2497                 }
2498         }
2499
2500         return o;
2501 }
2502 EXPORT_SYMBOL(lu_object_anon);
2503
2504 struct lu_buf LU_BUF_NULL = {
2505         .lb_buf = NULL,
2506         .lb_len = 0
2507 };
2508 EXPORT_SYMBOL(LU_BUF_NULL);
2509
2510 void lu_buf_free(struct lu_buf *buf)
2511 {
2512         LASSERT(buf);
2513         if (buf->lb_buf) {
2514                 LASSERT(buf->lb_len > 0);
2515                 OBD_FREE_LARGE(buf->lb_buf, buf->lb_len);
2516                 buf->lb_buf = NULL;
2517                 buf->lb_len = 0;
2518         }
2519 }
2520 EXPORT_SYMBOL(lu_buf_free);
2521
2522 void lu_buf_alloc(struct lu_buf *buf, size_t size)
2523 {
2524         LASSERT(buf);
2525         LASSERT(buf->lb_buf == NULL);
2526         LASSERT(buf->lb_len == 0);
2527         OBD_ALLOC_LARGE(buf->lb_buf, size);
2528         if (likely(buf->lb_buf))
2529                 buf->lb_len = size;
2530 }
2531 EXPORT_SYMBOL(lu_buf_alloc);
2532
2533 void lu_buf_realloc(struct lu_buf *buf, size_t size)
2534 {
2535         lu_buf_free(buf);
2536         lu_buf_alloc(buf, size);
2537 }
2538 EXPORT_SYMBOL(lu_buf_realloc);
2539
2540 struct lu_buf *lu_buf_check_and_alloc(struct lu_buf *buf, size_t len)
2541 {
2542         if (buf->lb_buf == NULL && buf->lb_len == 0)
2543                 lu_buf_alloc(buf, len);
2544
2545         if ((len > buf->lb_len) && (buf->lb_buf != NULL))
2546                 lu_buf_realloc(buf, len);
2547
2548         return buf;
2549 }
2550 EXPORT_SYMBOL(lu_buf_check_and_alloc);
2551
2552 /**
2553  * Increase the size of the \a buf.
2554  * Preserves the old data in the buffer; the old buffer remains
2555  * unchanged on error.
2556  * \retval 0 or -ENOMEM
2557  */
2558 int lu_buf_check_and_grow(struct lu_buf *buf, size_t len)
2559 {
2560         char *ptr;
2561
2562         if (len <= buf->lb_len)
2563                 return 0;
2564
2565         OBD_ALLOC_LARGE(ptr, len);
2566         if (ptr == NULL)
2567                 return -ENOMEM;
2568
2569         /* Free the old buf */
2570         if (buf->lb_buf != NULL) {
2571                 memcpy(ptr, buf->lb_buf, buf->lb_len);
2572                 OBD_FREE_LARGE(buf->lb_buf, buf->lb_len);
2573         }
2574
2575         buf->lb_buf = ptr;
2576         buf->lb_len = len;
2577         return 0;
2578 }
2579 EXPORT_SYMBOL(lu_buf_check_and_grow);
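
/*
 * Illustrative usage sketch: typical lu_buf usage - start from LU_BUF_NULL,
 * let lu_buf_check_and_alloc() size the buffer on first use, grow it with
 * lu_buf_check_and_grow() when more room is needed (old data is preserved),
 * and free it once with lu_buf_free().
 *
 *   struct lu_buf buf = LU_BUF_NULL;
 *   int rc;
 *
 *   lu_buf_check_and_alloc(&buf, PAGE_SIZE);
 *   if (buf.lb_buf == NULL)
 *           return -ENOMEM;
 *   rc = lu_buf_check_and_grow(&buf, 2 * PAGE_SIZE);
 *   ...
 *   lu_buf_free(&buf);
 */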