LU-13511 obdclass: don't initialize obj for zero FID
1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.gnu.org/licenses/gpl-2.0.html
19  *
20  * GPL HEADER END
21  */
22 /*
23  * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
24  * Use is subject to license terms.
25  *
26  * Copyright (c) 2011, 2017, Intel Corporation.
27  */
28 /*
29  * This file is part of Lustre, http://www.lustre.org/
30  * Lustre is a trademark of Sun Microsystems, Inc.
31  *
32  * lustre/obdclass/lu_object.c
33  *
34  * Lustre Object.
35  * These are the only exported functions; they provide some generic
36  * infrastructure for managing object devices.
37  *
38  *   Author: Nikita Danilov <nikita.danilov@sun.com>
39  */
40
41 #define DEBUG_SUBSYSTEM S_CLASS
42
43 #include <linux/delay.h>
44 #include <linux/module.h>
45 #include <linux/list.h>
46 #include <linux/processor.h>
47 #include <linux/random.h>
48
49 #include <libcfs/libcfs.h>
50 #include <libcfs/linux/linux-mem.h>
51 #include <obd_class.h>
52 #include <obd_support.h>
53 #include <lustre_disk.h>
54 #include <lustre_fid.h>
55 #include <lu_object.h>
56 #include <lu_ref.h>
57
58 struct lu_site_bkt_data {
59         /**
60          * LRU list, updated on each access to object. Protected by
61          * lsb_waitq.lock.
62          *
63          * "Cold" end of LRU is lu_site::ls_lru.next. Accessed object are
64          * moved to the lu_site::ls_lru.prev
65          */
66         struct list_head                lsb_lru;
67         /**
68          * Wait-queue signaled when an object in this site is ultimately
69          * destroyed (lu_object_free()) or initialized (lu_object_start()).
70  * It is used by lu_object_find() to wait before re-trying when an
71  * object in the process of destruction is found in the hash table,
72  * or to wait for the object to be initialized by the allocator.
73          *
74          * \see htable_lookup().
75          */
76         wait_queue_head_t               lsb_waitq;
77 };
78
79 enum {
80         LU_CACHE_PERCENT_MAX     = 50,
81         LU_CACHE_PERCENT_DEFAULT = 20
82 };
83
84 #define LU_CACHE_NR_MAX_ADJUST          512
85 #define LU_CACHE_NR_UNLIMITED           -1
86 #define LU_CACHE_NR_DEFAULT             LU_CACHE_NR_UNLIMITED
87 /** This is set to roughly (20 * OSS_NTHRS_MAX) to prevent thrashing */
88 #define LU_CACHE_NR_ZFS_LIMIT           10240
89
90 #define LU_CACHE_NR_MIN                 4096
91 #define LU_CACHE_NR_MAX                 0x80000000UL
92
93 /**
94  * Max 256 buckets; we don't want too many buckets because:
95  * - they consume too much memory (currently max 16K)
96  * - they lead to unbalanced LRU lists
97  * With few CPUs there is little gain from extra buckets, so
98  * we treat this as a maximum in lu_site_init().
99  */
100 #define LU_SITE_BKT_BITS    8
101
102 static unsigned int lu_cache_percent = LU_CACHE_PERCENT_DEFAULT;
103 module_param(lu_cache_percent, int, 0644);
104 MODULE_PARM_DESC(lu_cache_percent, "Percentage of memory to be used as lu_object cache");
105
106 static long lu_cache_nr = LU_CACHE_NR_DEFAULT;
107 module_param(lu_cache_nr, long, 0644);
108 MODULE_PARM_DESC(lu_cache_nr, "Maximum number of objects in lu_object cache");
109
110 static void lu_object_free(const struct lu_env *env, struct lu_object *o);
111 static __u32 ls_stats_read(struct lprocfs_stats *stats, int idx);
112
113 static u32 lu_fid_hash(const void *data, u32 len, u32 seed)
114 {
115         const struct lu_fid *fid = data;
116
117         seed = cfs_hash_32(seed ^ fid->f_oid, 32);
118         seed ^= cfs_hash_64(fid->f_seq, 32);
119         return seed;
120 }
121
122 static const struct rhashtable_params obj_hash_params = {
123         .key_len        = sizeof(struct lu_fid),
124         .key_offset     = offsetof(struct lu_object_header, loh_fid),
125         .head_offset    = offsetof(struct lu_object_header, loh_hash),
126         .hashfn         = lu_fid_hash,
127         .automatic_shrinking = true,
128 };
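/*
 * With these parameters the table is keyed directly by the object's FID, so
 * a lookup is simply (illustrative sketch; 's' and 'fid' are assumed to come
 * from the caller):
 *
 *	h = rhashtable_lookup(&s->ls_obj_hash, fid, obj_hash_params);
 *
 * htable_lookup() below does exactly this under rcu_read_lock().
 */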
129
130 static inline int lu_bkt_hash(struct lu_site *s, const struct lu_fid *fid)
131 {
132         return lu_fid_hash(fid, sizeof(*fid), s->ls_bkt_seed) &
133                (s->ls_bkt_cnt - 1);
134 }
135
136 wait_queue_head_t *
137 lu_site_wq_from_fid(struct lu_site *site, struct lu_fid *fid)
138 {
139         struct lu_site_bkt_data *bkt;
140
141         bkt = &site->ls_bkts[lu_bkt_hash(site, fid)];
142         return &bkt->lsb_waitq;
143 }
144 EXPORT_SYMBOL(lu_site_wq_from_fid);
145
146 /**
147  * Decrease the reference counter on an object. If the last reference is
148  * released, return the object to the cache, unless lu_object_is_dying(o)
149  * holds. In the latter case, free the object immediately.
150  */
151 void lu_object_put(const struct lu_env *env, struct lu_object *o)
152 {
153         struct lu_site_bkt_data *bkt;
154         struct lu_object_header *top = o->lo_header;
155         struct lu_site *site = o->lo_dev->ld_site;
156         struct lu_object *orig = o;
157         const struct lu_fid *fid = lu_object_fid(o);
158
159         /*
160          * Till we have full FIDs-on-OST implemented, anonymous objects
161          * are possible in OSP. Such an object isn't listed in the site,
162          * so we should not remove it from the site.
163          */
164         if (fid_is_zero(fid)) {
165                 LASSERT(list_empty(&top->loh_lru));
166                 if (!atomic_dec_and_test(&top->loh_ref))
167                         return;
168                 list_for_each_entry_reverse(o, &top->loh_layers, lo_linkage) {
169                         if (o->lo_ops->loo_object_release != NULL)
170                                 o->lo_ops->loo_object_release(env, o);
171                 }
172                 lu_object_free(env, orig);
173                 return;
174         }
175
176         bkt = &site->ls_bkts[lu_bkt_hash(site, &top->loh_fid)];
177         if (atomic_add_unless(&top->loh_ref, -1, 1)) {
178 still_active:
179                 /*
180                  * At this point the object reference is dropped and lock is
181                  * not taken, so lu_object should not be touched because it
182                  * can be freed by concurrent thread.
183                  *
184                  * Somebody may be waiting for this, currently only used for
185                  * cl_object, see cl_object_put_last().
186                  */
187                 wake_up(&bkt->lsb_waitq);
188
189                 return;
190         }
191
192         spin_lock(&bkt->lsb_waitq.lock);
193         if (!atomic_dec_and_test(&top->loh_ref)) {
194                 spin_unlock(&bkt->lsb_waitq.lock);
195                 goto still_active;
196         }
197
198         /*
199          * Refcount is zero, and cannot be incremented without taking the bkt
200          * lock, so object is stable.
201          */
202
203         /*
204          * When last reference is released, iterate over object layers, and
205          * notify them that object is no longer busy.
206          */
207         list_for_each_entry_reverse(o, &top->loh_layers, lo_linkage) {
208                 if (o->lo_ops->loo_object_release != NULL)
209                         o->lo_ops->loo_object_release(env, o);
210         }
211
212         /*
213          * Don't use a cached 'is_dying' value here: it was read without the
214          * lock, but we need the latest actual value, so check the
215          * lu_object directly.
216          */
217         if (!lu_object_is_dying(top) &&
218             (lu_object_exists(orig) || lu_object_is_cl(orig))) {
219                 LASSERT(list_empty(&top->loh_lru));
220                 list_add_tail(&top->loh_lru, &bkt->lsb_lru);
221                 spin_unlock(&bkt->lsb_waitq.lock);
222                 percpu_counter_inc(&site->ls_lru_len_counter);
223                 CDEBUG(D_INODE, "Add %p/%p to site lru. bkt: %p\n",
224                        orig, top, bkt);
225                 return;
226         }
227
228         /*
229          * If object is dying (will not be cached) then remove it from hash
230          * table (it is already not on the LRU).
231          *
232          * This is done with bucket lock held.  As the only way to acquire first
233          * reference to previously unreferenced object is through hash-table
234          * lookup (lu_object_find()) which takes the lock for first reference,
235          * no race with concurrent object lookup is possible and we can safely
236          * destroy object below.
237          */
238         if (!test_and_set_bit(LU_OBJECT_UNHASHED, &top->loh_flags))
239                 rhashtable_remove_fast(&site->ls_obj_hash, &top->loh_hash,
240                                        obj_hash_params);
241
242         spin_unlock(&bkt->lsb_waitq.lock);
243         /* Object was already removed from hash above, can kill it. */
244         lu_object_free(env, orig);
245 }
246 EXPORT_SYMBOL(lu_object_put);
247
248 /**
249  * Put an object and don't keep it in the cache. This is a temporary
250  * solution for multi-site objects whose layering is not constant.
251  */
252 void lu_object_put_nocache(const struct lu_env *env, struct lu_object *o)
253 {
254         set_bit(LU_OBJECT_HEARD_BANSHEE, &o->lo_header->loh_flags);
255         return lu_object_put(env, o);
256 }
257 EXPORT_SYMBOL(lu_object_put_nocache);
258
259 /**
260  * Kill the object and take it out of LRU cache.
261  * Currently used by client code for layout change.
262  */
263 void lu_object_unhash(const struct lu_env *env, struct lu_object *o)
264 {
265         struct lu_object_header *top;
266
267         top = o->lo_header;
268         set_bit(LU_OBJECT_HEARD_BANSHEE, &top->loh_flags);
269         if (!test_and_set_bit(LU_OBJECT_UNHASHED, &top->loh_flags)) {
270                 struct lu_site *site = o->lo_dev->ld_site;
271                 struct rhashtable *obj_hash = &site->ls_obj_hash;
272                 struct lu_site_bkt_data *bkt;
273
274                 bkt = &site->ls_bkts[lu_bkt_hash(site, &top->loh_fid)];
275                 spin_lock(&bkt->lsb_waitq.lock);
276                 if (!list_empty(&top->loh_lru)) {
277                         list_del_init(&top->loh_lru);
278                         percpu_counter_dec(&site->ls_lru_len_counter);
279                 }
280                 spin_unlock(&bkt->lsb_waitq.lock);
281
282                 rhashtable_remove_fast(obj_hash, &top->loh_hash,
283                                        obj_hash_params);
284         }
285 }
286 EXPORT_SYMBOL(lu_object_unhash);
287
288 /**
289  * Allocate a new object.
290  *
291  * This follows the object creation protocol described in the comment
292  * within the struct lu_device_operations definition.
293  */
294 static struct lu_object *lu_object_alloc(const struct lu_env *env,
295                                          struct lu_device *dev,
296                                          const struct lu_fid *f)
297 {
298         struct lu_object *top;
299
300         /*
301          * Create top-level object slice. This will also create
302          * lu_object_header.
303          */
304         top = dev->ld_ops->ldo_object_alloc(env, NULL, dev);
305         if (top == NULL)
306                 return ERR_PTR(-ENOMEM);
307         if (IS_ERR(top))
308                 return top;
309         /*
310          * This is the only place where object fid is assigned. It's constant
311          * after this point.
312          */
313         top->lo_header->loh_fid = *f;
314
315         return top;
316 }
317
318 /**
319  * Initialize object.
320  *
321  * This is called after object hash insertion to avoid returning an object with
322  * stale attributes.
323  */
324 static int lu_object_start(const struct lu_env *env, struct lu_device *dev,
325                            struct lu_object *top,
326                            const struct lu_object_conf *conf)
327 {
328         struct lu_object *scan;
329         struct list_head *layers;
330         unsigned int init_mask = 0;
331         unsigned int init_flag;
332         int clean;
333         int result;
334
335         layers = &top->lo_header->loh_layers;
336
337         do {
338                 /*
339                  * Call ->loo_object_init() repeatedly, until no more new
340                  * object slices are created.
341                  */
342                 clean = 1;
343                 init_flag = 1;
344                 list_for_each_entry(scan, layers, lo_linkage) {
345                         if (init_mask & init_flag)
346                                 goto next;
347                         clean = 0;
348                         scan->lo_header = top->lo_header;
349                         result = scan->lo_ops->loo_object_init(env, scan, conf);
350                         if (result)
351                                 return result;
352
353                         init_mask |= init_flag;
354 next:
355                         init_flag <<= 1;
356                 }
357         } while (!clean);
358
359         list_for_each_entry_reverse(scan, layers, lo_linkage) {
360                 if (scan->lo_ops->loo_object_start != NULL) {
361                         result = scan->lo_ops->loo_object_start(env, scan);
362                         if (result)
363                                 return result;
364                 }
365         }
366
367         lprocfs_counter_incr(dev->ld_site->ls_stats, LU_SS_CREATED);
368
369         set_bit(LU_OBJECT_INITED, &top->lo_header->loh_flags);
370
371         return 0;
372 }
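/*
 * Illustrative walk-through of the loop above for a hypothetical three-layer
 * stack (the layer count is invented for the example):
 *
 *	pass 1: slice 0 (bit 0) init allocates a slice below it, clean = 0;
 *	pass 2: slice 1 (bit 1) init allocates another slice, clean = 0;
 *	pass 3: slice 2 (bit 2) init allocates nothing, clean = 0;
 *	pass 4: all bits are already in init_mask, clean stays 1, loop exits.
 */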
373
374 /**
375  * Free an object.
376  */
377 static void lu_object_free(const struct lu_env *env, struct lu_object *o)
378 {
379         wait_queue_head_t *wq;
380         struct lu_site *site;
381         struct lu_object *scan;
382         struct list_head *layers;
383         LIST_HEAD(splice);
384
385         site = o->lo_dev->ld_site;
386         layers = &o->lo_header->loh_layers;
387         wq = lu_site_wq_from_fid(site, &o->lo_header->loh_fid);
388         /*
389          * First call ->loo_object_delete() method to release all resources.
390          */
391         list_for_each_entry_reverse(scan, layers, lo_linkage) {
392                 if (scan->lo_ops->loo_object_delete != NULL)
393                         scan->lo_ops->loo_object_delete(env, scan);
394         }
395
396         /*
397          * Then, splice object layers into stand-alone list, and call
398          * ->loo_object_free() on all layers to free memory. Splice is
399          * necessary, because lu_object_header is freed together with the
400          * top-level slice.
401          */
402         list_splice_init(layers, &splice);
403         while (!list_empty(&splice)) {
404                 /*
405                  * Free layers in bottom-to-top order, so that object header
406                  * lives as long as possible and ->loo_object_free() methods
407                  * can look at its contents.
408                  */
409                 o = container_of(splice.prev, struct lu_object, lo_linkage);
410                 list_del_init(&o->lo_linkage);
411                 LASSERT(o->lo_ops->loo_object_free != NULL);
412                 o->lo_ops->loo_object_free(env, o);
413         }
414
415         if (waitqueue_active(wq))
416                 wake_up_all(wq);
417 }
418
419 /**
420  * Free \a nr objects from the cold end of the site LRU list.
421  * If \a canblock is 0, don't block waiting for another
422  * instance of lu_site_purge() to complete.
423  */
424 int lu_site_purge_objects(const struct lu_env *env, struct lu_site *s,
425                           int nr, int canblock)
426 {
427         struct lu_object_header *h;
428         struct lu_object_header *temp;
429         struct lu_site_bkt_data *bkt;
430         LIST_HEAD(dispose);
431         int                      did_sth;
432         unsigned int             start = 0;
433         int                      count;
434         int                      bnr;
435         unsigned int             i;
436
437         if (OBD_FAIL_CHECK(OBD_FAIL_OBD_NO_LRU))
438                 RETURN(0);
439
440         /*
441          * Under LRU list lock, scan LRU list and move unreferenced objects to
442          * the dispose list, removing them from LRU and hash table.
443          */
444         if (nr != ~0)
445                 start = s->ls_purge_start;
446         bnr = (nr == ~0) ? -1 : nr / s->ls_bkt_cnt + 1;
447 again:
448         /*
449          * It doesn't make any sense to make purge threads parallel; that can
450          * only bring us trouble.  See LU-5331.
451          */
452         if (canblock != 0)
453                 mutex_lock(&s->ls_purge_mutex);
454         else if (mutex_trylock(&s->ls_purge_mutex) == 0)
455                 goto out;
456
457         did_sth = 0;
458         for (i = start; i < s->ls_bkt_cnt; i++) {
459                 count = bnr;
460                 bkt = &s->ls_bkts[i];
461                 spin_lock(&bkt->lsb_waitq.lock);
462
463                 list_for_each_entry_safe(h, temp, &bkt->lsb_lru, loh_lru) {
464                         LASSERT(atomic_read(&h->loh_ref) == 0);
465
466                         LINVRNT(lu_bkt_hash(s, &h->loh_fid) == i);
467
468                         set_bit(LU_OBJECT_UNHASHED, &h->loh_flags);
469                         rhashtable_remove_fast(&s->ls_obj_hash, &h->loh_hash,
470                                                obj_hash_params);
471                         list_move(&h->loh_lru, &dispose);
472                         percpu_counter_dec(&s->ls_lru_len_counter);
473                         if (did_sth == 0)
474                                 did_sth = 1;
475
476                         if (nr != ~0 && --nr == 0)
477                                 break;
478
479                         if (count > 0 && --count == 0)
480                                 break;
481
482                 }
483                 spin_unlock(&bkt->lsb_waitq.lock);
484                 cond_resched();
485                 /*
486                  * Free everything on the dispose list. This is safe against
487                  * races due to the reasons described in lu_object_put().
488                  */
489                 while ((h = list_first_entry_or_null(&dispose,
490                                                      struct lu_object_header,
491                                                      loh_lru)) != NULL) {
492                         list_del_init(&h->loh_lru);
493                         lu_object_free(env, lu_object_top(h));
494                         lprocfs_counter_incr(s->ls_stats, LU_SS_LRU_PURGED);
495                 }
496
497                 if (nr == 0)
498                         break;
499         }
500         mutex_unlock(&s->ls_purge_mutex);
501
502         if (nr != 0 && did_sth && start != 0) {
503                 start = 0; /* restart from the first bucket */
504                 goto again;
505         }
506         /* race on s->ls_purge_start, but nobody cares */
507         s->ls_purge_start = i & (s->ls_bkt_cnt - 1);
508 out:
509         return nr;
510 }
511 EXPORT_SYMBOL(lu_site_purge_objects);
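/*
 * Usage sketch (illustrative; 'env' and 's' are assumed valid).  A memory
 * pressure path can trim a bounded number of objects without blocking on a
 * concurrent purge, while teardown paths pass ~0 to drain everything:
 *
 *	lu_site_purge_objects(env, s, 128, 0);
 *	lu_site_purge_objects(env, s, ~0, 1);
 *
 * The first call returns immediately if another purge is in flight; the
 * second waits on the purge mutex.
 */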
512
513 /*
514  * Object printing.
515  *
516  * The code below has to jump through certain hoops to output an object
517  * description into the libcfs_debug_msg-based log. The problem is that lu_object_print()
518  * composes object description from strings that are parts of _lines_ of
519  * output (i.e., strings that are not terminated by newline). This doesn't fit
520  * very well into libcfs_debug_msg() interface that assumes that each message
521  * supplied to it is a self-contained output line.
522  *
523  * To work around this, strings are collected in a temporary buffer
524  * (implemented as a value of lu_cdebug_key key), until terminating newline
525  * character is detected.
526  *
527  */
528
529 enum {
530         /**
531          * Maximal line size.
532          *
533          * XXX overflow is not handled correctly.
534          */
535         LU_CDEBUG_LINE = 512
536 };
537
538 struct lu_cdebug_data {
539         /**
540          * Temporary buffer.
541          */
542         char lck_area[LU_CDEBUG_LINE];
543 };
544
545 /* context key constructor/destructor: lu_global_key_init, lu_global_key_fini */
546 LU_KEY_INIT_FINI(lu_global, struct lu_cdebug_data);
547
548 /**
549  * Key, holding temporary buffer. This key is registered very early by
550  * lu_global_init().
551  */
552 static struct lu_context_key lu_global_key = {
553         .lct_tags = LCT_MD_THREAD | LCT_DT_THREAD |
554                     LCT_MG_THREAD | LCT_CL_THREAD | LCT_LOCAL,
555         .lct_init = lu_global_key_init,
556         .lct_fini = lu_global_key_fini
557 };
558
559 /**
560  * Printer function emitting messages through libcfs_debug_msg().
561  */
562 int lu_cdebug_printer(const struct lu_env *env,
563                       void *cookie, const char *format, ...)
564 {
565         struct libcfs_debug_msg_data *msgdata = cookie;
566         struct lu_cdebug_data        *key;
567         int used;
568         int complete;
569         va_list args;
570
571         va_start(args, format);
572
573         key = lu_context_key_get(&env->le_ctx, &lu_global_key);
574         LASSERT(key != NULL);
575
576         used = strlen(key->lck_area);
577         complete = format[strlen(format) - 1] == '\n';
578         /*
579          * Append new chunk to the buffer.
580          */
581         vsnprintf(key->lck_area + used,
582                   ARRAY_SIZE(key->lck_area) - used, format, args);
583         if (complete) {
584                 if (cfs_cdebug_show(msgdata->msg_mask, msgdata->msg_subsys))
585                         libcfs_debug_msg(msgdata, "%s\n", key->lck_area);
586                 key->lck_area[0] = 0;
587         }
588         va_end(args);
589         return 0;
590 }
591 EXPORT_SYMBOL(lu_cdebug_printer);
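/*
 * Usage sketch (illustrative; 'env' and 'o' are assumed from the caller):
 * lu_cdebug_printer() is normally passed as the lu_printer_t callback with
 * the message data as the cookie:
 *
 *	LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, D_INFO, NULL);
 *
 *	lu_object_print(env, &msgdata, lu_cdebug_printer, o);
 *
 * lu_site_print() below wires up the same pieces for a whole site.
 */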
592
593 /**
594  * Print object header.
595  */
596 void lu_object_header_print(const struct lu_env *env, void *cookie,
597                             lu_printer_t printer,
598                             const struct lu_object_header *hdr)
599 {
600         (*printer)(env, cookie, "header@%p[%#lx, %d, "DFID"%s%s%s]",
601                    hdr, hdr->loh_flags, atomic_read(&hdr->loh_ref),
602                    PFID(&hdr->loh_fid),
603                    test_bit(LU_OBJECT_UNHASHED,
604                             &hdr->loh_flags) ? "" : " hash",
605                    list_empty(&hdr->loh_lru) ? "" : " lru",
606                    hdr->loh_attr & LOHA_EXISTS ? " exist" : "");
607 }
608 EXPORT_SYMBOL(lu_object_header_print);
609
610 /**
611  * Print human-readable representation of \a o to \a printer.
612  */
613 void lu_object_print(const struct lu_env *env, void *cookie,
614                      lu_printer_t printer, const struct lu_object *o)
615 {
616         static const char ruler[] = "........................................";
617         struct lu_object_header *top;
618         int depth = 4;
619
620         top = o->lo_header;
621         lu_object_header_print(env, cookie, printer, top);
622         (*printer)(env, cookie, "{\n");
623
624         list_for_each_entry(o, &top->loh_layers, lo_linkage) {
625                 /*
626                  * print `.' \a depth times followed by type name and address
627                  */
628                 (*printer)(env, cookie, "%*.*s%s@%p", depth, depth, ruler,
629                            o->lo_dev->ld_type->ldt_name, o);
630
631                 if (o->lo_ops->loo_object_print != NULL)
632                         (*o->lo_ops->loo_object_print)(env, cookie, printer, o);
633
634                 (*printer)(env, cookie, "\n");
635         }
636
637         (*printer)(env, cookie, "} header@%p\n", top);
638 }
639 EXPORT_SYMBOL(lu_object_print);
640
641 /**
642  * Check object consistency.
643  */
644 int lu_object_invariant(const struct lu_object *o)
645 {
646         struct lu_object_header *top;
647
648         top = o->lo_header;
649         list_for_each_entry(o, &top->loh_layers, lo_linkage) {
650                 if (o->lo_ops->loo_object_invariant != NULL &&
651                     !o->lo_ops->loo_object_invariant(o))
652                         return 0;
653         }
654         return 1;
655 }
656
657 /*
658  * Limit the lu_object cache to a maximum of lu_cache_nr objects.  Because the
659  * calculation for the number of objects to reclaim is not covered by a lock,
660  * the maximum number of objects is capped by LU_CACHE_NR_MAX_ADJUST.  This ensures
661  * that many concurrent threads will not accidentally purge the entire cache.
662  */
663 static void lu_object_limit(const struct lu_env *env,
664                             struct lu_device *dev)
665 {
666         u64 size, nr;
667
668         if (lu_cache_nr == LU_CACHE_NR_UNLIMITED)
669                 return;
670
671         size = atomic_read(&dev->ld_site->ls_obj_hash.nelems);
672         nr = (u64)lu_cache_nr;
673         if (size <= nr)
674                 return;
675
676         lu_site_purge_objects(env, dev->ld_site,
677                               min_t(u64, size - nr, LU_CACHE_NR_MAX_ADJUST),
678                               0);
679 }
680
681 static struct lu_object *htable_lookup(const struct lu_env *env,
682                                        struct lu_device *dev,
683                                        struct lu_site_bkt_data *bkt,
684                                        const struct lu_fid *f,
685                                        struct lu_object_header *new)
686 {
687         struct lu_site *s = dev->ld_site;
688         struct lu_object_header *h;
689
690 try_again:
691         rcu_read_lock();
692         if (new)
693                 h = rhashtable_lookup_get_insert_fast(&s->ls_obj_hash,
694                                                       &new->loh_hash,
695                                                       obj_hash_params);
696         else
697                 h = rhashtable_lookup(&s->ls_obj_hash, f, obj_hash_params);
698
699         if (IS_ERR_OR_NULL(h)) {
700                 /* Not found */
701                 if (!new)
702                         lprocfs_counter_incr(s->ls_stats, LU_SS_CACHE_MISS);
703                 rcu_read_unlock();
704                 if (PTR_ERR(h) == -ENOMEM) {
705                         msleep(20);
706                         goto try_again;
707                 }
708                 lu_object_limit(env, dev);
709                 if (PTR_ERR(h) == -E2BIG)
710                         goto try_again;
711
712                 return ERR_PTR(-ENOENT);
713         }
714
715         if (atomic_inc_not_zero(&h->loh_ref)) {
716                 rcu_read_unlock();
717                 return lu_object_top(h);
718         }
719
720         spin_lock(&bkt->lsb_waitq.lock);
721         if (lu_object_is_dying(h) ||
722             test_bit(LU_OBJECT_UNHASHED, &h->loh_flags)) {
723                 spin_unlock(&bkt->lsb_waitq.lock);
724                 rcu_read_unlock();
725                 if (new) {
726                         /*
727                          * Old object might have already been removed, or will
728                          * be soon.  We need to insert our new object, so
729                          * remove the old one just in case it is still there.
730                          */
731                         rhashtable_remove_fast(&s->ls_obj_hash, &h->loh_hash,
732                                                obj_hash_params);
733                         goto try_again;
734                 }
735                 lprocfs_counter_incr(s->ls_stats, LU_SS_CACHE_MISS);
736                 return ERR_PTR(-ENOENT);
737         }
738         /* Now protected by spinlock */
739         rcu_read_unlock();
740
741         if (!list_empty(&h->loh_lru)) {
742                 list_del_init(&h->loh_lru);
743                 percpu_counter_dec(&s->ls_lru_len_counter);
744         }
745         atomic_inc(&h->loh_ref);
746         spin_unlock(&bkt->lsb_waitq.lock);
747         lprocfs_counter_incr(s->ls_stats, LU_SS_CACHE_HIT);
748         return lu_object_top(h);
749 }
750
751 /**
752  * Search the cache for an object with the fid \a f. If such an object is
753  * found, return it. Otherwise, create a new object, insert it into the cache
754  * and return it. In any case, an additional reference is acquired on the returned object.
755  */
756 struct lu_object *lu_object_find(const struct lu_env *env,
757                                  struct lu_device *dev, const struct lu_fid *f,
758                                  const struct lu_object_conf *conf)
759 {
760         return lu_object_find_at(env, dev->ld_site->ls_top_dev, f, conf);
761 }
762 EXPORT_SYMBOL(lu_object_find);
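/*
 * Typical caller pattern (illustrative sketch; 'env', 'dev' and 'fid' come
 * from the caller's context):
 *
 *	struct lu_object *o;
 *
 *	o = lu_object_find(env, dev, fid, NULL);
 *	if (IS_ERR(o))
 *		RETURN(PTR_ERR(o));
 *	... use the object ...
 *	lu_object_put(env, o);
 *
 * The reference acquired here is the one lu_object_put() releases.
 */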
763
764 /*
765  * Get a 'first' reference to an object that was found while looking through the
766  * hash table.
767  */
768 struct lu_object *lu_object_get_first(struct lu_object_header *h,
769                                       struct lu_device *dev)
770 {
771         struct lu_site *s = dev->ld_site;
772         struct lu_object *ret;
773
774         if (IS_ERR_OR_NULL(h) || lu_object_is_dying(h))
775                 return NULL;
776
777         ret = lu_object_locate(h, dev->ld_type);
778         if (!ret)
779                 return ret;
780
781         if (!atomic_inc_not_zero(&h->loh_ref)) {
782                 struct lu_site_bkt_data *bkt;
783
784                 bkt = &s->ls_bkts[lu_bkt_hash(s, &h->loh_fid)];
785                 spin_lock(&bkt->lsb_waitq.lock);
786                 if (!lu_object_is_dying(h) &&
787                     !test_bit(LU_OBJECT_UNHASHED, &h->loh_flags))
788                         atomic_inc(&h->loh_ref);
789                 else
790                         ret = NULL;
791                 spin_unlock(&bkt->lsb_waitq.lock);
792         }
793         return ret;
794 }
795 EXPORT_SYMBOL(lu_object_get_first);
796
797 /**
798  * Core logic of lu_object_find*() functions.
799  *
800  * Much like lu_object_find(), but the top-level device of the object is
801  * specifically \a dev rather than the top-level device of the site. This interface allows
802  * objects of different "stacking" to be created within the same site.
803  */
804 struct lu_object *lu_object_find_at(const struct lu_env *env,
805                                     struct lu_device *dev,
806                                     const struct lu_fid *f,
807                                     const struct lu_object_conf *conf)
808 {
809         struct lu_object *o;
810         struct lu_object *shadow;
811         struct lu_site *s;
812         struct lu_site_bkt_data *bkt;
813         struct rhashtable *hs;
814         int rc;
815
816         ENTRY;
817
818         /* The FID comes from disk or the network; a zero FID is meaningless, so
819          * return an error early to avoid an assertion in lu_object_put(). If a
820          * zero FID is wanted, it should be allocated via lu_object_anon().
821          */
822         if (fid_is_zero(f))
823                 RETURN(ERR_PTR(-EINVAL));
824
825         /*
826          * This uses standard index maintenance protocol:
827          *
828          *     - search index under lock, and return object if found;
829          *     - otherwise, unlock index, allocate new object;
830          *     - lock index and search again;
831          *     - if nothing is found (usual case), insert newly created
832          *       object into index;
833          *     - otherwise (race: other thread inserted object), free
834          *       object just allocated.
835          *     - unlock index;
836          *     - return object.
837          *
838          * In the "LOC_F_NEW" case, we are sure the object is newly established.
839          * It is unnecessary to perform lookup-alloc-lookup-insert; instead,
840          * just allocate and insert directly.
841          *
842          */
843         s  = dev->ld_site;
844         hs = &s->ls_obj_hash;
845
846         if (unlikely(OBD_FAIL_PRECHECK(OBD_FAIL_OBD_ZERO_NLINK_RACE)))
847                 lu_site_purge(env, s, -1);
848
849         bkt = &s->ls_bkts[lu_bkt_hash(s, f)];
850         if (!(conf && conf->loc_flags & LOC_F_NEW)) {
851                 o = htable_lookup(env, dev, bkt, f, NULL);
852
853                 if (!IS_ERR(o)) {
854                         if (likely(lu_object_is_inited(o->lo_header)))
855                                 RETURN(o);
856
857                         wait_event_idle(bkt->lsb_waitq,
858                                         lu_object_is_inited(o->lo_header) ||
859                                         lu_object_is_dying(o->lo_header));
860
861                         if (lu_object_is_dying(o->lo_header)) {
862                                 lu_object_put(env, o);
863
864                                 RETURN(ERR_PTR(-ENOENT));
865                         }
866
867                         RETURN(o);
868                 }
869
870                 if (PTR_ERR(o) != -ENOENT)
871                         RETURN(o);
872         }
873
874         /*
875          * Allocate the new object. NB: the object is left uninitialized here,
876          * because if it were initialized before hash insertion and then changed,
877          * an object with stale attributes could be returned.
878          */
879         o = lu_object_alloc(env, dev, f);
880         if (IS_ERR(o))
881                 RETURN(o);
882
883         LASSERT(lu_fid_eq(lu_object_fid(o), f));
884
885         CFS_RACE_WAIT(OBD_FAIL_OBD_ZERO_NLINK_RACE);
886
887         if (conf && conf->loc_flags & LOC_F_NEW) {
888                 int status = rhashtable_insert_fast(hs, &o->lo_header->loh_hash,
889                                                     obj_hash_params);
890                 if (status)
891                         /* Strange error - go the slow way */
892                         shadow = htable_lookup(env, dev, bkt, f, o->lo_header);
893                 else
894                         shadow = ERR_PTR(-ENOENT);
895         } else {
896                 shadow = htable_lookup(env, dev, bkt, f, o->lo_header);
897         }
898         if (likely(PTR_ERR(shadow) == -ENOENT)) {
899                 /*
900                  * The new object has been successfully inserted.
901                  *
902                  * This may result in rather complicated operations, including
903                  * fld queries, inode loading, etc.
904                  */
905                 rc = lu_object_start(env, dev, o, conf);
906                 if (rc) {
907                         lu_object_put_nocache(env, o);
908                         RETURN(ERR_PTR(rc));
909                 }
910
911                 wake_up(&bkt->lsb_waitq);
912
913                 lu_object_limit(env, dev);
914
915                 RETURN(o);
916         }
917
918         lprocfs_counter_incr(s->ls_stats, LU_SS_CACHE_RACE);
919         lu_object_free(env, o);
920
921         if (!(conf && conf->loc_flags & LOC_F_NEW) &&
922             !IS_ERR(shadow) &&
923             !lu_object_is_inited(shadow->lo_header)) {
924                 wait_event_idle(bkt->lsb_waitq,
925                                 lu_object_is_inited(shadow->lo_header) ||
926                                 lu_object_is_dying(shadow->lo_header));
927
928                 if (lu_object_is_dying(shadow->lo_header)) {
929                         lu_object_put(env, shadow);
930
931                         RETURN(ERR_PTR(-ENOENT));
932                 }
933         }
934
935         RETURN(shadow);
936 }
937 EXPORT_SYMBOL(lu_object_find_at);
938
939 /**
940  * Find an object with the given fid and return its slice belonging to the given device.
941  */
942 struct lu_object *lu_object_find_slice(const struct lu_env *env,
943                                        struct lu_device *dev,
944                                        const struct lu_fid *f,
945                                        const struct lu_object_conf *conf)
946 {
947         struct lu_object *top;
948         struct lu_object *obj;
949
950         top = lu_object_find(env, dev, f, conf);
951         if (IS_ERR(top))
952                 return top;
953
954         obj = lu_object_locate(top->lo_header, dev->ld_type);
955         if (unlikely(obj == NULL)) {
956                 lu_object_put(env, top);
957                 obj = ERR_PTR(-ENOENT);
958         }
959
960         return obj;
961 }
962 EXPORT_SYMBOL(lu_object_find_slice);
963
964 int lu_device_type_init(struct lu_device_type *ldt)
965 {
966         int result = 0;
967
968         atomic_set(&ldt->ldt_device_nr, 0);
969         if (ldt->ldt_ops->ldto_init)
970                 result = ldt->ldt_ops->ldto_init(ldt);
971
972         return result;
973 }
974 EXPORT_SYMBOL(lu_device_type_init);
975
976 void lu_device_type_fini(struct lu_device_type *ldt)
977 {
978         if (ldt->ldt_ops->ldto_fini)
979                 ldt->ldt_ops->ldto_fini(ldt);
980 }
981 EXPORT_SYMBOL(lu_device_type_fini);
982
983 /**
984  * Global list of all sites on this node
985  */
986 static LIST_HEAD(lu_sites);
987 static DECLARE_RWSEM(lu_sites_guard);
988
989 /**
990  * Global environment used by site shrinker.
991  */
992 static struct lu_env lu_shrink_env;
993
994 struct lu_site_print_arg {
995         struct lu_env   *lsp_env;
996         void            *lsp_cookie;
997         lu_printer_t     lsp_printer;
998 };
999
1000 static void
1001 lu_site_obj_print(struct lu_object_header *h, struct lu_site_print_arg *arg)
1002 {
1003         if (!list_empty(&h->loh_layers)) {
1004                 const struct lu_object *o;
1005
1006                 o = lu_object_top(h);
1007                 lu_object_print(arg->lsp_env, arg->lsp_cookie,
1008                                 arg->lsp_printer, o);
1009         } else {
1010                 lu_object_header_print(arg->lsp_env, arg->lsp_cookie,
1011                                        arg->lsp_printer, h);
1012         }
1013 }
1014
1015 /**
1016  * Print all objects in \a s.
1017  */
1018 void lu_site_print(const struct lu_env *env, struct lu_site *s, atomic_t *ref,
1019                    int msg_flag, lu_printer_t printer)
1020 {
1021         struct lu_site_print_arg arg = {
1022                 .lsp_env     = (struct lu_env *)env,
1023                 .lsp_printer = printer,
1024         };
1025         struct rhashtable_iter iter;
1026         struct lu_object_header *h;
1027         LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, msg_flag, NULL);
1028
1029         if (!s || !atomic_read(ref))
1030                 return;
1031
1032         arg.lsp_cookie = (void *)&msgdata;
1033
1034         rhashtable_walk_enter(&s->ls_obj_hash, &iter);
1035         rhashtable_walk_start(&iter);
1036         while ((h = rhashtable_walk_next(&iter)) != NULL) {
1037                 if (IS_ERR(h))
1038                         continue;
1039                 lu_site_obj_print(h, &arg);
1040         }
1041         rhashtable_walk_stop(&iter);
1042         rhashtable_walk_exit(&iter);
1043 }
1044 EXPORT_SYMBOL(lu_site_print);
1045
1046 /**
1047  * Set the desired lu_object cache limits for the device stack.
1048  */
1049 static void lu_htable_limits(struct lu_device *top)
1050 {
1051         unsigned long cache_size;
1052
1053         /*
1054          * For ZFS based OSDs the cache should be disabled by default.  This
1055          * allows the ZFS ARC maximum flexibility in determining what buffers
1056          * to cache.  If Lustre has objects or buffers which it wants to ensure
1057          * always stay cached it must maintain a hold on them.
1058          */
1059         if (strcmp(top->ld_type->ldt_name, LUSTRE_OSD_ZFS_NAME) == 0) {
1060                 lu_cache_nr = LU_CACHE_NR_ZFS_LIMIT;
1061                 return;
1062         }
1063
1064         /*
1065          * Calculate the cache size limit, assuming that we want reasonable
1066          * performance when 20% of total memory is occupied by the cache of
1067          * lu_objects.
1068          *
1069          * Size of lu_object is (arbitrary) taken as 1K (together with inode).
1070          */
1071         cache_size = cfs_totalram_pages();
1072
1073 #if BITS_PER_LONG == 32
1074         /* limit hashtable size for lowmem systems to low RAM */
1075         if (cache_size > 1 << (30 - PAGE_SHIFT))
1076                 cache_size = (1 << (30 - PAGE_SHIFT)) * 3 / 4;
1077 #endif
1078
1079         /* clear off unreasonable cache setting. */
1080         if (lu_cache_percent == 0 || lu_cache_percent > LU_CACHE_PERCENT_MAX) {
1081                 CWARN("obdclass: invalid lu_cache_percent: %u, it must be in the range of (0, %u]. Will use default value: %u.\n",
1082                       lu_cache_percent, LU_CACHE_PERCENT_MAX,
1083                       LU_CACHE_PERCENT_DEFAULT);
1084
1085                 lu_cache_percent = LU_CACHE_PERCENT_DEFAULT;
1086         }
1087         cache_size = cache_size / 100 * lu_cache_percent *
1088                 (PAGE_SIZE / 1024);
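        /*
         * Worked example (illustrative): with 16 GiB of RAM and 4 KiB pages,
         * cfs_totalram_pages() is 4M, so the default lu_cache_percent of 20
         * gives 4M / 100 * 20 * (4096 / 1024) = ~3.3M objects, i.e. roughly
         * 20% of RAM at the assumed 1 KiB per object.
         */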
1089
1090         lu_cache_nr = clamp_t(typeof(cache_size), cache_size,
1091                               LU_CACHE_NR_MIN, LU_CACHE_NR_MAX);
1092 }
1093
1094 void lu_dev_add_linkage(struct lu_site *s, struct lu_device *d)
1095 {
1096         spin_lock(&s->ls_ld_lock);
1097         if (list_empty(&d->ld_linkage))
1098                 list_add(&d->ld_linkage, &s->ls_ld_linkage);
1099         spin_unlock(&s->ls_ld_lock);
1100 }
1101 EXPORT_SYMBOL(lu_dev_add_linkage);
1102
1103 void lu_dev_del_linkage(struct lu_site *s, struct lu_device *d)
1104 {
1105         spin_lock(&s->ls_ld_lock);
1106         list_del_init(&d->ld_linkage);
1107         spin_unlock(&s->ls_ld_lock);
1108 }
1109 EXPORT_SYMBOL(lu_dev_del_linkage);
1110
1111 /**
1112  * Initialize site \a s, with \a top as the top level device.
1113  */
1114 int lu_site_init(struct lu_site *s, struct lu_device *top)
1115 {
1116         struct lu_site_bkt_data *bkt;
1117         unsigned int i;
1118         int rc;
1119         ENTRY;
1120
1121         memset(s, 0, sizeof *s);
1122         mutex_init(&s->ls_purge_mutex);
1123         lu_htable_limits(top);
1124
1125 #ifdef HAVE_PERCPU_COUNTER_INIT_GFP_FLAG
1126         rc = percpu_counter_init(&s->ls_lru_len_counter, 0, GFP_NOFS);
1127 #else
1128         rc = percpu_counter_init(&s->ls_lru_len_counter, 0);
1129 #endif
1130         if (rc)
1131                 return -ENOMEM;
1132
1133         if (rhashtable_init(&s->ls_obj_hash, &obj_hash_params) != 0) {
1134                 CERROR("failed to create lu_site hash\n");
1135                 return -ENOMEM;
1136         }
1137
1138         s->ls_bkt_seed = prandom_u32();
1139         s->ls_bkt_cnt = max_t(long, 1 << LU_SITE_BKT_BITS,
1140                               2 * num_possible_cpus());
1141         s->ls_bkt_cnt = roundup_pow_of_two(s->ls_bkt_cnt);
1142         OBD_ALLOC_PTR_ARRAY_LARGE(s->ls_bkts, s->ls_bkt_cnt);
1143         if (!s->ls_bkts) {
1144                 rhashtable_destroy(&s->ls_obj_hash);
1145                 s->ls_bkts = NULL;
1146                 return -ENOMEM;
1147         }
1148
1149         for (i = 0; i < s->ls_bkt_cnt; i++) {
1150                 bkt = &s->ls_bkts[i];
1151                 INIT_LIST_HEAD(&bkt->lsb_lru);
1152                 init_waitqueue_head(&bkt->lsb_waitq);
1153         }
1154
1155         s->ls_stats = lprocfs_alloc_stats(LU_SS_LAST_STAT, 0);
1156         if (s->ls_stats == NULL) {
1157                 OBD_FREE_PTR_ARRAY_LARGE(s->ls_bkts, s->ls_bkt_cnt);
1158                 s->ls_bkts = NULL;
1159                 rhashtable_destroy(&s->ls_obj_hash);
1160                 return -ENOMEM;
1161         }
1162
1163         lprocfs_counter_init(s->ls_stats, LU_SS_CREATED,
1164                              0, "created", "created");
1165         lprocfs_counter_init(s->ls_stats, LU_SS_CACHE_HIT,
1166                              0, "cache_hit", "cache_hit");
1167         lprocfs_counter_init(s->ls_stats, LU_SS_CACHE_MISS,
1168                              0, "cache_miss", "cache_miss");
1169         lprocfs_counter_init(s->ls_stats, LU_SS_CACHE_RACE,
1170                              0, "cache_race", "cache_race");
1171         lprocfs_counter_init(s->ls_stats, LU_SS_CACHE_DEATH_RACE,
1172                              0, "cache_death_race", "cache_death_race");
1173         lprocfs_counter_init(s->ls_stats, LU_SS_LRU_PURGED,
1174                              0, "lru_purged", "lru_purged");
1175
1176         INIT_LIST_HEAD(&s->ls_linkage);
1177         s->ls_top_dev = top;
1178         top->ld_site = s;
1179         lu_device_get(top);
1180         lu_ref_add(&top->ld_reference, "site-top", s);
1181
1182         INIT_LIST_HEAD(&s->ls_ld_linkage);
1183         spin_lock_init(&s->ls_ld_lock);
1184
1185         lu_dev_add_linkage(s, top);
1186
1187         RETURN(0);
1188 }
1189 EXPORT_SYMBOL(lu_site_init);
1190
1191 /**
1192  * Finalize \a s and release its resources.
1193  */
1194 void lu_site_fini(struct lu_site *s)
1195 {
1196         down_write(&lu_sites_guard);
1197         list_del_init(&s->ls_linkage);
1198         up_write(&lu_sites_guard);
1199
1200         percpu_counter_destroy(&s->ls_lru_len_counter);
1201
1202         if (s->ls_bkts) {
1203                 rhashtable_destroy(&s->ls_obj_hash);
1204                 OBD_FREE_PTR_ARRAY_LARGE(s->ls_bkts, s->ls_bkt_cnt);
1205                 s->ls_bkts = NULL;
1206         }
1207
1208         if (s->ls_top_dev != NULL) {
1209                 s->ls_top_dev->ld_site = NULL;
1210                 lu_ref_del(&s->ls_top_dev->ld_reference, "site-top", s);
1211                 lu_device_put(s->ls_top_dev);
1212                 s->ls_top_dev = NULL;
1213         }
1214
1215         if (s->ls_stats != NULL)
1216                 lprocfs_free_stats(&s->ls_stats);
1217 }
1218 EXPORT_SYMBOL(lu_site_fini);
1219
1220 /**
1221  * Called when initialization of stack for this site is completed.
1222  */
1223 int lu_site_init_finish(struct lu_site *s)
1224 {
1225         int result;
1226         down_write(&lu_sites_guard);
1227         result = lu_context_refill(&lu_shrink_env.le_ctx);
1228         if (result == 0)
1229                 list_add(&s->ls_linkage, &lu_sites);
1230         up_write(&lu_sites_guard);
1231         return result;
1232 }
1233 EXPORT_SYMBOL(lu_site_init_finish);
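/*
 * Lifecycle sketch (illustrative; 'site' and 'top_dev' come from the
 * caller's stack-setup code):
 *
 *	rc = lu_site_init(site, top_dev);
 *	if (rc == 0) {
 *		rc = lu_site_init_finish(site);
 *		if (rc != 0)
 *			lu_site_fini(site);
 *	}
 *
 * Only after lu_site_init_finish() succeeds is the site linked into the
 * global lu_sites list and thus visible to the shrinker environment.
 */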
1234
1235 /**
1236  * Acquire additional reference on device \a d
1237  */
1238 void lu_device_get(struct lu_device *d)
1239 {
1240         atomic_inc(&d->ld_ref);
1241 }
1242 EXPORT_SYMBOL(lu_device_get);
1243
1244 /**
1245  * Release reference on device \a d.
1246  */
1247 void lu_device_put(struct lu_device *d)
1248 {
1249         LASSERT(atomic_read(&d->ld_ref) > 0);
1250         atomic_dec(&d->ld_ref);
1251 }
1252 EXPORT_SYMBOL(lu_device_put);
1253
1254 enum { /* Maximal number of TLD (thread-local data) slots. */
1255         LU_CONTEXT_KEY_NR = 40
1256 };
1257 static struct lu_context_key *lu_keys[LU_CONTEXT_KEY_NR] = { NULL, };
1258 static DECLARE_RWSEM(lu_key_initing);
1259
1260 /**
1261  * Initialize device \a d of type \a t.
1262  */
1263 int lu_device_init(struct lu_device *d, struct lu_device_type *t)
1264 {
1265         if (atomic_add_unless(&t->ldt_device_nr, 1, 0) == 0) {
1266                 down_write(&lu_key_initing);
1267                 if (t->ldt_ops->ldto_start &&
1268                     atomic_read(&t->ldt_device_nr) == 0)
1269                         t->ldt_ops->ldto_start(t);
1270                 atomic_inc(&t->ldt_device_nr);
1271                 up_write(&lu_key_initing);
1272         }
1273
1274         memset(d, 0, sizeof *d);
1275         d->ld_type = t;
1276         lu_ref_init(&d->ld_reference);
1277         INIT_LIST_HEAD(&d->ld_linkage);
1278
1279         return 0;
1280 }
1281 EXPORT_SYMBOL(lu_device_init);
1282
1283 /**
1284  * Finalize device \a d.
1285  */
1286 void lu_device_fini(struct lu_device *d)
1287 {
1288         struct lu_device_type *t = d->ld_type;
1289
1290         if (d->ld_obd != NULL) {
1291                 d->ld_obd->obd_lu_dev = NULL;
1292                 d->ld_obd = NULL;
1293         }
1294
1295         lu_ref_fini(&d->ld_reference);
1296         LASSERTF(atomic_read(&d->ld_ref) == 0,
1297                  "Refcount is %u\n", atomic_read(&d->ld_ref));
1298         LASSERT(atomic_read(&t->ldt_device_nr) > 0);
1299
1300         if (atomic_dec_and_test(&t->ldt_device_nr) &&
1301             t->ldt_ops->ldto_stop != NULL)
1302                 t->ldt_ops->ldto_stop(t);
1303 }
1304 EXPORT_SYMBOL(lu_device_fini);
1305
1306 /**
1307  * Initialize object \a o that is part of compound object \a h and was created
1308  * by device \a d.
1309  */
1310 int lu_object_init(struct lu_object *o, struct lu_object_header *h,
1311                    struct lu_device *d)
1312 {
1313         memset(o, 0, sizeof(*o));
1314         o->lo_header = h;
1315         o->lo_dev = d;
1316         lu_device_get(d);
1317         lu_ref_add_at(&d->ld_reference, &o->lo_dev_ref, "lu_object", o);
1318         INIT_LIST_HEAD(&o->lo_linkage);
1319
1320         return 0;
1321 }
1322 EXPORT_SYMBOL(lu_object_init);
1323
1324 /**
1325  * Finalize object and release its resources.
1326  */
1327 void lu_object_fini(struct lu_object *o)
1328 {
1329         struct lu_device *dev = o->lo_dev;
1330
1331         LASSERT(list_empty(&o->lo_linkage));
1332
1333         if (dev != NULL) {
1334                 lu_ref_del_at(&dev->ld_reference, &o->lo_dev_ref,
1335                               "lu_object", o);
1336                 lu_device_put(dev);
1337                 o->lo_dev = NULL;
1338         }
1339 }
1340 EXPORT_SYMBOL(lu_object_fini);
1341
1342 /**
1343  * Add object \a o as the first layer of compound object \a h.
1344  *
1345  * This is typically called by the ->ldo_object_alloc() method of top-level
1346  * device.
1347  */
1348 void lu_object_add_top(struct lu_object_header *h, struct lu_object *o)
1349 {
1350         list_move(&o->lo_linkage, &h->loh_layers);
1351 }
1352 EXPORT_SYMBOL(lu_object_add_top);
1353
1354 /**
1355  * Add object \a o as a layer of compound object, going after \a before.
1356  *
1357  * This is typically called by the ->ldo_object_alloc() method of \a
1358  * before->lo_dev.
1359  */
1360 void lu_object_add(struct lu_object *before, struct lu_object *o)
1361 {
1362         list_move(&o->lo_linkage, &before->lo_linkage);
1363 }
1364 EXPORT_SYMBOL(lu_object_add);
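/*
 * Illustrative sketch (hypothetical 'next' device and 'o' slice): a middle
 * layer's ->loo_object_init() typically asks the device below for a slice
 * and stitches it in with lu_object_add():
 *
 *	below = next->ld_ops->ldo_object_alloc(env, o->lo_header, next);
 *	if (IS_ERR_OR_NULL(below))
 *		return below ? PTR_ERR(below) : -ENOMEM;
 *	lu_object_add(o, below);
 *
 * The top-level device uses lu_object_add_top() instead, right after it
 * allocates the header.
 */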
1365
1366 /**
1367  * Initialize compound object.
1368  */
1369 int lu_object_header_init(struct lu_object_header *h)
1370 {
1371         memset(h, 0, sizeof *h);
1372         atomic_set(&h->loh_ref, 1);
1373         INIT_LIST_HEAD(&h->loh_lru);
1374         INIT_LIST_HEAD(&h->loh_layers);
1375         lu_ref_init(&h->loh_reference);
1376         return 0;
1377 }
1378 EXPORT_SYMBOL(lu_object_header_init);
1379
1380 /**
1381  * Finalize compound object.
1382  */
1383 void lu_object_header_fini(struct lu_object_header *h)
1384 {
1385         LASSERT(list_empty(&h->loh_layers));
1386         LASSERT(list_empty(&h->loh_lru));
1387         lu_ref_fini(&h->loh_reference);
1388 }
1389 EXPORT_SYMBOL(lu_object_header_fini);
1390
1391 /**
1392  * Given a compound object, find its slice, corresponding to the device type
1393  * \a dtype.
1394  */
1395 struct lu_object *lu_object_locate(struct lu_object_header *h,
1396                                    const struct lu_device_type *dtype)
1397 {
1398         struct lu_object *o;
1399
1400         list_for_each_entry(o, &h->loh_layers, lo_linkage) {
1401                 if (o->lo_dev->ld_type == dtype)
1402                         return o;
1403         }
1404         return NULL;
1405 }
1406 EXPORT_SYMBOL(lu_object_locate);
1407
1408 /**
1409  * Finalize and free devices in the device stack.
1410  *
1411  * Finalize device stack by purging object cache, and calling
1412  * lu_device_type_operations::ldto_device_fini() and
1413  * lu_device_type_operations::ldto_device_free() on all devices in the stack.
1414  */
1415 void lu_stack_fini(const struct lu_env *env, struct lu_device *top)
1416 {
1417         struct lu_site   *site = top->ld_site;
1418         struct lu_device *scan;
1419         struct lu_device *next;
1420
1421         lu_site_purge(env, site, ~0);
1422         for (scan = top; scan != NULL; scan = next) {
1423                 next = scan->ld_type->ldt_ops->ldto_device_fini(env, scan);
1424                 lu_ref_del(&scan->ld_reference, "lu-stack", &lu_site_init);
1425                 lu_device_put(scan);
1426         }
1427
1428         /* purge again. */
1429         lu_site_purge(env, site, ~0);
1430
1431         for (scan = top; scan != NULL; scan = next) {
1432                 const struct lu_device_type *ldt = scan->ld_type;
1433
1434                 next = ldt->ldt_ops->ldto_device_free(env, scan);
1435         }
1436 }
1437
1438 /**
1439  * Global counter incremented whenever key is registered, unregistered,
1440  * revived or quiesced. This is used to avoid unnecessary calls to
1441  * lu_context_refill(). No locking is provided, as initialization and shutdown
1442  * are supposed to be externally serialized.
1443  */
1444 static atomic_t key_set_version = ATOMIC_INIT(0);
1445
1446 /**
1447  * Register new key.
1448  */
1449 int lu_context_key_register(struct lu_context_key *key)
1450 {
1451         int result;
1452         unsigned int i;
1453
1454         LASSERT(key->lct_init != NULL);
1455         LASSERT(key->lct_fini != NULL);
1456         LASSERT(key->lct_tags != 0);
1457         LASSERT(key->lct_owner != NULL);
1458
1459         result = -ENFILE;
1460         atomic_set(&key->lct_used, 1);
1461         lu_ref_init(&key->lct_reference);
1462         for (i = 0; i < ARRAY_SIZE(lu_keys); ++i) {
1463                 if (lu_keys[i])
1464                         continue;
1465                 key->lct_index = i;
1466
1467                 if (strncmp("osd_", module_name(key->lct_owner), 4) == 0)
1468                         CFS_RACE_WAIT(OBD_FAIL_OBD_SETUP);
1469
1470                 if (cmpxchg(&lu_keys[i], NULL, key) != NULL)
1471                         continue;
1472
1473                 result = 0;
1474                 atomic_inc(&key_set_version);
1475                 break;
1476         }
1477         if (result) {
1478                 lu_ref_fini(&key->lct_reference);
1479                 atomic_set(&key->lct_used, 0);
1480         }
1481         return result;
1482 }
1483 EXPORT_SYMBOL(lu_context_key_register);
1484
1485 static void key_fini(struct lu_context *ctx, int index)
1486 {
1487         if (ctx->lc_value != NULL && ctx->lc_value[index] != NULL) {
1488                 struct lu_context_key *key;
1489
1490                 key = lu_keys[index];
1491                 LASSERT(key != NULL);
1492                 LASSERT(key->lct_fini != NULL);
1493                 LASSERT(atomic_read(&key->lct_used) > 0);
1494
1495                 key->lct_fini(ctx, key, ctx->lc_value[index]);
1496                 lu_ref_del(&key->lct_reference, "ctx", ctx);
1497                 if (atomic_dec_and_test(&key->lct_used))
1498                         wake_up_var(&key->lct_used);
1499
1500                 LASSERT(key->lct_owner != NULL);
1501                 if ((ctx->lc_tags & LCT_NOREF) == 0) {
1502                         LINVRNT(module_refcount(key->lct_owner) > 0);
1503                         module_put(key->lct_owner);
1504                 }
1505                 ctx->lc_value[index] = NULL;
1506         }
1507 }
1508
1509 /**
1510  * Deregister key.
1511  */
1512 void lu_context_key_degister(struct lu_context_key *key)
1513 {
1514         LASSERT(atomic_read(&key->lct_used) >= 1);
1515         LINVRNT(0 <= key->lct_index && key->lct_index < ARRAY_SIZE(lu_keys));
1516
1517         lu_context_key_quiesce(NULL, key);
1518
1519         key_fini(&lu_shrink_env.le_ctx, key->lct_index);
1520
1521         /*
1522          * Wait until all transient contexts referencing this key have
1523          * run the lu_context_key::lct_fini() method.
1524          */
1525         atomic_dec(&key->lct_used);
1526         wait_var_event(&key->lct_used, atomic_read(&key->lct_used) == 0);
1527
1528         if (!WARN_ON(lu_keys[key->lct_index] == NULL))
1529                 lu_ref_fini(&key->lct_reference);
1530
1531         smp_store_release(&lu_keys[key->lct_index], NULL);
1532 }
1533 EXPORT_SYMBOL(lu_context_key_degister);
1534
1535 /**
1536  * Register a number of keys. This has to be called after all keys have been
1537  * initialized by a call to LU_CONTEXT_KEY_INIT().
1538  */
1539 int lu_context_key_register_many(struct lu_context_key *k, ...)
1540 {
1541         struct lu_context_key *key = k;
1542         va_list args;
1543         int result;
1544
1545         va_start(args, k);
1546         do {
1547                 result = lu_context_key_register(key);
1548                 if (result)
1549                         break;
1550                 key = va_arg(args, struct lu_context_key *);
1551         } while (key != NULL);
1552         va_end(args);
1553
1554         if (result != 0) {
1555                 va_start(args, k);
1556                 while (k != key) {
1557                         lu_context_key_degister(k);
1558                         k = va_arg(args, struct lu_context_key *);
1559                 }
1560                 va_end(args);
1561         }
1562
1563         return result;
1564 }
1565 EXPORT_SYMBOL(lu_context_key_register_many);
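
/*
 * Usage sketch (illustrative): several keys can be registered in one
 * call; the NULL sentinel terminates the list, and on failure the keys
 * already registered are degistered again.  The "foo" keys are
 * hypothetical.
 *
 *	rc = lu_context_key_register_many(&foo_thread_key,
 *					  &foo_session_key, NULL);
 *	if (rc != 0)
 *		return rc;
 */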
1566
1567 /**
1568  * De-register a number of keys. This is a dual to
1569  * lu_context_key_register_many().
1570  */
1571 void lu_context_key_degister_many(struct lu_context_key *k, ...)
1572 {
1573         va_list args;
1574
1575         va_start(args, k);
1576         do {
1577                 lu_context_key_degister(k);
1578                 k = va_arg(args, struct lu_context_key *);
1579         } while (k != NULL);
1580         va_end(args);
1581 }
1582 EXPORT_SYMBOL(lu_context_key_degister_many);
1583
1584 /**
1585  * Revive a number of keys.
1586  */
1587 void lu_context_key_revive_many(struct lu_context_key *k, ...)
1588 {
1589         va_list args;
1590
1591         va_start(args, k);
1592         do {
1593                 lu_context_key_revive(k);
1594                 k = va_arg(args, struct lu_context_key *);
1595         } while (k != NULL);
1596         va_end(args);
1597 }
1598 EXPORT_SYMBOL(lu_context_key_revive_many);
1599
1600 /**
1601  * Quiesce a number of keys.
1602  */
1603 void lu_context_key_quiesce_many(struct lu_device_type *t,
1604                                  struct lu_context_key *k, ...)
1605 {
1606         va_list args;
1607
1608         va_start(args, k);
1609         do {
1610                 lu_context_key_quiesce(t, k);
1611                 k = va_arg(args, struct lu_context_key *);
1612         } while (k != NULL);
1613         va_end(args);
1614 }
1615 EXPORT_SYMBOL(lu_context_key_quiesce_many);
1616
1617 /**
1618  * Return value associated with key \a key in context \a ctx.
1619  */
1620 void *lu_context_key_get(const struct lu_context *ctx,
1621                          const struct lu_context_key *key)
1622 {
1623         LINVRNT(ctx->lc_state == LCS_ENTERED);
1624         LINVRNT(0 <= key->lct_index && key->lct_index < ARRAY_SIZE(lu_keys));
1625         LASSERT(lu_keys[key->lct_index] == key);
1626         return ctx->lc_value[key->lct_index];
1627 }
1628 EXPORT_SYMBOL(lu_context_key_get);
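
/*
 * Usage sketch (illustrative): callers normally wrap lu_context_key_get()
 * in a typed accessor, so per-thread data can be fetched from an
 * environment.  foo_thread_info/foo_thread_key are hypothetical and
 * assume the registration sketch above.
 *
 *	static inline struct foo_thread_info *
 *	foo_env_info(const struct lu_env *env)
 *	{
 *		struct foo_thread_info *info;
 *
 *		info = lu_context_key_get(&env->le_ctx, &foo_thread_key);
 *		LASSERT(info != NULL);
 *		return info;
 *	}
 */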
1629
1630 /**
1631  * List of contexts tagged LCT_REMEMBER, protected by lu_context_remembered_guard.
1632  */
1633 static LIST_HEAD(lu_context_remembered);
1634 static DEFINE_SPINLOCK(lu_context_remembered_guard);
1635
1636 /**
1637  * Destroy \a key in all remembered contexts. This is used to destroy key
1638  * values in "shared" contexts (like service threads), when a module owning
1639  * the key is about to be unloaded.
1640  */
1641 void lu_context_key_quiesce(struct lu_device_type *t,
1642                             struct lu_context_key *key)
1643 {
1644         struct lu_context *ctx;
1645
1646         if (key->lct_tags & LCT_QUIESCENT)
1647                 return;
1648         /*
1649          * The write-lock on lu_key_initing will ensure that any
1650          * keys_fill() which didn't see LCT_QUIESCENT will have
1651          * finished before we call key_fini().
1652          */
1653         down_write(&lu_key_initing);
1654         if (!(key->lct_tags & LCT_QUIESCENT)) {
1655                 if (t == NULL || atomic_read(&t->ldt_device_nr) == 0)
1656                         key->lct_tags |= LCT_QUIESCENT;
1657                 up_write(&lu_key_initing);
1658
1659                 spin_lock(&lu_context_remembered_guard);
1660                 list_for_each_entry(ctx, &lu_context_remembered, lc_remember) {
1661                         spin_until_cond(READ_ONCE(ctx->lc_state) != LCS_LEAVING);
1662                         key_fini(ctx, key->lct_index);
1663                 }
1664                 spin_unlock(&lu_context_remembered_guard);
1665
1666                 return;
1667         }
1668         up_write(&lu_key_initing);
1669 }
1670
1671 void lu_context_key_revive(struct lu_context_key *key)
1672 {
1673         key->lct_tags &= ~LCT_QUIESCENT;
1674         atomic_inc(&key_set_version);
1675 }
1676
1677 static void keys_fini(struct lu_context *ctx)
1678 {
1679         unsigned int i;
1680
1681         if (ctx->lc_value == NULL)
1682                 return;
1683
1684         for (i = 0; i < ARRAY_SIZE(lu_keys); ++i)
1685                 key_fini(ctx, i);
1686
1687         OBD_FREE_PTR_ARRAY(ctx->lc_value, ARRAY_SIZE(lu_keys));
1688         ctx->lc_value = NULL;
1689 }
1690
1691 static int keys_fill(struct lu_context *ctx)
1692 {
1693         unsigned int i;
1694         int rc = 0;
1695
1696         /*
1697          * A serialisation with lu_context_key_quiesce() is needed, to
1698          * ensure we see LCT_QUIESCENT and don't allocate a new value
1699          * after it freed one.  The rwsem provides this.  As down_read()
1700          * does optimistic spinning while the writer is active, this is
1701          * unlikely to ever sleep.
1702          */
1703         down_read(&lu_key_initing);
1704         ctx->lc_version = atomic_read(&key_set_version);
1705
1706         LINVRNT(ctx->lc_value);
1707         for (i = 0; i < ARRAY_SIZE(lu_keys); ++i) {
1708                 struct lu_context_key *key;
1709
1710                 key = lu_keys[i];
1711                 if (!ctx->lc_value[i] && key &&
1712                     (key->lct_tags & ctx->lc_tags) &&
1713                     /*
1714                      * Don't create values for a LCT_QUIESCENT key, as this
1715                      * will pin the module owning the key.
1716                      */
1717                     !(key->lct_tags & LCT_QUIESCENT)) {
1718                         void *value;
1719
1720                         LINVRNT(key->lct_init != NULL);
1721                         LINVRNT(key->lct_index == i);
1722
1723                         LASSERT(key->lct_owner != NULL);
1724                         if (!(ctx->lc_tags & LCT_NOREF) &&
1725                             try_module_get(key->lct_owner) == 0) {
1726                                 /* module is unloading, skip this key */
1727                                 continue;
1728                         }
1729
1730                         value = key->lct_init(ctx, key);
1731                         if (unlikely(IS_ERR(value))) {
1732                                 rc = PTR_ERR(value);
1733                                 break;
1734                         }
1735
1736                         lu_ref_add_atomic(&key->lct_reference, "ctx", ctx);
1737                         atomic_inc(&key->lct_used);
1738                         /*
1739                          * This is the only place in the code where an
1740                          * element of ctx->lc_value[] array is set to non-NULL
1741                          * value.
1742                          */
1743                         ctx->lc_value[i] = value;
1744                         if (key->lct_exit != NULL)
1745                                 ctx->lc_tags |= LCT_HAS_EXIT;
1746                 }
1747         }
1748
1749         up_read(&lu_key_initing);
1750         return rc;
1751 }
1752
1753 static int keys_init(struct lu_context *ctx)
1754 {
1755         OBD_ALLOC_PTR_ARRAY(ctx->lc_value, ARRAY_SIZE(lu_keys));
1756         if (likely(ctx->lc_value != NULL))
1757                 return keys_fill(ctx);
1758
1759         return -ENOMEM;
1760 }
1761
1762 /**
1763  * Initialize context data-structure. Create values for all keys.
1764  */
1765 int lu_context_init(struct lu_context *ctx, __u32 tags)
1766 {
1767         int     rc;
1768
1769         memset(ctx, 0, sizeof *ctx);
1770         ctx->lc_state = LCS_INITIALIZED;
1771         ctx->lc_tags = tags;
1772         if (tags & LCT_REMEMBER) {
1773                 spin_lock(&lu_context_remembered_guard);
1774                 list_add(&ctx->lc_remember, &lu_context_remembered);
1775                 spin_unlock(&lu_context_remembered_guard);
1776         } else {
1777                 INIT_LIST_HEAD(&ctx->lc_remember);
1778         }
1779
1780         rc = keys_init(ctx);
1781         if (rc != 0)
1782                 lu_context_fini(ctx);
1783
1784         return rc;
1785 }
1786 EXPORT_SYMBOL(lu_context_init);
1787
1788 /**
1789  * Finalize context data-structure. Destroy key values.
1790  */
1791 void lu_context_fini(struct lu_context *ctx)
1792 {
1793         LINVRNT(ctx->lc_state == LCS_INITIALIZED || ctx->lc_state == LCS_LEFT);
1794         ctx->lc_state = LCS_FINALIZED;
1795
1796         if ((ctx->lc_tags & LCT_REMEMBER) == 0) {
1797                 LASSERT(list_empty(&ctx->lc_remember));
1798         } else {
1799                 /* could race with key degister */
1800                 spin_lock(&lu_context_remembered_guard);
1801                 list_del_init(&ctx->lc_remember);
1802                 spin_unlock(&lu_context_remembered_guard);
1803         }
1804         keys_fini(ctx);
1805 }
1806 EXPORT_SYMBOL(lu_context_fini);
1807
1808 /**
1809  * Called before entering context.
1810  */
1811 void lu_context_enter(struct lu_context *ctx)
1812 {
1813         LINVRNT(ctx->lc_state == LCS_INITIALIZED || ctx->lc_state == LCS_LEFT);
1814         ctx->lc_state = LCS_ENTERED;
1815 }
1816 EXPORT_SYMBOL(lu_context_enter);
1817
1818 /**
1819  * Called after exiting from \a ctx
1820  */
1821 void lu_context_exit(struct lu_context *ctx)
1822 {
1823         unsigned int i;
1824
1825         LINVRNT(ctx->lc_state == LCS_ENTERED);
1826         /*
1827          * Disable preemption to ensure we get a warning if
1828          * any lct_exit method ever tries to sleep.  That would hurt
1829          * lu_context_key_quiesce(), which spins waiting for us.
1830          * This also ensures we aren't preempted while the state
1831          * is LCS_LEAVING, as that too would cause problems for
1832          * lu_context_key_quiesce().
1833          */
1834         preempt_disable();
1835         /*
1836          * Ensure lu_context_key_quiesce() sees LCS_LEAVING
1837          * or we see LCT_QUIESCENT
1838          */
1839         smp_store_mb(ctx->lc_state, LCS_LEAVING);
1840         if (ctx->lc_tags & LCT_HAS_EXIT && ctx->lc_value) {
1841                 for (i = 0; i < ARRAY_SIZE(lu_keys); ++i) {
1842                         struct lu_context_key *key;
1843
1844                         key = lu_keys[i];
1845                         if (ctx->lc_value[i] &&
1846                             !(key->lct_tags & LCT_QUIESCENT) &&
1847                             key->lct_exit)
1848                                 key->lct_exit(ctx, key, ctx->lc_value[i]);
1849                 }
1850         }
1851
1852         smp_store_release(&ctx->lc_state, LCS_LEFT);
1853         preempt_enable();
1854 }
1855 EXPORT_SYMBOL(lu_context_exit);
1856
1857 /**
1858  * Allocate for context all missing keys that were registered after context
1859  * creation. key_set_version is only changed in rare cases when modules
1860  * are loaded and removed.
1861  */
1862 int lu_context_refill(struct lu_context *ctx)
1863 {
1864         if (likely(ctx->lc_version == atomic_read(&key_set_version)))
1865                 return 0;
1866
1867         return keys_fill(ctx);
1868 }
1869
1870 /**
1871  * lu_context_tags_default/lu_session_tags_default are updated when new
1872  * types of obd device are added. Currently this is used only on the
1873  * client side, specifically by the echo device client; for other stacks
1874  * (like ptlrpc service threads) the contexts are predefined when the
1875  * lu_device type is registered, during the module probe phase.
1876  */
1877 u32 lu_context_tags_default = LCT_CL_THREAD;
1878 u32 lu_session_tags_default = LCT_SESSION;
1879
1880 void lu_context_tags_update(__u32 tags)
1881 {
1882         spin_lock(&lu_context_remembered_guard);
1883         lu_context_tags_default |= tags;
1884         atomic_inc(&key_set_version);
1885         spin_unlock(&lu_context_remembered_guard);
1886 }
1887 EXPORT_SYMBOL(lu_context_tags_update);
1888
1889 void lu_context_tags_clear(__u32 tags)
1890 {
1891         spin_lock(&lu_context_remembered_guard);
1892         lu_context_tags_default &= ~tags;
1893         atomic_inc(&key_set_version);
1894         spin_unlock(&lu_context_remembered_guard);
1895 }
1896 EXPORT_SYMBOL(lu_context_tags_clear);
1897
1898 void lu_session_tags_update(__u32 tags)
1899 {
1900         spin_lock(&lu_context_remembered_guard);
1901         lu_session_tags_default |= tags;
1902         atomic_inc(&key_set_version);
1903         spin_unlock(&lu_context_remembered_guard);
1904 }
1905 EXPORT_SYMBOL(lu_session_tags_update);
1906
1907 void lu_session_tags_clear(__u32 tags)
1908 {
1909         spin_lock(&lu_context_remembered_guard);
1910         lu_session_tags_default &= ~tags;
1911         atomic_inc(&key_set_version);
1912         spin_unlock(&lu_context_remembered_guard);
1913 }
1914 EXPORT_SYMBOL(lu_session_tags_clear);
1915
1916 int lu_env_init(struct lu_env *env, __u32 tags)
1917 {
1918         int result;
1919
1920         env->le_ses = NULL;
1921         result = lu_context_init(&env->le_ctx, tags);
1922         if (likely(result == 0))
1923                 lu_context_enter(&env->le_ctx);
1924         return result;
1925 }
1926 EXPORT_SYMBOL(lu_env_init);
1927
1928 void lu_env_fini(struct lu_env *env)
1929 {
1930         lu_context_exit(&env->le_ctx);
1931         lu_context_fini(&env->le_ctx);
1932         env->le_ses = NULL;
1933 }
1934 EXPORT_SYMBOL(lu_env_fini);
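
/*
 * Usage sketch (illustrative): the typical environment lifecycle on a
 * private thread.  lu_env_init() enters the context on success, so only
 * lu_env_fini() is needed afterwards; LCT_CL_THREAD is one of the tag
 * sets used elsewhere in this file.
 *
 *	struct lu_env env;
 *	int rc;
 *
 *	rc = lu_env_init(&env, LCT_CL_THREAD);
 *	if (rc != 0)
 *		return rc;
 *	info = foo_env_info(&env);	// see the accessor sketch above
 *	...
 *	lu_env_fini(&env);
 */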
1935
1936 int lu_env_refill(struct lu_env *env)
1937 {
1938         int result;
1939
1940         result = lu_context_refill(&env->le_ctx);
1941         if (result == 0 && env->le_ses != NULL)
1942                 result = lu_context_refill(env->le_ses);
1943         return result;
1944 }
1945 EXPORT_SYMBOL(lu_env_refill);
1946
1947 /**
1948  * Currently this API is used only by the echo client, because the
1949  * echo client and the normal Lustre client share the same cl_env
1950  * cache. The echo client therefore needs to refresh the environment
1951  * after getting one from the cache, especially when the normal client
1952  * and the echo client co-exist on the same node.
1953  */
1954 int lu_env_refill_by_tags(struct lu_env *env, __u32 ctags,
1955                           __u32 stags)
1956 {
1957         int    result;
1958
1959         if ((env->le_ctx.lc_tags & ctags) != ctags) {
1960                 env->le_ctx.lc_version = 0;
1961                 env->le_ctx.lc_tags |= ctags;
1962         }
1963
1964         if (env->le_ses && (env->le_ses->lc_tags & stags) != stags) {
1965                 env->le_ses->lc_version = 0;
1966                 env->le_ses->lc_tags |= stags;
1967         }
1968
1969         result = lu_env_refill(env);
1970
1971         return result;
1972 }
1973 EXPORT_SYMBOL(lu_env_refill_by_tags);
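
/*
 * Usage sketch (illustrative): after taking an environment from a shared
 * cache, a caller needing extra tags widens both tag sets and refills;
 * the tag values here simply mirror the defaults declared above.
 *
 *	rc = lu_env_refill_by_tags(env, LCT_CL_THREAD, LCT_SESSION);
 *	if (rc != 0)
 *		return rc;
 */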
1974
1975
1976 struct lu_env_item {
1977         struct task_struct *lei_task;   /* rhashtable key */
1978         struct rhash_head lei_linkage;
1979         struct lu_env *lei_env;
1980         struct rcu_head lei_rcu_head;
1981 };
1982
1983 static const struct rhashtable_params lu_env_rhash_params = {
1984         .key_len     = sizeof(struct task_struct *),
1985         .key_offset  = offsetof(struct lu_env_item, lei_task),
1986         .head_offset = offsetof(struct lu_env_item, lei_linkage),
1987 };
1988
1989 struct rhashtable lu_env_rhash;
1990
1991 struct lu_env_percpu {
1992         struct task_struct *lep_task;
1993         struct lu_env *lep_env ____cacheline_aligned_in_smp;
1994 };
1995
1996 static struct lu_env_percpu lu_env_percpu[NR_CPUS];
1997
1998 int lu_env_add_task(struct lu_env *env, struct task_struct *task)
1999 {
2000         struct lu_env_item *lei, *old;
2001
2002         LASSERT(env);
2003
2004         OBD_ALLOC_PTR(lei);
2005         if (!lei)
2006                 return -ENOMEM;
2007
2008         lei->lei_task = task;
2009         lei->lei_env = env;
2010
2011         old = rhashtable_lookup_get_insert_fast(&lu_env_rhash,
2012                                                 &lei->lei_linkage,
2013                                                 lu_env_rhash_params);
2014         LASSERT(!old);
2015
2016         return 0;
2017 }
2018 EXPORT_SYMBOL(lu_env_add_task);
2019
2020 int lu_env_add(struct lu_env *env)
2021 {
2022         return lu_env_add_task(env, current);
2023 }
2024 EXPORT_SYMBOL(lu_env_add);
2025
2026 static void lu_env_item_free(struct rcu_head *head)
2027 {
2028         struct lu_env_item *lei;
2029
2030         lei = container_of(head, struct lu_env_item, lei_rcu_head);
2031         OBD_FREE_PTR(lei);
2032 }
2033
2034 void lu_env_remove(struct lu_env *env)
2035 {
2036         struct lu_env_item *lei;
2037         const void *task = current;
2038         int i;
2039
2040         for_each_possible_cpu(i) {
2041                 if (lu_env_percpu[i].lep_env == env) {
2042                         LASSERT(lu_env_percpu[i].lep_task == task);
2043                         lu_env_percpu[i].lep_task = NULL;
2044                         lu_env_percpu[i].lep_env = NULL;
2045                 }
2046         }
2047
2048         /* The RCU read lock is not taken here because the key
2049          * used is the current task_struct. This implies that each
2050          * object is only removed by the owning thread, so there
2051          * can never be a race on a particular object.
2052          */
2053         lei = rhashtable_lookup_fast(&lu_env_rhash, &task,
2054                                      lu_env_rhash_params);
2055         if (lei && rhashtable_remove_fast(&lu_env_rhash, &lei->lei_linkage,
2056                                           lu_env_rhash_params) == 0)
2057                 call_rcu(&lei->lei_rcu_head, lu_env_item_free);
2058 }
2059 EXPORT_SYMBOL(lu_env_remove);
2060
2061 struct lu_env *lu_env_find(void)
2062 {
2063         struct lu_env *env = NULL;
2064         struct lu_env_item *lei;
2065         const void *task = current;
2066         int i = get_cpu();
2067
2068         if (lu_env_percpu[i].lep_task == current) {
2069                 env = lu_env_percpu[i].lep_env;
2070                 put_cpu();
2071                 LASSERT(env);
2072                 return env;
2073         }
2074
2075         lei = rhashtable_lookup_fast(&lu_env_rhash, &task,
2076                                      lu_env_rhash_params);
2077         if (lei) {
2078                 env = lei->lei_env;
2079                 lu_env_percpu[i].lep_task = current;
2080                 lu_env_percpu[i].lep_env = env;
2081         }
2082         put_cpu();
2083
2084         return env;
2085 }
2086 EXPORT_SYMBOL(lu_env_find);
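
/*
 * Usage sketch (illustrative): lu_env_add()/lu_env_remove() let a thread
 * publish its environment so that deeper layers, which have no way to be
 * passed an env pointer, can recover it with lu_env_find().
 *
 *	rc = lu_env_add(env);
 *	if (rc != 0)
 *		return rc;
 *	... call into code that may use lu_env_find() on this thread ...
 *	lu_env_remove(env);
 */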
2087
2088 static struct shrinker *lu_site_shrinker;
2089
2090 typedef struct lu_site_stats {
2091         unsigned        lss_populated;
2092         unsigned        lss_max_search;
2093         unsigned        lss_total;
2094         unsigned        lss_busy;
2095 } lu_site_stats_t;
2096
2097 static void lu_site_stats_get(const struct lu_site *s,
2098                               lu_site_stats_t *stats)
2099 {
2100         int cnt = atomic_read(&s->ls_obj_hash.nelems);
2101         /*
2102          * percpu_counter_sum_positive() won't accept a const pointer
2103          * as it does modify the struct by taking a spinlock
2104          */
2105         struct lu_site *s2 = (struct lu_site *)s;
2106
2107         stats->lss_busy += cnt -
2108                 percpu_counter_sum_positive(&s2->ls_lru_len_counter);
2109
2110         stats->lss_total += cnt;
2111         stats->lss_max_search = 0;
2112         stats->lss_populated = 0;
2113 }
2114
2115
2116 /*
2117  * lu_cache_shrink_count() returns an approximate number of cached objects
2118  * that can be freed by shrink_slab(). A counter, which tracks the
2119  * number of items in the site's lru, is maintained in a percpu_counter
2120  * for each site. The percpu values are incremented and decremented as
2121  * objects are added or removed from the lru. The percpu values are summed
2122  * and saved whenever a percpu value exceeds a threshold. Thus the saved,
2123  * summed value at any given time may not accurately reflect the current
2124  * lru length. But this value is sufficiently accurate for the needs of
2125  * a shrinker.
2126  *
2127  * Using a per cpu counter is a compromise solution to concurrent access:
2128  * lu_object_put() can update the counter without locking the site and
2129  * lu_cache_shrink_count() can sum the counters without locking each
2130  * ls_obj_hash bucket.
2131  */
2132 static unsigned long lu_cache_shrink_count(struct shrinker *sk,
2133                                            struct shrink_control *sc)
2134 {
2135         struct lu_site *s;
2136         struct lu_site *tmp;
2137         unsigned long cached = 0;
2138
2139         if (!(sc->gfp_mask & __GFP_FS))
2140                 return 0;
2141
2142         down_read(&lu_sites_guard);
2143         list_for_each_entry_safe(s, tmp, &lu_sites, ls_linkage)
2144                 cached += percpu_counter_read_positive(&s->ls_lru_len_counter);
2145         up_read(&lu_sites_guard);
2146
2147         cached = (cached / 100) * sysctl_vfs_cache_pressure;
2148         CDEBUG(D_INODE, "%ld objects cached, cache pressure %d\n",
2149                cached, sysctl_vfs_cache_pressure);
2150
2151         return cached;
2152 }
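
/*
 * Sketch (illustrative) of the percpu_counter trade-off described above:
 * updates touch only a local per-cpu slot until a batch threshold is
 * crossed, while readers choose between a cheap, possibly stale value
 * and an exact, locked sum.
 *
 *	percpu_counter_add(&s->ls_lru_len_counter, 1);	// object onto LRU
 *	percpu_counter_add(&s->ls_lru_len_counter, -1);	// object off LRU
 *	len = percpu_counter_read_positive(&s->ls_lru_len_counter);
 *	len = percpu_counter_sum_positive(&s->ls_lru_len_counter);
 */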
2153
2154 static unsigned long lu_cache_shrink_scan(struct shrinker *sk,
2155                                           struct shrink_control *sc)
2156 {
2157         struct lu_site *s;
2158         struct lu_site *tmp;
2159         unsigned long remain = sc->nr_to_scan;
2160         LIST_HEAD(splice);
2161
2162         if (!(sc->gfp_mask & __GFP_FS))
2163                 /* We must not take the lu_sites_guard lock when
2164                  * __GFP_FS is *not* set because of the deadlock
2165                  * possibility detailed in the comment below. Additionally,
2166                  * since we cannot determine the number of
2167                  * objects in the cache without taking this
2168                  * lock, we're in a particularly tough spot. As
2169                  * a result, we'll just lie and say our cache is
2170                  * empty. This _should_ be ok, as we can't
2171                  * reclaim objects when __GFP_FS is *not* set
2172                  * anyways.
2173                  */
2174                 return SHRINK_STOP;
2175
2176         down_write(&lu_sites_guard);
2177         list_for_each_entry_safe(s, tmp, &lu_sites, ls_linkage) {
2178                 remain = lu_site_purge(&lu_shrink_env, s, remain);
2179                 /*
2180                  * Move just shrunk site to the tail of site list to
2181                  * assure shrinking fairness.
2182                  */
2183                 list_move_tail(&s->ls_linkage, &splice);
2184         }
2185         list_splice(&splice, lu_sites.prev);
2186         up_write(&lu_sites_guard);
2187
2188         return sc->nr_to_scan - remain;
2189 }
2190
2191 #ifndef HAVE_SHRINKER_COUNT
2192 /*
2193  * There exists a potential lock inversion deadlock scenario when using
2194  * Lustre on top of ZFS. This occurs between one of ZFS's
2195  * buf_hash_table.ht_lock's, and Lustre's lu_sites_guard lock. Essentially,
2196  * thread A will take the lu_sites_guard lock and sleep on the ht_lock,
2197  * while thread B will take the ht_lock and sleep on the lu_sites_guard
2198  * lock. Obviously neither thread will wake and drop their respective hold
2199  * on their lock.
2200  *
2201  * To prevent this from happening we must ensure the lu_sites_guard lock is
2202  * not taken while down this code path. ZFS reliably does not set the
2203  * __GFP_FS bit in its code paths, so this can be used to determine if it
2204  * is safe to take the lu_sites_guard lock.
2205  *
2206  * Ideally we should accurately return the remaining number of cached
2207  * objects without taking the lu_sites_guard lock, but this is not
2208  * possible in the current implementation.
2209  */
2210 static int lu_cache_shrink(SHRINKER_ARGS(sc, nr_to_scan, gfp_mask))
2211 {
2212         int cached = 0;
2213         struct shrink_control scv = {
2214                  .nr_to_scan = shrink_param(sc, nr_to_scan),
2215                  .gfp_mask   = shrink_param(sc, gfp_mask)
2216         };
2217
2218         CDEBUG(D_INODE, "Shrink %lu objects\n", scv.nr_to_scan);
2219
2220         if (scv.nr_to_scan != 0)
2221                 lu_cache_shrink_scan(shrinker, &scv);
2222
2223         cached = lu_cache_shrink_count(shrinker, &scv);
2224         return cached;
2225 }
2226
2227 #endif /* HAVE_SHRINKER_COUNT */
2228
2229
2230 /*
2231  * Debugging stuff.
2232  */
2233
2234 /**
2235  * Environment to be used in debugger, contains all tags.
2236  */
2237 static struct lu_env lu_debugging_env;
2238
2239 /**
2240  * Debugging printer function using printk().
2241  */
2242 int lu_printk_printer(const struct lu_env *env,
2243                       void *unused, const char *format, ...)
2244 {
2245         va_list args;
2246
2247         va_start(args, format);
2248         vprintk(format, args);
2249         va_end(args);
2250         return 0;
2251 }
2252
2253 int lu_debugging_setup(void)
2254 {
2255         return lu_env_init(&lu_debugging_env, ~0);
2256 }
2257
2258 void lu_context_keys_dump(void)
2259 {
2260         unsigned int i;
2261
2262         for (i = 0; i < ARRAY_SIZE(lu_keys); ++i) {
2263                 struct lu_context_key *key;
2264
2265                 key = lu_keys[i];
2266                 if (key != NULL) {
2267                         CERROR("[%d]: %p %x (%p,%p,%p) %d %d \"%s\"@%p\n",
2268                                i, key, key->lct_tags,
2269                                key->lct_init, key->lct_fini, key->lct_exit,
2270                                key->lct_index, atomic_read(&key->lct_used),
2271                                key->lct_owner ? key->lct_owner->name : "",
2272                                key->lct_owner);
2273                         lu_ref_print(&key->lct_reference);
2274                 }
2275         }
2276 }
2277
2278 /**
2279  * Initialization of global lu_* data.
2280  */
2281 int lu_global_init(void)
2282 {
2283         int result;
2284         DEF_SHRINKER_VAR(shvar, lu_cache_shrink,
2285                          lu_cache_shrink_count, lu_cache_shrink_scan);
2286
2287         CDEBUG(D_INFO, "Lustre LU module (%p).\n", &lu_keys);
2288
2289         result = lu_ref_global_init();
2290         if (result != 0)
2291                 return result;
2292
2293         LU_CONTEXT_KEY_INIT(&lu_global_key);
2294         result = lu_context_key_register(&lu_global_key);
2295         if (result != 0)
2296                 return result;
2297
2298         /*
2299          * At this level, we don't know what tags are needed, so allocate them
2300          * conservatively. This should not be too bad, because this
2301          * environment is global.
2302          */
2303         down_write(&lu_sites_guard);
2304         result = lu_env_init(&lu_shrink_env, LCT_SHRINKER);
2305         up_write(&lu_sites_guard);
2306         if (result != 0)
2307                 return result;
2308
2309         /*
2310          * Seeks estimation: 3 seeks to read a record from the OI, one to
2311          * read the inode, one for the EA. Unfortunately, setting this to a
2312          * high value results in the lu_object/inode cache consuming all memory.
2313          */
2314         lu_site_shrinker = set_shrinker(DEFAULT_SEEKS, &shvar);
2315         if (lu_site_shrinker == NULL)
2316                 return -ENOMEM;
2317
2318         result = rhashtable_init(&lu_env_rhash, &lu_env_rhash_params);
2319
2320         return result;
2321 }
2322
2323 /**
2324  * Dual to lu_global_init().
2325  */
2326 void lu_global_fini(void)
2327 {
2328         if (lu_site_shrinker != NULL) {
2329                 remove_shrinker(lu_site_shrinker);
2330                 lu_site_shrinker = NULL;
2331         }
2332
2333         lu_context_key_degister(&lu_global_key);
2334
2335         /*
2336          * Tear shrinker environment down _after_ de-registering
2337          * lu_global_key, because the latter has a value in the former.
2338          */
2339         down_write(&lu_sites_guard);
2340         lu_env_fini(&lu_shrink_env);
2341         up_write(&lu_sites_guard);
2342
2343         rhashtable_destroy(&lu_env_rhash);
2344
2345         lu_ref_global_fini();
2346 }
2347
2348 static __u32 ls_stats_read(struct lprocfs_stats *stats, int idx)
2349 {
2350 #ifdef CONFIG_PROC_FS
2351         struct lprocfs_counter ret;
2352
2353         lprocfs_stats_collect(stats, idx, &ret);
2354         return (__u32)ret.lc_count;
2355 #else
2356         return 0;
2357 #endif
2358 }
2359
2360 /**
2361  * Output site statistical counters into a buffer. Suitable for
2362  * lprocfs_rd_*()-style functions.
2363  */
2364 int lu_site_stats_seq_print(const struct lu_site *s, struct seq_file *m)
2365 {
2366         const struct bucket_table *tbl;
2367         lu_site_stats_t stats;
2368         unsigned int chains;
2369
2370         memset(&stats, 0, sizeof(stats));
2371         lu_site_stats_get(s, &stats);
2372
2373         rcu_read_lock();
2374         tbl = rht_dereference_rcu(s->ls_obj_hash.tbl,
2375                                   &((struct lu_site *)s)->ls_obj_hash);
2376         chains = tbl->size;
2377         rcu_read_unlock();
2378         seq_printf(m, "%d/%d %d/%u %d %d %d %d %d %d %d\n",
2379                    stats.lss_busy,
2380                    stats.lss_total,
2381                    stats.lss_populated,
2382                    chains,
2383                    stats.lss_max_search,
2384                    ls_stats_read(s->ls_stats, LU_SS_CREATED),
2385                    ls_stats_read(s->ls_stats, LU_SS_CACHE_HIT),
2386                    ls_stats_read(s->ls_stats, LU_SS_CACHE_MISS),
2387                    ls_stats_read(s->ls_stats, LU_SS_CACHE_RACE),
2388                    ls_stats_read(s->ls_stats, LU_SS_CACHE_DEATH_RACE),
2389                    ls_stats_read(s->ls_stats, LU_SS_LRU_PURGED));
2390         return 0;
2391 }
2392 EXPORT_SYMBOL(lu_site_stats_seq_print);
2393
2394 /**
2395  * Helper function to initialize a number of kmem slab caches at once.
2396  */
2397 int lu_kmem_init(struct lu_kmem_descr *caches)
2398 {
2399         int result;
2400         struct lu_kmem_descr *iter = caches;
2401
2402         for (result = 0; iter->ckd_cache != NULL; ++iter) {
2403                 *iter->ckd_cache = kmem_cache_create(iter->ckd_name,
2404                                                      iter->ckd_size,
2405                                                      0, 0, NULL);
2406                 if (*iter->ckd_cache == NULL) {
2407                         result = -ENOMEM;
2408                         /* free all previously allocated caches */
2409                         lu_kmem_fini(caches);
2410                         break;
2411                 }
2412         }
2413         return result;
2414 }
2415 EXPORT_SYMBOL(lu_kmem_init);
2416
2417 /**
2418  * Helper function to finalize a number of kmem slab caches at once. Dual to
2419  * lu_kmem_init().
2420  */
2421 void lu_kmem_fini(struct lu_kmem_descr *caches)
2422 {
2423         for (; caches->ckd_cache != NULL; ++caches) {
2424                 if (*caches->ckd_cache != NULL) {
2425                         kmem_cache_destroy(*caches->ckd_cache);
2426                         *caches->ckd_cache = NULL;
2427                 }
2428         }
2429 }
2430 EXPORT_SYMBOL(lu_kmem_fini);
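
/*
 * Usage sketch (illustrative): callers describe their caches in a
 * NULL-terminated array of lu_kmem_descr and set them up or tear them
 * down together.  foo_object_kmem and struct foo_object are hypothetical.
 *
 *	static struct kmem_cache *foo_object_kmem;
 *
 *	static struct lu_kmem_descr foo_caches[] = {
 *		{
 *			.ckd_cache = &foo_object_kmem,
 *			.ckd_name  = "foo_object_kmem",
 *			.ckd_size  = sizeof(struct foo_object)
 *		},
 *		{
 *			.ckd_cache = NULL
 *		}
 *	};
 *
 *	rc = lu_kmem_init(foo_caches);
 *	...
 *	lu_kmem_fini(foo_caches);
 */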
2431
2432 /**
2433  * Temporary solution to allow assigning a FID in ->do_create(),
2434  * until we have fully functional OST FIDs.
2435  */
2436 void lu_object_assign_fid(const struct lu_env *env, struct lu_object *o,
2437                           const struct lu_fid *fid)
2438 {
2439         struct lu_site          *s = o->lo_dev->ld_site;
2440         struct lu_fid           *old = &o->lo_header->loh_fid;
2441         int rc;
2442
2443         LASSERT(fid_is_zero(old));
2444         *old = *fid;
2445 try_again:
2446         rc = rhashtable_lookup_insert_fast(&s->ls_obj_hash,
2447                                            &o->lo_header->loh_hash,
2448                                            obj_hash_params);
2449         /* supposed to be unique */
2450         LASSERT(rc != -EEXIST);
2451         /* handle hash table resizing */
2452         if (rc == -ENOMEM) {
2453                 msleep(20);
2454                 goto try_again;
2455         }
2456         /* trim the hash if it is growing too big */
2457         lu_object_limit(env, o->lo_dev);
2458         if (rc == -E2BIG)
2459                 goto try_again;
2460
2461         LASSERTF(rc == 0, "failed hashtable insertion: rc = %d\n", rc);
2462 }
2463 EXPORT_SYMBOL(lu_object_assign_fid);
2464
2465 /**
2466  * Allocate an object with a zero (not yet assigned) FID.
2467  * XXX: temporary solution to allow assigning a FID in ->do_create(),
2468  *      until we have fully functional OST FIDs.
2469  */
2470 struct lu_object *lu_object_anon(const struct lu_env *env,
2471                                  struct lu_device *dev,
2472                                  const struct lu_object_conf *conf)
2473 {
2474         struct lu_fid fid;
2475         struct lu_object *o;
2476         int rc;
2477
2478         fid_zero(&fid);
2479         o = lu_object_alloc(env, dev, &fid);
2480         if (!IS_ERR(o)) {
2481                 rc = lu_object_start(env, dev, o, conf);
2482                 if (rc) {
2483                         lu_object_free(env, o);
2484                         return ERR_PTR(rc);
2485                 }
2486         }
2487
2488         return o;
2489 }
2490 EXPORT_SYMBOL(lu_object_anon);
2491
2492 struct lu_buf LU_BUF_NULL = {
2493         .lb_buf = NULL,
2494         .lb_len = 0
2495 };
2496 EXPORT_SYMBOL(LU_BUF_NULL);
2497
2498 void lu_buf_free(struct lu_buf *buf)
2499 {
2500         LASSERT(buf);
2501         if (buf->lb_buf) {
2502                 LASSERT(buf->lb_len > 0);
2503                 OBD_FREE_LARGE(buf->lb_buf, buf->lb_len);
2504                 buf->lb_buf = NULL;
2505                 buf->lb_len = 0;
2506         }
2507 }
2508 EXPORT_SYMBOL(lu_buf_free);
2509
2510 void lu_buf_alloc(struct lu_buf *buf, size_t size)
2511 {
2512         LASSERT(buf);
2513         LASSERT(buf->lb_buf == NULL);
2514         LASSERT(buf->lb_len == 0);
2515         OBD_ALLOC_LARGE(buf->lb_buf, size);
2516         if (likely(buf->lb_buf))
2517                 buf->lb_len = size;
2518 }
2519 EXPORT_SYMBOL(lu_buf_alloc);
2520
2521 void lu_buf_realloc(struct lu_buf *buf, size_t size)
2522 {
2523         lu_buf_free(buf);
2524         lu_buf_alloc(buf, size);
2525 }
2526 EXPORT_SYMBOL(lu_buf_realloc);
2527
2528 struct lu_buf *lu_buf_check_and_alloc(struct lu_buf *buf, size_t len)
2529 {
2530         if (buf->lb_buf == NULL && buf->lb_len == 0)
2531                 lu_buf_alloc(buf, len);
2532
2533         if ((len > buf->lb_len) && (buf->lb_buf != NULL))
2534                 lu_buf_realloc(buf, len);
2535
2536         return buf;
2537 }
2538 EXPORT_SYMBOL(lu_buf_check_and_alloc);
2539
2540 /**
2541  * Increase the size of \a buf.
2542  * Preserves the old data in the buffer;
2543  * the old buffer remains unchanged on error.
2544  * \retval 0 or -ENOMEM
2545  */
2546 int lu_buf_check_and_grow(struct lu_buf *buf, size_t len)
2547 {
2548         char *ptr;
2549
2550         if (len <= buf->lb_len)
2551                 return 0;
2552
2553         OBD_ALLOC_LARGE(ptr, len);
2554         if (ptr == NULL)
2555                 return -ENOMEM;
2556
2557         /* Free the old buf */
2558         if (buf->lb_buf != NULL) {
2559                 memcpy(ptr, buf->lb_buf, buf->lb_len);
2560                 OBD_FREE_LARGE(buf->lb_buf, buf->lb_len);
2561         }
2562
2563         buf->lb_buf = ptr;
2564         buf->lb_len = len;
2565         return 0;
2566 }
2567 EXPORT_SYMBOL(lu_buf_check_and_grow);
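
/*
 * Usage sketch (illustrative): a lu_buf starts out as LU_BUF_NULL and is
 * grown on demand.  lu_buf_check_and_grow() preserves existing contents,
 * while lu_buf_check_and_alloc() may reallocate without preserving them.
 *
 *	struct lu_buf buf = LU_BUF_NULL;
 *	int rc;
 *
 *	rc = lu_buf_check_and_grow(&buf, 4096);
 *	if (rc != 0)
 *		return rc;
 *	... fill buf.lb_buf, up to buf.lb_len bytes ...
 *	rc = lu_buf_check_and_grow(&buf, 8192);	// first 4096 bytes kept
 *	...
 *	lu_buf_free(&buf);
 */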