/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * Copyright (C) 2006 Cluster File Systems, Inc.
 *
 * This file is part of Lustre, http://www.lustre.org.
 *
 * Lustre is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * Lustre is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with Lustre; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#ifndef __LUSTRE_LU_OBJECT_H
#define __LUSTRE_LU_OBJECT_H

#include <lustre/lustre_idl.h>
#include <libcfs/list.h>
#include <libcfs/kp30.h>

/*
 * Layered objects support for CMD3/C5.
 */

struct proc_dir_entry;
/*
 * lu_* data-types represent server-side entities shared by data and meta-data
 * stacks.
 *
 * Design goals:
 *
 * 0. support for layering.
 *
 *     Server side object is split into layers, one per device in the
 *     corresponding device stack. Individual layer is represented by struct
 *     lu_object. Compound layered object --- by struct lu_object_header. Most
 *     interface functions take lu_object as an argument and operate on the
 *     whole compound object. This decision was made due to the following
 *     reasons:
 *
 *        - it's envisaged that lu_object will be used much more often than
 *          lu_object_header;
 *
 *        - we want lower (non-top) layers to be able to initiate operations
 *          on the whole object.
 *
 *     Generic code supports layering more complex than simple stacking, e.g.,
 *     it is possible that at some layer object "spawns" multiple sub-objects
 *     on the lower layer.
 *
 * 1. fid-based identification.
 *
 *     Compound object is uniquely identified by its fid. Objects are indexed
 *     by their fids (hash table is used for index).
 *
 * 2. caching and life-cycle management.
 *
 *     Object's life-time is controlled by reference counting. When reference
 *     count drops to 0, object is returned to cache. Cached objects still
 *     retain their identity (i.e., fid), and can be recovered from cache.
 *
 *     Objects are kept in the global LRU list, and lu_site_purge() function
 *     can be used to reclaim given number of unused objects from the tail of
 *     the LRU.
 *
 * 3. avoiding recursion.
 *
 *     Generic code tries to replace recursion through layers by iterations
 *     where possible. To further reduce stack consumption, data, when
 *     practically possible, are allocated through the lu_context_key
 *     interface rather than on the stack.
 */
struct lu_object_header;

/*
 * Operations common for data and meta-data devices.
 */
struct lu_device_operations {
        /*
         * Object creation protocol.
         *
         * Due to the design goal of avoiding recursion, object creation (see
         * lu_object_alloc()) is somewhat involved:
         *
         *  - first, the ->ldo_object_alloc() method of the top-level device
         *    in the stack is called. It should allocate the top-level object
         *    (including lu_object_header), but without any lower-layer
         *    sub-objects;
         *
         *  - then lu_object_alloc() sets the fid in the header of the newly
         *    created object;
         *
         *  - then ->loo_object_init() (a method from struct
         *    lu_object_operations) is called. It has to allocate lower-layer
         *    object(s). To do this, ->loo_object_init() calls
         *    ldo_object_alloc() of the lower-layer device(s);
         *
         *  - for all new objects allocated by ->loo_object_init() (and
         *    inserted into the object stack), ->loo_object_init() is called
         *    again repeatedly, until no new objects are created (see the
         *    sketch following the ->ldo_object_alloc() prototype below).
         *
         * Allocate an object for the given device (without lower-layer
         * parts). This is called by ->loo_object_init() from the parent
         * layer, and should set up at least the ->lo_dev and ->lo_ops fields
         * of the resulting lu_object.
         *
         * postcondition: ergo(!IS_ERR(result), result->lo_dev == d &&
         *                                      result->lo_ops != NULL);
         */
        struct lu_object *(*ldo_object_alloc)(const struct lu_env *env,
                                              const struct lu_object_header *h,
                                              struct lu_device *d);
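        /*
         * Illustrative sketch of the allocation protocol described above.
         * This is an assumed simplification of what lu_object_alloc() does;
         * the real iteration, locking and error handling live elsewhere:
         *
         *      top = d->ld_ops->ldo_object_alloc(env, NULL, d);
         *      top->lo_header->loh_fid = *fid;
         *      do {
         *              more = 0;
         *              list_for_each_entry(scan, &top->lo_header->loh_layers,
         *                                  lo_linkage) {
         *                      if (scan->lo_flags & LU_OBJECT_ALLOCATED)
         *                              continue;
         *                      result = scan->lo_ops->loo_object_init(env, scan);
         *                      if (result != 0)
         *                              break;
         *                      scan->lo_flags |= LU_OBJECT_ALLOCATED;
         *                      more = 1;
         *              }
         *      } while (more);
         */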
        /*
         * Process config specific for this device.
         */
        int (*ldo_process_config)(const struct lu_env *env,
                                  struct lu_device *, struct lustre_cfg *);
        int (*ldo_recovery_complete)(const struct lu_env *,
                                     struct lu_device *);
};
/*
 * Type of "printer" function used by the ->loo_object_print() method.
 *
 * A printer function is needed to provide some flexibility in (semi-)debugging
 * output; possible implementations: printk, CDEBUG, sysfs/seq_file.
 */
typedef int (*lu_printer_t)(const struct lu_env *env,
                            void *cookie, const char *format, ...)
        __attribute__ ((format (printf, 3, 4)));
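/*
 * A minimal sketch of a printer implementation (hypothetical, for
 * illustration only; lu_cdebug_printer() declared below is the one actually
 * used by the debugging macros in this file):
 *
 *      static int printk_printer(const struct lu_env *env,
 *                                void *cookie, const char *format, ...)
 *      {
 *              va_list args;
 *
 *              va_start(args, format);
 *              vprintk(format, args);
 *              va_end(args);
 *              return 0;
 *      }
 */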
/*
 * Operations specific to a particular lu_object.
 */
struct lu_object_operations {
        /*
         * Allocate lower-layer parts of the object by calling the
         * ->ldo_object_alloc() of the corresponding underlying device.
         *
         * This method is called once for each object inserted into the object
         * stack. It is the responsibility of this method to insert the
         * lower-layer object(s) it creates into the appropriate places of the
         * object stack.
         */
        int (*loo_object_init)(const struct lu_env *env,
                               struct lu_object *o);
        /*
         * Called (in top-to-bottom order) during object allocation after all
         * layers were allocated and initialized. Can be used to perform
         * initialization depending on lower layers.
         */
        int (*loo_object_start)(const struct lu_env *env,
                                struct lu_object *o);
        /*
         * Called before ->loo_object_free() to signal that object is being
         * destroyed. Dual to ->loo_object_init().
         */
        void (*loo_object_delete)(const struct lu_env *env,
                                  struct lu_object *o);
        /*
         * Dual to ->ldo_object_alloc(). Called when object is removed from
         * memory.
         */
        void (*loo_object_free)(const struct lu_env *env,
                                struct lu_object *o);
        /*
         * Called when last active reference to the object is released (and
         * object returns to the cache). This method is optional.
         */
        void (*loo_object_release)(const struct lu_env *env,
                                   struct lu_object *o);
        /*
         * Debugging helper. Print given object.
         */
        int (*loo_object_print)(const struct lu_env *env, void *cookie,
                                lu_printer_t p, const struct lu_object *o);
        /*
         * Optional debugging method. Returns true iff the object is
         * internally consistent.
         */
        int (*loo_object_invariant)(const struct lu_object *o);
};
struct lu_device_type;

/*
 * Device: a layer in the server side abstraction stacking.
 */
struct lu_device {
        /*
         * reference count. This is incremented, in particular, on each object
         * created at this layer.
         *
         * XXX which means that atomic_t is probably too small.
         */
        atomic_t                      ld_ref;
        /*
         * Pointer to device type. Never modified once set.
         */
        struct lu_device_type        *ld_type;
        /*
         * Operation vector for this device.
         */
        struct lu_device_operations  *ld_ops;
        /*
         * Stack this device belongs to.
         */
        struct lu_site               *ld_site;
        struct proc_dir_entry        *ld_proc_entry;

        /* XXX: temporary back pointer into obd. */
        struct obd_device            *ld_obd;
};
struct lu_device_type_operations;

/*
 * Tag bits for device type. They are used to distinguish certain groups of
 * device types.
 */
enum lu_device_tag {
        /* this is meta-data device */
        LU_DEVICE_MD = (1 << 0),
        /* this is data device */
        LU_DEVICE_DT = (1 << 1)
};
struct lu_device_type {
        /*
         * Tag bits. Taken from enum lu_device_tag. Never modified once set.
         */
        /*
         * Name of this class. Unique system-wide. Never modified once set.
         */
        /*
         * Operations for this type.
         */
        struct lu_device_type_operations *ldt_ops;
        /*
         * XXX: temporary pointer to associated obd_type.
         */
        struct obd_type                  *ldt_obd_type;
        /*
         * XXX: temporary: context tags used by obd_*() calls.
         */
};
/*
 * Operations on a device type.
 */
struct lu_device_type_operations {
        /*
         * Allocate new device.
         */
        struct lu_device *(*ldto_device_alloc)(const struct lu_env *env,
                                               struct lu_device_type *t,
                                               struct lustre_cfg *lcfg);
        /*
         * Free device. Dual to ->ldto_device_alloc(). Returns pointer to
         * the next device in the stack.
         */
        struct lu_device *(*ldto_device_free)(const struct lu_env *,
                                              struct lu_device *);
        /*
         * Initialize the devices after allocation.
         */
        int (*ldto_device_init)(const struct lu_env *env,
                                struct lu_device *, const char *,
                                struct lu_device *);
        /*
         * Finalize device. Dual to ->ldto_device_init(). Returns pointer to
         * the next device in the stack.
         */
        struct lu_device *(*ldto_device_fini)(const struct lu_env *env,
                                              struct lu_device *);
        /*
         * Initialize device type. This is called on module load.
         */
        int (*ldto_init)(struct lu_device_type *t);
        /*
         * Finalize device type. Dual to ->ldto_init(). Called on module
         * unload.
         */
        void (*ldto_fini)(struct lu_device_type *t);
};
/*
 * Flags for the object layers.
 */
enum lu_object_flags {
        /*
         * this flag is set if ->loo_object_init() has been called for this
         * layer. Used by lu_object_alloc().
         */
        LU_OBJECT_ALLOCATED = (1 << 0)
};
/*
 * Common object attributes.
 */
        LA_BLKSIZE = 1 << 12,

        __u64          la_size;    /* size in bytes */
        __u64          la_mtime;   /* modification time in seconds since Epoch */
        __u64          la_atime;   /* access time in seconds since Epoch */
        __u64          la_ctime;   /* change time in seconds since Epoch */
        __u64          la_blocks;  /* 512-byte blocks allocated to object */
        __u32          la_mode;    /* permission bits and file type */
        __u32          la_uid;     /* owner id */
        __u32          la_gid;     /* group id */
        __u32          la_flags;   /* object flags */
        __u32          la_nlink;   /* number of persistent references to this
                                    * object */
        __u32          la_blkbits; /* blk bits of the object */
        __u32          la_blksize; /* blk size of the object */
        __u32          la_rdev;    /* real device */
        __u64          la_valid;   /* valid bits */
/*
 * Layer in the layered object.
 */
struct lu_object {
        /*
         * Header for this object.
         */
        struct lu_object_header     *lo_header;
        /*
         * Device for this layer.
         */
        struct lu_device            *lo_dev;
        /*
         * Operations for this object.
         */
        struct lu_object_operations *lo_ops;
        /*
         * Linkage into list of all layers.
         */
        struct list_head             lo_linkage;
        /*
         * Depth. Top level layer depth is 0.
         */
        /*
         * Flags from enum lu_object_flags.
         */
        unsigned long                lo_flags;
};
enum lu_object_header_flags {
        /*
         * Don't keep this object in cache. Object will be destroyed as soon
         * as last reference to it is released. This flag cannot be cleared
         * once set.
         */
        LU_OBJECT_HEARD_BANSHEE = 0
};
enum lu_object_header_attr {
        LOHA_EXISTS   = 1 << 0,
        LOHA_REMOTE   = 1 << 1,
        /*
         * UNIX file type is stored in S_IFMT bits.
         */
        LOHA_FT_START = 1 << 12, /* S_IFIFO */
        LOHA_FT_END   = 1 << 15, /* S_IFREG */
};
430 * "Compound" object, consisting of multiple layers.
432 * Compound object with given fid is unique with given lu_site.
434 * Note, that object does *not* necessary correspond to the real object in the
435 * persistent storage: object is an anchor for locking and method calling, so
436 * it is created for things like not-yet-existing child created by mkdir or
437 * create calls. ->loo_exists() can be used to check whether object is backed
438 * by persistent storage entity.
struct lu_object_header {
        /*
         * Object flags from enum lu_object_header_flags. Set and checked
         * atomically.
         */
        unsigned long     loh_flags;
        /*
         * Object reference count. Protected by site guard lock.
         */
        atomic_t          loh_ref;
        /*
         * Fid, uniquely identifying this object.
         */
        struct lu_fid     loh_fid;
        /*
         * Common object attributes, cached for efficiency. From enum
         * lu_object_header_attr.
         */
        __u32             loh_attr;
        /*
         * Linkage into per-site hash table. Protected by site guard lock.
         */
        struct hlist_node loh_hash;
        /*
         * Linkage into per-site LRU list. Protected by site guard lock.
         */
        struct list_head  loh_lru;
        /*
         * Linkage into list of layers. Never modified once set (except at
         * the end, during object destruction). No locking is necessary.
         */
        struct list_head  loh_layers;
};
/*
 * lu_site is a "compartment" within which objects are unique, and LRU
 * discipline is maintained.
 *
 * lu_site exists so that multiple layered stacks can co-exist in the same
 * address space.
 *
 * lu_site has the same relation to lu_device as lu_object_header to
 * lu_object.
 */
struct lu_site {
        /*
         * The site guard lock protects:
         *
         *        - ->ls_hash hash table (and its linkages in objects);
         *
         *        - ->ls_lru list (and its linkages in objects);
         *
         *        - 0/1 transitions of object ->loh_ref reference count;
         */
        /*
         * Hash-table where objects are indexed by fid.
         */
        struct hlist_head *ls_hash;
        /*
         * Bit-mask for hash-table size.
         */
        /*
         * Order of hash-table.
         */
        /*
         * Number of buckets in the hash-table.
         */
        /*
         * LRU list, updated on each access to object. Protected by
         * ->ls_guard.
         *
         * "Cold" end of LRU is ->ls_lru.next. Accessed objects are moved to
         * the ->ls_lru.prev end (this is due to the non-existence of
         * list_for_each_entry_safe_reverse()).
         */
        struct list_head   ls_lru;
        /*
         * Total number of objects in this site. Protected by ->ls_guard.
         */
        /*
         * Total number of objects in this site with reference counter greater
         * than 0. Protected by ->ls_guard.
         */
        /*
         * Top-level device for this stack.
         */
        struct lu_device     *ls_top_dev;
        /*
         * mds number of this site.
         */
        /*
         * Fid location database
         */
        struct lu_server_fld *ls_server_fld;
        struct lu_client_fld *ls_client_fld;

        struct lu_server_seq *ls_server_seq;

        /*
         * Controller Seq Manager
         */
        struct lu_server_seq *ls_control_seq;
        struct obd_export    *ls_control_exp;

        struct lu_client_seq *ls_client_seq;
        /* statistical counters. Protected by nothing, races are accepted. */
        /*
         * Number of hash-table entry checks made.
         *
         *        ->s_cache_check / (->s_cache_miss + ->s_cache_hit)
         *
         * is an average number of hash slots inspected during a single
         * lookup.
         */
        /* raced cache insertions */

        /*
         * Linkage into global list of sites.
         */
        struct list_head      ls_linkage;
        struct lprocfs_stats *ls_time_stats;
};
/*
 * Constructors/destructors.
 */

/*
 * Initialize site @s, with @d as the top level device.
 */
int lu_site_init(struct lu_site *s, struct lu_device *d);

/*
 * Finalize @s and release its resources.
 */
void lu_site_fini(struct lu_site *s);

/*
 * Called when initialization of stack for this site is completed.
 */
int lu_site_init_finish(struct lu_site *s);

/*
 * Acquire additional reference on device @d.
 */
void lu_device_get(struct lu_device *d);

/*
 * Release reference on device @d.
 */
void lu_device_put(struct lu_device *d);

/*
 * Initialize device @d of type @t.
 */
int lu_device_init(struct lu_device *d, struct lu_device_type *t);

/*
 * Finalize device @d.
 */
void lu_device_fini(struct lu_device *d);

/*
 * Initialize compound object.
 */
int lu_object_header_init(struct lu_object_header *h);

/*
 * Finalize compound object.
 */
void lu_object_header_fini(struct lu_object_header *h);
/*
 * Initialize object @o that is part of compound object @h and was created by
 * device @d.
 */
int lu_object_init(struct lu_object *o,
                   struct lu_object_header *h, struct lu_device *d);

/*
 * Finalize object and release its resources.
 */
void lu_object_fini(struct lu_object *o);
/*
 * Add object @o as first layer of compound object @h.
 *
 * This is typically called by the ->ldo_object_alloc() method of the
 * top-level device.
 */
void lu_object_add_top(struct lu_object_header *h, struct lu_object *o);

/*
 * Add object @o as a layer of compound object, going after @before.
 *
 * This is typically called by the ->ldo_object_alloc() method of the middle
 * layers.
 */
void lu_object_add(struct lu_object *before, struct lu_object *o);
/*
 * Caching and reference counting.
 */

/*
 * Acquire an additional reference to the given object. To acquire the initial
 * reference, use lu_object_find().
 */
static inline void lu_object_get(struct lu_object *o)
{
        LASSERT(atomic_read(&o->lo_header->loh_ref) > 0);
        atomic_inc(&o->lo_header->loh_ref);
}
/*
 * Return true if the object will not be cached after the last reference to it
 * is released.
 */
static inline int lu_object_is_dying(const struct lu_object_header *h)
{
        return test_bit(LU_OBJECT_HEARD_BANSHEE, &h->loh_flags);
}
/*
 * Decrease reference counter on object. If last reference is freed, return
 * object to the cache, unless lu_object_is_dying(o) holds. In the latter
 * case, free object immediately.
 */
void lu_object_put(const struct lu_env *env,
                   struct lu_object *o);

/*
 * Free @nr objects from the cold end of the site LRU list.
 */
int lu_site_purge(const struct lu_env *env, struct lu_site *s, int nr);
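/*
 * Usage sketch (illustrative): a cache shrinker or an unmount path can trim a
 * bounded number of unused objects from the cold end of the LRU, e.g.:
 *
 *      lu_site_purge(env, s, 128);
 */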
/*
 * Print all objects in @s.
 */
void lu_site_print(const struct lu_env *env, struct lu_site *s, void *cookie,
                   lu_printer_t printer);

/*
 * Search cache for an object with the fid @f. If such object is found, return
 * it. Otherwise, create new object, insert it into cache and return it. In
 * any case, additional reference is acquired on the returned object.
 */
struct lu_object *lu_object_find(const struct lu_env *env,
                                 struct lu_site *s, const struct lu_fid *f);
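/*
 * Typical caller pattern (a sketch; error handling abbreviated): look the
 * object up by fid, use it, then drop the reference so that it can return to
 * the cache:
 *
 *      struct lu_object *o;
 *
 *      o = lu_object_find(env, s, f);
 *      if (!IS_ERR(o)) {
 *              ... use the object ...
 *              lu_object_put(env, o);
 *      }
 *
 * Additional references taken during the object's use are acquired with
 * lu_object_get() and released with lu_object_put().
 */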
/*
 * First (topmost) sub-object of given compound object.
 */
static inline struct lu_object *lu_object_top(struct lu_object_header *h)
{
        LASSERT(!list_empty(&h->loh_layers));
        return container_of0(h->loh_layers.next, struct lu_object, lo_linkage);
}

/*
 * Next sub-object in the layering.
 */
static inline struct lu_object *lu_object_next(const struct lu_object *o)
{
        return container_of0(o->lo_linkage.next, struct lu_object, lo_linkage);
}

/*
 * Pointer to the fid of this object.
 */
static inline const struct lu_fid *lu_object_fid(const struct lu_object *o)
{
        return &o->lo_header->loh_fid;
}

/*
 * Return device operations vector for this object.
 */
static inline struct lu_device_operations *
lu_object_ops(const struct lu_object *o)
{
        return o->lo_dev->ld_ops;
}
/*
 * Given a compound object, find its slice, corresponding to the device type
 * @dtype.
 */
struct lu_object *lu_object_locate(struct lu_object_header *h,
                                   struct lu_device_type *dtype);
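/*
 * Example (illustrative; "mdd_device_type" is a hypothetical device type
 * variable): fetch the slice that belongs to a particular layer of the
 * compound object @h:
 *
 *      struct lu_object *slice = lu_object_locate(h, &mdd_device_type);
 *
 *      if (slice != NULL)
 *              ... operate on that layer ...
 */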
struct lu_cdebug_print_info {
        int         lpi_subsys;
        int         lpi_mask;
        const char *lpi_file;
        const char *lpi_fn;
        int         lpi_line;
};

/*
 * Printer function emitting messages through libcfs_debug_msg().
 */
int lu_cdebug_printer(const struct lu_env *env,
                      void *cookie, const char *format, ...);
#define DECLARE_LU_CDEBUG_PRINT_INFO(var, mask) \
        struct lu_cdebug_print_info var = {     \
                .lpi_subsys = DEBUG_SUBSYSTEM,  \
                .lpi_mask   = (mask),           \
                .lpi_file   = __FILE__,         \
                .lpi_fn     = __FUNCTION__,     \
                .lpi_line   = __LINE__          \
        }
/*
 * Print object description followed by a user-supplied message.
 */
#define LU_OBJECT_DEBUG(mask, env, object, format, ...)                 \
do {                                                                    \
        static DECLARE_LU_CDEBUG_PRINT_INFO(__info, mask);              \
                                                                        \
        lu_object_print(env, &__info, lu_cdebug_printer, object);       \
        CDEBUG(mask, format, ## __VA_ARGS__);                           \
} while (0)
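/*
 * Example (illustrative):
 *
 *      LU_OBJECT_DEBUG(D_INFO, env, o, "object flags: %lu\n", o->lo_flags);
 */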
/*
 * Print a human-readable representation of @o through @printer.
 */
void lu_object_print(const struct lu_env *env, void *cookie,
                     lu_printer_t printer, const struct lu_object *o);
/*
 * Check object consistency.
 */
int lu_object_invariant(const struct lu_object *o);
/*
 * Returns 1 iff object @o exists on the stable storage,
 * returns -1 iff object @o is on a remote server.
 */
static inline int lu_object_exists(const struct lu_object *o)
{
        __u32 attr;

        attr = o->lo_header->loh_attr;
        if (attr & LOHA_REMOTE)
                return -1;
        else if (attr & LOHA_EXISTS)
                return +1;
        else
                return 0;
}
static inline int lu_object_assert_exists(const struct lu_object *o)
{
        return lu_object_exists(o) != 0;
}

static inline int lu_object_assert_not_exists(const struct lu_object *o)
{
        return lu_object_exists(o) <= 0;
}
/*
 * Attr of this object.
 */
static inline __u32 lu_object_attr(const struct lu_object *o)
{
        LASSERT(lu_object_exists(o) > 0);
        return o->lo_header->loh_attr;
}
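/*
 * Because the UNIX file type bits are cached in the attributes (see
 * LOHA_FT_START above), callers can check the type without consulting lower
 * layers, e.g.:
 *
 *      if (S_ISDIR(lu_object_attr(o)))
 *              ... directory-specific handling ...
 */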
        /* input params, should be filled out by mdt */
        __u32         rp_hash;   /* hash */
        int           rp_count;  /* count in bytes */
        int           rp_npages; /* number of pages */
        struct page **rp_pages;  /* pointers to pages */

enum lu_xattr_flags {
        LU_XATTR_REPLACE = (1 << 0),
        LU_XATTR_CREATE  = (1 << 1)
};
/* For lu_context health-checks */
enum lu_context_state {
/*
 * lu_context. Execution context for lu_object methods. Currently associated
 * with a thread.
 *
 * All lu_object methods, except device and device type methods (called during
 * system initialization and shutdown), are executed "within" some
 * lu_context. This means that a pointer to some "current" lu_context is
 * passed as an argument to all methods.
 *
 * All service ptlrpc threads create a lu_context as part of their
 * initialization. It is possible to create a "stand-alone" context for other
 * execution environments (like system calls).
 *
 * lu_object methods mainly use lu_context through the lu_context_key
 * interface that allows each layer to associate arbitrary pieces of data with
 * each context (see pthread_key_create(3) for a similar interface).
 */
/*
 * Theoretically we'd want to use lu_objects and lu_contexts on the
 * client side too. On the other hand, we don't want to allocate
 * values of server-side keys for the client contexts and vice versa.
 *
 * To achieve this, a set of tags is introduced. Contexts and keys are
 * marked with tags. Key values are created only for contexts whose set
 * of tags has a non-empty intersection with that of the key. Tags are
 * taken from enum lu_context_tag.
 */
struct lu_context {
        /*
         * Pointer to the home service thread. NULL for other execution
         * contexts.
         */
        struct ptlrpc_thread  *lc_thread;
        /*
         * Pointer to an array with key values. Internal implementation
         * detail.
         */
        void                 **lc_value;
        enum lu_context_state  lc_state;
};
/*
 * lu_context_key interface. Similar to pthread_key.
 */

enum lu_context_tag {
        /*
         * Thread on md server
         */
        LCT_MD_THREAD = 1 << 0,
        /*
         * Thread on dt server
         */
        LCT_DT_THREAD = 1 << 1,
        /*
         * Context for transaction handle
         */
        LCT_TX_HANDLE = 1 << 2,
        LCT_CL_THREAD = 1 << 3,
        /*
         * Per-request session on server
         */
        LCT_SESSION   = 1 << 4,
        /*
         * Don't add references for modules creating key values in that
         * context. This is only for contexts used internally by lu_object
         * framework.
         */
        /*
         * Contexts usable in cache shrinker thread.
         */
        LCT_SHRINKER  = LCT_MD_THREAD|LCT_DT_THREAD|LCT_CL_THREAD|LCT_NOREF
};
/*
 * Key. Represents per-context value slot.
 */
struct lu_context_key {
        /*
         * Set of tags for which values of this key are to be instantiated.
         */
        __u32 lct_tags;
        /*
         * Value constructor. This is called when a new value is created for a
         * context. Returns pointer to the new value or an error pointer.
         */
        void *(*lct_init)(const struct lu_context *ctx,
                          struct lu_context_key *key);
        /*
         * Value destructor. Called when context with previously allocated
         * value of this slot is destroyed. @data is a value that was returned
         * by a matching call to ->lct_init().
         */
        void (*lct_fini)(const struct lu_context *ctx,
                         struct lu_context_key *key, void *data);
        /*
         * Optional method called on lu_context_exit() for all allocated
         * keys. Can be used by debugging code checking that locks are
         * released.
         */
        void (*lct_exit)(const struct lu_context *ctx,
                         struct lu_context_key *key, void *data);
        /*
         * Internal implementation detail: index within ->lc_value[] reserved
         * for this key.
         */
        /*
         * Internal implementation detail: number of values created for this
         * key.
         */
        /*
         * Internal implementation detail: module for this key.
         */
        struct module *lct_owner;
};
#define LU_KEY_INIT(mod, type)                                    \
        static void *mod##_key_init(const struct lu_context *ctx, \
                                    struct lu_context_key *key)   \
        {                                                         \
                type *value;                                      \
                CLASSERT(CFS_PAGE_SIZE >= sizeof (*value));       \
                OBD_ALLOC_PTR(value);                             \
                if (value == NULL)                                \
                        value = ERR_PTR(-ENOMEM);                 \
                return value;                                     \
        }                                                         \
        struct __##mod##__dummy_init {;}  /* semicolon catcher */
#define LU_KEY_FINI(mod, type)                                              \
        static void mod##_key_fini(const struct lu_context *ctx,           \
                                   struct lu_context_key *key, void *data) \
        {                                                                   \
                type *info = data;                                          \
                OBD_FREE_PTR(info);                                         \
        }                                                                   \
        struct __##mod##__dummy_fini {;}  /* semicolon catcher */
#define LU_KEY_INIT_FINI(mod, type)   \
        LU_KEY_INIT(mod, type);       \
        LU_KEY_FINI(mod, type)
#define LU_CONTEXT_KEY_DEFINE(mod, tags)             \
        struct lu_context_key mod##_thread_key = {   \
                .lct_tags = (tags),                  \
                .lct_init = mod##_key_init,          \
                .lct_fini = mod##_key_fini           \
        }
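/*
 * Putting the pieces together (a sketch; "foo" and struct foo_thread_info are
 * hypothetical):
 *
 *      struct foo_thread_info {
 *              ... per-thread scratch data for the foo layer ...
 *      };
 *
 *      LU_KEY_INIT_FINI(foo, struct foo_thread_info);
 *      LU_CONTEXT_KEY_DEFINE(foo, LCT_MD_THREAD);
 *
 * foo_thread_key can then be registered from module initialization with
 * lu_context_key_register() (after LU_CONTEXT_KEY_INIT()), and its per-context
 * value is retrieved with lu_context_key_get().
 */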
#define LU_CONTEXT_KEY_INIT(key)                \
do {                                            \
        (key)->lct_owner = THIS_MODULE;         \
} while (0)
int lu_context_key_register(struct lu_context_key *key);

void lu_context_key_degister(struct lu_context_key *key);
#define LU_KEY_REGISTER_GENERIC(mod)                                          \
        static int mod##_key_register_generic(struct lu_context_key *k, ...) \
        {                                                                     \
                struct lu_context_key *key = k;                               \
                va_list args;                                                 \
                int result;                                                   \
                                                                              \
                va_start(args, k);                                            \
                do {                                                          \
                        LU_CONTEXT_KEY_INIT(key);                             \
                        result = lu_context_key_register(key);                \
                        if (result != 0)                                      \
                                break;                                        \
                        key = va_arg(args, struct lu_context_key *);          \
                } while (key != NULL);                                        \
                va_end(args);                                                 \
                if (result != 0) {                                            \
                        va_start(args, k);                                    \
                        while (k != key) {                                    \
                                lu_context_key_degister(k);                   \
                                k = va_arg(args, struct lu_context_key *);    \
                        }                                                     \
                        va_end(args);                                         \
                }                                                             \
                return result;                                                \
        }
#define LU_KEY_DEGISTER_GENERIC(mod)                                           \
        static void mod##_key_degister_generic(struct lu_context_key *k, ...) \
        {                                                                      \
                va_list args;                                                  \
                                                                               \
                va_start(args, k);                                             \
                do {                                                           \
                        lu_context_key_degister(k);                            \
                        k = va_arg(args, struct lu_context_key *);             \
                } while (k != NULL);                                           \
                va_end(args);                                                  \
        }
#define LU_TYPE_INIT(mod, ...)                                         \
        LU_KEY_REGISTER_GENERIC(mod)                                   \
        static int mod##_type_init(struct lu_device_type *t)          \
        {                                                              \
                return mod##_key_register_generic(__VA_ARGS__, NULL);  \
        }                                                              \
        struct __##mod##_dummy_type_init {;}

#define LU_TYPE_FINI(mod, ...)                                         \
        LU_KEY_DEGISTER_GENERIC(mod)                                   \
        static void mod##_type_fini(struct lu_device_type *t)         \
        {                                                              \
                mod##_key_degister_generic(__VA_ARGS__, NULL);         \
        }                                                              \
        struct __##mod##_dummy_type_fini {;}
#define LU_TYPE_INIT_FINI(mod, ...)     \
        LU_TYPE_INIT(mod, __VA_ARGS__); \
        LU_TYPE_FINI(mod, __VA_ARGS__)
/*
 * Return value associated with key @key in context @ctx.
 */
void *lu_context_key_get(const struct lu_context *ctx,
                         struct lu_context_key *key);
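/*
 * Typical use inside a method (a sketch, assuming the hypothetical
 * foo_thread_key from the example above has been registered):
 *
 *      struct foo_thread_info *info;
 *
 *      info = lu_context_key_get(&env->le_ctx, &foo_thread_key);
 *      LASSERT(info != NULL);
 */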
/*
 * Initialize context data-structure. Create values for all keys.
 */
int lu_context_init(struct lu_context *ctx, __u32 tags);

/*
 * Finalize context data-structure. Destroy key values.
 */
void lu_context_fini(struct lu_context *ctx);

/*
 * Called before entering context.
 */
void lu_context_enter(struct lu_context *ctx);

/*
 * Called after exiting from @ctx.
 */
void lu_context_exit(struct lu_context *ctx);

/*
 * Allocate for context all missing keys that were registered after context
 * creation.
 */
int lu_context_refill(const struct lu_context *ctx);
1134 * "Local" context, used to store data instead of stack.
1136 struct lu_context le_ctx;
1138 * "Session" context for per-request data.
1140 struct lu_context *le_ses;
1143 int lu_env_init(struct lu_env *env, struct lu_context *ses, __u32 tags);
1144 void lu_env_fini(struct lu_env *env);
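/*
 * Life-cycle sketch for a stand-alone environment (illustrative; service
 * threads set this up as part of their own initialization, and the session
 * context argument is shown as NULL only as an assumption for brevity):
 *
 *      struct lu_env env;
 *      int rc;
 *
 *      rc = lu_env_init(&env, NULL, LCT_DT_THREAD);
 *      if (rc == 0) {
 *              ... call lu_object methods with &env ...
 *              lu_env_fini(&env);
 *      }
 */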
/*
 * Common name structure to be passed around for various name-related methods.
 */

/*
 * Common buffer structure to be passed around for various xattr_{s,g}et()
 * methods.
 */

extern struct lu_buf LU_BUF_NULL; /* null buffer */
#define DLUBUF "(%p %z)"
#define PLUBUF(buf) (buf)->lb_buf, (buf)->lb_len
/*
 * One-time initializers, called at obdclass module initialization, not
 * per-device.
 */

/*
 * Initialization of global lu_* data.
 */
int lu_global_init(void);

/*
 * Dual to lu_global_init().
 */
void lu_global_fini(void);
        LU_TIME_FIND_LOOKUP,
        LU_TIME_FIND_INSERT,

extern const char *lu_time_names[LU_TIME_NR];

#endif /* __LUSTRE_LU_OBJECT_H */