 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; if not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 * Copyright (c) 2011, 2017, Intel Corporation.
 * This file is part of Lustre, http://www.lustre.org/

#ifndef __LUSTRE_LU_OBJECT_H
#define __LUSTRE_LU_OBJECT_H

#include <libcfs/libcfs.h>
#include <uapi/linux/lustre/lustre_idl.h>
#include <linux/percpu_counter.h>
#include <linux/rhashtable.h>
#include <linux/ctype.h>
#include <obd_target.h>

struct proc_dir_entry;

 * lu_* data-types represent server-side entities shared by data and meta-data
 * stacks.
 *
 * -# support for layering.
 *
 *    A server-side object is split into layers, one per device in the
 *    corresponding device stack. An individual layer is represented by
 *    struct lu_object; a compound layered object by struct lu_object_header.
 *    Most interface functions take lu_object as an argument and operate on
 *    the whole compound object. This decision was made due to the following
 *    reasons:
 *
 *    - it's envisaged that lu_object will be used much more often than
 *      lu_object_header;
 *
 *    - we want lower (non-top) layers to be able to initiate operations
 *      on the whole object.
 *
 *    Generic code supports layering more complex than simple stacking, e.g.,
 *    it is possible that at some layer an object "spawns" multiple
 *    sub-objects.
 *
 * -# fid-based identification.
 *
 *    A compound object is uniquely identified by its fid. Objects are
 *    indexed by their fids (a hash table is used for the index).
 *
 * -# caching and life-cycle management.
 *
 *    An object's life-time is controlled by reference counting. When the
 *    reference count drops to 0, the object is returned to the cache.
 *    Cached objects still retain their identity (i.e., fid), and can be
 *    recovered from the cache.
 *
 *    Objects are kept on a global LRU list, and the lu_site_purge()
 *    function can be used to reclaim a given number of unused objects from
 *    the tail of that list.
 *
 * -# avoiding recursion.
 *
 *    Generic code tries to replace recursion through layers by iteration
 *    where possible. Additionally, to reduce stack consumption, data are,
 *    when practically possible, allocated through the lu_context_key
 *    interface rather than on the stack.
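/*
 * A minimal usage sketch (illustrative, not part of this header): look up a
 * compound object by fid with lu_object_find() and release it again with
 * lu_object_put(), both declared later in this file. The env, dev and fid
 * values are assumed to be supplied by the caller; conf is NULL for brevity.
 *
 *	struct lu_object *obj;
 *
 *	obj = lu_object_find(env, dev, fid, NULL);
 *	if (IS_ERR(obj))
 *		return PTR_ERR(obj);
 *	... operate on the compound object via its slices ...
 *	lu_object_put(env, obj);    (object returns to the LRU cache)
 */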
struct lu_object_header;

 * Operations common for data and meta-data devices.
struct lu_device_operations {
 * Allocate an object for the given device (without lower-layer
 * parts). This is called by lu_object_operations::loo_object_init()
 * from the parent layer, and should set up at least the
 * lu_object::lo_dev and lu_object::lo_ops fields of the resulting
 * lu_object.
 *
 * Object creation protocol.
 *
 * Due to the design goal of avoiding recursion, object creation (see
 * lu_object_alloc()) is somewhat involved:
 *
 *  - first, the lu_device_operations::ldo_object_alloc() method of the
 *    top-level device in the stack is called. It should allocate the
 *    top-level object (including the lu_object_header), but without any
 *    lower-layer sub-object(s).
 *
 *  - then lu_object_alloc() sets the fid in the header of the newly
 *    created object.
 *
 *  - then lu_object_operations::loo_object_init() is called. It has
 *    to allocate the lower-layer object(s). To do this,
 *    lu_object_operations::loo_object_init() calls ldo_object_alloc()
 *    of the lower-layer device(s).
 *
 *  - for all new objects allocated by
 *    lu_object_operations::loo_object_init() (and inserted into the object
 *    stack), lu_object_operations::loo_object_init() is called again,
 *    repeatedly, until no new objects are created.
 *
 * \post ergo(!IS_ERR(result), result->lo_dev == d &&
 *                             result->lo_ops != NULL);
	struct lu_object *(*ldo_object_alloc)(const struct lu_env *env,
					      const struct lu_object_header *h,
					      struct lu_device *d);
 * Process configuration specific to this device.
	int (*ldo_process_config)(const struct lu_env *env,
				  struct lu_device *, struct lustre_cfg *);
	int (*ldo_recovery_complete)(const struct lu_env *,
				     struct lu_device *);
 * Initialize local objects for the device. This method is called after the
 * layer has been initialized (after the LCFG_SETUP stage) and before it
 * starts serving requests.
	int (*ldo_prepare)(const struct lu_env *,
			   struct lu_device *parent,
			   struct lu_device *dev);
 * Allocate a new FID for a file with @name under @parent
 *
 * \param[in]  env	execution environment for this thread
 * \param[in]  dev	dt device
 * \param[out] fid	new FID allocated
 * \param[in]  parent	parent object
 * \param[in]  name	lu_name
 *
 * \retval 0		FID allocated successfully.
 * \retval 1		FID allocated successfully and a new sequence
 *			requested from the seq meta server
 * \retval negative	errno if FID allocation failed.
	int (*ldo_fid_alloc)(const struct lu_env *env,
			     struct lu_device *dev,
			     struct lu_fid *fid,
			     struct lu_object *parent,
			     const struct lu_name *name);
};
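/*
 * A sketch of the object creation protocol described above, for a
 * hypothetical layer "foo" (all foo_* names are made up for illustration).
 * ldo_object_alloc() only allocates this layer's slice; the lower layer's
 * slice is allocated later, in loo_object_init():
 *
 *	static struct lu_object *foo_object_alloc(const struct lu_env *env,
 *						  const struct lu_object_header *h,
 *						  struct lu_device *d)
 *	{
 *		struct foo_object *fo;
 *
 *		OBD_ALLOC_PTR(fo);
 *		if (fo == NULL)
 *			return NULL;
 *		lu_object_init(&fo->fo_obj, NULL, d);
 *		fo->fo_obj.lo_ops = &foo_lu_obj_ops;
 *		return &fo->fo_obj;
 *	}
 *
 *	static int foo_object_init(const struct lu_env *env,
 *				   struct lu_object *o,
 *				   const struct lu_object_conf *conf)
 *	{
 *		struct lu_device *next = foo_dev_below(o->lo_dev);
 *		struct lu_object *below;
 *
 *		below = next->ld_ops->ldo_object_alloc(env, o->lo_header, next);
 *		if (below == NULL)
 *			return -ENOMEM;
 *		lu_object_add(o, below);
 *		return 0;
 *	}
 */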
 * For lu_object_conf flags
typedef enum {
	/* This is a new object to be allocated, or the file
	 * corresponding to the object does not exist. */
	LOC_F_NEW	= 0x00000001,
} loc_flags_t;

 * Object configuration, describing particulars of the object being created.
 * On the server this is not used, as server objects are fully identified by
 * fid. On the client the configuration contains struct lustre_md.
struct lu_object_conf {
 * Some hints for object find and alloc.
	loc_flags_t loc_flags;
};

 * Type of "printer" function used by lu_object_operations::loo_object_print()
 *
 * A printer function is needed to provide some flexibility in (semi-)debugging
 * output; possible implementations: printk, CDEBUG, sysfs/seq_file.
typedef int (*lu_printer_t)(const struct lu_env *env,
			    void *cookie, const char *format, ...)
	__attribute__ ((format (printf, 3, 4)));
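/*
 * An illustrative lu_printer_t implementation (a sketch, not part of the
 * API): print into a seq_file passed through the opaque cookie, matching the
 * typedef above. The name lu_seq_printer is made up for this example.
 *
 *	static int lu_seq_printer(const struct lu_env *env,
 *				  void *cookie, const char *format, ...)
 *	{
 *		struct seq_file *m = cookie;
 *		va_list args;
 *
 *		va_start(args, format);
 *		seq_vprintf(m, format, args);
 *		va_end(args);
 *		return 0;
 *	}
 */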
 * Operations specific to a particular lu_object.
struct lu_object_operations {
 * Allocate the lower-layer parts of the object by calling
 * lu_device_operations::ldo_object_alloc() of the corresponding
 * underlying device.
 *
 * This method is called once for each object inserted into the object
 * stack. It is the responsibility of this method to insert the lower-layer
 * object(s) it creates into the appropriate places of the object stack.
	int (*loo_object_init)(const struct lu_env *env,
			       struct lu_object *o,
			       const struct lu_object_conf *conf);
 * Called (in top-to-bottom order) during object allocation after all
 * layers have been allocated and initialized. Can be used to perform
 * initialization that depends on lower layers.
	int (*loo_object_start)(const struct lu_env *env,
				struct lu_object *o);
 * Called before lu_object_operations::loo_object_free() to signal
 * that the object is being destroyed. Dual to
 * lu_object_operations::loo_object_init().
	void (*loo_object_delete)(const struct lu_env *env,
				  struct lu_object *o);
 * Dual to lu_device_operations::ldo_object_alloc(). Called when the
 * object is removed from memory. Must use call_rcu() or kfree_rcu()
 * if the object contains an lu_object_header.
	void (*loo_object_free)(const struct lu_env *env,
				struct lu_object *o);
 * Called when the last active reference to the object is released (and
 * the object returns to the cache). This method is optional.
	void (*loo_object_release)(const struct lu_env *env,
				   struct lu_object *o);
 * Optional debugging helper. Print the given object.
	int (*loo_object_print)(const struct lu_env *env, void *cookie,
				lu_printer_t p, const struct lu_object *o);
 * Optional debugging method. Returns true iff the object is internally
 * consistent.
	int (*loo_object_invariant)(const struct lu_object *o);
};
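/*
 * A sketch of the RCU rule stated above for loo_object_free(): when the
 * slice being freed embeds the lu_object_header (the top slice), the header
 * may still be visible to lockless rhashtable lookups, so the memory must be
 * freed through lu_object_header::loh_rcu rather than immediately. Assume a
 * hypothetical struct foo_object that embeds a struct lu_object_header
 * fo_header and a struct lu_object fo_obj.
 *
 *	static void foo_object_free(const struct lu_env *env,
 *				    struct lu_object *o)
 *	{
 *		struct foo_object *fo = container_of(o, struct foo_object,
 *						     fo_obj);
 *
 *		lu_object_fini(o);
 *		lu_object_header_fini(&fo->fo_header);
 *		kfree_rcu(fo, fo_header.loh_rcu);
 *	}
 */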
struct lu_device_type;

 * Device: a layer in the server-side abstraction stacking.

 * reference count. This is incremented, in particular, on each object
 * created at this layer.
 *
 * \todo XXX which means that atomic_t is probably too small.
struct lu_device {
 * Pointer to device type. Never modified once set.
	struct lu_device_type *ld_type;
 * Operation vector for this device.
	const struct lu_device_operations *ld_ops;
 * Stack this device belongs to.
	struct lu_site *ld_site;
	struct proc_dir_entry *ld_proc_entry;

	/** \todo XXX: temporary back pointer into obd. */
	struct obd_device *ld_obd;
 * A list of references to this device, for debugging.
	struct lu_ref ld_reference;
 * Link the device to the site.
	struct list_head ld_linkage;
};

struct lu_device_type_operations;

 * Tag bits for device type. They are used to distinguish certain groups of
enum lu_device_tag {
	/** this is a meta-data device */
	LU_DEVICE_MD = BIT(0),
	/** this is a data device */
	LU_DEVICE_DT = BIT(1),
	/** data device in the client stack */
	LU_DEVICE_CL = BIT(2)
};

struct lu_device_type {
 * Tag bits. Taken from enum lu_device_tag. Never modified once set.
 * Name of this class. Unique system-wide. Never modified once set.
 * Operations for this type.
	const struct lu_device_type_operations *ldt_ops;
 * \todo XXX: temporary: context tags used by obd_*() calls.
 * Number of existing device type instances.
	atomic_t ldt_device_nr;
};

 * Operations on a device type.
struct lu_device_type_operations {
 * Allocate a new device.
	struct lu_device *(*ldto_device_alloc)(const struct lu_env *env,
					       struct lu_device_type *t,
					       struct lustre_cfg *lcfg);
 * Free the device. Dual to
 * lu_device_type_operations::ldto_device_alloc(). Returns a pointer to
 * the next device in the stack.
	struct lu_device *(*ldto_device_free)(const struct lu_env *,
					      struct lu_device *);
 * Initialize the devices after allocation
	int (*ldto_device_init)(const struct lu_env *env,
				struct lu_device *, const char *,
				struct lu_device *);
 * Finalize the device. Dual to
 * lu_device_type_operations::ldto_device_init(). Returns a pointer to
 * the next device in the stack.
	struct lu_device *(*ldto_device_fini)(const struct lu_env *env,
					      struct lu_device *);
 * Initialize the device type. This is called on module load.
	int (*ldto_init)(struct lu_device_type *t);
 * Finalize the device type. Dual to
 * lu_device_type_operations::ldto_init(). Called on module unload.
	void (*ldto_fini)(struct lu_device_type *t);
 * Called when the first device is created.
	void (*ldto_start)(struct lu_device_type *t);
 * Called when the number of devices drops to 0.
	void (*ldto_stop)(struct lu_device_type *t);
};

static inline int lu_device_is_md(const struct lu_device *d)
{
	return ergo(d != NULL, d->ld_type->ldt_tags & LU_DEVICE_MD);
}
 * Common object attributes.
struct lu_attr {
	/** modification time in seconds since Epoch */
	/** access time in seconds since Epoch */
	/** change time in seconds since Epoch */
	/** create time in seconds since Epoch */
	/** 512-byte blocks allocated to the object */
	/** permission bits and file type */
	/** number of persistent references to this object */
	/** block bits of the object */
	/** block size of the object */
	/** layout version to set on OST objects */
	__u32 la_layout_version;
	__u64 la_dirent_count;
};

#define LU_DIRENT_COUNT_UNSET ~0ULL

 * Layer in the layered object.
struct lu_object {
 * Header for this object.
	struct lu_object_header *lo_header;
 * Device for this layer.
	struct lu_device *lo_dev;
 * Operations for this object.
	const struct lu_object_operations *lo_ops;
 * Linkage into the list of all layers.
	struct list_head lo_linkage;
 * Link to the device, for debugging.
	struct lu_ref_link lo_dev_ref;
};

enum lu_object_header_flags {
 * Don't keep this object in cache. The object will be destroyed as soon
 * as the last reference to it is released. This flag cannot be cleared
 * once set.
	LU_OBJECT_HEARD_BANSHEE = 0,
 * Marks that this object has already been taken out of the cache.
	LU_OBJECT_UNHASHED = 1,
 * Object is initialized. When an object is found in the cache, it may not
 * be initialized yet; the object allocator will initialize it.
	LU_OBJECT_INITED = 2,
};

enum lu_object_header_attr {
	LOHA_EXISTS		= BIT(0),
	LOHA_REMOTE		= BIT(1),
	LOHA_HAS_AGENT_ENTRY	= BIT(2),
 * UNIX file type is stored in the S_IFMT bits.
	LOHA_FT_START		= 001 << 12, /**< S_IFIFO */
	LOHA_FT_END		= 017 << 12, /**< S_IFMT */
};
 * "Compound" object, consisting of multiple layers.
 *
 * A compound object with a given fid is unique within a given lu_site.
 *
 * Note that the object does *not* necessarily correspond to a real object
 * in persistent storage: an object is an anchor for locking and method
 * calling, so it is created for things like a not-yet-existing child created
 * by mkdir or create calls. lu_object_operations::loo_exists() can be used
 * to check whether an object is backed by a persistent storage entity.
 *
 * Any object containing this structure which might be placed in an
 * rhashtable via loh_hash MUST be freed using call_rcu() or kfree_rcu().
struct lu_object_header {
 * Fid, uniquely identifying this object.
	struct lu_fid loh_fid;
 * Object flags from enum lu_object_header_flags. Set and checked
 * atomically.
	unsigned long loh_flags;
 * Object reference count. Protected by lu_site::ls_guard.
 * Common object attributes, cached for efficiency. From enum
 * lu_object_header_attr.
 * Linkage into the per-site hash table.
	struct rhash_head loh_hash;
 * Linkage into the per-site LRU list. Protected by lu_site::ls_guard.
	struct list_head loh_lru;
 * Linkage into the list of layers. Never modified once set (except late
 * in object destruction). No locking is necessary.
	struct list_head loh_layers;
 * A list of references to this object, for debugging.
	struct lu_ref loh_reference;
 * Handle used for kfree_rcu() or similar.
	struct rcu_head loh_rcu;
};
	LU_SS_CACHE_DEATH_RACE,

 * lu_site is a "compartment" within which objects are unique, and LRU
 * discipline is maintained.
 *
 * lu_site exists so that multiple layered stacks can co-exist in the same
 * address space.
 *
 * lu_site has the same relation to lu_device as lu_object_header to
 * lu_object.
struct lu_site {
	struct rhashtable ls_obj_hash;
 * buckets for summary data
	struct lu_site_bkt_data *ls_bkts;
 * index of bucket on hash table while purging
	unsigned int ls_purge_start;
 * Top-level device for this stack.
	struct lu_device *ls_top_dev;
 * Bottom-level device for this stack.
	struct lu_device *ls_bottom_dev;
 * Linkage into the global list of sites.
	struct list_head ls_linkage;
 * List of lu devices for this site, protected by ls_ld_lock.
	struct list_head ls_ld_linkage;
	spinlock_t ls_ld_lock;
 * Lock to serialize site purge.
	struct mutex ls_purge_mutex;

	struct lprocfs_stats *ls_stats;
 * XXX: a hack! fld has to find md_site via site, remove when possible
	struct seq_server_site *ld_seq_site;
 * Pointer to the lu_target for this site.
	struct lu_target *ls_tgt;
 * Number of objects in lsb_lru_lists - used for shrinking
	struct percpu_counter ls_lru_len_counter;
};

struct workqueue_struct *
lu_site_wq_from_fid(struct lu_site *site, struct lu_fid *fid);

static inline struct seq_server_site *lu_site2seq(const struct lu_site *s)
{
	return s->ld_seq_site;
}
 * Constructors/destructors.

int lu_site_init(struct lu_site *s, struct lu_device *d);
void lu_site_fini(struct lu_site *s);
int lu_site_init_finish(struct lu_site *s);
void lu_stack_fini(const struct lu_env *env, struct lu_device *top);
void lu_device_get(struct lu_device *d);
void lu_device_put(struct lu_device *d);
int lu_device_init(struct lu_device *d, struct lu_device_type *t);
void lu_device_fini(struct lu_device *d);
int lu_object_header_init(struct lu_object_header *h);
void lu_object_header_fini(struct lu_object_header *h);
void lu_object_header_free(struct lu_object_header *h);
int lu_object_init(struct lu_object *o,
		   struct lu_object_header *h, struct lu_device *d);
void lu_object_fini(struct lu_object *o);
void lu_object_add_top(struct lu_object_header *h, struct lu_object *o);
void lu_object_add(struct lu_object *before, struct lu_object *o);
struct lu_object *lu_object_get_first(struct lu_object_header *h,
				      struct lu_device *dev);
void lu_dev_add_linkage(struct lu_site *s, struct lu_device *d);
void lu_dev_del_linkage(struct lu_site *s, struct lu_device *d);

 * Helpers to initialize and finalize device types.

int lu_device_type_init(struct lu_device_type *ldt);
void lu_device_type_fini(struct lu_device_type *ldt);

 * Caching and reference counting.

 * Acquire an additional reference to the given object. This function is
 * used only to attain an additional reference; to acquire the initial
 * reference use lu_object_find().
static inline void lu_object_get(struct lu_object *o)
{
	LASSERT(atomic_read(&o->lo_header->loh_ref) > 0);
	atomic_inc(&o->lo_header->loh_ref);
}
 * Return true if the object will not be cached after the last reference to
 * it is released.
static inline int lu_object_is_dying(const struct lu_object_header *h)
{
	return test_bit(LU_OBJECT_HEARD_BANSHEE, &h->loh_flags);
}

 * Return true if the object is initialized.
static inline int lu_object_is_inited(const struct lu_object_header *h)
{
	return test_bit(LU_OBJECT_INITED, &h->loh_flags);
}

void lu_object_put(const struct lu_env *env, struct lu_object *o);
void lu_object_put_nocache(const struct lu_env *env, struct lu_object *o);
void lu_object_unhash(const struct lu_env *env, struct lu_object *o);
int lu_site_purge_objects(const struct lu_env *env, struct lu_site *s, int nr,
			  int canblock);

static inline int lu_site_purge(const struct lu_env *env, struct lu_site *s,
				int nr)
{
	return lu_site_purge_objects(env, s, nr, 1);
}
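/*
 * Usage sketch (illustrative): a cache shrinker can trim a site's LRU by
 * asking lu_site_purge() to reclaim a number of unused objects; env and site
 * are assumed to come from the caller, and a negative nr (e.g. ~0) asks to
 * purge all unused objects.
 *
 *	int freed = lu_site_purge(env, site, 128);
 */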
void lu_site_print(const struct lu_env *env, struct lu_site *s, atomic_t *ref,
		   int msg_flags, lu_printer_t printer);
struct lu_object *lu_object_find(const struct lu_env *env,
				 struct lu_device *dev, const struct lu_fid *f,
				 const struct lu_object_conf *conf);
struct lu_object *lu_object_find_at(const struct lu_env *env,
				    struct lu_device *dev,
				    const struct lu_fid *f,
				    const struct lu_object_conf *conf);
struct lu_object *lu_object_find_slice(const struct lu_env *env,
				       struct lu_device *dev,
				       const struct lu_fid *f,
				       const struct lu_object_conf *conf);

 * First (topmost) sub-object of the given compound object.
static inline struct lu_object *lu_object_top(struct lu_object_header *h)
{
	LASSERT(!list_empty(&h->loh_layers));
	return container_of(h->loh_layers.next, struct lu_object, lo_linkage);
}

 * Next sub-object in the layering.
static inline struct lu_object *lu_object_next(const struct lu_object *o)
{
	return container_of(o->lo_linkage.next, struct lu_object, lo_linkage);
}

 * Pointer to the fid of this object.
static inline const struct lu_fid *lu_object_fid(const struct lu_object *o)
{
	return &o->lo_header->loh_fid;
}

 * Return the device operations vector for this object.
static inline const struct lu_device_operations *
lu_object_ops(const struct lu_object *o)
{
	return o->lo_dev->ld_ops;
}

 * Given a compound object, find its slice corresponding to the device type
struct lu_object *lu_object_locate(struct lu_object_header *h,
				   const struct lu_device_type *dtype);

 * Printer function emitting messages through libcfs_debug_msg().
int lu_cdebug_printer(const struct lu_env *env,
		      void *cookie, const char *format, ...);

 * Print object description followed by a user-supplied message.
#define LU_OBJECT_DEBUG(mask, env, object, format, ...)			  \
do {									  \
	if (cfs_cdebug_show(mask, DEBUG_SUBSYSTEM)) {			  \
		LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, mask, NULL);	  \
		lu_object_print(env, &msgdata, lu_cdebug_printer, object);\
		CDEBUG(mask, format "\n", ## __VA_ARGS__);		  \
	}								  \
} while (0)
 * Print short object description followed by a user-supplied message.
#define LU_OBJECT_HEADER(mask, env, object, format, ...)		\
do {									\
	if (cfs_cdebug_show(mask, DEBUG_SUBSYSTEM)) {			\
		LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, mask, NULL);	\
		lu_object_header_print(env, &msgdata, lu_cdebug_printer,\
				       (object)->lo_header);		\
		lu_cdebug_printer(env, &msgdata, "\n");			\
		CDEBUG(mask, format, ## __VA_ARGS__);			\
	}								\
} while (0)

void lu_object_print(const struct lu_env *env, void *cookie,
		     lu_printer_t printer, const struct lu_object *o);
void lu_object_header_print(const struct lu_env *env, void *cookie,
			    lu_printer_t printer,
			    const struct lu_object_header *hdr);

 * Check object consistency.
int lu_object_invariant(const struct lu_object *o);

 * Check whether the object exists, no matter whether on local or remote
 * storage.
 * Note: LOHA_EXISTS will be set once someone has created the object,
 * and it does not need to be committed to storage.
#define lu_object_exists(o) ((o)->lo_header->loh_attr & LOHA_EXISTS)

 * Check whether the object is on remote storage.
#define lu_object_remote(o) unlikely((o)->lo_header->loh_attr & LOHA_REMOTE)

 * Check whether the object has an agent entry on the current target.
#define lu_object_has_agent_entry(o) \
	unlikely((o)->lo_header->loh_attr & LOHA_HAS_AGENT_ENTRY)

static inline void lu_object_set_agent_entry(struct lu_object *o)
{
	o->lo_header->loh_attr |= LOHA_HAS_AGENT_ENTRY;
}

static inline void lu_object_clear_agent_entry(struct lu_object *o)
{
	o->lo_header->loh_attr &= ~LOHA_HAS_AGENT_ENTRY;
}

static inline int lu_object_assert_exists(const struct lu_object *o)
{
	return lu_object_exists(o);
}

static inline int lu_object_assert_not_exists(const struct lu_object *o)
{
	return !lu_object_exists(o);
}

 * Attr of this object.
static inline __u32 lu_object_attr(const struct lu_object *o)
{
	LASSERT(lu_object_exists(o) != 0);
	return o->lo_header->loh_attr & S_IFMT;
}
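/*
 * Usage sketch: since lu_object_attr() returns the S_IFMT bits cached in
 * loh_attr (see LOHA_FT_START/LOHA_FT_END above), the standard mode macros
 * apply directly; o is assumed to be an existing object and
 * handle_directory() is hypothetical.
 *
 *	if (S_ISDIR(lu_object_attr(o)))
 *		handle_directory(o);
 */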
static inline void lu_object_ref_add(struct lu_object *o,
				     const char *scope, const void *source)
{
	lu_ref_add(&o->lo_header->loh_reference, scope, source);
}

static inline void lu_object_ref_add_at(struct lu_object *o,
					struct lu_ref_link *link,
					const char *scope, const void *source)
{
	lu_ref_add_at(&o->lo_header->loh_reference, link, scope, source);
}

static inline void lu_object_ref_del(struct lu_object *o,
				     const char *scope, const void *source)
{
	lu_ref_del(&o->lo_header->loh_reference, scope, source);
}

static inline void lu_object_ref_del_at(struct lu_object *o,
					struct lu_ref_link *link,
					const char *scope, const void *source)
{
	lu_ref_del_at(&o->lo_header->loh_reference, link, scope, source);
}

struct lu_rdpg {
	/** input params, should be filled out by mdt */
	/** count in bytes */
	unsigned int rp_count;
	/** number of pages */
	unsigned int rp_npages;
	/** requested attr */
	/** pointers to pages */
	struct page **rp_pages;
};

enum lu_xattr_flags {
	LU_XATTR_REPLACE	= BIT(0),
	LU_XATTR_CREATE		= BIT(1),
	LU_XATTR_MERGE		= BIT(2),
	LU_XATTR_SPLIT		= BIT(3),
	LU_XATTR_PURGE		= BIT(4),
};
/** For lu_context health-checks */
enum lu_context_state {

 * lu_context. Execution context for lu_object methods. Currently associated
 * with a thread.
 *
 * All lu_object methods, except device and device type methods (called
 * during system initialization and shutdown), are executed "within" some
 * lu_context. This means that a pointer to some "current" lu_context is
 * passed as an argument to all methods.
 *
 * All service ptlrpc threads create a lu_context as part of their
 * initialization. It is possible to create a "stand-alone" context for
 * other execution environments (like system calls).
 *
 * lu_object methods mainly use lu_context through the lu_context_key
 * interface that allows each layer to associate arbitrary pieces of data
 * with each context (see pthread_key_create(3) for a similar interface).
 *
 * On a client, lu_context is bound to a thread, see cl_env_get().
 *
 * \see lu_context_key

 * lu_context is used on the client side too. Yet we don't want to
 * allocate values of server-side keys for the client contexts and
 * vice versa.
 *
 * To achieve this, a set of tags is introduced. Contexts and keys are
 * marked with tags. Key values are created only for a context whose set
 * of tags has a non-empty intersection with that of the key. Tags are
 * taken from enum lu_context_tag.
struct lu_context {
	enum lu_context_state lc_state;
 * Pointer to the home service thread. NULL for other execution
 * contexts.
	struct ptlrpc_thread *lc_thread;
 * Pointer to an array with key values. Internal implementation
 * Linkage into a list of all remembered contexts. Only
 * `non-transient' contexts, i.e., ones created for service threads
 * are placed here.
	struct list_head lc_remember;
 * Version counter used to skip calls to lu_context_refill() when no
 * keys were registered.
	unsigned lc_version;
};

 * lu_context_key interface. Similar to pthread_key.

enum lu_context_tag {
 * Thread on md server
	LCT_MD_THREAD		= BIT(0),
 * Thread on dt server
	LCT_DT_THREAD		= BIT(1),
	LCT_CL_THREAD		= BIT(3),
 * A per-request session on a server, and a per-system-call session on
 * a client.
	LCT_SESSION		= BIT(4),
 * Per-request data on an OSP device
	LCT_OSP_THREAD		= BIT(5),
	LCT_MG_THREAD		= BIT(6),
 * Context for local operations
 * Session for server thread
	LCT_SERVER_SESSION	= BIT(8),
 * Set when at least one of the keys having values in this context has a
 * non-NULL lu_context_key::lct_exit() method. This is used to
 * optimize lu_context_exit() calls.
	LCT_HAS_EXIT		= BIT(28),
 * Don't add references for modules creating key values in this context.
 * This is only for contexts used internally by the lu_object framework.
	LCT_NOREF		= BIT(29),
 * Key is being prepared for retiring; don't create new values for it.
	LCT_QUIESCENT		= BIT(30),
 * Context should be remembered.
	LCT_REMEMBER		= BIT(31),
 * Contexts usable in the cache shrinker thread.
	LCT_SHRINKER	= LCT_MD_THREAD|LCT_DT_THREAD|LCT_CL_THREAD|LCT_NOREF,
};
 * Key. Represents a per-context value slot.
 *
 * Keys are usually registered when the module owning the key is
 * initialized, and de-registered when the module is unloaded. Once a key is
 * registered, all new contexts with matching tags will get the key value.
 * "Old" contexts, already initialized at the time of key registration, can
 * be forced to get the key value by calling lu_context_refill().
 *
 * Every key value is counted in lu_context_key::lct_used and acquires a
 * reference on the owning module. This means that all key values have to be
 * destroyed before the module can be unloaded. This is usually achieved by
 * stopping threads started by the module, that created contexts in their
 * entry functions. The situation is complicated by threads shared by
 * multiple modules, like the ptlrpcd daemon on a client. To work around
 * this problem, contexts created in such threads are `remembered' (see
 * LCT_REMEMBER)---i.e., added into a global list. When a module is
 * preparing for unloading it does the following:
 *
 * - marks its keys as `quiescent' (lu_context_tag::LCT_QUIESCENT),
 * preventing new key values from being allocated in new contexts,
 *
 * - scans the list of remembered contexts, destroying values of module
 * keys, thus releasing references to the module.
 *
 * This is done by lu_context_key_quiesce(). If the module is re-activated
 * before the key has been de-registered, a lu_context_key_revive() call
 * clears the `quiescent' marker.
 *
 * lu_context code doesn't provide any internal synchronization for these
 * activities---it's assumed that startup (including threads start-up) and
 * shutdown are serialized by some external means.
struct lu_context_key {
 * Set of tags for which values of this key are to be instantiated.
 * Value constructor. This is called when a new value is created for a
 * context. Returns a pointer to the new value or an error pointer.
	void *(*lct_init)(const struct lu_context *ctx,
			  struct lu_context_key *key);
 * Value destructor. Called when a context with a previously allocated
 * value of this slot is destroyed. \a data is the value that was returned
 * by a matching call to lu_context_key::lct_init().
	void (*lct_fini)(const struct lu_context *ctx,
			 struct lu_context_key *key, void *data);
 * Optional method called on lu_context_exit() for all allocated
 * keys. Can be used by debugging code checking that locks are
	void (*lct_exit)(const struct lu_context *ctx,
			 struct lu_context_key *key, void *data);
 * Internal implementation detail: index within lu_context::lc_value[]
 * reserved for this key.
 * Internal implementation detail: number of values created for this
 * key.
 * Internal implementation detail: module for this key.
	struct module *lct_owner;
 * References to this key. For debugging.
	struct lu_ref lct_reference;
};
#define LU_KEY_INIT(mod, type)						\
	static void *mod##_key_init(const struct lu_context *ctx,	\
				    struct lu_context_key *key)		\
	{								\
		type *value;						\
									\
		BUILD_BUG_ON(PAGE_SIZE < sizeof(*value));		\
									\
		OBD_ALLOC_PTR(value);					\
		if (value == NULL)					\
			value = ERR_PTR(-ENOMEM);			\
									\
		return value;						\
	}								\
	struct __##mod##__dummy_init { ; } /* semicolon catcher */

#define LU_KEY_FINI(mod, type)						\
	static void mod##_key_fini(const struct lu_context *ctx,	\
				   struct lu_context_key *key, void *data) \
	{								\
		type *info = data;					\
									\
		OBD_FREE_PTR(info);					\
	}								\
	struct __##mod##__dummy_fini { ; } /* semicolon catcher */

#define LU_KEY_INIT_FINI(mod, type)	\
	LU_KEY_INIT(mod, type);		\
	LU_KEY_FINI(mod, type)

#define LU_CONTEXT_KEY_DEFINE(mod, tags)		\
	struct lu_context_key mod##_thread_key = {	\
		.lct_tags = tags,			\
		.lct_init = mod##_key_init,		\
		.lct_fini = mod##_key_fini		\
	}

#define LU_CONTEXT_KEY_INIT(key)		\
do {						\
	(key)->lct_owner = THIS_MODULE;		\
} while (0)

int lu_context_key_register(struct lu_context_key *key);
void lu_context_key_degister(struct lu_context_key *key);
void *lu_context_key_get(const struct lu_context *ctx,
			 const struct lu_context_key *key);
void lu_context_key_quiesce(struct lu_device_type *t,
			    struct lu_context_key *key);
void lu_context_key_revive(struct lu_context_key *key);
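/*
 * Putting the pieces together (an illustrative sketch): define per-context
 * data for a hypothetical module "foo", register the key at module init, and
 * fetch the per-thread value with lu_context_key_get(). All foo_* names are
 * invented for this example.
 *
 *	struct foo_thread_info {
 *		struct lu_fid fti_fid;
 *	};
 *
 *	LU_KEY_INIT_FINI(foo, struct foo_thread_info);
 *	LU_CONTEXT_KEY_DEFINE(foo, LCT_MD_THREAD);
 *
 *	(at module load)
 *	LU_CONTEXT_KEY_INIT(&foo_thread_key);
 *	rc = lu_context_key_register(&foo_thread_key);
 *
 *	(in a method, running inside a context with a matching tag)
 *	struct foo_thread_info *info =
 *		lu_context_key_get(&env->le_ctx, &foo_thread_key);
 */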
 * LU_KEY_INIT_GENERIC() has to be a macro to correctly determine an
 * owning module.

#define LU_KEY_INIT_GENERIC(mod)					\
	static void mod##_key_init_generic(struct lu_context_key *k, ...) \
	{								\
		struct lu_context_key *key = k;				\
		va_list args;						\
									\
		va_start(args, k);					\
		do {							\
			LU_CONTEXT_KEY_INIT(key);			\
			key = va_arg(args, struct lu_context_key *);	\
		} while (key != NULL);					\
		va_end(args);						\
	}

#define LU_TYPE_INIT(mod, ...)						\
	LU_KEY_INIT_GENERIC(mod)					\
	static int mod##_type_init(struct lu_device_type *t)		\
	{								\
		mod##_key_init_generic(__VA_ARGS__, NULL);		\
		return lu_context_key_register_many(__VA_ARGS__, NULL);	\
	}								\
	struct __##mod##_dummy_type_init { ; }

#define LU_TYPE_FINI(mod, ...)						\
	static void mod##_type_fini(struct lu_device_type *t)		\
	{								\
		lu_context_key_degister_many(__VA_ARGS__, NULL);	\
	}								\
	struct __##mod##_dummy_type_fini { ; }

#define LU_TYPE_START(mod, ...)						\
	static void mod##_type_start(struct lu_device_type *t)		\
	{								\
		lu_context_key_revive_many(__VA_ARGS__, NULL);		\
	}								\
	struct __##mod##_dummy_type_start { ; }

#define LU_TYPE_STOP(mod, ...)						\
	static void mod##_type_stop(struct lu_device_type *t)		\
	{								\
		lu_context_key_quiesce_many(t, __VA_ARGS__, NULL);	\
	}								\
	struct __##mod##_dummy_type_stop { }

#define LU_TYPE_INIT_FINI(mod, ...)		\
	LU_TYPE_INIT(mod, __VA_ARGS__);		\
	LU_TYPE_FINI(mod, __VA_ARGS__);		\
	LU_TYPE_START(mod, __VA_ARGS__);	\
	LU_TYPE_STOP(mod, __VA_ARGS__)
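/*
 * Usage sketch: LU_TYPE_INIT_FINI() generates mod##_type_{init,fini,start,stop}
 * which slot directly into struct lu_device_type_operations declared above.
 * The foo names are hypothetical.
 *
 *	LU_TYPE_INIT_FINI(foo, &foo_thread_key);
 *
 *	static const struct lu_device_type_operations foo_device_type_ops = {
 *		.ldto_init  = foo_type_init,
 *		.ldto_fini  = foo_type_fini,
 *		.ldto_start = foo_type_start,
 *		.ldto_stop  = foo_type_stop,
 *		...
 *	};
 */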
int lu_context_init(struct lu_context *ctx, __u32 tags);
void lu_context_fini(struct lu_context *ctx);
void lu_context_enter(struct lu_context *ctx);
void lu_context_exit(struct lu_context *ctx);
int lu_context_refill(struct lu_context *ctx);

 * Helper functions to operate on multiple keys. These are used by the
 * default device type operations, defined by LU_TYPE_INIT_FINI().

int lu_context_key_register_many(struct lu_context_key *k, ...);
void lu_context_key_degister_many(struct lu_context_key *k, ...);
void lu_context_key_revive_many(struct lu_context_key *k, ...);
void lu_context_key_quiesce_many(struct lu_device_type *t,
				 struct lu_context_key *k, ...);

 * update/clear ctx/ses tags.
void lu_context_tags_update(__u32 tags);
void lu_context_tags_clear(__u32 tags);
void lu_session_tags_update(__u32 tags);
void lu_session_tags_clear(__u32 tags);

struct lu_env {
 * "Local" context, used to store data instead of on the stack.
	struct lu_context le_ctx;
 * "Session" context for per-request data.
	struct lu_context *le_ses;
};

int lu_env_init(struct lu_env *env, __u32 tags);
void lu_env_fini(struct lu_env *env);
int lu_env_refill(struct lu_env *env);
int lu_env_refill_by_tags(struct lu_env *env, __u32 ctags, __u32 stags);

static inline void *lu_env_info(const struct lu_env *env,
				const struct lu_context_key *key)
{
	void *info;

	info = lu_context_key_get(&env->le_ctx, key);
	if (!info) {
		if (!lu_env_refill((struct lu_env *)env))
			info = lu_context_key_get(&env->le_ctx, key);
	}
	return info;
}

struct lu_env *lu_env_find(void);
int lu_env_add(struct lu_env *env);
int lu_env_add_task(struct lu_env *env, struct task_struct *task);
void lu_env_remove(struct lu_env *env);
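/*
 * Usage sketch (illustrative): a stand-alone environment for a helper
 * thread, built from the declarations above. LCT_LOCAL is assumed to be the
 * tag for local operations mentioned in enum lu_context_tag.
 *
 *	struct lu_env env;
 *	int rc;
 *
 *	rc = lu_env_init(&env, LCT_LOCAL);
 *	if (rc)
 *		return rc;
 *	... use env with lu_object_find(), lu_object_put(), etc. ...
 *	lu_env_fini(&env);
 */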
/** @} lu_context */

 * Output site statistical counters into a buffer. Suitable for
 * ll_rd_*()-style functions.
int lu_site_stats_seq_print(const struct lu_site *s, struct seq_file *m);

 * Common name structure to be passed around for various name-related
 * methods.
struct lu_name {
	const char *ln_name;
	int ln_namelen;
};

static inline bool name_is_dot_or_dotdot(const char *name, int namelen)
{
	return name[0] == '.' &&
	       (namelen == 1 || (namelen == 2 && name[1] == '.'));
}

static inline bool lu_name_is_dot_or_dotdot(const struct lu_name *lname)
{
	return name_is_dot_or_dotdot(lname->ln_name, lname->ln_namelen);
}

static inline bool lu_name_is_temp_file(const char *name, int namelen,
					bool dot_prefix, int suffixlen)
{
	int lower = 0, upper = 0, digit = 0;
	int len = suffixlen;

	if (dot_prefix && name[0] != '.')
		return false;

	if (namelen < dot_prefix + suffixlen + 2 ||
	    name[namelen - suffixlen - 1] != '.')
		return false;

	while (len) {
		lower += islower(name[namelen - len]);
		upper += isupper(name[namelen - len]);
		digit += isdigit(name[namelen - len]);
		len--;
	}
	/* mktemp() filename suffixes will have a mix of upper- and lower-case
	 * letters and/or numbers, not all numbers, or all upper- or
	 * lower-case. About 0.07% of randomly-generated names will slip
	 * through, but this avoids 99.93% of cross-MDT renames for those
	 * files.
	 */
	if ((digit >= suffixlen - 1 && !isdigit(name[namelen - suffixlen])) ||
	    upper == suffixlen || lower == suffixlen)
		return false;

	return true;
}
static inline bool lu_name_is_backup_file(const char *name, int namelen,
					  int *suffixlen)
{
	if (namelen > 1 &&
	    name[namelen - 2] != '.' && name[namelen - 1] == '~')
		return true;

	if (namelen > 4 && name[namelen - 4] == '.' &&
	    (!strncasecmp(name + namelen - 3, "bak", 3) ||
	     !strncasecmp(name + namelen - 3, "sav", 3)))
		return true;

	if (namelen > 5 && name[namelen - 5] == '.' &&
	    !strncasecmp(name + namelen - 4, "orig", 4))
		return true;

	return false;
}

static inline bool lu_name_is_valid_len(const char *name, size_t name_len)
{
	return name != NULL &&
	       name_len > 0 &&
	       name_len < INT_MAX &&
	       strlen(name) == name_len &&
	       memchr(name, '/', name_len) == NULL;
}

 * Validate names (path components)
 *
 * To be valid, \a name must be non-empty, '\0'-terminated, of length \a
 * name_len, and not contain '/'. The maximum length of a name (before,
 * say, -ENAMETOOLONG is returned) is really controlled by llite and the
 * server. We only check for something insane coming from bad integer
 * handling here.
static inline bool lu_name_is_valid_2(const char *name, size_t name_len)
{
	return lu_name_is_valid_len(name, name_len) && name[name_len] == '\0';
}

static inline bool lu_name_is_valid(const struct lu_name *ln)
{
	return lu_name_is_valid_2(ln->ln_name, ln->ln_namelen);
}

#define DNAME "%.*s"
#define PNAME(ln)						\
	(lu_name_is_valid(ln) ? (ln)->ln_namelen : 0),		\
	(lu_name_is_valid(ln) ? (ln)->ln_name : "")
 * Common buffer structure to be passed around for various xattr_{s,g}et()
struct lu_buf {
	void   *lb_buf;
	size_t	lb_len;
};

#define DLUBUF "(%p %zu)"
#define PLUBUF(buf) (buf)->lb_buf, (buf)->lb_len

/* read buffer params, should be filled out by out */
struct lu_rdbuf {
	/** number of buffers */
	unsigned int rb_nbufs;
	/** pointers to buffers */
	struct lu_buf rb_bufs[];
};

 * One-time initializers, called at obdclass module initialization, not

 * Initialization of global lu_* data.
int lu_global_init(void);

 * Dual to lu_global_init().
void lu_global_fini(void);

struct lu_kmem_descr {
	struct kmem_cache **ckd_cache;
	const char *ckd_name;
	const size_t ckd_size;
};

int lu_kmem_init(struct lu_kmem_descr *caches);
void lu_kmem_fini(struct lu_kmem_descr *caches);
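/*
 * Usage sketch: lu_kmem_init()/lu_kmem_fini() operate on an array of
 * lu_kmem_descr terminated by a NULL ckd_cache entry; the foo names are
 * illustrative.
 *
 *	static struct kmem_cache *foo_object_kmem;
 *
 *	static struct lu_kmem_descr foo_caches[] = {
 *		{
 *			.ckd_cache = &foo_object_kmem,
 *			.ckd_name  = "foo_object_kmem",
 *			.ckd_size  = sizeof(struct foo_object)
 *		},
 *		{
 *			.ckd_cache = NULL
 *		}
 *	};
 *
 *	rc = lu_kmem_init(foo_caches);	(at module load)
 *	lu_kmem_fini(foo_caches);	(at module unload)
 */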
void lu_object_assign_fid(const struct lu_env *env, struct lu_object *o,
			  const struct lu_fid *fid);
struct lu_object *lu_object_anon(const struct lu_env *env,
				 struct lu_device *dev,
				 const struct lu_object_conf *conf);

extern struct lu_buf LU_BUF_NULL;

void lu_buf_free(struct lu_buf *buf);
void lu_buf_alloc(struct lu_buf *buf, size_t size);
void lu_buf_realloc(struct lu_buf *buf, size_t size);
int lu_buf_check_and_grow(struct lu_buf *buf, size_t len);
struct lu_buf *lu_buf_check_and_alloc(struct lu_buf *buf, size_t len);
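/*
 * Usage sketch (illustrative): grow a lu_buf on demand and release it when
 * done, using the helpers above.
 *
 *	struct lu_buf buf = LU_BUF_NULL;
 *
 *	lu_buf_alloc(&buf, 4096);
 *	if (buf.lb_buf == NULL)
 *		return -ENOMEM;
 *	...
 *	rc = lu_buf_check_and_grow(&buf, 65536);  (re-allocates if too small)
 *	...
 *	lu_buf_free(&buf);
 */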
extern __u32 lu_context_tags_default;
extern __u32 lu_session_tags_default;

static inline bool lu_device_is_cl(const struct lu_device *d)
{
	return d->ld_type->ldt_tags & LU_DEVICE_CL;
}

static inline bool lu_object_is_cl(const struct lu_object *o)
{
	return lu_device_is_cl(o->lo_dev);
}

/* bit flags used in rr / qos allocation */
	LQ_DIRTY = 0,		/* recalc qos data */
	LQ_SAME_SPACE,		/* the OSTs all have approx.
				 * the same space avail */
	LQ_RESET,		/* zero current penalties */

#ifdef HAVE_SERVER_SUPPORT
/* round-robin QoS data for LOD/LMV */
struct lu_qos_rr {
	spinlock_t lqr_alloc;		/* protect allocation index */
	__u32 lqr_start_idx;		/* start index of new inode */
	__u32 lqr_offset_idx;		/* aliasing for start_idx */
	int lqr_start_count;		/* reseed counter */
	struct lu_tgt_pool lqr_pool;	/* round-robin optimized list */
	unsigned long lqr_flags;
};

static inline void lu_qos_rr_init(struct lu_qos_rr *lqr)
{
	spin_lock_init(&lqr->lqr_alloc);
	set_bit(LQ_DIRTY, &lqr->lqr_flags);
}

#endif /* HAVE_SERVER_SUPPORT */
/* QoS data per MDS/OSS */
struct lu_svr_qos {
	struct obd_uuid lsq_uuid;	/* ptlrpc's c_remote_uuid */
	struct list_head lsq_svr_list;	/* link to lq_svr_list */
	__u64 lsq_bavail;		/* total bytes avail on svr */
	__u64 lsq_iavail;		/* total inodes avail on svr */
	__u64 lsq_penalty;		/* current penalty */
	__u64 lsq_penalty_per_obj;	/* penalty decrease per obj */
	time64_t lsq_used;		/* last used time, seconds */
	__u32 lsq_tgt_count;		/* number of tgts on this svr */
	__u32 lsq_id;			/* unique svr id */
};

/* QoS data per MDT/OST */
struct lu_tgt_qos {
	struct lu_svr_qos *ltq_svr;	/* svr info */
	__u64 ltq_penalty;		/* current penalty */
	__u64 ltq_penalty_per_obj;	/* penalty decrease per obj */
	__u64 ltq_weight;		/* net weighting */
	time64_t ltq_used;		/* last used time, seconds */
	bool ltq_usable:1;		/* usable for striping */
};
/* target descriptor */
struct lu_tgt_desc {
	struct dt_device *ltd_tgt;
	struct obd_device *ltd_obd;
	struct obd_export *ltd_exp;
	struct obd_uuid ltd_uuid;
	__u32 ltd_index;
	struct list_head ltd_kill;
	struct task_struct *ltd_recovery_task;
	struct mutex ltd_fid_mutex;
	struct lu_tgt_qos ltd_qos;	/* qos info per target */
	struct obd_statfs ltd_statfs;
	time64_t ltd_statfs_age;
	unsigned long ltd_active:1,	/* is this target up for requests */
		      ltd_activate:1,	/* should target be activated */
		      ltd_reap:1,	/* should this target be deleted */
		      ltd_got_update_log:1, /* already got update log */
		      ltd_connecting:1;	/* target is connecting */
};

/* number of pointers at 2nd level */
#define TGT_PTRS_PER_BLOCK	(PAGE_SIZE / sizeof(void *))
/* number of pointers at 1st level - only need as many as max OST/MDT count */
#define TGT_PTRS		((LOV_ALL_STRIPES + 1) / TGT_PTRS_PER_BLOCK)

struct lu_tgt_desc_idx {
	struct lu_tgt_desc *ldi_tgt[TGT_PTRS_PER_BLOCK];
};

/* QoS data for LOD/LMV */
struct lu_qos {
	struct list_head lq_svr_list;	/* lu_svr_qos list */
	struct rw_semaphore lq_rw_sem;
	__u32 lq_active_svr_count;
	unsigned int lq_prio_free;	/* priority for free space */
	unsigned int lq_threshold_rr;	/* priority for rr */
#ifdef HAVE_SERVER_SUPPORT
	struct lu_qos_rr lq_rr;		/* round-robin qos data */
	unsigned long lq_flags;
#else
	unsigned long lq_dirty:1,	/* recalc qos data */
		      lq_same_space:1,	/* the servers all have approx.
					 * the same space avail */
		      lq_reset:1;	/* zero current penalties */
#endif
};

struct lu_tgt_descs {
	struct lov_desc ltd_lov_desc;
	struct lmv_desc ltd_lmv_desc;
	/* list of known TGTs */
	struct lu_tgt_desc_idx *ltd_tgt_idx[TGT_PTRS];
	/* size of the lu_tgts array, guaranteed to be a power of 2 */
	__u32 ltd_tgts_size;
	/* bitmap of TGTs available */
	unsigned long *ltd_tgt_bitmap;
	/* TGTs scheduled to be deleted */
	__u32 ltd_death_row;
	/* table refcount used for delayed deletion */
	/* mutex to serialize concurrent updates to the tgt table */
	struct mutex ltd_mutex;
	/* read/write semaphore used for array relocation */
	struct rw_semaphore ltd_rw_sem;
	struct lu_qos ltd_qos;
	/* all tgts in a packed array */
	struct lu_tgt_pool ltd_tgt_pool;
	/* true if tgt is MDT */
	bool ltd_is_mdt;
};

#define LTD_TGT(ltd, index)						\
	(ltd)->ltd_tgt_idx[(index) / TGT_PTRS_PER_BLOCK]->		\
		ldi_tgt[(index) % TGT_PTRS_PER_BLOCK]
u64 lu_prandom_u64_max(u64 ep_ro);
int lu_qos_add_tgt(struct lu_qos *qos, struct lu_tgt_desc *ltd);
void lu_tgt_qos_weight_calc(struct lu_tgt_desc *tgt);

int lu_tgt_descs_init(struct lu_tgt_descs *ltd, bool is_mdt);
void lu_tgt_descs_fini(struct lu_tgt_descs *ltd);
int ltd_add_tgt(struct lu_tgt_descs *ltd, struct lu_tgt_desc *tgt);
void ltd_del_tgt(struct lu_tgt_descs *ltd, struct lu_tgt_desc *tgt);
bool ltd_qos_is_usable(struct lu_tgt_descs *ltd);
int ltd_qos_penalties_calc(struct lu_tgt_descs *ltd);
int ltd_qos_update(struct lu_tgt_descs *ltd, struct lu_tgt_desc *tgt,

static inline struct lu_tgt_desc *ltd_first_tgt(struct lu_tgt_descs *ltd)
{
	int index;

	index = find_first_bit(ltd->ltd_tgt_bitmap,
			       ltd->ltd_tgts_size);
	return (index < ltd->ltd_tgts_size) ? LTD_TGT(ltd, index) : NULL;
}

static inline struct lu_tgt_desc *ltd_next_tgt(struct lu_tgt_descs *ltd,
					       struct lu_tgt_desc *tgt)
{
	int index;

	if (!tgt)
		return NULL;

	index = tgt->ltd_index;
	LASSERT(index < ltd->ltd_tgts_size);
	index = find_next_bit(ltd->ltd_tgt_bitmap,
			      ltd->ltd_tgts_size, index + 1);
	return (index < ltd->ltd_tgts_size) ? LTD_TGT(ltd, index) : NULL;
}

#define ltd_foreach_tgt(ltd, tgt) \
	for (tgt = ltd_first_tgt(ltd); tgt; tgt = ltd_next_tgt(ltd, tgt))

#define ltd_foreach_tgt_safe(ltd, tgt, tmp)				  \
	for (tgt = ltd_first_tgt(ltd), tmp = ltd_next_tgt(ltd, tgt); tgt; \
	     tgt = tmp, tmp = ltd_next_tgt(ltd, tgt))
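/*
 * Usage sketch: iterate over all present targets; ltd_foreach_tgt_safe()
 * additionally tolerates deletion of the current target while iterating.
 *
 *	struct lu_tgt_desc *tgt;
 *
 *	ltd_foreach_tgt(ltd, tgt)
 *		lu_tgt_qos_weight_calc(tgt);
 */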
#endif /* __LUSTRE_LU_OBJECT_H */