/*
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2017, Intel Corporation.
 *
 * This file is part of Lustre, http://www.lustre.org/
 */
#ifndef __LUSTRE_LU_OBJECT_H
#define __LUSTRE_LU_OBJECT_H

#include <libcfs/libcfs.h>
#include <uapi/linux/lustre/lustre_idl.h>

#include <linux/percpu_counter.h>
#include <linux/rhashtable.h>
#include <linux/ctype.h>

struct proc_dir_entry;
/**
 * lu_* data-types represent server-side entities shared by data and meta-data
 * stacks.
 *
 * Design goals:
 *
 * -# support for layering.
 *
 *     A server-side object is split into layers, one per device in the
 *     corresponding device stack. An individual layer is represented by
 *     struct lu_object; a compound layered object by struct
 *     lu_object_header. Most interface functions take lu_object as an
 *     argument and operate on the whole compound object. This decision was
 *     made for the following reasons:
 *
 *        - it's envisaged that lu_object will be used much more often than
 *        lu_object_header;
 *
 *        - we want lower (non-top) layers to be able to initiate operations
 *        on the whole object.
 *
 *     Generic code supports layering more complex than simple stacking, e.g.,
 *     it is possible that at some layer an object "spawns" multiple
 *     sub-objects on the layer below.
 *
 * -# fid-based identification.
 *
 *     A compound object is uniquely identified by its fid. Objects are
 *     indexed by their fids (a hash table is used for the index).
 *
 * -# caching and life-cycle management.
 *
 *     An object's life-time is controlled by reference counting. When the
 *     reference count drops to 0, the object is returned to the cache.
 *     Cached objects still retain their identity (i.e., fid), and can be
 *     recovered from the cache.
 *
 *     Objects are kept on a global LRU list, and the lu_site_purge()
 *     function can be used to reclaim a given number of unused objects from
 *     the tail of the LRU.
 *
 * -# avoiding recursion.
 *
 *     Generic code tries to replace recursion through layers with iteration
 *     where possible. In addition, to reduce stack consumption, data are,
 *     when practically possible, allocated through the lu_context_key
 *     interface rather than on the stack.
 */
struct lu_object_header;
/**
 * Operations common for data and meta-data devices.
 */
struct lu_device_operations {
        /**
         * Allocate object for the given device (without lower-layer
         * parts). This is called by lu_object_operations::loo_object_init()
         * from the parent layer, and should set up at least the
         * lu_object::lo_dev and lu_object::lo_ops fields of the resulting
         * lu_object.
         *
         * Object creation protocol.
         *
         * Due to the design goal of avoiding recursion, object creation (see
         * lu_object_alloc()) is somewhat involved:
         *
         *  - first, the lu_device_operations::ldo_object_alloc() method of
         *  the top-level device in the stack is called. It should allocate
         *  the top-level object (including the lu_object_header), but
         *  without any lower-layer sub-object(s).
         *
         *  - then lu_object_alloc() sets the fid in the header of the newly
         *  created object;
         *
         *  - then lu_object_operations::loo_object_init() is called. It has
         *  to allocate lower-layer object(s). To do this,
         *  lu_object_operations::loo_object_init() calls ldo_object_alloc()
         *  of the lower-layer device(s).
         *
         *  - for all new objects allocated by
         *  lu_object_operations::loo_object_init() (and inserted into the
         *  object stack), lu_object_operations::loo_object_init() is called
         *  again repeatedly, until no new objects are created.
         *
         * \post ergo(!IS_ERR(result), result->lo_dev == d &&
         *                             result->lo_ops != NULL);
         */
        struct lu_object *(*ldo_object_alloc)(const struct lu_env *env,
                                              const struct lu_object_header *h,
                                              struct lu_device *d);
        /**
         * Process configuration specific to this device.
         */
        int (*ldo_process_config)(const struct lu_env *env,
                                  struct lu_device *, struct lustre_cfg *);
        int (*ldo_recovery_complete)(const struct lu_env *,
                                     struct lu_device *);

        /**
         * Initialize local objects for the device. This method is called
         * after the layer has been initialized (after the LCFG_SETUP stage)
         * and before it starts serving user requests.
         */
        int (*ldo_prepare)(const struct lu_env *,
                           struct lu_device *parent,
                           struct lu_device *dev);

        /**
         * Allocate new FID for file with @name under @parent
         *
         * \param[in] env	execution environment for this thread
         * \param[in] dev	dt device
         * \param[out] fid	new FID allocated
         * \param[in] parent	parent object
         * \param[in] name	lu_name
         *
         * \retval 0		FID allocated successfully.
         * \retval 1		FID allocated successfully and a new sequence
         *			requested from the seq meta server.
         * \retval negative	errno if FID allocation failed.
         */
        int (*ldo_fid_alloc)(const struct lu_env *env,
                             struct lu_device *dev,
                             struct lu_fid *fid,
                             struct lu_object *parent,
                             const struct lu_name *name);
};
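/*
 * Example: a minimal ldo_object_alloc() for a hypothetical "foo" layer.
 * This is only a sketch; struct foo_object and foo_obj_ops are illustrative
 * names, not part of this header:
 *
 *	static struct lu_object *foo_object_alloc(const struct lu_env *env,
 *						  const struct lu_object_header *h,
 *						  struct lu_device *d)
 *	{
 *		struct foo_object *fo;
 *
 *		OBD_ALLOC_PTR(fo);
 *		if (fo == NULL)
 *			return ERR_PTR(-ENOMEM);
 *
 *		// non-top slices pass a NULL header; see lu_object_init()
 *		lu_object_init(&fo->fo_obj, NULL, d);
 *		fo->fo_obj.lo_ops = &foo_obj_ops;
 *		return &fo->fo_obj;
 *	}
 */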
/**
 * For lu_object_conf flags
 */
typedef enum {
        /* This is a new object to be allocated, or the file
         * corresponding to the object does not exist. */
        LOC_F_NEW	= 0x00000001,
} loc_flags_t;

/**
 * Object configuration, describing particulars of the object being created.
 * On the server this is not used, as server objects are fully identified by
 * fid. On the client the configuration contains struct lustre_md.
 */
struct lu_object_conf {
        /**
         * Some hints for object find and alloc.
         */
        loc_flags_t	loc_flags;
};
/**
 * Type of "printer" function used by lu_object_operations::loo_object_print()
 * method.
 *
 * A printer function is needed to provide some flexibility in (semi-)debugging
 * output; possible implementations: printk, CDEBUG, sysfs/seq_file.
 */
typedef int (*lu_printer_t)(const struct lu_env *env,
                            void *cookie, const char *format, ...)
        __attribute__ ((format (printf, 3, 4)));
/**
 * Operations specific for particular lu_object.
 */
struct lu_object_operations {
        /**
         * Allocate lower-layer parts of the object by calling
         * lu_device_operations::ldo_object_alloc() of the corresponding
         * underlying device.
         *
         * This method is called once for each object inserted into the
         * object stack. It is the responsibility of this method to insert
         * the lower-layer object(s) it creates into the appropriate places
         * of the object stack.
         */
        int (*loo_object_init)(const struct lu_env *env,
                               struct lu_object *o,
                               const struct lu_object_conf *conf);
        /**
         * Called (in top-to-bottom order) during object allocation after all
         * layers were allocated and initialized. Can be used to perform
         * initialization depending on lower layers.
         */
        int (*loo_object_start)(const struct lu_env *env,
                                struct lu_object *o);
        /**
         * Called before lu_object_operations::loo_object_free() to signal
         * that the object is being destroyed. Dual to
         * lu_object_operations::loo_object_init().
         */
        void (*loo_object_delete)(const struct lu_env *env,
                                  struct lu_object *o);
        /**
         * Dual to lu_device_operations::ldo_object_alloc(). Called when
         * the object is removed from memory. Must use call_rcu() or
         * kfree_rcu() if the object contains an lu_object_header.
         */
        void (*loo_object_free)(const struct lu_env *env,
                                struct lu_object *o);
        /**
         * Called when the last active reference to the object is released
         * (and the object returns to the cache). This method is optional.
         */
        void (*loo_object_release)(const struct lu_env *env,
                                   struct lu_object *o);
        /**
         * Optional debugging helper. Print given object.
         */
        int (*loo_object_print)(const struct lu_env *env, void *cookie,
                                lu_printer_t p, const struct lu_object *o);
        /**
         * Optional debugging method. Returns true iff the object is
         * internally consistent.
         */
        int (*loo_object_invariant)(const struct lu_object *o);
};
struct lu_device_type;

/**
 * Device: a layer in the server side abstraction stacking.
 */
struct lu_device {
        /**
         * reference count. This is incremented, in particular, on each
         * object created at this layer.
         *
         * \todo XXX which means that atomic_t is probably too small.
         */
        atomic_t			   ld_ref;
        /**
         * Pointer to device type. Never modified once set.
         */
        struct lu_device_type		  *ld_type;
        /**
         * Operation vector for this device.
         */
        const struct lu_device_operations *ld_ops;
        /**
         * Stack this device belongs to.
         */
        struct lu_site			  *ld_site;
        struct proc_dir_entry		  *ld_proc_entry;

        /** \todo XXX: temporary back pointer into obd. */
        struct obd_device		  *ld_obd;
        /**
         * A list of references to this object, for debugging.
         */
        struct lu_ref			   ld_reference;
        /**
         * Link the device to the site.
         */
        struct list_head		   ld_linkage;
};
struct lu_device_type_operations;

/**
 * Tag bits for device type. They are used to distinguish certain groups of
 * device types.
 */
enum lu_device_tag {
        /** this is meta-data device */
        LU_DEVICE_MD = BIT(0),
        /** this is data device */
        LU_DEVICE_DT = BIT(1),
        /** data device in the client stack */
        LU_DEVICE_CL = BIT(2)
};
/**
 * Type of device.
 */
struct lu_device_type {
        /**
         * Tag bits. Taken from enum lu_device_tag. Never modified once set.
         */
        __u32					 ldt_tags;
        /**
         * Name of this class. Unique system-wide. Never modified once set.
         */
        char					*ldt_name;
        /**
         * Operations for this type.
         */
        const struct lu_device_type_operations	*ldt_ops;
        /**
         * \todo XXX: temporary: context tags used by obd_*() calls.
         */
        __u32					 ldt_ctx_tags;
        /**
         * Number of existing device type instances.
         */
        atomic_t				 ldt_device_nr;
};
/**
 * Operations on a device type.
 */
struct lu_device_type_operations {
        /**
         * Allocate new device.
         */
        struct lu_device *(*ldto_device_alloc)(const struct lu_env *env,
                                               struct lu_device_type *t,
                                               struct lustre_cfg *lcfg);
        /**
         * Free device. Dual to
         * lu_device_type_operations::ldto_device_alloc(). Returns pointer to
         * the next device in the stack.
         */
        struct lu_device *(*ldto_device_free)(const struct lu_env *,
                                              struct lu_device *);

        /**
         * Initialize the devices after allocation
         */
        int  (*ldto_device_init)(const struct lu_env *env,
                                 struct lu_device *, const char *,
                                 struct lu_device *);
        /**
         * Finalize device. Dual to
         * lu_device_type_operations::ldto_device_init(). Returns pointer to
         * the next device in the stack.
         */
        struct lu_device *(*ldto_device_fini)(const struct lu_env *env,
                                              struct lu_device *);
        /**
         * Initialize device type. This is called on module load.
         */
        int  (*ldto_init)(struct lu_device_type *t);
        /**
         * Finalize device type. Dual to
         * lu_device_type_operations::ldto_init(). Called on module unload.
         */
        void (*ldto_fini)(struct lu_device_type *t);
        /**
         * Called when the first device is created.
         */
        void (*ldto_start)(struct lu_device_type *t);
        /**
         * Called when number of devices drops to 0.
         */
        void (*ldto_stop)(struct lu_device_type *t);
};
static inline int lu_device_is_md(const struct lu_device *d)
{
        return ergo(d != NULL, d->ld_type->ldt_tags & LU_DEVICE_MD);
}
/**
 * Common object attributes.
 */
struct lu_attr {
        /** modification time in seconds since Epoch */
        s64		la_mtime;
        /** access time in seconds since Epoch */
        s64		la_atime;
        /** change time in seconds since Epoch */
        s64		la_ctime;
        /** create time in seconds since Epoch */
        s64		la_btime;
        /** 512-byte blocks allocated to object */
        __u64		la_blocks;
        /** permission bits and file type */
        __u32		la_mode;
        /** number of persistent references to this object */
        __u32		la_nlink;
        /** block bits of the object */
        __u32		la_blkbits;
        /** block size of the object */
        __u32		la_blksize;
        /** layout version to be set on OST objects */
        __u32		la_layout_version;
        __u64		la_dirent_count;
};

#define LU_DIRENT_COUNT_UNSET	~0ULL
/**
 * Layer in the layered object.
 */
struct lu_object {
        /**
         * Header for this object.
         */
        struct lu_object_header		  *lo_header;
        /**
         * Device for this layer.
         */
        struct lu_device		  *lo_dev;
        /**
         * Operations for this object.
         */
        const struct lu_object_operations *lo_ops;
        /**
         * Linkage into list of all layers.
         */
        struct list_head		   lo_linkage;
        /**
         * Link to the device, for debugging.
         */
        struct lu_ref_link		   lo_dev_ref;
};
enum lu_object_header_flags {
        /**
         * Don't keep this object in cache. Object will be destroyed as soon
         * as last reference to it is released. This flag cannot be cleared
         * once set.
         */
        LU_OBJECT_HEARD_BANSHEE	= 0,
        /**
         * Mark this object as having already been removed from the cache.
         */
        LU_OBJECT_UNHASHED	= 1,
        /**
         * Object is initialized. When an object is found in cache, it may
         * not be initialized yet; the object allocator will initialize it.
         */
        LU_OBJECT_INITED	= 2,
};
enum lu_object_header_attr {
        LOHA_EXISTS		= BIT(0),
        LOHA_REMOTE		= BIT(1),
        LOHA_HAS_AGENT_ENTRY	= BIT(2),
        /**
         * UNIX file type is stored in S_IFMT bits.
         */
        LOHA_FT_START		= 001 << 12, /**< S_IFIFO */
        LOHA_FT_END		= 017 << 12, /**< S_IFMT */
};
515 * "Compound" object, consisting of multiple layers.
517 * Compound object with given fid is unique with given lu_site.
519 * Note, that object does *not* necessary correspond to the real object in the
520 * persistent storage: object is an anchor for locking and method calling, so
521 * it is created for things like not-yet-existing child created by mkdir or
522 * create calls. lu_object_operations::loo_exists() can be used to check
523 * whether object is backed by persistent storage entity.
524 * Any object containing this structre which might be placed in an
525 * rhashtable via loh_hash MUST be freed using call_rcu() or rcu_kfree().
527 struct lu_object_header {
529 * Fid, uniquely identifying this object.
531 struct lu_fid loh_fid;
533 * Object flags from enum lu_object_header_flags. Set and checked
536 unsigned long loh_flags;
538 * Object reference count. Protected by lu_site::ls_guard.
542 * Common object attributes, cached for efficiency. From enum
543 * lu_object_header_attr.
547 * Linkage into per-site hash table.
549 struct rhash_head loh_hash;
551 * Linkage into per-site LRU list. Protected by lu_site::ls_guard.
553 struct list_head loh_lru;
555 * Linkage into list of layers. Never modified once set (except lately
556 * during object destruction). No locking is necessary.
558 struct list_head loh_layers;
560 * A list of references to this object, for debugging.
562 struct lu_ref loh_reference;
564 * Handle used for kfree_rcu() or similar.
566 struct rcu_head loh_rcu;
enum {
        LU_SS_CREATED		= 0,
        LU_SS_CACHE_HIT,
        LU_SS_CACHE_MISS,
        LU_SS_CACHE_RACE,
        LU_SS_CACHE_DEATH_RACE,
        LU_SS_LRU_PURGED,
        LU_SS_LAST_STAT
};
/**
 * lu_site is a "compartment" within which objects are unique, and LRU
 * discipline is maintained.
 *
 * lu_site exists so that multiple layered stacks can co-exist in the same
 * address space.
 *
 * lu_site has the same relation to lu_device as lu_object_header to
 * lu_object.
 */
struct lu_site {
        /**
         * objects hash table
         */
        struct rhashtable	 ls_obj_hash;
        /**
         * buckets for summary data
         */
        struct lu_site_bkt_data	*ls_bkts;
        int			 ls_bkt_cnt;
        u32			 ls_bkt_seed;
        /**
         * index of bucket on hash table while purging
         */
        unsigned int		 ls_purge_start;
        /**
         * Top-level device for this stack.
         */
        struct lu_device	*ls_top_dev;
        /**
         * Bottom-level device for this stack
         */
        struct lu_device	*ls_bottom_dev;
        /**
         * Linkage into global list of sites.
         */
        struct list_head	 ls_linkage;
        /**
         * List of lu devices for this site, protected
         * by ls_ld_lock.
         */
        struct list_head	 ls_ld_linkage;
        spinlock_t		 ls_ld_lock;
        /**
         * Lock to serialize site purge.
         */
        struct mutex		 ls_purge_mutex;
        /**
         * lu_site stats
         */
        struct lprocfs_stats	*ls_stats;
        /**
         * XXX: a hack! fld has to find md_site via site, remove when possible
         */
        struct seq_server_site	*ld_seq_site;
        /**
         * Pointer to the lu_target for this site.
         */
        struct lu_target	*ls_tgt;

        /**
         * Number of objects in lsb_lru_lists - used for shrinking
         */
        struct percpu_counter	 ls_lru_len_counter;
};
wait_queue_head_t *
lu_site_wq_from_fid(struct lu_site *site, struct lu_fid *fid);

static inline struct seq_server_site *lu_site2seq(const struct lu_site *s)
{
        return s->ld_seq_site;
}
/**
 * Constructors/destructors.
 */

int  lu_site_init(struct lu_site *s, struct lu_device *d);
void lu_site_fini(struct lu_site *s);
int  lu_site_init_finish(struct lu_site *s);
void lu_stack_fini(const struct lu_env *env, struct lu_device *top);
void lu_device_get(struct lu_device *d);
void lu_device_put(struct lu_device *d);
int  lu_device_init(struct lu_device *d, struct lu_device_type *t);
void lu_device_fini(struct lu_device *d);
int  lu_object_header_init(struct lu_object_header *h);
void lu_object_header_fini(struct lu_object_header *h);
void lu_object_header_free(struct lu_object_header *h);
int  lu_object_init(struct lu_object *o,
                    struct lu_object_header *h, struct lu_device *d);
void lu_object_fini(struct lu_object *o);
void lu_object_add_top(struct lu_object_header *h, struct lu_object *o);
void lu_object_add(struct lu_object *before, struct lu_object *o);
struct lu_object *lu_object_get_first(struct lu_object_header *h,
                                      struct lu_device *dev);
void lu_dev_add_linkage(struct lu_site *s, struct lu_device *d);
void lu_dev_del_linkage(struct lu_site *s, struct lu_device *d);

/**
 * Helpers to initialize and finalize device types.
 */
int  lu_device_type_init(struct lu_device_type *ldt);
void lu_device_type_fini(struct lu_device_type *ldt);
/**
 * Caching and reference counting.
 */

/**
 * Acquire additional reference to the given object. This function is used to
 * attain an additional reference. To acquire the initial reference use
 * lu_object_find().
 */
static inline void lu_object_get(struct lu_object *o)
{
        LASSERT(atomic_read(&o->lo_header->loh_ref) > 0);
        atomic_inc(&o->lo_header->loh_ref);
}
/**
 * Return true if object will not be cached after last reference to it is
 * released.
 */
static inline int lu_object_is_dying(const struct lu_object_header *h)
{
        return test_bit(LU_OBJECT_HEARD_BANSHEE, &h->loh_flags);
}
/**
 * Return true if object is initialized.
 */
static inline int lu_object_is_inited(const struct lu_object_header *h)
{
        return test_bit(LU_OBJECT_INITED, &h->loh_flags);
}
void lu_object_put(const struct lu_env *env, struct lu_object *o);
void lu_object_put_nocache(const struct lu_env *env, struct lu_object *o);
void lu_object_unhash(const struct lu_env *env, struct lu_object *o);
int lu_site_purge_objects(const struct lu_env *env, struct lu_site *s, int nr,
                          int canblock);

static inline int lu_site_purge(const struct lu_env *env, struct lu_site *s,
                                int nr)
{
        return lu_site_purge_objects(env, s, nr, 1);
}

void lu_site_print(const struct lu_env *env, struct lu_site *s, atomic_t *ref,
                   int msg_flags, lu_printer_t printer);
struct lu_object *lu_object_find(const struct lu_env *env,
                                 struct lu_device *dev, const struct lu_fid *f,
                                 const struct lu_object_conf *conf);
struct lu_object *lu_object_find_at(const struct lu_env *env,
                                    struct lu_device *dev,
                                    const struct lu_fid *f,
                                    const struct lu_object_conf *conf);
struct lu_object *lu_object_find_slice(const struct lu_env *env,
                                       struct lu_device *dev,
                                       const struct lu_fid *f,
                                       const struct lu_object_conf *conf);
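/*
 * Typical lookup/release cycle (a sketch; "dev" and "fid" are assumed to
 * come from the caller's context):
 *
 *	struct lu_object *o;
 *
 *	o = lu_object_find(env, dev, fid, NULL);
 *	if (IS_ERR(o))
 *		return PTR_ERR(o);
 *	// ... use the compound object ...
 *	lu_object_put(env, o);
 */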
/**
 * First (topmost) sub-object of given compound object
 */
static inline struct lu_object *lu_object_top(struct lu_object_header *h)
{
        LASSERT(!list_empty(&h->loh_layers));
        return container_of(h->loh_layers.next, struct lu_object, lo_linkage);
}
/**
 * Next sub-object in the layering
 */
static inline struct lu_object *lu_object_next(const struct lu_object *o)
{
        return container_of(o->lo_linkage.next, struct lu_object, lo_linkage);
}
/**
 * Pointer to the fid of this object.
 */
static inline const struct lu_fid *lu_object_fid(const struct lu_object *o)
{
        return &o->lo_header->loh_fid;
}
/**
 * return device operations vector for this object
 */
static inline const struct lu_device_operations *
lu_object_ops(const struct lu_object *o)
{
        return o->lo_dev->ld_ops;
}
/**
 * Given a compound object, find its slice corresponding to the device type
 * \a dtype.
 */
struct lu_object *lu_object_locate(struct lu_object_header *h,
                                   const struct lu_device_type *dtype);
/**
 * Printer function emitting messages through libcfs_debug_msg().
 */
int lu_cdebug_printer(const struct lu_env *env,
                      void *cookie, const char *format, ...);
/**
 * Print object description followed by a user-supplied message.
 */
#define LU_OBJECT_DEBUG(mask, env, object, format, ...)			  \
do {									  \
	if (cfs_cdebug_show(mask, DEBUG_SUBSYSTEM)) {			  \
		LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, mask, NULL);	  \
		lu_object_print(env, &msgdata, lu_cdebug_printer, object);\
		CDEBUG(mask, format "\n", ## __VA_ARGS__);		  \
	}								  \
} while (0)
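/*
 * E.g. (a sketch):
 *
 *	LU_OBJECT_DEBUG(D_INFO, env, o, "found in cache, refs %d",
 *			atomic_read(&o->lo_header->loh_ref));
 */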
/**
 * Print short object description followed by a user-supplied message.
 */
#define LU_OBJECT_HEADER(mask, env, object, format, ...)		\
do {									\
	if (cfs_cdebug_show(mask, DEBUG_SUBSYSTEM)) {			\
		LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, mask, NULL);	\
		lu_object_header_print(env, &msgdata, lu_cdebug_printer,\
				       (object)->lo_header);		\
		lu_cdebug_printer(env, &msgdata, "\n");			\
		CDEBUG(mask, format, ## __VA_ARGS__);			\
	}								\
} while (0)
void lu_object_print(const struct lu_env *env, void *cookie,
                     lu_printer_t printer, const struct lu_object *o);
void lu_object_header_print(const struct lu_env *env, void *cookie,
                            lu_printer_t printer,
                            const struct lu_object_header *hdr);
/**
 * Check object consistency.
 */
int lu_object_invariant(const struct lu_object *o);
/**
 * Check whether the object exists, no matter on local or remote storage.
 * Note: LOHA_EXISTS will be set once someone has created the object,
 * and it does not need to be committed to storage yet.
 */
#define lu_object_exists(o) ((o)->lo_header->loh_attr & LOHA_EXISTS)
/**
 * Check whether the object is on remote storage.
 */
#define lu_object_remote(o) unlikely((o)->lo_header->loh_attr & LOHA_REMOTE)
/**
 * Check whether the object has an agent entry on the current target.
 */
#define lu_object_has_agent_entry(o) \
	unlikely((o)->lo_header->loh_attr & LOHA_HAS_AGENT_ENTRY)

static inline void lu_object_set_agent_entry(struct lu_object *o)
{
        o->lo_header->loh_attr |= LOHA_HAS_AGENT_ENTRY;
}

static inline void lu_object_clear_agent_entry(struct lu_object *o)
{
        o->lo_header->loh_attr &= ~LOHA_HAS_AGENT_ENTRY;
}
static inline int lu_object_assert_exists(const struct lu_object *o)
{
        return lu_object_exists(o);
}

static inline int lu_object_assert_not_exists(const struct lu_object *o)
{
        return !lu_object_exists(o);
}
/**
 * Attr of this object.
 */
static inline __u32 lu_object_attr(const struct lu_object *o)
{
        LASSERT(lu_object_exists(o) != 0);

        return o->lo_header->loh_attr & S_IFMT;
}
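/*
 * Since only the S_IFMT bits are returned, callers can test the cached file
 * type with the standard mode macros, e.g. (a sketch):
 *
 *	if (S_ISDIR(lu_object_attr(o)))
 *		// ... handle directory object ...
 */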
static inline void lu_object_ref_add(struct lu_object *o,
                                     const char *scope,
                                     const void *source)
{
        lu_ref_add(&o->lo_header->loh_reference, scope, source);
}

static inline void lu_object_ref_add_at(struct lu_object *o,
                                        struct lu_ref_link *link,
                                        const char *scope,
                                        const void *source)
{
        lu_ref_add_at(&o->lo_header->loh_reference, link, scope, source);
}

static inline void lu_object_ref_del(struct lu_object *o,
                                     const char *scope, const void *source)
{
        lu_ref_del(&o->lo_header->loh_reference, scope, source);
}

static inline void lu_object_ref_del_at(struct lu_object *o,
                                        struct lu_ref_link *link,
                                        const char *scope, const void *source)
{
        lu_ref_del_at(&o->lo_header->loh_reference, link, scope, source);
}
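/*
 * A debugging reference is tracked by pairing lu_object_ref_add() with a
 * lu_object_ref_del() under the same scope/source, e.g. (a sketch; the
 * scope string is arbitrary):
 *
 *	lu_object_get(o);
 *	lu_object_ref_add(o, "my-scope", current);
 *	// ...
 *	lu_object_ref_del(o, "my-scope", current);
 *	lu_object_put(env, o);
 */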
/** input params, should be filled out by mdt */
struct lu_rdpg {
        /** first page to be filled in */
        __u64		  rp_hash;
        /** count in bytes */
        unsigned int	  rp_count;
        /** number of pages */
        unsigned int	  rp_npages;
        /** requested attr */
        __u32		  rp_attrs;
        /** pointers to pages */
        struct page	**rp_pages;
};
enum lu_xattr_flags {
        LU_XATTR_REPLACE = BIT(0),
        LU_XATTR_CREATE  = BIT(1),
        LU_XATTR_MERGE   = BIT(2),
        LU_XATTR_SPLIT   = BIT(3),
        LU_XATTR_PURGE   = BIT(4),
};
/** For lu_context health-checks */
enum lu_context_state {
        LCS_INITIALIZED = 1,
        LCS_ENTERED,
        LCS_LEAVING,
        LCS_LEFT,
        LCS_FINALIZED
};
/**
 * lu_context. Execution context for lu_object methods. Currently associated
 * with a thread.
 *
 * All lu_object methods, except device and device type methods (called
 * during system initialization and shutdown), are executed "within" some
 * lu_context. This means that a pointer to some "current" lu_context is
 * passed as an argument to all methods.
 *
 * All service ptlrpc threads create a lu_context as part of their
 * initialization. It is possible to create a "stand-alone" context for
 * other execution environments (like system calls).
 *
 * lu_object methods mainly use lu_context through the lu_context_key
 * interface that allows each layer to associate arbitrary pieces of data
 * with each context (see pthread_key_create(3) for a similar interface).
 *
 * On a client, lu_context is bound to a thread, see cl_env_get().
 *
 * \see lu_context_key
 */
struct lu_context {
        /**
         * lu_context is used on the client side too. Yet we don't want to
         * allocate values of server-side keys for the client contexts and
         * vice versa.
         *
         * To achieve this, a set of tags is introduced. Contexts and keys
         * are marked with tags. Key values are created only for contexts
         * whose set of tags has a non-empty intersection with that of the
         * key. Tags are taken from enum lu_context_tag.
         */
        __u32			lc_tags;
        enum lu_context_state	lc_state;
        /**
         * Pointer to the home service thread. NULL for other execution
         * contexts.
         */
        struct ptlrpc_thread	*lc_thread;
        /**
         * Pointer to an array with key values. Internal implementation
         * detail.
         */
        void			**lc_value;
        /**
         * Linkage into a list of all remembered contexts. Only
         * `non-transient' contexts, i.e., ones created for service threads
         * are placed here.
         */
        struct list_head	lc_remember;
        /**
         * Version counter used to skip calls to lu_context_refill() when no
         * keys were registered.
         */
        unsigned		lc_version;
};
/**
 * lu_context_key interface. Similar to pthread_key.
 */

enum lu_context_tag {
        /**
         * Thread on md server
         */
        LCT_MD_THREAD		= BIT(0),
        /**
         * Thread on dt server
         */
        LCT_DT_THREAD		= BIT(1),
        /**
         * Thread on client
         */
        LCT_CL_THREAD		= BIT(3),
        /**
         * A per-request session on a server, and a per-system-call session
         * on a client.
         */
        LCT_SESSION		= BIT(4),
        /**
         * A per-request data on OSP device
         */
        LCT_OSP_THREAD		= BIT(5),
        /**
         * MGS device thread
         */
        LCT_MG_THREAD		= BIT(6),
        /**
         * Context for local operations
         */
        LCT_LOCAL		= BIT(7),
        /**
         * session for server thread
         */
        LCT_SERVER_SESSION	= BIT(8),
        /**
         * Set when at least one of the keys having values in this context
         * has a non-NULL lu_context_key::lct_exit() method. This is used to
         * optimize lu_context_exit() calls.
         */
        LCT_HAS_EXIT		= BIT(28),
        /**
         * Don't add references for modules creating key values in this
         * context. This is only for contexts used internally by the
         * lu_object framework.
         */
        LCT_NOREF		= BIT(29),
        /**
         * Key is being prepared for retiring; don't create new values for it.
         */
        LCT_QUIESCENT		= BIT(30),
        /**
         * Context should be remembered.
         */
        LCT_REMEMBER		= BIT(31),
        /**
         * Contexts usable in cache shrinker thread.
         */
        LCT_SHRINKER	= LCT_MD_THREAD|LCT_DT_THREAD|LCT_CL_THREAD|LCT_NOREF,
};
/**
 * Key. Represents a per-context value slot.
 *
 * Keys are usually registered when the module owning the key is initialized,
 * and de-registered when the module is unloaded. Once a key is registered,
 * all new contexts with matching tags will get the key value. "Old"
 * contexts, already initialized at the time of key registration, can be
 * forced to get the key value by calling lu_context_refill().
 *
 * Every key value is counted in lu_context_key::lct_used and acquires a
 * reference on the owning module. This means that all key values have to be
 * destroyed before the module can be unloaded. This is usually achieved by
 * stopping threads started by the module, which created contexts in their
 * entry functions. The situation is complicated by threads shared by
 * multiple modules, like the ptlrpcd daemon on a client. To work around
 * this problem, contexts created in such threads are `remembered' (see
 * LCT_REMEMBER)---i.e., added into a global list. When a module is
 * preparing for unloading, it does the following:
 *
 *     - marks its keys as `quiescent' (lu_context_tag::LCT_QUIESCENT),
 *     preventing new key values from being allocated in the new contexts,
 *     and
 *
 *     - scans a list of remembered contexts, destroying values of module
 *     keys, thus releasing references to the module.
 *
 * This is done by lu_context_key_quiesce(). If a module is re-activated
 * before the key has been de-registered, a lu_context_key_revive() call
 * clears the `quiescent' marker.
 *
 * lu_context code doesn't provide any internal synchronization for these
 * activities---it's assumed that startup (including threads start-up) and
 * shutdown are serialized by some external means.
 *
 * \see lu_context
 */
struct lu_context_key {
        /**
         * Set of tags for which values of this key are to be instantiated.
         */
        __u32 lct_tags;
        /**
         * Value constructor. This is called when a new value is created for
         * a context. Returns pointer to the new value or an error pointer.
         */
        void  *(*lct_init)(const struct lu_context *ctx,
                           struct lu_context_key *key);
        /**
         * Value destructor. Called when a context with a previously
         * allocated value of this slot is destroyed. \a data is the value
         * that was returned by a matching call to
         * lu_context_key::lct_init().
         */
        void   (*lct_fini)(const struct lu_context *ctx,
                           struct lu_context_key *key, void *data);
        /**
         * Optional method called on lu_context_exit() for all allocated
         * keys. Can be used by debugging code checking that locks are
         * released, etc.
         */
        void   (*lct_exit)(const struct lu_context *ctx,
                           struct lu_context_key *key, void *data);
        /**
         * Internal implementation detail: index within
         * lu_context::lc_value[] reserved for this key.
         */
        int	 lct_index;
        /**
         * Internal implementation detail: number of values created for
         * this key.
         */
        atomic_t lct_used;
        /**
         * Internal implementation detail: module for this key.
         */
        struct module *lct_owner;
        /**
         * References to this key. For debugging.
         */
        struct lu_ref lct_reference;
};
#define LU_KEY_INIT(mod, type)						\
	static void *mod##_key_init(const struct lu_context *ctx,	\
				    struct lu_context_key *key)		\
	{								\
		type *value;						\
									\
		BUILD_BUG_ON(PAGE_SIZE < sizeof(*value));		\
									\
		OBD_ALLOC_PTR(value);					\
		if (value == NULL)					\
			value = ERR_PTR(-ENOMEM);			\
									\
		return value;						\
	}								\
	struct __##mod##__dummy_init { ; } /* semicolon catcher */
#define LU_KEY_FINI(mod, type)						\
	static void mod##_key_fini(const struct lu_context *ctx,	\
				   struct lu_context_key *key, void *data) \
	{								\
		type *info = data;					\
									\
		OBD_FREE_PTR(info);					\
	}								\
	struct __##mod##__dummy_fini { ; } /* semicolon catcher */

#define LU_KEY_INIT_FINI(mod, type)	\
	LU_KEY_INIT(mod, type);		\
	LU_KEY_FINI(mod, type)
#define LU_CONTEXT_KEY_DEFINE(mod, tags)		\
	struct lu_context_key mod##_thread_key = {	\
		.lct_tags = tags,			\
		.lct_init = mod##_key_init,		\
		.lct_fini = mod##_key_fini		\
	}

#define LU_CONTEXT_KEY_INIT(key)			\
do {							\
	(key)->lct_owner = THIS_MODULE;			\
} while (0)
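/*
 * Example: a hypothetical "foo" module defining per-context data with the
 * helpers above (a sketch; struct foo_thread_info is illustrative):
 *
 *	struct foo_thread_info {
 *		struct lu_fid fti_fid;
 *	};
 *
 *	LU_KEY_INIT_FINI(foo, struct foo_thread_info);
 *	LU_CONTEXT_KEY_DEFINE(foo, LCT_MD_THREAD | LCT_DT_THREAD);
 *
 * The value is then retrieved with
 * lu_context_key_get(&env->le_ctx, &foo_thread_key).
 */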
int   lu_context_key_register(struct lu_context_key *key);
void  lu_context_key_degister(struct lu_context_key *key);
void *lu_context_key_get(const struct lu_context *ctx,
                         const struct lu_context_key *key);
void  lu_context_key_quiesce(struct lu_device_type *t,
                             struct lu_context_key *key);
void  lu_context_key_revive(struct lu_context_key *key);
/*
 * LU_KEY_INIT_GENERIC() has to be a macro to correctly determine an
 * owning module.
 */
#define LU_KEY_INIT_GENERIC(mod)					\
	static void mod##_key_init_generic(struct lu_context_key *k, ...) \
	{								\
		struct lu_context_key *key = k;				\
		va_list args;						\
									\
		va_start(args, k);					\
		do {							\
			LU_CONTEXT_KEY_INIT(key);			\
			key = va_arg(args, struct lu_context_key *);	\
		} while (key != NULL);					\
		va_end(args);						\
	}
#define LU_TYPE_INIT(mod, ...)						\
	LU_KEY_INIT_GENERIC(mod)					\
	static int mod##_type_init(struct lu_device_type *t)		\
	{								\
		mod##_key_init_generic(__VA_ARGS__, NULL);		\
		return lu_context_key_register_many(__VA_ARGS__, NULL);	\
	}								\
	struct __##mod##_dummy_type_init { ; }

#define LU_TYPE_FINI(mod, ...)						\
	static void mod##_type_fini(struct lu_device_type *t)		\
	{								\
		lu_context_key_degister_many(__VA_ARGS__, NULL);	\
	}								\
	struct __##mod##_dummy_type_fini { ; }

#define LU_TYPE_START(mod, ...)						\
	static void mod##_type_start(struct lu_device_type *t)		\
	{								\
		lu_context_key_revive_many(__VA_ARGS__, NULL);		\
	}								\
	struct __##mod##_dummy_type_start { ; }

#define LU_TYPE_STOP(mod, ...)						\
	static void mod##_type_stop(struct lu_device_type *t)		\
	{								\
		lu_context_key_quiesce_many(t, __VA_ARGS__, NULL);	\
	}								\
	struct __##mod##_dummy_type_stop { }

#define LU_TYPE_INIT_FINI(mod, ...)		\
	LU_TYPE_INIT(mod, __VA_ARGS__);		\
	LU_TYPE_FINI(mod, __VA_ARGS__);		\
	LU_TYPE_START(mod, __VA_ARGS__);	\
	LU_TYPE_STOP(mod, __VA_ARGS__)
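/*
 * Continuing the hypothetical "foo" module from above, LU_TYPE_INIT_FINI()
 * generates foo_type_{init,fini,start,stop}(), which can be wired directly
 * into the type operations (a sketch):
 *
 *	LU_TYPE_INIT_FINI(foo, &foo_thread_key);
 *
 *	static const struct lu_device_type_operations foo_type_ops = {
 *		// ... device alloc/free methods ...
 *		.ldto_init  = foo_type_init,
 *		.ldto_fini  = foo_type_fini,
 *		.ldto_start = foo_type_start,
 *		.ldto_stop  = foo_type_stop,
 *	};
 */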
int  lu_context_init(struct lu_context *ctx, __u32 tags);
void lu_context_fini(struct lu_context *ctx);
void lu_context_enter(struct lu_context *ctx);
void lu_context_exit(struct lu_context *ctx);
int  lu_context_refill(struct lu_context *ctx);
/**
 * Helper functions to operate on multiple keys. These are used by the default
 * device type operations, defined by LU_TYPE_INIT_FINI().
 */
int  lu_context_key_register_many(struct lu_context_key *k, ...);
void lu_context_key_degister_many(struct lu_context_key *k, ...);
void lu_context_key_revive_many(struct lu_context_key *k, ...);
void lu_context_key_quiesce_many(struct lu_device_type *t,
                                 struct lu_context_key *k, ...);
/**
 * Update/clear ctx/ses tags.
 */
void lu_context_tags_update(__u32 tags);
void lu_context_tags_clear(__u32 tags);
void lu_session_tags_update(__u32 tags);
void lu_session_tags_clear(__u32 tags);
1295 * "Local" context, used to store data instead of stack.
1297 struct lu_context le_ctx;
1299 * "Session" context for per-request data.
1301 struct lu_context *le_ses;
1304 int lu_env_init (struct lu_env *env, __u32 tags);
1305 void lu_env_fini (struct lu_env *env);
1306 int lu_env_refill(struct lu_env *env);
1307 int lu_env_refill_by_tags(struct lu_env *env, __u32 ctags, __u32 stags);
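/*
 * A stand-alone environment is set up and torn down around the work that
 * needs it (a sketch):
 *
 *	struct lu_env env;
 *	int rc;
 *
 *	rc = lu_env_init(&env, LCT_DT_THREAD);
 *	if (rc)
 *		return rc;
 *	// ... use &env with lu_object_find() and friends ...
 *	lu_env_fini(&env);
 */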
static inline void *lu_env_info(const struct lu_env *env,
                                const struct lu_context_key *key)
{
        void *info;

        info = lu_context_key_get(&env->le_ctx, key);
        if (!info) {
                if (!lu_env_refill((struct lu_env *)env))
                        info = lu_context_key_get(&env->le_ctx, key);
        }
        LASSERT(info);
        return info;
}

struct lu_env *lu_env_find(void);
int lu_env_add(struct lu_env *env);
int lu_env_add_task(struct lu_env *env, struct task_struct *task);
void lu_env_remove(struct lu_env *env);

/** @} lu_context */
/**
 * Output site statistical counters into a buffer. Suitable for
 * ll_rd_*()-style functions.
 */
int lu_site_stats_seq_print(const struct lu_site *s, struct seq_file *m);
/**
 * Common name structure to be passed around for various name-related methods.
 */
struct lu_name {
        const char	*ln_name;
        int		 ln_namelen;
};
static inline bool name_is_dot_or_dotdot(const char *name, int namelen)
{
        return name[0] == '.' &&
               (namelen == 1 || (namelen == 2 && name[1] == '.'));
}

static inline bool lu_name_is_dot_or_dotdot(const struct lu_name *lname)
{
        return name_is_dot_or_dotdot(lname->ln_name, lname->ln_namelen);
}
static inline bool lu_name_is_temp_file(const char *name, int namelen,
                                        bool dot_prefix, int suffixlen)
{
        int lower = 0;
        int upper = 0;
        int digit = 0;
        int len = suffixlen;

        if (dot_prefix && name[0] != '.')
                return false;

        if (namelen < dot_prefix + suffixlen + 2 ||
            name[namelen - suffixlen - 1] != '.')
                return false;

        while (len) {
                lower += islower(name[namelen - len]);
                upper += isupper(name[namelen - len]);
                digit += isdigit(name[namelen - len]);
                len--;
        }
        /* mktemp() filename suffixes will have a mix of upper- and lower-case
         * letters and/or numbers, not all numbers, nor all upper- or
         * lower-case letters. About 0.07% of randomly-generated names will
         * slip through, but this avoids 99.93% of cross-MDT renames for
         * those files.
         */
        if ((digit >= suffixlen - 1 && !isdigit(name[namelen - suffixlen])) ||
            upper == suffixlen || lower == suffixlen)
                return false;

        return true;
}
static inline bool lu_name_is_backup_file(const char *name, int namelen,
                                          int *suffixlen)
{
        if (namelen > 1 &&
            name[namelen - 2] != '.' && name[namelen - 1] == '~') {
                if (suffixlen)
                        *suffixlen = 1;
                return true;
        }

        if (namelen > 4 && name[namelen - 4] == '.' &&
            (!strncasecmp(name + namelen - 3, "bak", 3) ||
             !strncasecmp(name + namelen - 3, "sav", 3))) {
                if (suffixlen)
                        *suffixlen = 4;
                return true;
        }

        if (namelen > 5 && name[namelen - 5] == '.' &&
            !strncasecmp(name + namelen - 4, "orig", 4)) {
                if (suffixlen)
                        *suffixlen = 5;
                return true;
        }

        return false;
}
static inline bool lu_name_is_valid_len(const char *name, size_t name_len)
{
        return name != NULL &&
               name_len > 0 &&
               name_len < INT_MAX &&
               strlen(name) == name_len &&
               memchr(name, '/', name_len) == NULL;
}
/**
 * Validate names (path components).
 *
 * To be valid, \a name must be non-empty, '\0'-terminated of length \a
 * name_len, and not contain '/'. The maximum length of a name (before,
 * say, -ENAMETOOLONG will be returned) is really controlled by llite
 * and the server. We only check for something insane coming from bad
 * integer handling here.
 */
static inline bool lu_name_is_valid_2(const char *name, size_t name_len)
{
        return lu_name_is_valid_len(name, name_len) && name[name_len] == '\0';
}

static inline bool lu_name_is_valid(const struct lu_name *ln)
{
        return lu_name_is_valid_2(ln->ln_name, ln->ln_namelen);
}
#define DNAME "%.*s"
#define PNAME(ln)					\
	(lu_name_is_valid(ln) ? (ln)->ln_namelen : 0),	\
	(lu_name_is_valid(ln) ? (ln)->ln_name : "")
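/*
 * E.g. (a sketch): CDEBUG(D_INODE, "name "DNAME"\n", PNAME(&lname));
 * prints the name safely even when lname is not valid.
 */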
/**
 * Common buffer structure to be passed around for various xattr_{s,g}et()
 * methods.
 */
struct lu_buf {
        void	*lb_buf;
        size_t	 lb_len;
};

#define DLUBUF "(%p %zu)"
#define PLUBUF(buf) (buf)->lb_buf, (buf)->lb_len
/* read buffer params, should be filled out by out */
struct lu_rdbuf {
        /** number of buffers */
        unsigned int	rb_nbufs;
        /** pointers to buffers */
        struct lu_buf	rb_bufs[];
};
/**
 * One-time initializers, called at obdclass module initialization, not
 * exported.
 */

/**
 * Initialization of global lu_* data.
 */
int lu_global_init(void);

/**
 * Dual to lu_global_init().
 */
void lu_global_fini(void);
struct lu_kmem_descr {
        struct kmem_cache **ckd_cache;
        const char	   *ckd_name;
        const size_t	    ckd_size;
};

int  lu_kmem_init(struct lu_kmem_descr *caches);
void lu_kmem_fini(struct lu_kmem_descr *caches);
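/*
 * Example descriptor table for lu_kmem_init()/lu_kmem_fini(); the array is
 * terminated by a NULL ckd_cache entry (a sketch; the "foo" names are
 * illustrative):
 *
 *	static struct kmem_cache *foo_object_kmem;
 *
 *	static struct lu_kmem_descr foo_caches[] = {
 *		{
 *			.ckd_cache = &foo_object_kmem,
 *			.ckd_name  = "foo_object_kmem",
 *			.ckd_size  = sizeof(struct foo_object)
 *		},
 *		{
 *			.ckd_cache = NULL
 *		}
 *	};
 *
 *	rc = lu_kmem_init(foo_caches);
 */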
void lu_object_assign_fid(const struct lu_env *env, struct lu_object *o,
                          const struct lu_fid *fid);
struct lu_object *lu_object_anon(const struct lu_env *env,
                                 struct lu_device *dev,
                                 const struct lu_object_conf *conf);

extern struct lu_buf LU_BUF_NULL;
void lu_buf_free(struct lu_buf *buf);
void lu_buf_alloc(struct lu_buf *buf, size_t size);
void lu_buf_realloc(struct lu_buf *buf, size_t size);

int lu_buf_check_and_grow(struct lu_buf *buf, size_t len);
struct lu_buf *lu_buf_check_and_alloc(struct lu_buf *buf, size_t len);
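/*
 * Typical lu_buf usage (a sketch): start from LU_BUF_NULL, allocate or grow
 * on demand, free once at the end:
 *
 *	struct lu_buf buf = LU_BUF_NULL;
 *
 *	lu_buf_alloc(&buf, PAGE_SIZE);
 *	if (buf.lb_buf == NULL)
 *		return -ENOMEM;
 *	// ... fill buf.lb_buf, up to buf.lb_len bytes ...
 *	lu_buf_free(&buf);
 */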
extern __u32 lu_context_tags_default;
extern __u32 lu_session_tags_default;

static inline bool lu_device_is_cl(const struct lu_device *d)
{
        return d->ld_type->ldt_tags & LU_DEVICE_CL;
}

static inline bool lu_object_is_cl(const struct lu_object *o)
{
        return lu_device_is_cl(o->lo_dev);
}
/* Generic subset of tgts */
struct lu_tgt_pool {
        __u32		   *op_array;	/* array of index of
					 * lov_obd->lov_tgts */
        unsigned int	    op_count;	/* number of tgts in the array */
        unsigned int	    op_size;	/* allocated size of op_array */
        struct rw_semaphore op_rw_sem;	/* to protect lu_tgt_pool use */
};

int lu_tgt_pool_init(struct lu_tgt_pool *op, unsigned int count);
int lu_tgt_pool_add(struct lu_tgt_pool *op, __u32 idx, unsigned int min_count);
int lu_tgt_pool_remove(struct lu_tgt_pool *op, __u32 idx);
int lu_tgt_pool_free(struct lu_tgt_pool *op);
int lu_tgt_check_index(int idx, struct lu_tgt_pool *osts);
int lu_tgt_pool_extend(struct lu_tgt_pool *op, unsigned int min_count);
/* bitflags used in rr / qos allocation */
enum {
        LQ_DIRTY	= 0,	/* recalc qos data */
        LQ_SAME_SPACE,		/* the OSTs all have approx.
				 * the same space avail */
        LQ_RESET,		/* zero current penalties */
};
#ifdef HAVE_SERVER_SUPPORT
/* round-robin QoS data for LOD/LMV */
struct lu_qos_rr {
        spinlock_t		 lqr_alloc;	/* protect allocation index */
        __u32			 lqr_start_idx;	/* start index of new inode */
        __u32			 lqr_offset_idx;/* aliasing for start_idx */
        int			 lqr_start_count;/* reseed counter */
        struct lu_tgt_pool	 lqr_pool;	/* round-robin optimized list */
        unsigned long		 lqr_flags;
};

static inline void lu_qos_rr_init(struct lu_qos_rr *lqr)
{
        spin_lock_init(&lqr->lqr_alloc);
        set_bit(LQ_DIRTY, &lqr->lqr_flags);
}

#endif /* HAVE_SERVER_SUPPORT */
/* QoS data per MDS/OSS */
struct lu_svr_qos {
        struct obd_uuid		 lsq_uuid;	/* ptlrpc's c_remote_uuid */
        struct list_head	 lsq_svr_list;	/* link to lq_svr_list */
        __u64			 lsq_bavail;	/* total bytes avail on svr */
        __u64			 lsq_iavail;	/* total inode avail on svr */
        __u64			 lsq_penalty;	/* current penalty */
        __u64			 lsq_penalty_per_obj; /* penalty decrease
						       * every obj */
        time64_t		 lsq_used;	/* last used time, seconds */
        __u32			 lsq_tgt_count;	/* number of tgts on this svr */
        __u32			 lsq_id;	/* unique svr id */
};
/* QoS data per MDT/OST */
struct lu_tgt_qos {
        struct lu_svr_qos	*ltq_svr;	/* svr info */
        __u64			 ltq_penalty;	/* current penalty */
        __u64			 ltq_penalty_per_obj; /* penalty decrease
						       * every obj */
        __u64			 ltq_avail;	/* bytes/inode avail */
        __u64			 ltq_weight;	/* net weighting */
        time64_t		 ltq_used;	/* last used time, seconds */
        bool			 ltq_usable:1;	/* usable for striping */
};
/* target descriptor */
#define LOV_QOS_DEF_THRESHOLD_RR_PCT	17
#define LMV_QOS_DEF_THRESHOLD_RR_PCT	5

#define LOV_QOS_DEF_PRIO_FREE		90
#define LMV_QOS_DEF_PRIO_FREE		90

struct lu_tgt_desc {
        union {
                struct dt_device	*ltd_tgt;
                struct obd_device	*ltd_obd;
        };
        struct obd_export  *ltd_exp;
        struct obd_uuid	    ltd_uuid;
        __u32		    ltd_index;
        __u32		    ltd_gen;
        struct list_head    ltd_kill;
        struct task_struct *ltd_recovery_task;
        struct mutex	    ltd_fid_mutex;
        struct lu_tgt_qos   ltd_qos;	/* qos info per target */
        struct obd_statfs   ltd_statfs;
        time64_t	    ltd_statfs_age;
        unsigned long	    ltd_active:1,/* is this target up for requests */
                            ltd_activate:1,/* should target be activated */
                            ltd_reap:1,	/* should this target be deleted */
                            ltd_got_update_log:1, /* Already got update log */
                            ltd_connecting:1; /* target is connecting */
};
/* number of pointers at 2nd level */
#define TGT_PTRS_PER_BLOCK	(PAGE_SIZE / sizeof(void *))
/* number of pointers at 1st level - only need as many as max OST/MDT count */
#define TGT_PTRS		((LOV_ALL_STRIPES + 1) / TGT_PTRS_PER_BLOCK)

struct lu_tgt_desc_idx {
        struct lu_tgt_desc *ldi_tgt[TGT_PTRS_PER_BLOCK];
};
/* QoS data for LOD/LMV */
struct lu_qos {
        struct list_head	 lq_svr_list;	/* lu_svr_qos list */
        struct rw_semaphore	 lq_rw_sem;
        __u32			 lq_active_svr_count;
        unsigned int		 lq_prio_free;	 /* priority for free space */
        unsigned int		 lq_threshold_rr;/* priority for rr */
#ifdef HAVE_SERVER_SUPPORT
        struct lu_qos_rr	 lq_rr;		 /* round robin qos data */
#endif
        unsigned long		 lq_flags;
#if 0
        unsigned long		 lq_dirty:1,	 /* recalc qos data */
                                 lq_same_space:1,/* the servers all have approx.
						  * the same space avail */
                                 lq_reset:1;	 /* zero current penalties */
#endif
};
struct lu_tgt_descs {
        union {
                struct lov_desc	ltd_lov_desc;
                struct lmv_desc	ltd_lmv_desc;
        };
        /* list of known TGTs */
        struct lu_tgt_desc_idx	*ltd_tgt_idx[TGT_PTRS];
        /* Size of the lu_tgts array, guaranteed to be a power of 2 */
        __u32			 ltd_tgts_size;
        /* bitmap of TGTs available */
        unsigned long		*ltd_tgt_bitmap;
        /* TGTs scheduled to be deleted */
        __u32			 ltd_death_row;
        /* Table refcount used for delayed deletion */
        atomic_t		 ltd_refcount;
        /* mutex to serialize concurrent updates to the tgt table */
        struct mutex		 ltd_mutex;
        /* read/write semaphore used for array relocation */
        struct rw_semaphore	 ltd_rw_sem;
        struct lu_qos		 ltd_qos;
        /* all tgts in a packed array */
        struct lu_tgt_pool	 ltd_tgt_pool;
        /* true if tgt is MDT */
        bool			 ltd_is_mdt;
};

#define LTD_TGT(ltd, index)						\
	(ltd)->ltd_tgt_idx[(index) / TGT_PTRS_PER_BLOCK]->		\
		ldi_tgt[(index) % TGT_PTRS_PER_BLOCK]
u64 lu_prandom_u64_max(u64 ep_ro);
int lu_qos_add_tgt(struct lu_qos *qos, struct lu_tgt_desc *ltd);
void lu_tgt_qos_weight_calc(struct lu_tgt_desc *tgt);

int lu_tgt_descs_init(struct lu_tgt_descs *ltd, bool is_mdt);
void lu_tgt_descs_fini(struct lu_tgt_descs *ltd);
int ltd_add_tgt(struct lu_tgt_descs *ltd, struct lu_tgt_desc *tgt);
void ltd_del_tgt(struct lu_tgt_descs *ltd, struct lu_tgt_desc *tgt);
int ltd_qos_penalties_calc(struct lu_tgt_descs *ltd);
int ltd_qos_update(struct lu_tgt_descs *ltd, struct lu_tgt_desc *tgt,
                   __u64 *total_wt);
/**
 * Whether MDT inode and space usages are balanced.
 */
static inline bool ltd_qos_is_balanced(struct lu_tgt_descs *ltd)
{
        return !test_bit(LQ_DIRTY, &ltd->ltd_qos.lq_flags) &&
               test_bit(LQ_SAME_SPACE, &ltd->ltd_qos.lq_flags);
}
/**
 * Whether QoS data is up-to-date and QoS can be applied.
 */
static inline bool ltd_qos_is_usable(struct lu_tgt_descs *ltd)
{
        if (ltd_qos_is_balanced(ltd))
                return false;

        if (ltd->ltd_lov_desc.ld_active_tgt_count < 2)
                return false;

        return true;
}
static inline struct lu_tgt_desc *ltd_first_tgt(struct lu_tgt_descs *ltd)
{
        int index;

        index = find_first_bit(ltd->ltd_tgt_bitmap,
                               ltd->ltd_tgts_size);
        return (index < ltd->ltd_tgts_size) ? LTD_TGT(ltd, index) : NULL;
}

static inline struct lu_tgt_desc *ltd_next_tgt(struct lu_tgt_descs *ltd,
                                               struct lu_tgt_desc *tgt)
{
        int index;

        if (!tgt)
                return NULL;

        index = tgt->ltd_index;
        LASSERT(index < ltd->ltd_tgts_size);
        index = find_next_bit(ltd->ltd_tgt_bitmap,
                              ltd->ltd_tgts_size, index + 1);
        return (index < ltd->ltd_tgts_size) ? LTD_TGT(ltd, index) : NULL;
}
#define ltd_foreach_tgt(ltd, tgt) \
	for (tgt = ltd_first_tgt(ltd); tgt; tgt = ltd_next_tgt(ltd, tgt))

#define ltd_foreach_tgt_safe(ltd, tgt, tmp)				  \
	for (tgt = ltd_first_tgt(ltd), tmp = ltd_next_tgt(ltd, tgt); tgt; \
	     tgt = tmp, tmp = ltd_next_tgt(ltd, tgt))
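/*
 * Example: walking every configured target (a sketch; "ltd" is assumed to
 * point at an initialized struct lu_tgt_descs):
 *
 *	struct lu_tgt_desc *tgt;
 *
 *	ltd_foreach_tgt(ltd, tgt)
 *		CDEBUG(D_INFO, "tgt %u: active %d\n",
 *		       tgt->ltd_index, (int)tgt->ltd_active);
 */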
#endif /* __LUSTRE_LU_OBJECT_H */