4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.gnu.org/licenses/gpl-2.0.html
23 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Use is subject to license terms.
26 * Copyright (c) 2011, 2017, Intel Corporation.
29 * This file is part of Lustre, http://www.lustre.org/
32 #ifndef __LUSTRE_LU_OBJECT_H
33 #define __LUSTRE_LU_OBJECT_H
35 #ifdef HAVE_LINUX_STDARG_HEADER
36 #include <linux/stdarg.h>
40 #include <libcfs/libcfs.h>
41 #include <uapi/linux/lustre/lustre_idl.h>
43 #include <linux/percpu_counter.h>
44 #include <linux/rhashtable.h>
45 #include <linux/ctype.h>
48 struct proc_dir_entry;
54 * lu_* data-types represent server-side entities shared by data and meta-data stacks.
59 * -# support for layering.
61 * Server side object is split into layers, one per device in the
62 * corresponding device stack. Individual layer is represented by struct
63 * lu_object. Compound layered object --- by struct lu_object_header. Most
64 * interface functions take lu_object as an argument and operate on the
65 * whole compound object. This decision was made due to the following reasons:
68 * - it's envisaged that lu_object will be used much more often than lu_object_header;
71 * - we want lower (non-top) layers to be able to initiate operations
72 * on the whole object.
74 * Generic code supports layering more complex than simple stacking, e.g.,
75 * it is possible that at some layer an object "spawns" multiple sub-objects on the lower layer.
78 * -# fid-based identification.
80 * Compound object is uniquely identified by its fid. Objects are indexed
81 * by their fids (a hash table is used for the index).
83 * -# caching and life-cycle management.
85 * Object's life-time is controlled by reference counting. When reference
86 * count drops to 0, object is returned to cache. Cached objects still
87 * retain their identity (i.e., fid), and can be recovered from cache.
89 * Objects are kept in the global LRU list, and lu_site_purge() function
90 * can be used to reclaim a given number of unused objects from the tail of this list.
93 * -# avoiding recursion.
95 * Generic code tries to replace recursion through layers with iteration
96 * where possible. To further reduce stack consumption, data are, when
97 * practically possible, allocated through the lu_context_key interface
98 * rather than on the stack.
105 struct lu_object_header;
111 * Operations common for data and meta-data devices.
113 struct lu_device_operations {
115 * Allocate an object for the given device (without lower-layer
116 * parts). This is called by lu_object_operations::loo_object_init()
117 * from the parent layer, and should set up at least the lu_object::lo_dev
118 * and lu_object::lo_ops fields of the resulting lu_object.
120 * Object creation protocol.
122 * Due to the design goal of avoiding recursion, object creation (see
123 * lu_object_alloc()) is somewhat involved (an illustrative sketch follows struct lu_object_operations below):
125 * - first, lu_device_operations::ldo_object_alloc() method of the
126 * top-level device in the stack is called. It should allocate top
127 * level object (including lu_object_header), but without any
128 * lower-layer sub-object(s).
130 * - then lu_object_alloc() sets the fid in the header of the newly created object;
133 * - then lu_object_operations::loo_object_init() is called. It has
134 * to allocate lower-layer object(s). To do this,
135 * lu_object_operations::loo_object_init() calls ldo_object_alloc()
136 * of the lower-layer device(s).
138 * - for all new objects allocated by
139 * lu_object_operations::loo_object_init() (and inserted into object
140 * stack), lu_object_operations::loo_object_init() is called again
141 * repeatedly, until no new objects are created.
143 * \post ergo(!IS_ERR(result), result->lo_dev == d &&
144 * result->lo_ops != NULL);
146 struct lu_object *(*ldo_object_alloc)(const struct lu_env *env,
147 const struct lu_object_header *h,
148 struct lu_device *d);
150 * Process configuration specific to this device.
152 int (*ldo_process_config)(const struct lu_env *env,
153 struct lu_device *, struct lustre_cfg *);
154 int (*ldo_recovery_complete)(const struct lu_env *,
158 * Initialize local objects for the device. This method is called after the
159 * layer has been initialized (after the LCFG_SETUP stage) and before it
160 * starts serving user requests.
162 int (*ldo_prepare)(const struct lu_env *,
163 struct lu_device *parent,
164 struct lu_device *dev);
168 * Allocate a new FID for a file named @name under @parent
170 * \param[in] env execution environment for this thread
171 * \param[in] dev dt device
172 * \param[out] fid new FID allocated
173 * \param[in] parent parent object
174 * \param[in] name lu_name
176 * \retval 0 FID allocated successfully.
177 * \retval 1 FID allocated successfully and a new sequence
178 * requested from the seq meta server.
179 * \retval negative negative errno if FID allocation failed.
181 int (*ldo_fid_alloc)(const struct lu_env *env,
182 struct lu_device *dev,
184 struct lu_object *parent,
185 const struct lu_name *name);
189 * For lu_object_conf flags
192 /* This is a new object to be allocated, or the file
193 * corresponding to the object does not exist.
195 LOC_F_NEW = 0x00000001,
199 * Object configuration, describing particulars of the object being created. On
200 * the server this is not used, as server objects are fully identified by fid. On
201 * the client the configuration contains struct lustre_md.
203 struct lu_object_conf {
205 * Some hints for obj find and alloc.
207 loc_flags_t loc_flags;
211 * Type of "printer" function used by lu_object_operations::loo_object_print()
214 * A printer function is needed to provide some flexibility in (semi-)debugging
215 * output; possible implementations include printk, CDEBUG and sysfs/seq_file.
217 typedef int (*lu_printer_t)(const struct lu_env *env,
218 void *cookie, const char *format, ...)
222 * Operations specific for particular lu_object.
224 struct lu_object_operations {
227 * Allocate lower-layer parts of the object by calling
228 * lu_device_operations::ldo_object_alloc() of the corresponding
231 * This method is called once for each object inserted into the object
232 * stack. It is the responsibility of this method to insert the lower-layer
233 * object(s) it creates into the appropriate places in the object stack.
235 int (*loo_object_init)(const struct lu_env *env,
237 const struct lu_object_conf *conf);
239 * Called (in top-to-bottom order) during object allocation after all
240 * layers were allocated and initialized. Can be used to perform
241 * initialization depending on lower layers.
243 int (*loo_object_start)(const struct lu_env *env,
244 struct lu_object *o);
246 * Called before lu_object_operations::loo_object_free() to signal
247 * that object is being destroyed. Dual to
248 * lu_object_operations::loo_object_init().
250 void (*loo_object_delete)(const struct lu_env *env,
251 struct lu_object *o);
253 * Dual to lu_device_operations::ldo_object_alloc(). Called when
254 * object is removed from memory. Must use call_rcu or kfree_rcu
255 * if the object contains an lu_object_header.
257 void (*loo_object_free)(const struct lu_env *env,
258 struct lu_object *o);
260 * Called when last active reference to the object is released (and
261 * object returns to the cache). This method is optional.
263 void (*loo_object_release)(const struct lu_env *env,
264 struct lu_object *o);
266 * Optional debugging helper. Print given object.
268 int (*loo_object_print)(const struct lu_env *env, void *cookie,
269 lu_printer_t p, const struct lu_object *o);
271 * Optional debugging method. Returns true iff the object is internally consistent.
274 int (*loo_object_invariant)(const struct lu_object *o);
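/*
 * Illustrative sketch (not part of the API): how a hypothetical "foo"
 * mid-layer could implement the allocation protocol described above for
 * lu_device_operations::ldo_object_alloc().  Its loo_object_init() asks the
 * next lower device to allocate that device's slice and inserts it into the
 * stack.  All foo_* names and the fd_child back-pointer are assumptions made
 * for this example only.
 *
 *      static int foo_object_init(const struct lu_env *env,
 *                                 struct lu_object *o,
 *                                 const struct lu_object_conf *conf)
 *      {
 *              struct foo_device *fd = foo_dev(o->lo_dev);
 *              struct lu_device *lower = fd->fd_child;
 *              struct lu_object *below;
 *
 *              below = lower->ld_ops->ldo_object_alloc(env, o->lo_header,
 *                                                      lower);
 *              if (IS_ERR(below))
 *                      return PTR_ERR(below);
 *              lu_object_add(o, below);
 *              return 0;
 *      }
 */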
280 struct lu_device_type;
283 * Device: a layer in the server-side abstraction stack.
287 * reference count. This is incremented, in particular, on each object
288 * created at this layer.
290 * \todo XXX which means that atomic_t is probably too small.
294 * Pointer to device type. Never modified once set.
296 struct lu_device_type *ld_type;
298 * Operation vector for this device.
300 const struct lu_device_operations *ld_ops;
302 * Stack this device belongs to.
304 struct lu_site *ld_site;
305 struct proc_dir_entry *ld_proc_entry;
307 /** \todo XXX: temporary back pointer into obd. */
308 struct obd_device *ld_obd;
310 * A list of references to this device, for debugging.
312 struct lu_ref ld_reference;
314 * Link the device to the site.
316 struct list_head ld_linkage;
319 struct lu_device_type_operations;
322 * Tag bits for device type. They are used to distinguish certain groups of device types.
326 /** this is meta-data device */
327 LU_DEVICE_MD = BIT(0),
328 /** this is data device */
329 LU_DEVICE_DT = BIT(1),
330 /** data device in the client stack */
331 LU_DEVICE_CL = BIT(2)
337 struct lu_device_type {
339 * Tag bits. Taken from enum lu_device_tag. Never modified once set.
343 * Name of this class. Unique system-wide. Never modified once set.
347 * Operations for this type.
349 const struct lu_device_type_operations *ldt_ops;
351 * \todo XXX: temporary: context tags used by obd_*() calls.
355 * Number of existing device type instances.
357 atomic_t ldt_device_nr;
361 * Operations on a device type.
363 struct lu_device_type_operations {
365 * Allocate new device.
367 struct lu_device *(*ldto_device_alloc)(const struct lu_env *env,
368 struct lu_device_type *t,
369 struct lustre_cfg *lcfg);
371 * Free device. Dual to
372 * lu_device_type_operations::ldto_device_alloc(). Returns pointer to
373 * the next device in the stack.
375 struct lu_device *(*ldto_device_free)(const struct lu_env *,
379 * Initialize the devices after allocation
381 int (*ldto_device_init)(const struct lu_env *env,
382 struct lu_device *, const char *,
385 * Finalize device. Dual to
386 * lu_device_type_operations::ldto_device_init(). Returns pointer to
387 * the next device in the stack.
389 struct lu_device *(*ldto_device_fini)(const struct lu_env *env,
392 * Initialize device type. This is called on module load.
394 int (*ldto_init)(struct lu_device_type *t);
396 * Finalize device type. Dual to
397 * lu_device_type_operations::ldto_init(). Called on module unload.
399 void (*ldto_fini)(struct lu_device_type *t);
401 * Called when the first device is created.
403 void (*ldto_start)(struct lu_device_type *t);
405 * Called when number of devices drops to 0.
407 void (*ldto_stop)(struct lu_device_type *t);
410 static inline int lu_device_is_md(const struct lu_device *d)
412 return ergo(d != NULL, d->ld_type->ldt_tags & LU_DEVICE_MD);
416 * Common object attributes.
427 /** modification time in seconds since Epoch */
429 /** access time in seconds since Epoch */
431 /** change time in seconds since Epoch */
433 /** create time in seconds since Epoch */
435 /** 512-byte blocks allocated to object */
437 /** permission bits and file type */
445 /** number of persistent references to this object */
447 /** block bits of the object */
449 /** block size of the object */
455 /** set layout version to OST objects. */
456 __u32 la_layout_version;
458 __u64 la_dirent_count;
461 #define LU_DIRENT_COUNT_UNSET -1
464 * Layer in the layered object.
468 * Header for this object.
470 struct lu_object_header *lo_header;
472 * Device for this layer.
474 struct lu_device *lo_dev;
476 * Operations for this object.
478 const struct lu_object_operations *lo_ops;
480 * Linkage into list of all layers.
482 struct list_head lo_linkage;
484 * Link to the device, for debugging.
486 struct lu_ref_link lo_dev_ref;
489 enum lu_object_header_flags {
491 * Don't keep this object in cache. Object will be destroyed as soon
492 * as last reference to it is released. This flag cannot be cleared
495 LU_OBJECT_HEARD_BANSHEE = 0,
497 * Marks that this object has already been taken out of the cache.
499 LU_OBJECT_UNHASHED = 1,
501 * Object is initialized. When an object is found in the cache it may not be
502 * initialized yet; the object allocator will initialize it.
504 LU_OBJECT_INITED = 2,
507 enum lu_object_header_attr {
508 LOHA_EXISTS = BIT(0),
509 LOHA_REMOTE = BIT(1),
510 LOHA_HAS_AGENT_ENTRY = BIT(2),
511 LOHA_FSCRYPT_MD = BIT(3),
513 * UNIX file type is stored in S_IFMT bits.
515 LOHA_FT_START = 001 << 12, /**< S_IFIFO */
516 LOHA_FT_END = 017 << 12, /**< S_IFMT */
520 * "Compound" object, consisting of multiple layers.
522 * Compound object with given fid is unique with given lu_site.
524 * Note that the object does *not* necessarily correspond to a real object in
525 * persistent storage: the object is an anchor for locking and method calls, so
526 * it is created for things like a not-yet-existing child created by mkdir or
527 * create calls. lu_object_exists() can be used to check
528 * whether the object is backed by a persistent storage entity.
529 * Any object containing this structure which might be placed in an
530 * rhashtable via loh_hash MUST be freed using call_rcu() or kfree_rcu().
532 struct lu_object_header {
534 * Fid, uniquely identifying this object.
536 struct lu_fid loh_fid;
538 * Object flags from enum lu_object_header_flags. Set and checked atomically.
541 unsigned long loh_flags;
543 * Object reference count. Protected by lu_site::ls_guard.
547 * Common object attributes, cached for efficiency. From enum
548 * lu_object_header_attr.
552 * Linkage into per-site hash table.
554 struct rhash_head loh_hash;
556 * Linkage into per-site LRU list. Protected by lu_site::ls_guard.
558 struct list_head loh_lru;
560 * Linkage into the list of layers. Never modified once set (except late
561 * during object destruction). No locking is necessary.
563 struct list_head loh_layers;
565 * A list of references to this object, for debugging.
567 struct lu_ref loh_reference;
569 * Handle used for kfree_rcu() or similar.
571 struct rcu_head loh_rcu;
581 LU_SS_CACHE_DEATH_RACE,
587 * lu_site is a "compartment" within which objects are unique, and LRU
588 * discipline is maintained.
590 * lu_site exists so that multiple layered stacks can co-exist in the same address space.
593 * lu_site has the same relation to lu_device as lu_object_header to lu_object.
600 struct rhashtable ls_obj_hash;
602 * buckets for summary data
604 struct lu_site_bkt_data *ls_bkts;
608 * index of bucket on hash table while purging
610 unsigned int ls_purge_start;
612 * Top-level device for this stack.
614 struct lu_device *ls_top_dev;
616 * Bottom-level device for this stack
618 struct lu_device *ls_bottom_dev;
620 * Linkage into global list of sites.
622 struct list_head ls_linkage;
624 * List of lu_devices for this site, protected by lu_site::ls_ld_lock.
627 struct list_head ls_ld_linkage;
628 spinlock_t ls_ld_lock;
630 * Lock to serialize site purge.
632 struct mutex ls_purge_mutex;
636 struct lprocfs_stats *ls_stats;
638 * XXX: a hack! fld has to find md_site via site, remove when possible
640 struct seq_server_site *ld_seq_site;
642 * Pointer to the lu_target for this site.
644 struct lu_target *ls_tgt;
647 * Number of objects in lsb_lru_lists - used for shrinking
649 struct percpu_counter ls_lru_len_counter;
653 lu_site_wq_from_fid(struct lu_site *site, struct lu_fid *fid);
655 static inline struct seq_server_site *lu_site2seq(const struct lu_site *s)
657 return s->ld_seq_site;
661 * Constructors/destructors.
665 int lu_site_init(struct lu_site *s, struct lu_device *d);
666 void lu_site_fini(struct lu_site *s);
667 int lu_site_init_finish(struct lu_site *s);
668 void lu_stack_fini(const struct lu_env *env, struct lu_device *top);
669 void lu_device_get(struct lu_device *d);
670 void lu_device_put(struct lu_device *d);
671 int lu_device_init(struct lu_device *d, struct lu_device_type *t);
672 void lu_device_fini(struct lu_device *d);
673 int lu_object_header_init(struct lu_object_header *h);
674 void lu_object_header_fini(struct lu_object_header *h);
675 void lu_object_header_free(struct lu_object_header *h);
676 int lu_object_init(struct lu_object *o,
677 struct lu_object_header *h, struct lu_device *d);
678 void lu_object_fini(struct lu_object *o);
679 void lu_object_add_top(struct lu_object_header *h, struct lu_object *o);
680 void lu_object_add(struct lu_object *before, struct lu_object *o);
681 struct lu_object *lu_object_get_first(struct lu_object_header *h,
682 struct lu_device *dev);
683 void lu_dev_add_linkage(struct lu_site *s, struct lu_device *d);
684 void lu_dev_del_linkage(struct lu_site *s, struct lu_device *d);
687 * Helpers to initialize and finalize device types.
690 int lu_device_type_init(struct lu_device_type *ldt);
691 void lu_device_type_fini(struct lu_device_type *ldt);
696 * Caching and reference counting.
701 * Acquire an additional reference to the given object. To acquire the
702 * initial reference use lu_object_find().
705 static inline void lu_object_get(struct lu_object *o)
707 LASSERT(atomic_read(&o->lo_header->loh_ref) > 0);
708 atomic_inc(&o->lo_header->loh_ref);
712 * Return true if the object will not be cached after the last reference to it is released.
715 static inline int lu_object_is_dying(const struct lu_object_header *h)
717 return test_bit(LU_OBJECT_HEARD_BANSHEE, &h->loh_flags);
721 * Return true if object is initialized.
723 static inline int lu_object_is_inited(const struct lu_object_header *h)
725 return test_bit(LU_OBJECT_INITED, &h->loh_flags);
728 void lu_object_put(const struct lu_env *env, struct lu_object *o);
729 void lu_object_put_nocache(const struct lu_env *env, struct lu_object *o);
730 void lu_object_unhash(const struct lu_env *env, struct lu_object *o);
731 int lu_site_purge_objects(const struct lu_env *env, struct lu_site *s, int nr,
734 static inline int lu_site_purge(const struct lu_env *env, struct lu_site *s,
737 return lu_site_purge_objects(env, s, nr, 1);
740 void lu_site_print(const struct lu_env *env, struct lu_site *s, atomic_t *ref,
741 int msg_flags, lu_printer_t printer);
742 struct lu_object *lu_object_find(const struct lu_env *env,
743 struct lu_device *dev, const struct lu_fid *f,
744 const struct lu_object_conf *conf);
745 struct lu_object *lu_object_find_at(const struct lu_env *env,
746 struct lu_device *dev,
747 const struct lu_fid *f,
748 const struct lu_object_conf *conf);
749 struct lu_object *lu_object_find_slice(const struct lu_env *env,
750 struct lu_device *dev,
751 const struct lu_fid *f,
752 const struct lu_object_conf *conf);
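/*
 * Usage sketch of the typical lookup/use/release cycle.  Passing a NULL
 * configuration is an assumption here; on the server side objects are fully
 * identified by the fid, so callers often have no lu_object_conf to pass.
 * The surrounding env/dev/fid variables are assumed to exist.
 *
 *      struct lu_object *o;
 *
 *      o = lu_object_find(env, dev, fid, NULL);
 *      if (IS_ERR(o))
 *              return PTR_ERR(o);
 *      // ... operate on the compound object ...
 *      lu_object_put(env, o);
 */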
761 * First (topmost) sub-object of given compound object
763 static inline struct lu_object *lu_object_top(struct lu_object_header *h)
765 LASSERT(!list_empty(&h->loh_layers));
766 return container_of(h->loh_layers.next, struct lu_object, lo_linkage);
770 * Next sub-object in the layering
772 static inline struct lu_object *lu_object_next(const struct lu_object *o)
774 return container_of(o->lo_linkage.next, struct lu_object, lo_linkage);
778 * Pointer to the fid of this object.
780 static inline const struct lu_fid *lu_object_fid(const struct lu_object *o)
782 return &o->lo_header->loh_fid;
786 * return device operations vector for this object
788 static inline const struct lu_device_operations *
789 lu_object_ops(const struct lu_object *o)
791 return o->lo_dev->ld_ops;
795 * Given a compound object, find its slice corresponding to the device type \a dtype.
798 struct lu_object *lu_object_locate(struct lu_object_header *h,
799 const struct lu_device_type *dtype);
802 * Printer function emitting messages through libcfs_debug_msg().
804 int lu_cdebug_printer(const struct lu_env *env,
805 void *cookie, const char *format, ...);
808 * Print object description followed by a user-supplied message.
810 #define LU_OBJECT_DEBUG(mask, env, object, format, ...) \
812 if (cfs_cdebug_show(mask, DEBUG_SUBSYSTEM)) { \
813 LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, mask, NULL); \
814 lu_object_print(env, &msgdata, lu_cdebug_printer, object);\
815 CDEBUG(mask, format "\n", ## __VA_ARGS__); \
820 * Print short object description followed by a user-supplied message.
822 #define LU_OBJECT_HEADER(mask, env, object, format, ...) \
824 if (cfs_cdebug_show(mask, DEBUG_SUBSYSTEM)) { \
825 LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, mask, NULL); \
826 lu_object_header_print(env, &msgdata, lu_cdebug_printer,\
827 (object)->lo_header); \
828 lu_cdebug_printer(env, &msgdata, "\n"); \
829 CDEBUG(mask, format, ## __VA_ARGS__); \
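/*
 * Usage sketch: dump a full or short object description together with a
 * caller-supplied message.  D_INFO/D_ERROR are the usual libcfs debug masks;
 * the surrounding env, o and rc variables are assumed to exist.
 *
 *      LU_OBJECT_DEBUG(D_INFO, env, o, "find failed: rc = %d", rc);
 *      LU_OBJECT_HEADER(D_ERROR, env, o, "unexpected object state");
 */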
833 void lu_object_print (const struct lu_env *env, void *cookie,
834 lu_printer_t printer, const struct lu_object *o);
835 void lu_object_header_print(const struct lu_env *env, void *cookie,
836 lu_printer_t printer,
837 const struct lu_object_header *hdr);
840 * Check object consistency.
842 int lu_object_invariant(const struct lu_object *o);
846 * Check whether the object exists, whether on local or remote storage.
847 * Note: LOHA_EXISTS will be set once someone has created the object;
848 * it does not need to be committed to storage.
850 #define lu_object_exists(o) ((o)->lo_header->loh_attr & LOHA_EXISTS)
853 * Check whether the object is on remote storage.
855 #define lu_object_remote(o) unlikely((o)->lo_header->loh_attr & LOHA_REMOTE)
858 * Check whether the object has an agent entry on the current target.
860 #define lu_object_has_agent_entry(o) \
861 unlikely((o)->lo_header->loh_attr & LOHA_HAS_AGENT_ENTRY)
863 static inline void lu_object_set_agent_entry(struct lu_object *o)
865 o->lo_header->loh_attr |= LOHA_HAS_AGENT_ENTRY;
868 static inline void lu_object_clear_agent_entry(struct lu_object *o)
870 o->lo_header->loh_attr &= ~LOHA_HAS_AGENT_ENTRY;
873 static inline int lu_object_assert_exists(const struct lu_object *o)
875 return lu_object_exists(o);
878 static inline int lu_object_assert_not_exists(const struct lu_object *o)
880 return !lu_object_exists(o);
884 * Attr of this object.
886 static inline __u32 lu_object_attr(const struct lu_object *o)
888 LASSERT(lu_object_exists(o) != 0);
890 return o->lo_header->loh_attr & S_IFMT;
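/*
 * Usage sketch: because lu_object_attr() returns only the S_IFMT bits, the
 * standard file-type macros can be applied directly.  The existence check
 * mirrors the LASSERT() above.
 *
 *      if (lu_object_exists(o) && S_ISDIR(lu_object_attr(o)))
 *              // take the directory-specific path
 */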
893 static inline void lu_object_ref_add_atomic(struct lu_object *o,
897 lu_ref_add_atomic(&o->lo_header->loh_reference, scope, source);
900 static inline void lu_object_ref_add(struct lu_object *o,
904 lu_ref_add(&o->lo_header->loh_reference, scope, source);
907 static inline void lu_object_ref_add_at(struct lu_object *o,
908 struct lu_ref_link *link,
912 lu_ref_add_at(&o->lo_header->loh_reference, link, scope, source);
915 static inline void lu_object_ref_del(struct lu_object *o,
916 const char *scope, const void *source)
918 lu_ref_del(&o->lo_header->loh_reference, scope, source);
921 static inline void lu_object_ref_del_at(struct lu_object *o,
922 struct lu_ref_link *link,
923 const char *scope, const void *source)
925 lu_ref_del_at(&o->lo_header->loh_reference, link, scope, source);
928 /** input params, should be filled out by mdt */
932 /** count in bytes */
933 unsigned int rp_count;
934 /** number of pages */
935 unsigned int rp_npages;
936 /** requested attr */
938 /** pointers to pages */
939 struct page **rp_pages;
942 enum lu_xattr_flags {
943 LU_XATTR_REPLACE = BIT(0),
944 LU_XATTR_CREATE = BIT(1),
945 LU_XATTR_MERGE = BIT(2),
946 LU_XATTR_SPLIT = BIT(3),
947 LU_XATTR_PURGE = BIT(4),
952 /* \name lu_context @{ */
954 /** For lu_context health-checks */
955 enum lu_context_state {
964 * lu_context. Execution context for lu_object methods. Currently associated
967 * All lu_object methods, except device and device type methods (called during
968 * system initialization and shutdown) are executed "within" some
969 * lu_context. This means that a pointer to some "current" lu_context is passed
970 * as an argument to all methods.
972 * All service ptlrpc threads create lu_context as part of their
973 * initialization. It is possible to create "stand-alone" context for other
974 * execution environments (like system calls).
976 * lu_object methods mainly use lu_context through lu_context_key interface
977 * that allows each layer to associate arbitrary pieces of data with each
978 * context (see pthread_key_create(3) for similar interface).
980 * On a client, lu_context is bound to a thread, see cl_env_get().
982 * \see lu_context_key
986 * lu_context is used on the client side too. Yet we don't want to
987 * allocate values of server-side keys for the client contexts, and vice versa.
990 * To achieve this, a set of tags is introduced. Contexts and keys are
991 * marked with tags. Key values are created only for contexts whose set
992 * of tags has a non-empty intersection with that of the key. Tags are taken
993 * from enum lu_context_tag.
996 enum lu_context_state lc_state;
998 * Pointer to the home service thread. NULL for other execution contexts.
1001 struct ptlrpc_thread *lc_thread;
1003 * Pointer to an array with key values. Internal implementation detail.
1008 * Linkage into a list of all remembered contexts. Only
1009 * `non-transient' contexts, i.e., ones created for service threads, are placed here.
1012 struct list_head lc_remember;
1014 * Version counter used to skip calls to lu_context_refill() when no
1015 * keys were registered.
1017 unsigned int lc_version;
1021 unsigned int lc_cookie;
1025 * lu_context_key interface. Similar to pthread_key.
1028 enum lu_context_tag {
1030 * Thread on md server
1032 LCT_MD_THREAD = BIT(0),
1034 * Thread on dt server
1036 LCT_DT_THREAD = BIT(1),
1040 LCT_CL_THREAD = BIT(3),
1042 * A per-request session on a server, and a per-system-call session on a client.
1045 LCT_SESSION = BIT(4),
1047 * A per-request data on OSP device
1049 LCT_OSP_THREAD = BIT(5),
1053 LCT_MG_THREAD = BIT(6),
1055 * Context for local operations
1059 * session for server thread
1061 LCT_SERVER_SESSION = BIT(8),
1063 * Set when at least one of the keys having values in this context has a
1064 * non-NULL lu_context_key::lct_exit() method. This is used to
1065 * optimize the lu_context_exit() call.
1067 LCT_HAS_EXIT = BIT(28),
1069 * Don't add references for modules creating key values in that context.
1070 * This is only for contexts used internally by lu_object framework.
1072 LCT_NOREF = BIT(29),
1074 * Key is being prepared for retiring, don't create new values for it.
1076 LCT_QUIESCENT = BIT(30),
1078 * Context should be remembered.
1080 LCT_REMEMBER = BIT(31),
1082 * Contexts usable in cache shrinker thread.
1084 LCT_SHRINKER = LCT_MD_THREAD|LCT_DT_THREAD|LCT_CL_THREAD|LCT_NOREF,
1088 * Key. Represents per-context value slot.
1090 * Keys are usually registered when the module owning the key is initialized, and
1091 * de-registered when the module is unloaded. Once a key is registered, all new
1092 * contexts with matching tags will get the key value. "Old" contexts, already
1093 * initialized at the time of key registration, can be forced to get key value
1094 * by calling lu_context_refill().
1096 * Every key value is counted in lu_context_key::lct_used and acquires a
1097 * reference on the owning module. This means that all key values have to be
1098 * destroyed before the module can be unloaded. This is usually achieved by
1099 * stopping threads started by the module, that created contexts in their
1100 * entry functions. The situation is complicated by threads shared by multiple
1101 * modules, like the ptlrpcd daemon on a client. To work around this problem,
1102 * contexts, created in such threads, are `remembered' (see
1103 * LCT_REMEMBER)---i.e., added into a global list. When module is preparing
1104 * for unloading it does the following:
1106 * - marks its keys as `quiescent' (lu_context_tag::LCT_QUIESCENT)
1107 * preventing new key values from being allocated in the new contexts,
1110 * - scans a list of remembered contexts, destroying values of module
1111 * keys, thus releasing references to the module.
1113 * This is done by lu_context_key_quiesce(). If module is re-activated
1114 * before key has been de-registered, lu_context_key_revive() call clears
1115 * `quiescent' marker.
1117 * lu_context code doesn't provide any internal synchronization for these
1118 * activities---it's assumed that startup (including threads start-up) and
1119 * shutdown are serialized by some external means.
1123 struct lu_context_key {
1125 * Set of tags for which values of this key are to be instantiated.
1129 * Value constructor. This is called when a new value is created for a
1130 * context. Returns a pointer to the new value or an error pointer.
1132 void *(*lct_init)(const struct lu_context *ctx,
1133 struct lu_context_key *key);
1135 * Value destructor. Called when context with previously allocated
1136 * value of this slot is destroyed. \a data is a value that was returned
1137 * by a matching call to lu_context_key::lct_init().
1139 void (*lct_fini)(const struct lu_context *ctx,
1140 struct lu_context_key *key, void *data);
1142 * Optional method called on lu_context_exit() for all allocated
1143 * keys. Can be used by debugging code checking that locks are released, etc.
1146 void (*lct_exit)(const struct lu_context *ctx,
1147 struct lu_context_key *key, void *data);
1149 * Internal implementation detail: index within lu_context::lc_value[]
1150 * reserved for this key.
1154 * Internal implementation detail: number of values created for this key.
1159 * Internal implementation detail: module for this key.
1161 struct module *lct_owner;
1163 * References to this key. For debugging.
1165 struct lu_ref lct_reference;
1168 #define LU_KEY_INIT(mod, type) \
1169 static void *mod##_key_init(const struct lu_context *ctx, \
1170 struct lu_context_key *key) \
1174 BUILD_BUG_ON(sizeof(*value) > PAGE_SIZE); \
1176 OBD_ALLOC_PTR(value); \
1177 if (value == NULL) \
1178 value = ERR_PTR(-ENOMEM); \
1182 struct __##mod##__dummy_init { ; } /* semicolon catcher */
1184 #define LU_KEY_FINI(mod, type) \
1185 static void mod##_key_fini(const struct lu_context *ctx, \
1186 struct lu_context_key *key, void *data) \
1188 type *info = data; \
1190 OBD_FREE_PTR(info); \
1192 struct __##mod##__dummy_fini {; } /* semicolon catcher */
1194 #define LU_KEY_INIT_FINI(mod, type) \
1195 LU_KEY_INIT(mod, type); \
1196 LU_KEY_FINI(mod, type) \
1198 #define LU_CONTEXT_KEY_DEFINE(mod, tags) \
1199 struct lu_context_key mod##_thread_key = { \
1201 .lct_init = mod##_key_init, \
1202 .lct_fini = mod##_key_fini \
1205 #define LU_CONTEXT_KEY_INIT(key) ((key)->lct_owner = THIS_MODULE)
1207 int lu_context_key_register(struct lu_context_key *key);
1208 void lu_context_key_degister(struct lu_context_key *key);
1209 void *lu_context_key_get(const struct lu_context *ctx,
1210 const struct lu_context_key *key);
1211 void lu_context_key_quiesce(struct lu_device_type *t,
1212 struct lu_context_key *key);
1213 void lu_context_key_revive(struct lu_context_key *key);
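/*
 * Sketch: defining and using a per-context key for a hypothetical "foo"
 * module.  struct foo_thread_info and the tag choice are assumptions for
 * this example; the helper macros above generate foo_key_init(),
 * foo_key_fini() and the foo_thread_key variable.
 *
 *      struct foo_thread_info {
 *              struct lu_fid fti_fid;
 *      };
 *
 *      LU_KEY_INIT_FINI(foo, struct foo_thread_info);
 *      LU_CONTEXT_KEY_DEFINE(foo, LCT_MD_THREAD | LCT_DT_THREAD);
 *
 *      // at module init:
 *      LU_CONTEXT_KEY_INIT(&foo_thread_key);
 *      int rc = lu_context_key_register(&foo_thread_key);
 *
 *      // in a method running in a context with matching tags:
 *      struct foo_thread_info *info =
 *              lu_context_key_get(&env->le_ctx, &foo_thread_key);
 *
 *      // at module exit:
 *      lu_context_key_degister(&foo_thread_key);
 */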
1217 * LU_KEY_INIT_GENERIC() has to be a macro to correctly determine the owning module.
1221 #define LU_KEY_INIT_GENERIC(mod) \
1222 static void mod##_key_init_generic(struct lu_context_key *k, ...) \
1224 struct lu_context_key *key = k; \
1227 va_start(args, k); \
1229 LU_CONTEXT_KEY_INIT(key); \
1230 key = va_arg(args, struct lu_context_key *); \
1231 } while (key != NULL); \
1235 #define LU_TYPE_INIT(mod, ...) \
1236 LU_KEY_INIT_GENERIC(mod) \
1237 static int mod##_type_init(struct lu_device_type *t) \
1239 mod##_key_init_generic(__VA_ARGS__, NULL); \
1240 return lu_context_key_register_many(__VA_ARGS__, NULL); \
1242 struct __##mod##_dummy_type_init {; }
1244 #define LU_TYPE_FINI(mod, ...) \
1245 static void mod##_type_fini(struct lu_device_type *t) \
1247 lu_context_key_degister_many(__VA_ARGS__, NULL); \
1249 struct __##mod##_dummy_type_fini {; }
1251 #define LU_TYPE_START(mod, ...) \
1252 static void mod##_type_start(struct lu_device_type *t) \
1254 lu_context_key_revive_many(__VA_ARGS__, NULL); \
1256 struct __##mod##_dummy_type_start {; }
1258 #define LU_TYPE_STOP(mod, ...) \
1259 static void mod##_type_stop(struct lu_device_type *t) \
1261 lu_context_key_quiesce_many(t, __VA_ARGS__, NULL); \
1263 struct __##mod##_dummy_type_stop { }
1267 #define LU_TYPE_INIT_FINI(mod, ...) \
1268 LU_TYPE_INIT(mod, __VA_ARGS__); \
1269 LU_TYPE_FINI(mod, __VA_ARGS__); \
1270 LU_TYPE_START(mod, __VA_ARGS__); \
1271 LU_TYPE_STOP(mod, __VA_ARGS__)
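/*
 * Sketch (hypothetical "foo" layer): wiring the generated
 * foo_type_init/fini/start/stop helpers into a device type.  The device
 * alloc/free methods and the LU_DEVICE_MD tag are assumptions for this
 * example.
 *
 *      LU_TYPE_INIT_FINI(foo, &foo_thread_key);
 *
 *      static const struct lu_device_type_operations foo_device_type_ops = {
 *              .ldto_init         = foo_type_init,
 *              .ldto_fini         = foo_type_fini,
 *              .ldto_start        = foo_type_start,
 *              .ldto_stop         = foo_type_stop,
 *              .ldto_device_alloc = foo_device_alloc,
 *              .ldto_device_free  = foo_device_free,
 *      };
 *
 *      static struct lu_device_type foo_device_type = {
 *              .ldt_tags = LU_DEVICE_MD,
 *              .ldt_name = "foo",
 *              .ldt_ops  = &foo_device_type_ops,
 *      };
 */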
1273 int lu_context_init(struct lu_context *ctx, __u32 tags);
1274 void lu_context_fini(struct lu_context *ctx);
1275 void lu_context_enter(struct lu_context *ctx);
1276 void lu_context_exit(struct lu_context *ctx);
1277 int lu_context_refill(struct lu_context *ctx);
1280 * Helper functions to operate on multiple keys. These are used by the default
1281 * device type operations, defined by LU_TYPE_INIT_FINI().
1284 int lu_context_key_register_many(struct lu_context_key *k, ...);
1285 void lu_context_key_degister_many(struct lu_context_key *k, ...);
1286 void lu_context_key_revive_many(struct lu_context_key *k, ...);
1287 void lu_context_key_quiesce_many(struct lu_device_type *t,
1288 struct lu_context_key *k, ...);
1291 * update/clear ctx/ses tags.
1293 void lu_context_tags_update(__u32 tags);
1294 void lu_context_tags_clear(__u32 tags);
1295 void lu_session_tags_update(__u32 tags);
1296 void lu_session_tags_clear(__u32 tags);
1303 * "Local" context, used to store data instead of stack.
1305 struct lu_context le_ctx;
1307 * "Session" context for per-request data.
1309 struct lu_context *le_ses;
1312 int lu_env_init(struct lu_env *env, __u32 tags);
1313 void lu_env_fini(struct lu_env *env);
1314 int lu_env_refill(struct lu_env *env);
1315 int lu_env_refill_by_tags(struct lu_env *env, __u32 ctags, __u32 stags);
1317 static inline void *lu_env_info(const struct lu_env *env,
1318 const struct lu_context_key *key)
1322 info = lu_context_key_get(&env->le_ctx, key);
1324 if (!lu_env_refill((struct lu_env *)env))
1325 info = lu_context_key_get(&env->le_ctx, key);
1331 struct lu_env *lu_env_find(void);
1332 int lu_env_add(struct lu_env *env);
1333 int lu_env_add_task(struct lu_env *env, struct task_struct *task);
1334 void lu_env_remove(struct lu_env *env);
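/*
 * Sketch: a stand-alone environment for a kernel thread that is not a
 * ptlrpc service thread.  The tag choice depends on which keys the thread
 * actually needs and is only an example here.
 *
 *      struct lu_env env;
 *      int rc;
 *
 *      rc = lu_env_init(&env, LCT_DT_THREAD);
 *      if (rc)
 *              return rc;
 *      // ... call lu_object methods, passing &env ...
 *      lu_env_fini(&env);
 */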
1336 /** @} lu_context */
1339 * Output site statistical counters into a buffer. Suitable for
1340 * ll_rd_*()-style functions.
1342 int lu_site_stats_seq_print(const struct lu_site *s, struct seq_file *m);
1345 * Common name structure to be passed around for various name-related methods.
1348 const char *ln_name;
1352 static inline bool name_is_dot_or_dotdot(const char *name, int namelen)
1354 return name[0] == '.' &&
1355 (namelen == 1 || (namelen == 2 && name[1] == '.'));
1358 static inline bool lu_name_is_dot_or_dotdot(const struct lu_name *lname)
1360 return name_is_dot_or_dotdot(lname->ln_name, lname->ln_namelen);
1364 * Determine if filename should be considered a "temporary" name.
1366 * For temporary names, use only the main part of the filename and ignore
1367 * the suffix, so that the filename will hash to the same MDT after it is
1368 * renamed. That avoids creating spurious remote entries for rsync, dcp,
1369 * vi, and other tools that create a temporary name before renaming the file.
1371 * The "CRUSH" and "CRUSH2" hash types are slightly different, and should
1372 * not be modified without introducing a new hash type. The hash algorithm
1373 * forms an important part of the network protocol for striped directories,
1374 * so if the hash function were "fixed" in any way it would prevent clients
1375 * from looking up a filename on the right MDT. LU-15692.
1377 * \param[in] name filename
1378 * \param[in] namelen length of @name
1379 * \param[in] dot_prefix if @name needs a leading '.' to be temporary
1380 * \param[in] suffixlen number of characters after '.' in @name to check
1381 * \param[in] crush2 whether CRUSH or CRUSH2 heuristic should be used
1383 static inline bool lu_name_is_temp_file(const char *name, int namelen,
1384 bool dot_prefix, int suffixlen,
1390 int len = suffixlen;
1392 if (dot_prefix && name[0] != '.')
1395 if (namelen < dot_prefix + suffixlen + 2 ||
1396 name[namelen - suffixlen - 1] != '.')
1399 /* Any non-alphanumeric chars in the suffix for CRUSH2 mean the
1400 * filename is *not* temporary. The original CRUSH was incorrectly
1401 * matching if a '.' happens to be in the right place, for example
1402 * file.mdtest.12.12345 or output.6334.log, which is bad. LU-15692
1405 if (islower(name[namelen - len]))
1407 else if (isupper(name[namelen - len]))
1409 else if (isdigit(name[namelen - len]))
1416 /* mktemp() suffixes normally have a mix of upper- and lower-case
1417 * letters and/or digits, rarely all upper- or lower-case or digits.
1418 * Random all-digit suffixes are rare (1/45k for suffixlen=6), but
1419 * common in normal usage (incrementing versions, dates, ranks, etc),
1420 * so they are considered non-temporary even with 1 or 2 non-numeric chars.
1422 * About 0.07% of randomly-generated names will slip through, which
1423 * only means that they may be renamed to a different MDT (slowdown),
1424 * but this avoids 99.93% of cross-MDT renames for those files.
1426 if (upper == suffixlen || lower == suffixlen)
1430 if (digit >= suffixlen - 1 &&
1431 isdigit(name[namelen - suffixlen]))
1433 } else { /* old crush incorrectly returns "true" for all-digit suffix */
1434 if (digit >= suffixlen - 1 &&
1435 !isdigit(name[namelen - suffixlen]))
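/*
 * Usage sketch: a caller hashing names for a striped directory might treat
 * mktemp()-style names with a leading '.' and a 6-character random suffix
 * (e.g. ".data.txt.Xs4QpR") as temporary, so only the base name is hashed.
 * The parameter values are illustrative only.
 *
 *      if (lu_name_is_temp_file(name, namelen, true, 6, true))
 *              // hash only the part of the name before the random suffix
 */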
1442 static inline bool lu_name_is_backup_file(const char *name, int namelen,
1446 name[namelen - 2] != '.' && name[namelen - 1] == '~') {
1452 if (namelen > 4 && name[namelen - 4] == '.' &&
1453 (!strncasecmp(name + namelen - 3, "bak", 3) ||
1454 !strncasecmp(name + namelen - 3, "sav", 3))) {
1460 if (namelen > 5 && name[namelen - 5] == '.' &&
1461 !strncasecmp(name + namelen - 4, "orig", 4)) {
1470 static inline bool lu_name_is_valid_len(const char *name, size_t name_len)
1472 return name != NULL &&
1474 name_len < INT_MAX &&
1475 strlen(name) == name_len &&
1476 memchr(name, '/', name_len) == NULL;
1480 * Validate names (path components)
1482 * To be valid, \a name must be non-empty, '\0'-terminated, of length \a
1483 * name_len, and must not contain '/'. The maximum length of a name (before,
1484 * say, -ENAMETOOLONG is returned) is really controlled by llite
1485 * and the server. We only check for something insane coming from bad
1486 * integer handling here.
1488 static inline bool lu_name_is_valid_2(const char *name, size_t name_len)
1490 return lu_name_is_valid_len(name, name_len) && name[name_len] == '\0';
1493 static inline bool lu_name_is_valid(const struct lu_name *ln)
1495 return lu_name_is_valid_2(ln->ln_name, ln->ln_namelen);
1498 #define DNAME "%.*s"
1499 #define PNAME(ln) \
1500 (lu_name_is_valid(ln) ? (ln)->ln_namelen : 0), \
1501 (lu_name_is_valid(ln) ? (ln)->ln_name : "")
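/*
 * Usage sketch: printing a struct lu_name safely in a debug message (the
 * name is not necessarily NUL-terminated).  D_INODE and the lname variable
 * are assumed for the example.
 *
 *      CDEBUG(D_INODE, "lookup "DNAME"\n", PNAME(&lname));
 */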
1504 * Common buffer structure to be passed around for various xattr_{s,g}et() and other methods.
1512 #define DLUBUF "(%p %zu)"
1513 #define PLUBUF(buf) ((buf)->lb_buf, (buf)->lb_len)
1515 /* read buffer params, should be filled out by out */
1517 /** number of buffers */
1518 unsigned int rb_nbufs;
1519 /** pointers to buffers */
1520 struct lu_buf rb_bufs[];
1524 * One-time initializers, called at obdclass module initialization, not exported.
1529 * Initialization of global lu_* data.
1531 int lu_global_init(void);
1534 * Dual to lu_global_init().
1536 void lu_global_fini(void);
1538 struct lu_kmem_descr {
1539 struct kmem_cache **ckd_cache;
1540 const char *ckd_name;
1541 const size_t ckd_size;
1544 int lu_kmem_init(struct lu_kmem_descr *caches);
1545 void lu_kmem_fini(struct lu_kmem_descr *caches);
1547 void lu_object_assign_fid(const struct lu_env *env, struct lu_object *o,
1548 const struct lu_fid *fid);
1549 struct lu_object *lu_object_anon(const struct lu_env *env,
1550 struct lu_device *dev,
1551 const struct lu_object_conf *conf);
1554 extern struct lu_buf LU_BUF_NULL;
1556 void lu_buf_free(struct lu_buf *buf);
1557 void lu_buf_alloc(struct lu_buf *buf, size_t size);
1558 void lu_buf_realloc(struct lu_buf *buf, size_t size);
1560 int lu_buf_check_and_grow(struct lu_buf *buf, size_t len);
1561 struct lu_buf *lu_buf_check_and_alloc(struct lu_buf *buf, size_t len);
1563 extern __u32 lu_context_tags_default;
1564 extern __u32 lu_session_tags_default;
1566 static inline bool lu_device_is_cl(const struct lu_device *d)
1568 return d->ld_type->ldt_tags & LU_DEVICE_CL;
1571 static inline bool lu_object_is_cl(const struct lu_object *o)
1573 return lu_device_is_cl(o->lo_dev);
1576 /* Generic subset of tgts */
1577 struct lu_tgt_pool {
1578 __u32 *op_array; /* array of tgt indices */
1581 unsigned int op_count; /* number of tgts in the array */
1582 unsigned int op_size; /* allocated size of op_array */
1583 struct rw_semaphore op_rw_sem; /* to protect lu_tgt_pool use */
1586 int lu_tgt_pool_init(struct lu_tgt_pool *op, unsigned int count);
1587 #define lu_tgt_pool_add(op, idx, min_count) \
1588 lu_tgt_pool_add_lock(op, idx, min_count, true)
1589 #define lu_tgt_pool_add_locked(op, idx, min_count) \
1590 lu_tgt_pool_add_lock(op, idx, min_count, false)
1591 int lu_tgt_pool_add_lock(struct lu_tgt_pool *op, __u32 idx,
1592 unsigned int min_count, bool locked);
1593 int lu_tgt_pool_remove(struct lu_tgt_pool *op, __u32 idx);
1594 void lu_tgt_pool_free(struct lu_tgt_pool *op);
1595 int lu_tgt_check_index(int idx, struct lu_tgt_pool *osts);
1596 int lu_tgt_pool_extend(struct lu_tgt_pool *op, unsigned int min_count);
1598 /* bitflags used in rr / qos allocation */
1600 LQ_DIRTY = 0, /* recalc qos data */
1601 LQ_SAME_SPACE, /* OSTs all have approx the same space avail */
1602 LQ_RESET, /* zero current penalties */
1603 LQ_SF_PROGRESS, /* statfs op in progress */
1606 #ifdef HAVE_SERVER_SUPPORT
1607 /* round-robin QoS data for LOD/LMV */
1609 spinlock_t lqr_alloc; /* protect allocation index */
1610 atomic_t lqr_start_idx; /* start index of new inode */
1611 __u32 lqr_offset_idx;/* aliasing for start_idx */
1612 int lqr_start_count;/* reseed counter */
1613 struct lu_tgt_pool lqr_pool; /* round-robin optimized list */
1614 unsigned long lqr_flags;
1617 static inline void lu_qos_rr_init(struct lu_qos_rr *lqr)
1619 spin_lock_init(&lqr->lqr_alloc);
1620 set_bit(LQ_DIRTY, &lqr->lqr_flags);
1623 #endif /* HAVE_SERVER_SUPPORT */
1625 /* QoS data per MDS/OSS */
1627 struct obd_uuid lsq_uuid; /* ptlrpc's c_remote_uuid */
1628 struct list_head lsq_svr_list; /* link to lq_svr_list */
1629 __u64 lsq_bavail; /* total bytes avail on svr */
1630 __u64 lsq_iavail; /* total inode avail on svr */
1631 __u64 lsq_penalty; /* current penalty */
1632 __u64 lsq_penalty_per_obj; /* penalty dec per obj*/
1633 time64_t lsq_used; /* last used time, seconds */
1634 __u32 lsq_tgt_count; /* number of tgts on this svr */
1635 __u32 lsq_id; /* unique svr id */
1638 /* QoS data per MDT/OST */
1640 struct lu_svr_qos *ltq_svr; /* svr info */
1641 __u64 ltq_penalty; /* current penalty */
1642 __u64 ltq_penalty_per_obj; /* penalty dec per obj */
1643 __u64 ltq_avail; /* bytes/inode avail */
1644 __u64 ltq_weight; /* net weighting */
1645 time64_t ltq_used; /* last used time, seconds */
1646 bool ltq_usable:1; /* usable for striping */
1649 /* target descriptor */
1650 #define LOV_QOS_DEF_THRESHOLD_RR_PCT 17
1651 #define LMV_QOS_DEF_THRESHOLD_RR_PCT 5
1653 #define LOV_QOS_DEF_PRIO_FREE 90
1654 #define LMV_QOS_DEF_PRIO_FREE 90
1656 struct lu_tgt_desc {
1658 struct dt_device *ltd_tgt;
1659 struct obd_device *ltd_obd;
1661 struct obd_export *ltd_exp;
1662 struct obd_uuid ltd_uuid;
1665 struct list_head ltd_kill;
1666 struct task_struct *ltd_recovery_task;
1667 struct mutex ltd_fid_mutex;
1668 struct lu_tgt_qos ltd_qos; /* qos info per target */
1669 struct obd_statfs ltd_statfs;
1670 time64_t ltd_statfs_age;
1671 unsigned long ltd_active:1,/* is target available for requests */
1672 ltd_activate:1,/* should LOV target be connected */
1673 ltd_reap:1, /* should this target be deleted */
1674 ltd_got_update_log:1, /* Already got update log */
1675 ltd_discon:1; /* LOD target disconnected from OST */
1678 static inline __u64 tgt_statfs_bavail(struct lu_tgt_desc *tgt)
1680 struct obd_statfs *statfs = &tgt->ltd_statfs;
1682 return statfs->os_bavail * statfs->os_bsize;
1685 static inline __u64 tgt_statfs_iavail(struct lu_tgt_desc *tgt)
1687 return tgt->ltd_statfs.os_ffree;
1690 /* number of pointers at 2nd level */
1691 #define TGT_PTRS_PER_BLOCK (PAGE_SIZE / sizeof(void *))
1692 /* number of pointers at 1st level - only need as many as max OST/MDT count */
1693 #define TGT_PTRS ((LOV_ALL_STRIPES + 1) / TGT_PTRS_PER_BLOCK)
1695 struct lu_tgt_desc_idx {
1696 struct lu_tgt_desc *ldi_tgt[TGT_PTRS_PER_BLOCK];
1700 /* QoS data for LOD/LMV */
1701 #define QOS_THRESHOLD_MAX 256 /* should be power of two */
1703 struct list_head lq_svr_list; /* lu_svr_qos list */
1704 struct rw_semaphore lq_rw_sem;
1705 __u32 lq_active_svr_count;
1706 unsigned int lq_prio_free; /* priority for free space */
1707 unsigned int lq_threshold_rr;/* priority for rr */
1708 #ifdef HAVE_SERVER_SUPPORT
1709 struct lu_qos_rr lq_rr; /* round robin qos data */
1711 unsigned long lq_flags;
1714 struct lu_tgt_descs {
1716 struct lov_desc ltd_lov_desc;
1717 struct lmv_desc ltd_lmv_desc;
1719 /* list of known TGTs */
1720 struct lu_tgt_desc_idx *ltd_tgt_idx[TGT_PTRS];
1721 /* Size of the lu_tgts array, guaranteed to be a power of 2 */
1722 __u32 ltd_tgts_size;
1723 /* bitmap of TGTs available */
1724 unsigned long *ltd_tgt_bitmap;
1725 /* TGTs scheduled to be deleted */
1726 __u32 ltd_death_row;
1727 /* Table refcount used for delayed deletion */
1729 /* mutex to serialize concurrent updates to the tgt table */
1730 struct mutex ltd_mutex;
1731 /* read/write semaphore used for array relocation */
1732 struct rw_semaphore ltd_rw_sem;
1734 struct lu_qos ltd_qos;
1735 /* all tgts in a packed array */
1736 struct lu_tgt_pool ltd_tgt_pool;
1737 /* true if tgt is MDT */
1741 #define LTD_TGT(ltd, index) \
1742 ((ltd)->ltd_tgt_idx[(index) / TGT_PTRS_PER_BLOCK]-> \
1743 ldi_tgt[(index) % TGT_PTRS_PER_BLOCK])
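/*
 * Worked example: LTD_TGT() is a two-level lookup.  Assuming 4KiB pages and
 * 8-byte pointers, TGT_PTRS_PER_BLOCK is 512, so target index 5000 is found
 * at first-level slot 5000 / 512 == 9 and second-level slot 5000 % 512 == 392:
 *
 *      struct lu_tgt_desc *tgt = LTD_TGT(ltd, 5000);
 */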
1745 u64 lu_prandom_u64_max(u64 ep_ro);
1746 int lu_qos_add_tgt(struct lu_qos *qos, struct lu_tgt_desc *ltd);
1747 int lu_qos_del_tgt(struct lu_qos *qos, struct lu_tgt_desc *ltd);
1748 void lu_tgt_qos_weight_calc(struct lu_tgt_desc *tgt, bool is_mdt);
1750 int lu_tgt_descs_init(struct lu_tgt_descs *ltd, bool is_mdt);
1751 void lu_tgt_descs_fini(struct lu_tgt_descs *ltd);
1752 int ltd_add_tgt(struct lu_tgt_descs *ltd, struct lu_tgt_desc *tgt);
1753 void ltd_del_tgt(struct lu_tgt_descs *ltd, struct lu_tgt_desc *tgt);
1754 int ltd_qos_penalties_calc(struct lu_tgt_descs *ltd);
1755 int ltd_qos_update(struct lu_tgt_descs *ltd, struct lu_tgt_desc *tgt,
1759 * Whether MDT inode and space usages are balanced.
1761 static inline bool ltd_qos_is_balanced(struct lu_tgt_descs *ltd)
1763 return !test_bit(LQ_DIRTY, <d->ltd_qos.lq_flags) &&
1764 test_bit(LQ_SAME_SPACE, <d->ltd_qos.lq_flags);
1768 * Whether QoS data is up-to-date and QoS can be applied.
1770 static inline bool ltd_qos_is_usable(struct lu_tgt_descs *ltd)
1772 if (ltd_qos_is_balanced(ltd))
1775 if (ltd->ltd_lov_desc.ld_active_tgt_count < 2)
1781 static inline struct lu_tgt_desc *ltd_first_tgt(struct lu_tgt_descs *ltd)
1785 index = find_first_bit(ltd->ltd_tgt_bitmap,
1786 ltd->ltd_tgts_size);
1787 return (index < ltd->ltd_tgts_size) ? LTD_TGT(ltd, index) : NULL;
1790 static inline struct lu_tgt_desc *ltd_next_tgt(struct lu_tgt_descs *ltd,
1791 struct lu_tgt_desc *tgt)
1798 index = tgt->ltd_index;
1799 LASSERT(index < ltd->ltd_tgts_size);
1800 index = find_next_bit(ltd->ltd_tgt_bitmap,
1801 ltd->ltd_tgts_size, index + 1);
1802 return (index < ltd->ltd_tgts_size) ? LTD_TGT(ltd, index) : NULL;
1805 #define ltd_foreach_tgt(ltd, tgt) \
1806 for (tgt = ltd_first_tgt(ltd); tgt; tgt = ltd_next_tgt(ltd, tgt))
1808 #define ltd_foreach_tgt_safe(ltd, tgt, tmp) \
1809 for (tgt = ltd_first_tgt(ltd), tmp = ltd_next_tgt(ltd, tgt); tgt; \
1810 tgt = tmp, tmp = ltd_next_tgt(ltd, tgt))
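/*
 * Usage sketch: iterating over all configured targets and summing the
 * available bytes of the active ones.  The ltd variable is assumed to be a
 * struct lu_tgt_descs set up by the caller.
 *
 *      struct lu_tgt_desc *tgt;
 *      __u64 total = 0;
 *
 *      ltd_foreach_tgt(ltd, tgt) {
 *              if (tgt->ltd_active)
 *                      total += tgt_statfs_bavail(tgt);
 *      }
 */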
1813 #endif /* __LUSTRE_LU_OBJECT_H */