*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
* Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
*
- * Copyright (c) 2012, 2013, Intel Corporation.
+ * Copyright (c) 2012, 2017, Intel Corporation.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
*
* Internal interfaces of LOV layer.
*
#ifndef LOV_CL_INTERNAL_H
#define LOV_CL_INTERNAL_H
-#ifdef __KERNEL__
-# include <libcfs/libcfs.h>
-#else
-# include <liblustre.h>
-#endif
-
+#include <libcfs/libcfs.h>
#include <obd.h>
#include <cl_object.h>
#include "lov_internal.h"
* - top-page keeps a reference to its sub-page, and destroys it when it
* is destroyed.
*
- * - sub-lock keep a reference to its top-locks. Top-lock keeps a
- * reference (and a hold, see cl_lock_hold()) on its sub-locks when it
- * actively using them (that is, in cl_lock_state::CLS_QUEUING,
- * cl_lock_state::CLS_ENQUEUED, cl_lock_state::CLS_HELD states). When
- * moving into cl_lock_state::CLS_CACHED state, top-lock releases a
- * hold. From this moment top-lock has only a 'weak' reference to its
- * sub-locks. This reference is protected by top-lock
- * cl_lock::cll_guard, and will be automatically cleared by the sub-lock
- * when the latter is destroyed. When a sub-lock is canceled, a
- * reference to it is removed from the top-lock array, and top-lock is
- * moved into CLS_NEW state. It is guaranteed that all sub-locks exist
- * while their top-lock is in CLS_HELD or CLS_CACHED states.
- *
* - IO's are not reference counted.
*
* To implement a connection between top and sub entities, lov layer is split
struct lovsub_device;
struct lovsub_object;
-struct lovsub_lock;
enum lov_device_flags {
- LOV_DEV_INITIALIZED = 1 << 0
+ LOV_DEV_INITIALIZED = BIT(0),
};
/*
* Upper half.
*/
-/**
- * Resources that are used in memory-cleaning path, and whose allocation
- * cannot fail even when memory is tight. They are preallocated in sufficient
- * quantities in lov_device::ld_emerg[], and access to them is serialized
- * lov_device::ld_mutex.
- */
-struct lov_device_emerg {
- /**
- * Page list used to submit IO when memory is in pressure.
- */
- struct cl_page_list emrg_page_list;
- /**
- * sub-io's shared by all threads accessing this device when memory is
- * too low to allocate sub-io's dynamically.
- */
- struct cl_io emrg_subio;
- /**
- * Environments used by sub-io's in
- * lov_device_emerg::emrg_subio.
- */
- struct lu_env *emrg_env;
- /**
- * Refchecks for lov_device_emerg::emrg_env.
- *
- * \see cl_env_get()
- */
- int emrg_refcheck;
+/* Data-on-MDT array item in lov_device::ld_md_tgts[] */
+struct lovdom_device {
+ struct cl_device *ldm_mdc;
+ int ldm_idx;
};
struct lov_device {
struct lovsub_device **ld_target;
__u32 ld_flags;
- /** Emergency resources used in memory-cleansing paths. */
- struct lov_device_emerg **ld_emrg;
- /**
- * Serializes access to lov_device::ld_emrg in low-memory
- * conditions.
- */
- struct mutex ld_mutex;
+ /* Data-on-MDT devices */
+ __u32 ld_md_tgts_nr;
+ struct lovdom_device *ld_md_tgts;
+ struct obd_device *ld_lmv;
+ /* LU site for subdevices */
+ struct lu_site ld_site;
};
/**
*/
enum lov_layout_type {
LLT_EMPTY, /** empty file without body (mknod + truncate) */
- LLT_RAID0, /** striped file */
LLT_RELEASED, /** file with no objects (data in HSM) */
+ LLT_COMP, /** support composite layout */
+ LLT_FOREIGN, /** foreign layout */
LLT_NR
};
switch (llt) {
case LLT_EMPTY:
return "EMPTY";
- case LLT_RAID0:
- return "RAID0";
case LLT_RELEASED:
return "RELEASED";
+ case LLT_COMP:
+ return "COMPOSITE";
+ case LLT_FOREIGN:
+ return "FOREIGN";
case LLT_NR:
LBUG();
}
}
/**
+ * Return the layout entry type (LOV_PATTERN_* value) associated with a
+ * given composite layout entry.
+ */
+static inline __u32 lov_entry_type(struct lov_stripe_md_entry *lsme)
+{
+ if ((lov_pattern(lsme->lsme_pattern) & LOV_PATTERN_RAID0) ||
+ (lov_pattern(lsme->lsme_pattern) == LOV_PATTERN_MDT) ||
+ (lov_pattern(lsme->lsme_pattern) == LOV_PATTERN_FOREIGN))
+ return lov_pattern(lsme->lsme_pattern &
+ ~LOV_PATTERN_OVERSTRIPING);
+ return 0;
+}
+
+struct lov_layout_entry;
+struct lov_object;
+struct lov_lock_sub;
+
+struct lov_comp_layout_entry_ops {
+ int (*lco_init)(const struct lu_env *env, struct lov_device *dev,
+ struct lov_object *lov, unsigned int index,
+ const struct cl_object_conf *conf,
+ struct lov_layout_entry *lle);
+ void (*lco_fini)(const struct lu_env *env,
+ struct lov_layout_entry *lle);
+ int (*lco_getattr)(const struct lu_env *env, struct lov_object *obj,
+ unsigned int index, struct lov_layout_entry *lle,
+ struct cl_attr **attr);
+};
+
+struct lov_layout_raid0 {
+ unsigned lo_nr;
+ /**
+ * Record the stripe number that holds the truncate offset; used for
+ * setting the OST object size on truncate. LU-14128.
+ */
+ int lo_trunc_stripeno;
+ /**
+ * When this is true, lov_layout_raid0::lo_attr contains
+ * valid, up-to-date attributes for the top-level
+ * object. This field is reset to 0 when attributes of
+ * any sub-object change.
+ */
+ bool lo_attr_valid;
+ /**
+ * Array of sub-objects. Allocated when top-object is
+ * created (lov_init_raid0()).
+ *
+ * Top-object is a strict master of its sub-objects:
+ * it is created before them, and outlives its
+ * children (the latter is necessary so that basic
+ * functions like cl_object_top() always
+ * work). Top-object keeps a reference on every
+ * sub-object.
+ *
+ * When top-object is destroyed (lov_delete_raid0())
+ * it releases its reference to a sub-object and waits
+ * until the latter is finally destroyed.
+ */
+ struct lovsub_object **lo_sub;
+ /**
+ * protect lo_sub
+ */
+ spinlock_t lo_sub_lock;
+ /**
+ * Cached object attribute, built from sub-object
+ * attributes.
+ */
+ struct cl_attr lo_attr;
+};
+
+struct lov_layout_dom {
+ /* Keep this first so a DOM layout entry can also be
+ * addressed as RAID0 after initialization.
+ */
+ struct lov_layout_raid0 lo_dom_r0;
+ struct lovsub_object *lo_dom;
+ struct lov_oinfo *lo_loi;
+};
+
+struct lov_layout_entry {
+ __u32 lle_type;
+ unsigned int lle_valid:1;
+ unsigned int lle_preference;
+ struct lu_extent *lle_extent;
+ struct lov_stripe_md_entry *lle_lsme;
+ struct lov_comp_layout_entry_ops *lle_comp_ops;
+ union {
+ struct lov_layout_raid0 lle_raid0;
+ struct lov_layout_dom lle_dom;
+ };
+};
+
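For orientation, the union in lov_layout_entry above is selected by the entry's pattern. A minimal sketch of that dispatch, using only helpers declared in this header (the function name is hypothetical, not part of this patch):

/* Sketch (not part of this patch): pick the union member of a
 * lov_layout_entry from its stripe-md entry pattern.  DOM entries embed
 * RAID0 state as their first member, so they can be handed back as RAID0. */
static inline struct lov_layout_raid0 *
lle_raid0_state(struct lov_layout_entry *lle)
{
	__u32 type = lov_entry_type(lle->lle_lsme);

	if (type == LOV_PATTERN_MDT)
		return &lle->lle_dom.lo_dom_r0;
	if (type & LOV_PATTERN_RAID0)
		return &lle->lle_raid0;
	/* foreign (and unrecognized) entries carry no sub-objects */
	return NULL;
}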
+struct lov_mirror_entry {
+ unsigned short lre_mirror_id;
+ unsigned short lre_stale:1, /* set if any component is stale */
+ lre_valid:1, /* set if at least one component
+ * in this mirror is valid */
+ lre_foreign:1; /* set if it is a foreign component */
+ int lre_preference; /* overall preference of this mirror */
+
+ unsigned short lre_start; /* index to lo_entries, start index of
+ * this mirror */
+ unsigned short lre_end; /* end index of this mirror */
+};
+
+enum lov_object_flags {
+ /* Layout is invalid, set when layout lock is lost */
+ LO_LAYOUT_INVALID = 0x1,
+};
+
+/**
* lov-specific file state.
*
* lov object has particular layout type, determining how top-object is built
* function corresponding to the current layout type.
*/
struct lov_object {
- struct cl_object lo_cl;
- /**
- * Serializes object operations with transitions between layout types.
- *
- * This semaphore is taken in shared mode by all object methods, and
- * is taken in exclusive mode when object type is changed.
- *
- * \see lov_object::lo_type
- */
+ struct cl_object lo_cl;
+ /**
+ * Serializes object operations with transitions between layout types.
+ *
+ * This semaphore is taken in shared mode by all object methods, and
+ * is taken in exclusive mode when object type is changed.
+ *
+ * \see lov_object::lo_type
+ */
struct rw_semaphore lo_type_guard;
/**
* Type of an object. Protected by lov_object::lo_type_guard.
*/
enum lov_layout_type lo_type;
/**
- * True if layout is invalid. This bit is cleared when layout lock
- * is lost.
+ * Object flags.
*/
- bool lo_layout_invalid;
+ unsigned long lo_obj_flags;
/**
* How many IOs are on going on this object. Layout can be changed
* only if there is no active IO.
*/
- cfs_atomic_t lo_active_ios;
+ atomic_t lo_active_ios;
/**
* Waitq - wait for no one else is using lo_lsm
*/
*/
struct lov_stripe_md *lo_lsm;
- union lov_layout_state {
- struct lov_layout_raid0 {
- unsigned lo_nr;
- /**
- * When this is true, lov_object::lo_attr contains
- * valid up to date attributes for a top-level
- * object. This field is reset to 0 when attributes of
- * any sub-object change.
- */
- int lo_attr_valid;
- /**
- * Array of sub-objects. Allocated when top-object is
- * created (lov_init_raid0()).
- *
- * Top-object is a strict master of its sub-objects:
- * it is created before them, and outlives its
- * children (this later is necessary so that basic
- * functions like cl_object_top() always
- * work). Top-object keeps a reference on every
- * sub-object.
- *
- * When top-object is destroyed (lov_delete_raid0())
- * it releases its reference to a sub-object and waits
- * until the latter is finally destroyed.
- */
- struct lovsub_object **lo_sub;
- /**
- * protect lo_sub
- */
- spinlock_t lo_sub_lock;
- /**
- * Cached object attribute, built from sub-object
- * attributes.
- */
- struct cl_attr lo_attr;
- } raid0;
- struct lov_layout_state_empty {
- } empty;
+ union lov_layout_state {
+ struct lov_layout_state_empty {
+ } empty;
struct lov_layout_state_released {
} released;
- } u;
- /**
- * Thread that acquired lov_object::lo_type_guard in an exclusive
- * mode.
- */
- cfs_task_t *lo_owner;
+ struct lov_layout_composite {
+ /**
+ * flags of lov_comp_md_v1::lcm_flags. Mainly used
+ * by FLR.
+ */
+ uint32_t lo_flags;
+ /**
+ * For FLR: index of preferred mirror to read.
+ * The preferred mirror is initialized from the preferred
+ * bit of the lsme. It can be changed when the preferred
+ * mirror becomes inaccessible.
+ * To keep lov_lsm_entry() returning the same mirror within
+ * one IO context, the preferred mirror can only be changed
+ * when lo_active_ios drops to zero.
+ */
+ int lo_preferred_mirror;
+ /**
+ * For FLR: the lock to protect access to
+ * lo_preferred_mirror.
+ */
+ spinlock_t lo_write_lock;
+ /**
+ * For FLR: Number of (valid) mirrors.
+ */
+ unsigned lo_mirror_count;
+ struct lov_mirror_entry *lo_mirrors;
+ /**
+ * Current entry count of lo_entries, including
+ * invalid entries.
+ */
+ unsigned int lo_entry_count;
+ struct lov_layout_entry *lo_entries;
+ } composite;
+ } u;
+ /**
+ * Thread that acquired lov_object::lo_type_guard in an exclusive
+ * mode.
+ */
+ struct task_struct *lo_owner;
};
-/**
- * Flags that top-lock can set on each of its sub-locks.
- */
-enum lov_sub_flags {
- /** Top-lock acquired a hold (cl_lock_hold()) on a sub-lock. */
- LSF_HELD = 1 << 0
-};
+static inline struct lov_layout_raid0 *lov_r0(struct lov_object *lov, int i)
+{
+ LASSERT(lov->lo_type == LLT_COMP);
+ LASSERTF(i < lov->u.composite.lo_entry_count,
+ "entry %d entry_count %d\n", i,
+ lov->u.composite.lo_entry_count);
+
+ return &lov->u.composite.lo_entries[i].lle_raid0;
+}
+
+static inline struct lov_stripe_md_entry *lov_lse(struct lov_object *lov, int i)
+{
+ LASSERT(lov->lo_lsm != NULL);
+ LASSERT(i < lov->lo_lsm->lsm_entry_count);
+
+ return lov->lo_lsm->lsm_entries[i];
+}
+
+static inline unsigned lov_flr_state(const struct lov_object *lov)
+{
+ if (lov->lo_type != LLT_COMP)
+ return LCM_FL_NONE;
+
+ return lov->u.composite.lo_flags & LCM_FL_FLR_MASK;
+}
+
+static inline bool lov_is_flr(const struct lov_object *lov)
+{
+ return lov_flr_state(lov) != LCM_FL_NONE;
+}
+
+static inline struct lov_layout_entry *lov_entry(struct lov_object *lov, int i)
+{
+ LASSERT(lov->lo_type == LLT_COMP);
+ LASSERTF(i < lov->u.composite.lo_entry_count,
+ "entry %d entry_count %d\n", i,
+ lov->u.composite.lo_entry_count);
+
+ return &lov->u.composite.lo_entries[i];
+}
+
+#define lov_for_layout_entry(lov, entry, start, end) \
+ for (entry = lov_entry(lov, start); \
+ entry <= lov_entry(lov, end); entry++)
+
+#define lov_foreach_layout_entry(lov, entry) \
+ lov_for_layout_entry(lov, entry, 0, \
+ (lov)->u.composite.lo_entry_count - 1)
+
+#define lov_foreach_mirror_layout_entry(lov, entry, lre) \
+ lov_for_layout_entry(lov, entry, (lre)->lre_start, (lre)->lre_end)
+
+static inline struct lov_mirror_entry *
+lov_mirror_entry(struct lov_object *lov, int i)
+{
+ LASSERT(i < lov->u.composite.lo_mirror_count);
+ return &lov->u.composite.lo_mirrors[i];
+}
+
+#define lov_foreach_mirror_entry(lov, lre) \
+ for (lre = lov_mirror_entry(lov, 0); \
+ lre <= lov_mirror_entry(lov, \
+ lov->u.composite.lo_mirror_count - 1); \
+ lre++)
+
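The two mirror-iteration macros above compose naturally. A hedged sketch of how FLR code could recompute the per-mirror validity bit described in struct lov_mirror_entry (the helper name is hypothetical, not part of this patch):

/* Sketch (not part of this patch): a mirror is valid if at least one of
 * the layout entries it owns ([lre_start, lre_end]) is valid. */
static inline void lov_refresh_mirror_valid(struct lov_object *lov)
{
	struct lov_mirror_entry *lre;
	struct lov_layout_entry *lle;

	lov_foreach_mirror_entry(lov, lre) {
		lre->lre_valid = 0;
		lov_foreach_mirror_layout_entry(lov, lle, lre) {
			if (lle->lle_valid) {
				lre->lre_valid = 1;
				break;
			}
		}
	}
}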
+static inline unsigned
+lov_layout_entry_index(struct lov_object *lov, struct lov_layout_entry *entry)
+{
+ struct lov_layout_entry *first = &lov->u.composite.lo_entries[0];
+ unsigned index = (unsigned)(entry - first);
+
+ LASSERT(entry >= first);
+ LASSERT(index < lov->u.composite.lo_entry_count);
+
+ return index;
+}
/**
* State lov_lock keeps for each sub-lock.
*/
struct lov_lock_sub {
- /** sub-lock itself */
- struct lovsub_lock *sub_lock;
- /** An array of per-sub-lock flags, taken from enum lov_sub_flags */
- unsigned sub_flags;
- int sub_stripe;
- struct cl_lock_descr sub_descr;
- struct cl_lock_descr sub_got;
+ /** sub-lock itself */
+ struct cl_lock sub_lock;
+ /** Set if the sublock has ever been enqueued, meaning it may
+ * hold resources of the underlying layers. */
+ unsigned int sub_is_enqueued:1,
+ sub_initialized:1;
+ int sub_index;
};
/**
* lov-specific lock state.
*/
struct lov_lock {
- struct cl_lock_slice lls_cl;
- /** Number of sub-locks in this lock */
- int lls_nr;
- /**
- * Number of existing sub-locks.
- */
- unsigned lls_nr_filled;
- /**
- * Set when sub-lock was canceled, while top-lock was being
- * used, or unused.
- */
- unsigned int lls_cancel_race:1;
- /**
- * An array of sub-locks
- *
- * There are two issues with managing sub-locks:
- *
- * - sub-locks are concurrently canceled, and
- *
- * - sub-locks are shared with other top-locks.
- *
- * To manage cancellation, top-lock acquires a hold on a sublock
- * (lov_sublock_adopt()) when the latter is inserted into
- * lov_lock::lls_sub[]. This hold is released (lov_sublock_release())
- * when top-lock is going into CLS_CACHED state or destroyed. Hold
- * prevents sub-lock from cancellation.
- *
- * Sub-lock sharing means, among other things, that top-lock that is
- * in the process of creation (i.e., not yet inserted into lock list)
- * is already accessible to other threads once at least one of its
- * sub-locks is created, see lov_lock_sub_init().
- *
- * Sub-lock can be in one of the following states:
- *
- * - doesn't exist, lov_lock::lls_sub[]::sub_lock == NULL. Such
- * sub-lock was either never created (top-lock is in CLS_NEW
- * state), or it was created, then canceled, then destroyed
- * (lov_lock_unlink() cleared sub-lock pointer in the top-lock).
- *
- * - sub-lock exists and is on
- * hold. (lov_lock::lls_sub[]::sub_flags & LSF_HELD). This is a
- * normal state of a sub-lock in CLS_HELD and CLS_CACHED states
- * of a top-lock.
- *
- * - sub-lock exists, but is not held by the top-lock. This
- * happens after top-lock released a hold on sub-locks before
- * going into cache (lov_lock_unuse()).
- *
- * \todo To support wide-striping, array has to be replaced with a set
- * of queues to avoid scanning.
- */
- struct lov_lock_sub *lls_sub;
- /**
- * Original description with which lock was enqueued.
- */
- struct cl_lock_descr lls_orig;
+ struct cl_lock_slice lls_cl;
+ /** Number of sub-locks in this lock */
+ int lls_nr;
+ /** sublock array */
+ struct lov_lock_sub lls_sub[0];
};
struct lov_page {
- struct cl_page_slice lps_cl;
- int lps_invalid;
+ struct cl_page_slice lps_cl;
};
/*
struct lovsub_device {
struct cl_device acid_cl;
- struct lov_device *acid_super;
- int acid_idx;
struct cl_device *acid_next;
};
};
/**
- * A link between a top-lock and a sub-lock. Separate data-structure is
- * necessary, because top-locks and sub-locks are in M:N relationship.
- *
- * \todo This can be optimized for a (by far) most frequent case of a single
- * top-lock per sub-lock.
- */
-struct lov_lock_link {
- struct lov_lock *lll_super;
- /** An index within parent lock. */
- int lll_idx;
- /**
- * A linkage into per sub-lock list of all corresponding top-locks,
- * hanging off lovsub_lock::lss_parents.
- */
- cfs_list_t lll_list;
-};
-
-/**
- * Lock state at lovsub layer.
- */
-struct lovsub_lock {
- struct cl_lock_slice lss_cl;
- /**
- * List of top-locks that have given sub-lock as their part. Protected
- * by cl_lock::cll_guard mutex.
- */
- cfs_list_t lss_parents;
- /**
- * Top-lock that initiated current operation on this sub-lock. This is
- * only set during top-to-bottom lock operations like enqueue, and is
- * used to optimize state change notification. Protected by
- * cl_lock::cll_guard mutex.
- *
- * \see lovsub_lock_state_one().
- */
- struct cl_lock *lss_active;
-};
-
-/**
* Describe the environment settings for sublocks.
*/
struct lov_sublock_env {
const struct lu_env *lse_env;
struct cl_io *lse_io;
- struct lov_io_sub *lse_sub;
};
-struct lovsub_page {
- struct cl_page_slice lsb_cl;
-};
-
-
struct lov_thread_info {
struct cl_object_conf lti_stripe_conf;
struct lu_fid lti_fid;
- struct cl_lock_descr lti_ldescr;
struct ost_lvb lti_lvb;
struct cl_2queue lti_cl2q;
- struct cl_lock_closure lti_closure;
- wait_queue_t lti_waiter;
+ struct cl_page_list lti_plist;
};
/**
* State that lov_io maintains for every sub-io.
*/
struct lov_io_sub {
- int sub_stripe;
- /**
- * sub-io for a stripe. Ideally sub-io's can be stopped and resumed
- * independently, with lov acting as a scheduler to maximize overall
- * throughput.
- */
- struct cl_io *sub_io;
- /**
- * Linkage into a list (hanging off lov_io::lis_active) of all
- * sub-io's active for the current IO iteration.
- */
- cfs_list_t sub_linkage;
- /**
- * true, iff cl_io_init() was successfully executed against
- * lov_io_sub::sub_io.
- */
- int sub_io_initialized;
- /**
- * True, iff lov_io_sub::sub_io and lov_io_sub::sub_env weren't
- * allocated, but borrowed from a per-device emergency pool.
- */
- int sub_borrowed;
- /**
- * environment, in which sub-io executes.
- */
- struct lu_env *sub_env;
- /**
- * environment's refcheck.
- *
- * \see cl_env_get()
- */
- int sub_refcheck;
- int sub_refcheck2;
- int sub_reenter;
- void *sub_cookie;
+ /**
+ * Linkage into a list (hanging off lov_io::lis_subios)
+ */
+ struct list_head sub_list;
+ /**
+ * Linkage into a list (hanging off lov_io::lis_active) of all
+ * sub-io's active for the current IO iteration.
+ */
+ struct list_head sub_linkage;
+ unsigned int sub_subio_index;
+ /**
+ * sub-io for a stripe. Ideally sub-io's can be stopped and resumed
+ * independently, with lov acting as a scheduler to maximize overall
+ * throughput.
+ */
+ struct cl_io sub_io;
+ /**
+ * environment, in which sub-io executes.
+ */
+ struct lu_env *sub_env;
+ /**
+ * environment's refcheck.
+ *
+ * \see cl_env_get()
+ */
+ __u16 sub_refcheck;
};
/**
* IO state private for LOV.
*/
+#define LIS_CACHE_ENTRY_NONE -ENOENT
struct lov_io {
/** super-class */
struct cl_io_slice lis_cl;
+
+ /**
+ * FLR: index to lo_mirrors. Valid only if lov_is_flr() returns true.
+ *
+ * The mirror index of this io. Preserved over cl_io_init()
+ * if io->ci_ndelay_tried is greater than zero.
+ */
+ int lis_mirror_index;
+ /**
+ * FLR: the layout gen when lis_mirror_index was cached. The
+ * mirror index makes sense only when the layout gen doesn't
+ * change.
+ */
+ int lis_mirror_layout_gen;
+
+ /**
+ * fields below this will be initialized in lov_io_init().
+ */
+ unsigned lis_preserved;
+
/**
* Pointer to the object slice. This is a duplicate of
* lov_io::lis_cl::cis_object.
* starting position within a file, for the current io loop iteration
* (stripe), used by ci_io_loop().
*/
- obd_off lis_pos;
- /**
- * end position with in a file, for the current stripe io. This is
- * exclusive (i.e., next offset after last byte affected by io).
- */
- obd_off lis_endpos;
-
- int lis_mem_frozen;
- int lis_stripe_count;
- int lis_active_subios;
+ loff_t lis_pos;
+ /**
+ * end position within a file, for the current stripe io. This is
+ * exclusive (i.e., next offset after last byte affected by io).
+ */
+ loff_t lis_endpos;
+ int lis_nr_subios;
- /**
- * the index of ls_single_subio in ls_subios array
- */
- int lis_single_subio_index;
- struct cl_io lis_single_subio;
+ /**
+ * the index of lis_single_subio in the lis_subios list
+ */
+ int lis_single_subio_index;
+ struct lov_io_sub lis_single_subio;
- /**
- * size of ls_subios array, actually the highest stripe #
- */
- int lis_nr_subios;
- struct lov_io_sub *lis_subs;
- /**
- * List of active sub-io's.
- */
- cfs_list_t lis_active;
+ /**
+ * List of active sub-io's. Active sub-io's fall within the
+ * range [lis_pos, lis_endpos).
+ */
+ struct list_head lis_active;
+ /**
+ * All sub-io's created in this lov_io.
+ */
+ struct list_head lis_subios;
+ /* Cached results from stripe & offset calculations for page init */
+ int lis_cached_entry;
+ int lis_cached_stripe;
+ loff_t lis_cached_off;
+ loff_t lis_cached_suboff;
+ struct lov_io_sub *lis_cached_sub;
};
struct lov_session {
struct lov_sublock_env ls_subenv;
};
-/**
- * State of transfer for lov.
- */
-struct lov_req {
- struct cl_req_slice lr_cl;
-};
-
-/**
- * State of transfer for lovsub.
- */
-struct lovsub_req {
- struct cl_req_slice lsrq_cl;
-};
-
extern struct lu_device_type lov_device_type;
extern struct lu_device_type lovsub_device_type;
extern struct kmem_cache *lov_object_kmem;
extern struct kmem_cache *lov_thread_kmem;
extern struct kmem_cache *lov_session_kmem;
-extern struct kmem_cache *lov_req_kmem;
-extern struct kmem_cache *lovsub_lock_kmem;
extern struct kmem_cache *lovsub_object_kmem;
-extern struct kmem_cache *lovsub_req_kmem;
-
-extern struct kmem_cache *lov_lock_link_kmem;
-int lov_object_init (const struct lu_env *env, struct lu_object *obj,
- const struct lu_object_conf *conf);
-int lovsub_object_init (const struct lu_env *env, struct lu_object *obj,
- const struct lu_object_conf *conf);
-int lov_lock_init (const struct lu_env *env, struct cl_object *obj,
- struct cl_lock *lock, const struct cl_io *io);
-int lov_io_init (const struct lu_env *env, struct cl_object *obj,
- struct cl_io *io);
-int lovsub_lock_init (const struct lu_env *env, struct cl_object *obj,
- struct cl_lock *lock, const struct cl_io *io);
-
-int lov_lock_init_raid0 (const struct lu_env *env, struct cl_object *obj,
+int lov_lock_init_composite(const struct lu_env *env, struct cl_object *obj,
struct cl_lock *lock, const struct cl_io *io);
int lov_lock_init_empty (const struct lu_env *env, struct cl_object *obj,
struct cl_lock *lock, const struct cl_io *io);
-int lov_io_init_raid0 (const struct lu_env *env, struct cl_object *obj,
+int lov_io_init_composite(const struct lu_env *env, struct cl_object *obj,
struct cl_io *io);
int lov_io_init_empty (const struct lu_env *env, struct cl_object *obj,
struct cl_io *io);
int lov_io_init_released(const struct lu_env *env, struct cl_object *obj,
struct cl_io *io);
-void lov_lock_unlink (const struct lu_env *env, struct lov_lock_link *link,
- struct lovsub_lock *sub);
struct lov_io_sub *lov_sub_get(const struct lu_env *env, struct lov_io *lio,
int stripe);
-void lov_sub_put (struct lov_io_sub *sub);
-int lov_sublock_modify (const struct lu_env *env, struct lov_lock *lov,
- struct lovsub_lock *sublock,
- const struct cl_lock_descr *d, int idx);
-
-
-int lov_page_init (const struct lu_env *env, struct cl_object *ob,
- struct cl_page *page, struct page *vmpage);
-int lovsub_page_init (const struct lu_env *env, struct cl_object *ob,
- struct cl_page *page, struct page *vmpage);
-
-int lov_page_init_empty (const struct lu_env *env,
- struct cl_object *obj,
- struct cl_page *page, struct page *vmpage);
-int lov_page_init_raid0 (const struct lu_env *env,
- struct cl_object *obj,
- struct cl_page *page, struct page *vmpage);
+
+int lov_page_init_empty (const struct lu_env *env, struct cl_object *obj,
+ struct cl_page *page, pgoff_t index);
+int lov_page_init_composite(const struct lu_env *env, struct cl_object *obj,
+ struct cl_page *page, pgoff_t index);
+int lov_page_init_foreign(const struct lu_env *env, struct cl_object *obj,
+ struct cl_page *page, pgoff_t index);
struct lu_object *lov_object_alloc (const struct lu_env *env,
const struct lu_object_header *hdr,
struct lu_device *dev);
+
struct lu_object *lovsub_object_alloc(const struct lu_env *env,
const struct lu_object_header *hdr,
struct lu_device *dev);
-struct lov_lock_link *lov_lock_link_find(const struct lu_env *env,
- struct lov_lock *lck,
- struct lovsub_lock *sub);
-struct lov_io_sub *lov_page_subio (const struct lu_env *env,
- struct lov_io *lio,
- const struct cl_page_slice *slice);
-
-void lov_lsm_decref(struct lov_object *lov, struct lov_stripe_md *lsm);
-struct lov_stripe_md *lov_lsm_addref(struct lov_object *lov);
+int lov_page_stripe(const struct cl_page *page);
+bool lov_page_is_empty(const struct cl_page *page);
+int lov_lsm_entry(const struct lov_stripe_md *lsm, __u64 offset);
+int lov_io_layout_at(struct lov_io *lio, __u64 offset);
#define lov_foreach_target(lov, var) \
for (var = 0; var < lov_targets_nr(lov); ++var)
+static inline struct lu_extent *lov_io_extent(struct lov_io *io, int i)
+{
+ return &lov_lse(io->lis_object, i)->lsme_extent;
+}
+
+/**
+ * Iterate over the layout entries that overlap @ext.
+ */
+#define lov_foreach_io_layout(ind, lio, ext) \
+ for (ind = lov_io_layout_at(lio, (ext)->e_start); \
+ ind >= 0 && \
+ lu_extent_is_overlapped(lov_io_extent(lio, ind), ext); \
+ ind = lov_io_layout_at(lio, lov_io_extent(lio, ind)->e_end))
+
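A hedged usage sketch of lov_foreach_io_layout: walking the layout entries that overlap an IO extent (the helper name is hypothetical and not part of this patch; lu_extent's e_start/e_end fields are assumed, as already used by the macro above):

/* Sketch (not part of this patch): visit each layout entry overlapping
 * the IO range described by @ext. */
static inline void lov_io_walk_range(struct lov_io *lio, struct lu_extent *ext)
{
	int index;

	lov_foreach_io_layout(index, lio, ext) {
		struct lov_layout_entry *lle = lov_entry(lio->lis_object, index);

		if (!lle->lle_valid)
			continue;
		/* per-entry work against the part of @ext this entry covers */
	}
}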
/*****************************************************************************
*
* Type conversions.
static inline struct lov_device *lu2lov_dev(const struct lu_device *d)
{
- LINVRNT(d->ld_type == &lov_device_type);
- return container_of0(d, struct lov_device, ld_cl.cd_lu_dev);
+ LINVRNT(d->ld_type == &lov_device_type);
+ return container_of(d, struct lov_device, ld_cl.cd_lu_dev);
}
static inline struct cl_device *lovsub2cl_dev(struct lovsub_device *lovsub)
static inline struct lovsub_device *lu2lovsub_dev(const struct lu_device *d)
{
- LINVRNT(d->ld_type == &lovsub_device_type);
- return container_of0(d, struct lovsub_device, acid_cl.cd_lu_dev);
+ LINVRNT(d->ld_type == &lovsub_device_type);
+ return container_of(d, struct lovsub_device, acid_cl.cd_lu_dev);
}
static inline struct lovsub_device *cl2lovsub_dev(const struct cl_device *d)
{
- LINVRNT(d->cd_lu_dev.ld_type == &lovsub_device_type);
- return container_of0(d, struct lovsub_device, acid_cl);
+ LINVRNT(d->cd_lu_dev.ld_type == &lovsub_device_type);
+ return container_of(d, struct lovsub_device, acid_cl);
}
static inline struct lu_object *lov2lu(struct lov_object *lov)
static inline struct lov_object *lu2lov(const struct lu_object *obj)
{
- LINVRNT(lov_is_object(obj));
- return container_of0(obj, struct lov_object, lo_cl.co_lu);
+ LINVRNT(lov_is_object(obj));
+ return container_of(obj, struct lov_object, lo_cl.co_lu);
}
static inline struct lov_object *cl2lov(const struct cl_object *obj)
{
- LINVRNT(lov_is_object(&obj->co_lu));
- return container_of0(obj, struct lov_object, lo_cl);
+ LINVRNT(lov_is_object(&obj->co_lu));
+ return container_of(obj, struct lov_object, lo_cl);
}
static inline struct lu_object *lovsub2lu(struct lovsub_object *los)
{
- return &los->lso_cl.co_lu;
+ return &los->lso_cl.co_lu;
}
static inline struct cl_object *lovsub2cl(struct lovsub_object *los)
{
- return &los->lso_cl;
+ return &los->lso_cl;
}
static inline struct lovsub_object *cl2lovsub(const struct cl_object *obj)
{
- LINVRNT(lovsub_is_object(&obj->co_lu));
- return container_of0(obj, struct lovsub_object, lso_cl);
+ LINVRNT(lovsub_is_object(&obj->co_lu));
+ return container_of(obj, struct lovsub_object, lso_cl);
}
static inline struct lovsub_object *lu2lovsub(const struct lu_object *obj)
{
- LINVRNT(lovsub_is_object(obj));
- return container_of0(obj, struct lovsub_object, lso_cl.co_lu);
-}
-
-static inline struct lovsub_lock *
-cl2lovsub_lock(const struct cl_lock_slice *slice)
-{
- LINVRNT(lovsub_is_object(&slice->cls_obj->co_lu));
- return container_of(slice, struct lovsub_lock, lss_cl);
-}
-
-static inline struct lovsub_lock *cl2sub_lock(const struct cl_lock *lock)
-{
- const struct cl_lock_slice *slice;
-
- slice = cl_lock_at(lock, &lovsub_device_type);
- LASSERT(slice != NULL);
- return cl2lovsub_lock(slice);
+ LINVRNT(lovsub_is_object(obj));
+ return container_of(obj, struct lovsub_object, lso_cl.co_lu);
}
static inline struct lov_lock *cl2lov_lock(const struct cl_lock_slice *slice)
{
- LINVRNT(lov_is_object(&slice->cls_obj->co_lu));
- return container_of(slice, struct lov_lock, lls_cl);
+ LINVRNT(lov_is_object(&slice->cls_obj->co_lu));
+ return container_of(slice, struct lov_lock, lls_cl);
}
static inline struct lov_page *cl2lov_page(const struct cl_page_slice *slice)
{
- LINVRNT(lov_is_object(&slice->cpl_obj->co_lu));
- return container_of0(slice, struct lov_page, lps_cl);
-}
-
-static inline struct lov_req *cl2lov_req(const struct cl_req_slice *slice)
-{
- return container_of0(slice, struct lov_req, lr_cl);
-}
-
-static inline struct lovsub_page *
-cl2lovsub_page(const struct cl_page_slice *slice)
-{
- LINVRNT(lovsub_is_object(&slice->cpl_obj->co_lu));
- return container_of0(slice, struct lovsub_page, lsb_cl);
-}
-
-static inline struct lovsub_req *cl2lovsub_req(const struct cl_req_slice *slice)
-{
- return container_of0(slice, struct lovsub_req, lsrq_cl);
-}
-
-static inline struct cl_page *lov_sub_page(const struct cl_page_slice *slice)
-{
- return slice->cpl_page->cp_child;
+ LINVRNT(lov_is_object(&slice->cpl_obj->co_lu));
+ return container_of(slice, struct lov_page, lps_cl);
}
static inline struct lov_io *cl2lov_io(const struct lu_env *env,
return info;
}
-static inline struct lov_layout_raid0 *lov_r0(struct lov_object *lov)
-{
- LASSERT(lov->lo_type == LLT_RAID0);
- LASSERT(lov->lo_lsm->lsm_wire.lw_magic == LOV_MAGIC ||
- lov->lo_lsm->lsm_wire.lw_magic == LOV_MAGIC_V3);
- return &lov->u.raid0;
-}
+/* lov_pack.c */
+int lov_getstripe(const struct lu_env *env, struct lov_object *obj,
+ struct lov_stripe_md *lsm, struct lov_user_md __user *lump,
+ size_t size);
/** @} lov */
#endif
-