*/
/*
* This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
*
* Internal interfaces of LOV layer.
*
struct lovsub_object;
enum lov_device_flags {
- LOV_DEV_INITIALIZED = 1 << 0
+ LOV_DEV_INITIALIZED = BIT(0),
};
/*
static inline __u32 lov_entry_type(struct lov_stripe_md_entry *lsme)
{
if ((lov_pattern(lsme->lsme_pattern) & LOV_PATTERN_RAID0) ||
- (lov_pattern(lsme->lsme_pattern) == LOV_PATTERN_MDT))
+ (lov_pattern(lsme->lsme_pattern) == LOV_PATTERN_MDT) ||
+ (lov_pattern(lsme->lsme_pattern) == LOV_PATTERN_FOREIGN))
return lov_pattern(lsme->lsme_pattern &
~LOV_PATTERN_OVERSTRIPING);
return 0;
struct lov_layout_raid0 {
unsigned lo_nr;
/**
+	 * record the stripe number before the truncate size, used for setting OST
+ * object size for truncate. LU-14128.
+ */
+ int lo_trunc_stripeno;
+ /**
* When this is true, lov_object::lo_attr contains
* valid up to date attributes for a top-level
* object. This field is reset to 0 when attributes of
struct lov_layout_entry {
__u32 lle_type;
unsigned int lle_valid:1;
+ unsigned int lle_preference;
struct lu_extent *lle_extent;
struct lov_stripe_md_entry *lle_lsme;
struct lov_comp_layout_entry_ops *lle_comp_ops;
struct lov_mirror_entry {
unsigned short lre_mirror_id;
- unsigned short lre_preferred:1,
- lre_stale:1, /* set if any components is stale */
- lre_valid:1; /* set if at least one of components
+	unsigned short	lre_stale:1,	/* set if any component is stale */
+			lre_valid:1,	/* set if at least one of the components
* in this mirror is valid */
+ lre_foreign:1; /* set if it is a foreign component */
+ int lre_preference; /* overall preference of this mirror */
+
unsigned short lre_start; /* index to lo_entries, start index of
* this mirror */
unsigned short lre_end; /* end index of this mirror */
};
+enum lov_object_flags {
+ /* Layout is invalid, set when layout lock is lost */
+ LO_LAYOUT_INVALID = 0x1,
+};
+
/**
* lov-specific file state.
*
*/
enum lov_layout_type lo_type;
/**
- * True if layout is invalid. This bit is cleared when layout lock
- * is lost.
+ * Object flags.
*/
- bool lo_layout_invalid;
+ unsigned long lo_obj_flags;
/**
* How many IOs are on going on this object. Layout can be changed
* only if there is no active IO.
{
LASSERT(lov->lo_type == LLT_COMP);
LASSERTF(i < lov->u.composite.lo_entry_count,
- "entry %d entry_count %d", i, lov->u.composite.lo_entry_count);
+ "entry %d entry_count %d\n", i,
+ lov->u.composite.lo_entry_count);
return &lov->u.composite.lo_entries[i].lle_raid0;
}
{
LASSERT(lov->lo_type == LLT_COMP);
LASSERTF(i < lov->u.composite.lo_entry_count,
- "entry %d entry_count %d", i, lov->u.composite.lo_entry_count);
+ "entry %d entry_count %d\n", i,
+ lov->u.composite.lo_entry_count);
return &lov->u.composite.lo_entries[i];
}
struct lov_page {
struct cl_page_slice lps_cl;
- /** layout_entry + stripe index, composed using lov_comp_index() */
- unsigned int lps_index;
- /* the layout gen when this page was created */
- __u32 lps_layout_gen;
};
/*
struct ost_lvb lti_lvb;
struct cl_2queue lti_cl2q;
struct cl_page_list lti_plist;
- wait_queue_entry_t lti_waiter;
};
/**
* \see cl_env_get()
*/
__u16 sub_refcheck;
- __u16 sub_reenter;
};
/**
* IO state private for LOV.
*/
+/* Sentinel for lis_cached_entry: no cached stripe/offset result is valid.
+ * Parenthesized per kernel checkpatch: macro values containing an operator
+ * must be enclosed in parentheses so expansion cannot regroup. */
+#define LIS_CACHE_ENTRY_NONE	(-ENOENT)
struct lov_io {
/** super-class */
struct cl_io_slice lis_cl;
* All sub-io's created in this lov_io.
*/
struct list_head lis_subios;
-
+ /* Cached results from stripe & offset calculations for page init */
+ int lis_cached_entry;
+ int lis_cached_stripe;
+ loff_t lis_cached_off;
+ loff_t lis_cached_suboff;
+ struct lov_io_sub *lis_cached_sub;
};
struct lov_session {
extern struct kmem_cache *lovsub_object_kmem;
-int lov_object_init (const struct lu_env *env, struct lu_object *obj,
- const struct lu_object_conf *conf);
-int lovsub_object_init (const struct lu_env *env, struct lu_object *obj,
- const struct lu_object_conf *conf);
-int lov_lock_init (const struct lu_env *env, struct cl_object *obj,
- struct cl_lock *lock, const struct cl_io *io);
-int lov_io_init (const struct lu_env *env, struct cl_object *obj,
- struct cl_io *io);
-
int lov_lock_init_composite(const struct lu_env *env, struct cl_object *obj,
struct cl_lock *lock, const struct cl_io *io);
int lov_lock_init_empty (const struct lu_env *env, struct cl_object *obj,
struct lov_io_sub *lov_sub_get(const struct lu_env *env, struct lov_io *lio,
int stripe);
-int lov_page_init (const struct lu_env *env, struct cl_object *ob,
- struct cl_page *page, pgoff_t index);
int lov_page_init_empty (const struct lu_env *env, struct cl_object *obj,
struct cl_page *page, pgoff_t index);
int lov_page_init_composite(const struct lu_env *env, struct cl_object *obj,
const struct lu_object_header *hdr,
struct lu_device *dev);
-struct lov_stripe_md *lov_lsm_addref(struct lov_object *lov);
int lov_page_stripe(const struct cl_page *page);
bool lov_page_is_empty(const struct cl_page *page);
int lov_lsm_entry(const struct lov_stripe_md *lsm, __u64 offset);
static inline struct lov_device *lu2lov_dev(const struct lu_device *d)
{
- LINVRNT(d->ld_type == &lov_device_type);
- return container_of0(d, struct lov_device, ld_cl.cd_lu_dev);
+ LINVRNT(d->ld_type == &lov_device_type);
+ return container_of(d, struct lov_device, ld_cl.cd_lu_dev);
}
static inline struct cl_device *lovsub2cl_dev(struct lovsub_device *lovsub)
static inline struct lovsub_device *lu2lovsub_dev(const struct lu_device *d)
{
- LINVRNT(d->ld_type == &lovsub_device_type);
- return container_of0(d, struct lovsub_device, acid_cl.cd_lu_dev);
+ LINVRNT(d->ld_type == &lovsub_device_type);
+ return container_of(d, struct lovsub_device, acid_cl.cd_lu_dev);
}
static inline struct lovsub_device *cl2lovsub_dev(const struct cl_device *d)
{
- LINVRNT(d->cd_lu_dev.ld_type == &lovsub_device_type);
- return container_of0(d, struct lovsub_device, acid_cl);
+ LINVRNT(d->cd_lu_dev.ld_type == &lovsub_device_type);
+ return container_of(d, struct lovsub_device, acid_cl);
}
static inline struct lu_object *lov2lu(struct lov_object *lov)
static inline struct lov_object *lu2lov(const struct lu_object *obj)
{
- LINVRNT(lov_is_object(obj));
- return container_of0(obj, struct lov_object, lo_cl.co_lu);
+ LINVRNT(lov_is_object(obj));
+ return container_of(obj, struct lov_object, lo_cl.co_lu);
}
static inline struct lov_object *cl2lov(const struct cl_object *obj)
{
- LINVRNT(lov_is_object(&obj->co_lu));
- return container_of0(obj, struct lov_object, lo_cl);
+ LINVRNT(lov_is_object(&obj->co_lu));
+ return container_of(obj, struct lov_object, lo_cl);
}
static inline struct lu_object *lovsub2lu(struct lovsub_object *los)
{
- return &los->lso_cl.co_lu;
+ return &los->lso_cl.co_lu;
}
static inline struct cl_object *lovsub2cl(struct lovsub_object *los)
{
- return &los->lso_cl;
+ return &los->lso_cl;
}
static inline struct lovsub_object *cl2lovsub(const struct cl_object *obj)
{
- LINVRNT(lovsub_is_object(&obj->co_lu));
- return container_of0(obj, struct lovsub_object, lso_cl);
+ LINVRNT(lovsub_is_object(&obj->co_lu));
+ return container_of(obj, struct lovsub_object, lso_cl);
}
static inline struct lovsub_object *lu2lovsub(const struct lu_object *obj)
{
- LINVRNT(lovsub_is_object(obj));
- return container_of0(obj, struct lovsub_object, lso_cl.co_lu);
+ LINVRNT(lovsub_is_object(obj));
+ return container_of(obj, struct lovsub_object, lso_cl.co_lu);
}
static inline struct lov_lock *cl2lov_lock(const struct cl_lock_slice *slice)
{
- LINVRNT(lov_is_object(&slice->cls_obj->co_lu));
- return container_of(slice, struct lov_lock, lls_cl);
+ LINVRNT(lov_is_object(&slice->cls_obj->co_lu));
+ return container_of(slice, struct lov_lock, lls_cl);
}
static inline struct lov_page *cl2lov_page(const struct cl_page_slice *slice)
{
- LINVRNT(lov_is_object(&slice->cpl_obj->co_lu));
- return container_of0(slice, struct lov_page, lps_cl);
+ LINVRNT(lov_is_object(&slice->cpl_obj->co_lu));
+ return container_of(slice, struct lov_page, lps_cl);
}
static inline struct lov_io *cl2lov_io(const struct lu_env *env,