X-Git-Url: https://git.whamcloud.com/?a=blobdiff_plain;f=lustre%2Flov%2Flov_cl_internal.h;h=f97747d9f001365b14863871e0ef5e5354ca2076;hb=8b352709a66f9079cadeda2e9af3834941ced969;hp=e93132ad21e29d38dfa13653cbfd20d8361fb447;hpb=72057a3af19ee02d9a686bd7e7d074917e381310;p=fs%2Flustre-release.git

diff --git a/lustre/lov/lov_cl_internal.h b/lustre/lov/lov_cl_internal.h
index e93132a..f97747d 100644
--- a/lustre/lov/lov_cl_internal.h
+++ b/lustre/lov/lov_cl_internal.h
@@ -23,7 +23,7 @@
  * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
  * Use is subject to license terms.
  *
- * Copyright (c) 2012, 2015, Intel Corporation.
+ * Copyright (c) 2012, 2016, Intel Corporation.
  */
 /*
  * This file is part of Lustre, http://www.lustre.org/
@@ -91,6 +91,12 @@ enum lov_device_flags {
  * Upper half.
  */
 
+/* Data-on-MDT array item in lov_device::ld_md_tgts[] */
+struct lovdom_device {
+	struct cl_device	*ldm_mdc;
+	int			 ldm_idx;
+};
+
 struct lov_device {
 	/*
 	 * XXX Locking of lov-private data is missing.
@@ -101,6 +107,13 @@ struct lov_device {
 	__u32			  ld_target_nr;
 	struct lovsub_device	**ld_target;
 	__u32			  ld_flags;
+
+	/* Data-on-MDT devices */
+	__u32			  ld_md_tgts_nr;
+	struct lovdom_device	 *ld_md_tgts;
+	struct obd_device	 *ld_lmv;
+	/* LU site for subdevices */
+	struct lu_site		  ld_site;
 };
 
 /**
@@ -108,8 +121,8 @@ struct lov_device {
  */
 enum lov_layout_type {
 	LLT_EMPTY,	/** empty file without body (mknod + truncate) */
-	LLT_RAID0,	/** striped file */
 	LLT_RELEASED,	/** file with no objects (data in HSM) */
+	LLT_COMP,	/** support composite layout */
 	LLT_NR
 };
 
@@ -118,10 +131,10 @@ static inline char *llt2str(enum lov_layout_type llt)
 {
 	switch (llt) {
 	case LLT_EMPTY:
 		return "EMPTY";
-	case LLT_RAID0:
-		return "RAID0";
 	case LLT_RELEASED:
 		return "RELEASED";
+	case LLT_COMP:
+		return "COMPOSITE";
 	case LLT_NR:
 		LBUG();
 	}
@@ -130,6 +143,89 @@ static inline char *llt2str(enum lov_layout_type llt)
 }
 
 /**
+ * Return lov_layout_entry_type associated with a given composite layout
+ * entry.
+ */
+static inline __u32 lov_entry_type(struct lov_stripe_md_entry *lsme)
+{
+	if ((lov_pattern(lsme->lsme_pattern) == LOV_PATTERN_RAID0) ||
+	    (lov_pattern(lsme->lsme_pattern) == LOV_PATTERN_MDT))
+		return lov_pattern(lsme->lsme_pattern);
+	return 0;
+}
+
+struct lov_layout_entry;
+struct lov_object;
+struct lov_lock_sub;
+
+struct lov_comp_layout_entry_ops {
+	int (*lco_init)(const struct lu_env *env, struct lov_device *dev,
+			struct lov_object *lov, unsigned int index,
+			const struct cl_object_conf *conf,
+			struct lov_layout_entry *lle);
+	void (*lco_fini)(const struct lu_env *env,
+			 struct lov_layout_entry *lle);
+	int (*lco_getattr)(const struct lu_env *env, struct lov_object *obj,
+			   unsigned int index, struct lov_layout_entry *lle,
+			   struct cl_attr **attr);
+};
+
+struct lov_layout_raid0 {
+	unsigned		lo_nr;
+	/**
+	 * When this is true, lov_object::lo_attr contains
+	 * valid up to date attributes for a top-level
+	 * object. This field is reset to 0 when attributes of
+	 * any sub-object change.
+	 */
+	int			lo_attr_valid;
+	/**
+	 * Array of sub-objects. Allocated when top-object is
+	 * created (lov_init_raid0()).
+	 *
+	 * Top-object is a strict master of its sub-objects:
+	 * it is created before them, and outlives its
+	 * children (this later is necessary so that basic
+	 * functions like cl_object_top() always
+	 * work). Top-object keeps a reference on every
+	 * sub-object.
+	 *
+	 * When top-object is destroyed (lov_delete_raid0())
+	 * it releases its reference to a sub-object and waits
+	 * until the latter is finally destroyed.
+	 */
+	struct lovsub_object	**lo_sub;
+	/**
+	 * protect lo_sub
+	 */
+	spinlock_t		lo_sub_lock;
+	/**
+	 * Cached object attribute, built from sub-object
+	 * attributes.
+	 */
+	struct cl_attr		lo_attr;
+};
+
+struct lov_layout_dom {
+	/* keep this always at first place so DOM layout entry
+	 * can be addressed also as RAID0 after initialization.
+	 */
+	struct lov_layout_raid0 lo_dom_r0;
+	struct lovsub_object *lo_dom;
+	struct lov_oinfo *lo_loi;
+};
+
+struct lov_layout_entry {
+	__u32 lle_type;
+	struct lu_extent lle_extent;
+	struct lov_comp_layout_entry_ops *lle_comp_ops;
+	union {
+		struct lov_layout_raid0 lle_raid0;
+		struct lov_layout_dom lle_dom;
+	};
+};
+
+/**
  * lov-specific file state.
  *
  * lov object has particular layout type, determining how top-object is built
@@ -144,7 +240,7 @@ static inline char *llt2str(enum lov_layout_type llt)
  * function corresponding to the current layout type.
  */
 struct lov_object {
-	struct cl_object       lo_cl;
+	struct cl_object	lo_cl;
 	/**
 	 * Serializes object operations with transitions between layout types.
 	 *
@@ -178,47 +274,17 @@ struct lov_object {
 	struct lov_stripe_md *lo_lsm;
 
 	union lov_layout_state {
-		struct lov_layout_raid0 {
-			unsigned		lo_nr;
-			/**
-			 * When this is true, lov_object::lo_attr contains
-			 * valid up to date attributes for a top-level
-			 * object. This field is reset to 0 when attributes of
-			 * any sub-object change.
-			 */
-			int			lo_attr_valid;
-			/**
-			 * Array of sub-objects. Allocated when top-object is
-			 * created (lov_init_raid0()).
-			 *
-			 * Top-object is a strict master of its sub-objects:
-			 * it is created before them, and outlives its
-			 * children (this later is necessary so that basic
-			 * functions like cl_object_top() always
-			 * work). Top-object keeps a reference on every
-			 * sub-object.
-			 *
-			 * When top-object is destroyed (lov_delete_raid0())
-			 * it releases its reference to a sub-object and waits
-			 * until the latter is finally destroyed.
-			 *
-			 * May be vmalloc'd, must be freed with OBD_FREE_LARGE.
-			 */
-			struct lovsub_object	**lo_sub;
-			/**
-			 * protect lo_sub
-			 */
-			spinlock_t		lo_sub_lock;
-			/**
-			 * Cached object attribute, built from sub-object
-			 * attributes.
-			 */
-			struct cl_attr		lo_attr;
-		} raid0;
 		struct lov_layout_state_empty {
 		} empty;
 		struct lov_layout_state_released {
 		} released;
+		struct lov_layout_composite {
+			/**
+			 * Current valid entry count of entries.
+			 */
+			unsigned int lo_entry_count;
+			struct lov_layout_entry *lo_entries;
+		} composite;
 	} u;
 	/**
 	 * Thread that acquired lov_object::lo_type_guard in an exclusive
@@ -227,6 +293,12 @@ struct lov_object {
 	struct task_struct            *lo_owner;
 };
 
+#define lov_foreach_layout_entry(lov, entry)			\
+	for (entry = &lov->u.composite.lo_entries[0];		\
+	     entry < &lov->u.composite.lo_entries		\
+			[lov->u.composite.lo_entry_count];	\
+	     entry++)
+
 /**
  * State lov_lock keeps for each sub-lock.
  */
@@ -237,7 +309,7 @@ struct lov_lock_sub {
 	 * hold resources of underlying layers */
 	unsigned int		sub_is_enqueued:1,
 				sub_initialized:1;
-	int			sub_stripe;
+	int			sub_index;
 };
 
 /**
@@ -253,7 +325,8 @@ struct lov_lock {
 
 struct lov_page {
 	struct cl_page_slice	lps_cl;
-	unsigned int		lps_stripe; /* stripe index */
+	/** layout_entry + stripe index, composed using lov_comp_index() */
+	unsigned int		lps_index;
 };
 
 /*
@@ -305,38 +378,33 @@ struct lov_thread_info {
  * State that lov_io maintains for every sub-io.
  */
 struct lov_io_sub {
-	__u16			sub_stripe;
 	/**
-	 * environment's refcheck.
-	 *
-	 * \see cl_env_get()
+	 * Linkage into a list (hanging off lov_io::lis_subios)
 	 */
-	__u16			sub_refcheck;
-	/**
-	 * true, iff cl_io_init() was successfully executed against
-	 * lov_io_sub::sub_io.
-	 */
-	__u16			sub_io_initialized:1,
-	/**
-	 * True, iff lov_io_sub::sub_io and lov_io_sub::sub_env weren't
-	 * allocated, but borrowed from a per-device emergency pool.
-	 */
-				sub_borrowed:1;
+	struct list_head	sub_list;
 	/**
 	 * Linkage into a list (hanging off lov_io::lis_active) of all
 	 * sub-io's active for the current IO iteration.
 	 */
 	struct list_head	sub_linkage;
+	unsigned int		sub_subio_index;
 	/**
 	 * sub-io for a stripe. Ideally sub-io's can be stopped and resumed
 	 * independently, with lov acting as a scheduler to maximize overall
	 * throughput.
 	 */
-	struct cl_io		*sub_io;
+	struct cl_io		sub_io;
 	/**
 	 * environment, in which sub-io executes.
 	 */
 	struct lu_env		*sub_env;
+	/**
+	 * environment's refcheck.
+	 *
+	 * \see cl_env_get()
+	 */
+	__u16			sub_refcheck;
+	__u16			sub_reenter;
 };
 
 /**
@@ -364,32 +432,29 @@ struct lov_io {
 	 * starting position within a file, for the current io loop iteration
 	 * (stripe), used by ci_io_loop().
 	 */
-	loff_t             lis_pos;
+	loff_t			lis_pos;
 	/**
 	 * end position with in a file, for the current stripe io. This is
 	 * exclusive (i.e., next offset after last byte affected by io).
 	 */
-	loff_t             lis_endpos;
-
-	int                lis_stripe_count;
-	int                lis_active_subios;
+	loff_t			lis_endpos;
+	int			lis_nr_subios;
 
 	/**
 	 * the index of ls_single_subio in ls_subios array
 	 */
 	int			lis_single_subio_index;
-	struct cl_io		lis_single_subio;
+	struct lov_io_sub	lis_single_subio;
 
 	/**
-	 * size of ls_subios array, actually the highest stripe #
-	 * May be vmalloc'd, must be freed with OBD_FREE_LARGE().
+	 * List of active sub-io's. Active sub-io's are under the range
+	 * of [lis_pos, lis_endpos).
 	 */
-	int			lis_nr_subios;
-	struct lov_io_sub	*lis_subs;
+	struct list_head	lis_active;
 	/**
-	 * List of active sub-io's.
+	 * All sub-io's created in this lov_io.
 	 */
-	struct list_head	lis_active;
+	struct list_head	lis_subios;
 };
 
 struct lov_session {
@@ -422,11 +487,11 @@ int lov_io_init (const struct lu_env *env, struct cl_object *obj,
 
 int lovsub_lock_init (const struct lu_env *env, struct cl_object *obj,
 			struct cl_lock *lock, const struct cl_io *io);
-int lov_lock_init_raid0 (const struct lu_env *env, struct cl_object *obj,
+int lov_lock_init_composite(const struct lu_env *env, struct cl_object *obj,
 			struct cl_lock *lock, const struct cl_io *io);
 int lov_lock_init_empty (const struct lu_env *env, struct cl_object *obj,
 			struct cl_lock *lock, const struct cl_io *io);
-int lov_io_init_raid0 (const struct lu_env *env, struct cl_object *obj,
+int lov_io_init_composite(const struct lu_env *env, struct cl_object *obj,
 			struct cl_io *io);
 int lov_io_init_empty (const struct lu_env *env, struct cl_object *obj,
 			struct cl_io *io);
@@ -442,7 +507,7 @@ int lovsub_page_init (const struct lu_env *env, struct cl_object *ob,
 			struct cl_page *page, pgoff_t index);
 int lov_page_init_empty (const struct lu_env *env, struct cl_object *obj,
 			struct cl_page *page, pgoff_t index);
-int lov_page_init_raid0 (const struct lu_env *env, struct cl_object *obj,
+int lov_page_init_composite(const struct lu_env *env, struct cl_object *obj,
 			struct cl_page *page, pgoff_t index);
 struct lu_object *lov_object_alloc (const struct lu_env *env,
 				    const struct lu_object_header *hdr,
@@ -453,6 +518,7 @@ struct lu_object *lovsub_object_alloc(const struct lu_env *env,
 
 struct lov_stripe_md *lov_lsm_addref(struct lov_object *lov);
 int lov_page_stripe(const struct cl_page *page);
+int lov_lsm_entry(const struct lov_stripe_md *lsm, __u64 offset);
 
 #define lov_foreach_target(lov, var)			\
 	for (var = 0; var < lov_targets_nr(lov); ++var)
@@ -625,17 +691,36 @@ static inline struct lov_thread_info *lov_env_info(const struct lu_env *env)
 	return info;
 }
 
-static inline struct lov_layout_raid0 *lov_r0(struct lov_object *lov)
+static inline struct lov_layout_entry *lov_entry(struct lov_object *lov, int i)
+{
+	LASSERT(lov->lo_type == LLT_COMP);
+	LASSERTF(i < lov->u.composite.lo_entry_count,
+		 "entry %d entry_count %d", i, lov->u.composite.lo_entry_count);
+
+	return &lov->u.composite.lo_entries[i];
+}
+
+static inline struct lov_layout_raid0 *lov_r0(struct lov_object *lov, int i)
+{
+	LASSERT(lov->lo_type == LLT_COMP);
+	LASSERTF(i < lov->u.composite.lo_entry_count,
+		 "entry %d entry_count %d", i, lov->u.composite.lo_entry_count);
+
+	return &lov->u.composite.lo_entries[i].lle_raid0;
+}
+
+static inline struct lov_stripe_md_entry *lov_lse(struct lov_object *lov, int i)
 {
-	LASSERT(lov->lo_type == LLT_RAID0);
-	LASSERT(lov->lo_lsm->lsm_magic == LOV_MAGIC ||
-		lov->lo_lsm->lsm_magic == LOV_MAGIC_V3);
-	return &lov->u.raid0;
+	LASSERT(lov->lo_lsm != NULL);
+	LASSERT(i < lov->lo_lsm->lsm_entry_count);
+
+	return lov->lo_lsm->lsm_entries[i];
 }
 
 /* lov_pack.c */
-int lov_getstripe(struct lov_object *obj, struct lov_stripe_md *lsm,
-		  struct lov_user_md __user *lump);
+int lov_getstripe(const struct lu_env *env, struct lov_object *obj,
+		  struct lov_stripe_md *lsm, struct lov_user_md __user *lump,
+		  size_t size);
 
 /** @} lov */
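Note (not part of the patch above): the sketch below illustrates how code inside the LOV layer might walk the composite layout state introduced by this change, using the lov_foreach_layout_entry() macro together with the lov_r0() helper declared in the modified lov_cl_internal.h. The function lov_dump_composite() is hypothetical and exists only to show how struct lov_object, struct lov_layout_entry and the per-entry RAID0 state relate; it assumes the declarations from the header as changed by this diff and would have to be built inside the LOV layer.

/* Illustrative sketch only -- lov_dump_composite() is not part of this
 * patch.  It assumes the declarations from lov_cl_internal.h above. */
static void lov_dump_composite(struct lov_object *lov)
{
	struct lov_layout_entry *lle;
	unsigned int i = 0;

	/* The composite state in lov->u.composite is only valid for
	 * LLT_COMP objects (lov_r0() asserts this as well). */
	LASSERT(lov->lo_type == LLT_COMP);

	lov_foreach_layout_entry(lov, lle) {
		/* lov_r0() returns the per-entry RAID0 state; for a
		 * Data-on-MDT entry this aliases lov_layout_dom::lo_dom_r0,
		 * which is why lo_dom_r0 is kept first in
		 * struct lov_layout_dom. */
		struct lov_layout_raid0 *r0 = lov_r0(lov, i);

		CDEBUG(D_INODE, "entry %u: type %#x, %u stripe(s), "
		       "extent [%llu, %llu)\n",
		       i, lle->lle_type, r0->lo_nr,
		       (unsigned long long)lle->lle_extent.e_start,
		       (unsigned long long)lle->lle_extent.e_end);
		i++;
	}
}

The same walk (an entry index paired with a lov_layout_entry pointer into lo_entries) is the pattern the lov_io and lov_lock paths would follow after mapping a file offset to a layout entry with lov_lsm_entry() and then to a stripe within that entry.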