X-Git-Url: https://git.whamcloud.com/?p=fs%2Flustre-release.git;a=blobdiff_plain;f=lustre%2Finclude%2Flustre%2Flustre_idl.h;h=1f8641d8f9e647c37ccb6b09adab1898ab653378;hp=8b835a5f684aaf700cc6103f88e5a3abee2ddd60;hb=1ecf5bd5a3bece3d615d037a332792e360b49a09;hpb=519a65ddc04673022124f421e4809f8a87f790d7 diff --git a/lustre/include/lustre/lustre_idl.h b/lustre/include/lustre/lustre_idl.h index 8b835a5..1f8641d 100644 --- a/lustre/include/lustre/lustre_idl.h +++ b/lustre/include/lustre/lustre_idl.h @@ -27,7 +27,7 @@ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved. * Use is subject to license terms. * - * Copyright (c) 2011, 2013, Intel Corporation. + * Copyright (c) 2011, 2014, Intel Corporation. */ /* * This file is part of Lustre, http://www.lustre.org/ @@ -91,14 +91,11 @@ #ifndef _LUSTRE_IDL_H_ #define _LUSTRE_IDL_H_ -#if !defined(LPU64) #include /* for LPUX64, etc */ -#endif - -/* Defn's shared with user-space. */ -#include - +#include +#include /* Defn's shared with user-space. */ #include +#include /* * GENERAL STUFF @@ -150,10 +147,7 @@ #define PTL_RPC_MSG_REPLY 4713 /* DON'T use swabbed values of MAGIC as magic! */ -#define LUSTRE_MSG_MAGIC_V1 0x0BD00BD0 #define LUSTRE_MSG_MAGIC_V2 0x0BD00BD3 - -#define LUSTRE_MSG_MAGIC_V1_SWABBED 0xD00BD00B #define LUSTRE_MSG_MAGIC_V2_SWABBED 0xD30BD00B #define LUSTRE_MSG_MAGIC LUSTRE_MSG_MAGIC_V2 @@ -167,22 +161,6 @@ #define LUSTRE_LOG_VERSION 0x00050000 #define LUSTRE_MGS_VERSION 0x00060000 -typedef __u32 mdsno_t; -typedef __u64 seqno_t; -typedef __u64 obd_id; -typedef __u64 obd_seq; -typedef __s64 obd_time; -typedef __u64 obd_size; -typedef __u64 obd_off; -typedef __u64 obd_blocks; -typedef __u64 obd_valid; -typedef __u32 obd_blksize; -typedef __u32 obd_mode; -typedef __u32 obd_uid; -typedef __u32 obd_gid; -typedef __u32 obd_flag; -typedef __u32 obd_count; - /** * Describes a range of sequence, lsr_start is included but lsr_end is * not in the range. @@ -208,114 +186,6 @@ struct lu_seq_range_array { #define LU_SEQ_RANGE_MASK 0x3 -static inline unsigned fld_range_type(const struct lu_seq_range *range) -{ - return range->lsr_flags & LU_SEQ_RANGE_MASK; -} - -static inline int fld_range_is_ost(const struct lu_seq_range *range) -{ - return fld_range_type(range) == LU_SEQ_RANGE_OST; -} - -static inline int fld_range_is_mdt(const struct lu_seq_range *range) -{ - return fld_range_type(range) == LU_SEQ_RANGE_MDT; -} - -/** - * This all range is only being used when fld client sends fld query request, - * but it does not know whether the seq is MDT or OST, so it will send req - * with ALL type, which means either seq type gotten from lookup can be - * expected. 
- */ -static inline unsigned fld_range_is_any(const struct lu_seq_range *range) -{ - return fld_range_type(range) == LU_SEQ_RANGE_ANY; -} - -static inline void fld_range_set_type(struct lu_seq_range *range, - unsigned flags) -{ - range->lsr_flags |= flags; -} - -static inline void fld_range_set_mdt(struct lu_seq_range *range) -{ - fld_range_set_type(range, LU_SEQ_RANGE_MDT); -} - -static inline void fld_range_set_ost(struct lu_seq_range *range) -{ - fld_range_set_type(range, LU_SEQ_RANGE_OST); -} - -static inline void fld_range_set_any(struct lu_seq_range *range) -{ - fld_range_set_type(range, LU_SEQ_RANGE_ANY); -} - -/** - * returns width of given range \a r - */ - -static inline __u64 range_space(const struct lu_seq_range *range) -{ - return range->lsr_end - range->lsr_start; -} - -/** - * initialize range to zero - */ - -static inline void range_init(struct lu_seq_range *range) -{ - memset(range, 0, sizeof(*range)); -} - -/** - * check if given seq id \a s is within given range \a r - */ - -static inline int range_within(const struct lu_seq_range *range, - __u64 s) -{ - return s >= range->lsr_start && s < range->lsr_end; -} - -static inline int range_is_sane(const struct lu_seq_range *range) -{ - return (range->lsr_end >= range->lsr_start); -} - -static inline int range_is_zero(const struct lu_seq_range *range) -{ - return (range->lsr_start == 0 && range->lsr_end == 0); -} - -static inline int range_is_exhausted(const struct lu_seq_range *range) - -{ - return range_space(range) == 0; -} - -/* return 0 if two range have the same location */ -static inline int range_compare_loc(const struct lu_seq_range *r1, - const struct lu_seq_range *r2) -{ - return r1->lsr_index != r2->lsr_index || - r1->lsr_flags != r2->lsr_flags; -} - -#define DRANGE "[%#16.16"LPF64"x-%#16.16"LPF64"x):%x:%s" - -#define PRANGE(range) \ - (range)->lsr_start, \ - (range)->lsr_end, \ - (range)->lsr_index, \ - fld_range_is_mdt(range) ? "mdt" : "ost" - - /** \defgroup lu_fid lu_fid * @{ */ @@ -326,7 +196,7 @@ static inline int range_compare_loc(const struct lu_seq_range *r1, */ enum lma_compat { LMAC_HSM = 0x00000001, - LMAC_SOM = 0x00000002, +/* LMAC_SOM = 0x00000002, obsolete since 2.8.0 */ LMAC_NOT_IN_OI = 0x00000004, /* the object does NOT need OI mapping */ LMAC_FID_ON_OST = 0x00000008, /* For OST-object, its OI mapping is * under /O//d. */ @@ -342,36 +212,21 @@ enum lma_incompat { LMAI_AGENT = 0x00000002, /* agent inode */ LMAI_REMOTE_PARENT = 0x00000004, /* the parent of the object is on the remote MDT */ + LMAI_STRIPED = 0x00000008, /* striped directory inode */ }; -#define LMA_INCOMPAT_SUPP (LMAI_AGENT | LMAI_REMOTE_PARENT) +#define LMA_INCOMPAT_SUPP (LMAI_AGENT | LMAI_REMOTE_PARENT | LMAI_STRIPED) extern void lustre_lma_swab(struct lustre_mdt_attrs *lma); extern void lustre_lma_init(struct lustre_mdt_attrs *lma, const struct lu_fid *fid, __u32 compat, __u32 incompat); -/** - * SOM on-disk attributes stored in a separate xattr. - */ -struct som_attrs { - /** Bitfield for supported data in this structure. For future use. */ - __u32 som_compat; - - /** Incompat feature list. 
The supported feature mask is availabe in - * SOM_INCOMPAT_SUPP */ - __u32 som_incompat; - /** IO Epoch SOM attributes belongs to */ - __u64 som_ioepoch; - /** total file size in objects */ - __u64 som_size; - /** total fs blocks in objects */ - __u64 som_blocks; - /** mds mount id the size is valid for */ - __u64 som_mountid; -}; -extern void lustre_som_swab(struct som_attrs *attrs); - -#define SOM_INCOMPAT_SUPP 0x0 +/* copytool uses a 32b bitmask field to encode archive-Ids during register + * with MDT thru kuc. + * archive num = 0 => all + * archive num from 1 to 32 + */ +#define LL_HSM_MAX_ARCHIVE (sizeof(__u32) * 8) /** * HSM on-disk attributes stored in a separate xattr. @@ -422,7 +277,7 @@ static inline void fid_zero(struct lu_fid *fid) memset(fid, 0, sizeof(*fid)); } -static inline obd_id fid_ver_oid(const struct lu_fid *fid) +static inline __u64 fid_ver_oid(const struct lu_fid *fid) { return ((__u64)fid_ver(fid) << 32 | fid_oid(fid)); } @@ -439,8 +294,8 @@ enum fid_seq { FID_SEQ_OST_MDT0 = 0, FID_SEQ_LLOG = 1, /* unnamed llogs */ FID_SEQ_ECHO = 2, - FID_SEQ_OST_MDT1 = 3, - FID_SEQ_OST_MAX = 9, /* Max MDT count before OST_on_FID */ + FID_SEQ_UNUSED_START = 3, + FID_SEQ_UNUSED_END = 9, FID_SEQ_LLOG_NAME = 10, /* named llogs */ FID_SEQ_RSVD = 11, FID_SEQ_IGIF = 12, @@ -463,6 +318,12 @@ enum fid_seq { FID_SEQ_QUOTA = 0x200000005ULL, FID_SEQ_QUOTA_GLB = 0x200000006ULL, FID_SEQ_ROOT = 0x200000007ULL, /* Located on MDT0 */ + FID_SEQ_LAYOUT_RBTREE = 0x200000008ULL, + /* sequence is used for update logs of cross-MDT operation */ + FID_SEQ_UPDATE_LOG = 0x200000009ULL, + /* Sequence is used for the directory under which update logs + * are created. */ + FID_SEQ_UPDATE_LOG_DIR = 0x20000000aULL, FID_SEQ_NORMAL = 0x200000400ULL, FID_SEQ_LOV_DEFAULT = 0xffffffffffffffffULL }; @@ -482,142 +343,195 @@ enum special_oid { /** OID for FID_SEQ_DOT_LUSTRE */ enum dot_lustre_oid { - FID_OID_DOT_LUSTRE = 1UL, - FID_OID_DOT_LUSTRE_OBF = 2UL, + FID_OID_DOT_LUSTRE = 1UL, + FID_OID_DOT_LUSTRE_OBF = 2UL, + FID_OID_DOT_LUSTRE_LPF = 3UL, }; -static inline int fid_seq_is_mdt0(obd_seq seq) +/** OID for FID_SEQ_ROOT */ +enum root_oid { + FID_OID_ROOT = 1UL, + FID_OID_ECHO_ROOT = 2UL, +}; + +static inline bool fid_seq_is_mdt0(__u64 seq) { - return (seq == FID_SEQ_OST_MDT0); + return seq == FID_SEQ_OST_MDT0; } -static inline int fid_seq_is_mdt(const __u64 seq) +static inline bool fid_seq_is_mdt(__u64 seq) { return seq == FID_SEQ_OST_MDT0 || seq >= FID_SEQ_NORMAL; }; -static inline int fid_seq_is_echo(obd_seq seq) +static inline bool fid_seq_is_echo(__u64 seq) { - return (seq == FID_SEQ_ECHO); + return seq == FID_SEQ_ECHO; } -static inline int fid_is_echo(const struct lu_fid *fid) +static inline bool fid_is_echo(const struct lu_fid *fid) { return fid_seq_is_echo(fid_seq(fid)); } -static inline int fid_seq_is_llog(obd_seq seq) +static inline bool fid_seq_is_llog(__u64 seq) { - return (seq == FID_SEQ_LLOG); + return seq == FID_SEQ_LLOG; } -static inline int fid_is_llog(const struct lu_fid *fid) +static inline bool fid_is_llog(const struct lu_fid *fid) { /* file with OID == 0 is not llog but contains last oid */ return fid_seq_is_llog(fid_seq(fid)) && fid_oid(fid) > 0; } -static inline int fid_seq_is_rsvd(const __u64 seq) +static inline bool fid_seq_is_rsvd(__u64 seq) { - return (seq > FID_SEQ_OST_MDT0 && seq <= FID_SEQ_RSVD); + return seq > FID_SEQ_OST_MDT0 && seq <= FID_SEQ_RSVD; }; -static inline int fid_seq_is_special(const __u64 seq) +static inline bool fid_seq_is_special(__u64 seq) { return seq == FID_SEQ_SPECIAL; 
}; -static inline int fid_seq_is_local_file(const __u64 seq) +static inline bool fid_seq_is_local_file(__u64 seq) { return seq == FID_SEQ_LOCAL_FILE || seq == FID_SEQ_LOCAL_NAME; }; -static inline int fid_seq_is_root(const __u64 seq) +static inline bool fid_seq_is_root(__u64 seq) { return seq == FID_SEQ_ROOT; } -static inline int fid_seq_is_dot(const __u64 seq) +static inline bool fid_seq_is_dot(__u64 seq) { return seq == FID_SEQ_DOT_LUSTRE; } -static inline int fid_seq_is_default(const __u64 seq) +static inline bool fid_seq_is_default(__u64 seq) { return seq == FID_SEQ_LOV_DEFAULT; } -static inline int fid_is_mdt0(const struct lu_fid *fid) +static inline bool fid_is_mdt0(const struct lu_fid *fid) { - return fid_seq_is_mdt0(fid_seq(fid)); + return fid_seq_is_mdt0(fid_seq(fid)); } static inline void lu_root_fid(struct lu_fid *fid) { fid->f_seq = FID_SEQ_ROOT; - fid->f_oid = 1; + fid->f_oid = FID_OID_ROOT; + fid->f_ver = 0; +} + +static inline void lu_echo_root_fid(struct lu_fid *fid) +{ + fid->f_seq = FID_SEQ_ROOT; + fid->f_oid = FID_OID_ECHO_ROOT; + fid->f_ver = 0; +} + +static inline void lu_update_log_fid(struct lu_fid *fid, __u32 index) +{ + fid->f_seq = FID_SEQ_UPDATE_LOG; + fid->f_oid = index; + fid->f_ver = 0; +} + +static inline void lu_update_log_dir_fid(struct lu_fid *fid, __u32 index) +{ + fid->f_seq = FID_SEQ_UPDATE_LOG_DIR; + fid->f_oid = index; fid->f_ver = 0; } /** * Check if a fid is igif or not. * \param fid the fid to be tested. - * \return true if the fid is a igif; otherwise false. + * \return true if the fid is an igif; otherwise false. */ -static inline int fid_seq_is_igif(const __u64 seq) +static inline bool fid_seq_is_igif(__u64 seq) { - return seq >= FID_SEQ_IGIF && seq <= FID_SEQ_IGIF_MAX; + return seq >= FID_SEQ_IGIF && seq <= FID_SEQ_IGIF_MAX; } -static inline int fid_is_igif(const struct lu_fid *fid) +static inline bool fid_is_igif(const struct lu_fid *fid) { - return fid_seq_is_igif(fid_seq(fid)); + return fid_seq_is_igif(fid_seq(fid)); } /** * Check if a fid is idif or not. * \param fid the fid to be tested. - * \return true if the fid is a idif; otherwise false. + * \return true if the fid is an idif; otherwise false. 
*/ -static inline int fid_seq_is_idif(const __u64 seq) +static inline bool fid_seq_is_idif(__u64 seq) { - return seq >= FID_SEQ_IDIF && seq <= FID_SEQ_IDIF_MAX; + return seq >= FID_SEQ_IDIF && seq <= FID_SEQ_IDIF_MAX; } -static inline int fid_is_idif(const struct lu_fid *fid) +static inline bool fid_is_idif(const struct lu_fid *fid) { - return fid_seq_is_idif(fid_seq(fid)); + return fid_seq_is_idif(fid_seq(fid)); } -static inline int fid_is_local_file(const struct lu_fid *fid) +static inline bool fid_is_local_file(const struct lu_fid *fid) { return fid_seq_is_local_file(fid_seq(fid)); } -static inline int fid_seq_is_norm(const __u64 seq) +static inline bool fid_seq_is_norm(__u64 seq) +{ + return (seq >= FID_SEQ_NORMAL); +} + +static inline bool fid_is_norm(const struct lu_fid *fid) +{ + return fid_seq_is_norm(fid_seq(fid)); +} + +static inline int fid_is_layout_rbtree(const struct lu_fid *fid) { - return (seq >= FID_SEQ_NORMAL); + return fid_seq(fid) == FID_SEQ_LAYOUT_RBTREE; } -static inline int fid_is_norm(const struct lu_fid *fid) +static inline bool fid_seq_is_update_log(__u64 seq) { - return fid_seq_is_norm(fid_seq(fid)); + return seq == FID_SEQ_UPDATE_LOG; +} + +static inline bool fid_is_update_log(const struct lu_fid *fid) +{ + return fid_seq_is_update_log(fid_seq(fid)); +} + +static inline bool fid_seq_is_update_log_dir(__u64 seq) +{ + return seq == FID_SEQ_UPDATE_LOG_DIR; +} + +static inline bool fid_is_update_log_dir(const struct lu_fid *fid) +{ + return fid_seq_is_update_log_dir(fid_seq(fid)); } /* convert an OST objid into an IDIF FID SEQ number */ -static inline obd_seq fid_idif_seq(obd_id id, __u32 ost_idx) +static inline __u64 fid_idif_seq(__u64 id, __u32 ost_idx) { - return FID_SEQ_IDIF | (ost_idx << 16) | ((id >> 32) & 0xffff); + return FID_SEQ_IDIF | (ost_idx << 16) | ((id >> 32) & 0xffff); } /* convert a packed IDIF FID into an OST objid */ -static inline obd_id fid_idif_id(obd_seq seq, __u32 oid, __u32 ver) +static inline __u64 fid_idif_id(__u64 seq, __u32 oid, __u32 ver) { - return ((__u64)ver << 48) | ((seq & 0xffff) << 32) | oid; + return ((__u64)ver << 48) | ((seq & 0xffff) << 32) | oid; } -static inline __u32 idif_ost_idx(obd_seq seq) +static inline __u32 idif_ost_idx(__u64 seq) { return (seq >> 16) & 0xffff; } @@ -629,12 +543,12 @@ static inline __u32 fid_idif_ost_idx(const struct lu_fid *fid) } /* extract OST sequence (group) from a wire ost_id (id/seq) pair */ -static inline obd_seq ostid_seq(const struct ost_id *ostid) +static inline __u64 ostid_seq(const struct ost_id *ostid) { if (fid_seq_is_mdt0(ostid->oi.oi_seq)) return FID_SEQ_OST_MDT0; - if (fid_seq_is_default(ostid->oi.oi_seq)) + if (unlikely(fid_seq_is_default(ostid->oi.oi_seq))) return FID_SEQ_LOV_DEFAULT; if (fid_is_idif(&ostid->oi_fid)) @@ -644,11 +558,14 @@ static inline obd_seq ostid_seq(const struct ost_id *ostid) } /* extract OST objid from a wire ost_id (id/seq) pair */ -static inline obd_id ostid_id(const struct ost_id *ostid) +static inline __u64 ostid_id(const struct ost_id *ostid) { - if (fid_seq_is_mdt0(ostid_seq(ostid))) + if (fid_seq_is_mdt0(ostid->oi.oi_seq)) return ostid->oi.oi_id & IDIF_OID_MASK; + if (unlikely(fid_seq_is_default(ostid->oi.oi_seq))) + return ostid->oi.oi_id; + if (fid_is_idif(&ostid->oi_fid)) return fid_idif_id(fid_seq(&ostid->oi_fid), fid_oid(&ostid->oi_fid), 0); @@ -691,13 +608,23 @@ static inline void ostid_set_seq_llog(struct ost_id *oi) */ static inline void ostid_set_id(struct ost_id *oi, __u64 oid) { - if (fid_seq_is_mdt0(ostid_seq(oi))) { + if 
(fid_seq_is_mdt0(oi->oi.oi_seq)) { if (oid >= IDIF_MAX_OID) { CERROR("Bad "LPU64" to set "DOSTID"\n", oid, POSTID(oi)); return; } oi->oi.oi_id = oid; + } else if (fid_is_idif(&oi->oi_fid)) { + if (oid >= IDIF_MAX_OID) { + CERROR("Bad "LPU64" to set "DOSTID"\n", + oid, POSTID(oi)); + return; + } + oi->oi_fid.f_seq = fid_idif_seq(oid, + fid_idif_ost_idx(&oi->oi_fid)); + oi->oi_fid.f_oid = oid; + oi->oi_fid.f_ver = oid >> 48; } else { if (oid > OBIF_MAX_OID) { CERROR("Bad "LPU64" to set "DOSTID"\n", @@ -708,25 +635,31 @@ static inline void ostid_set_id(struct ost_id *oi, __u64 oid) } } -static inline void ostid_inc_id(struct ost_id *oi) +static inline int fid_set_id(struct lu_fid *fid, __u64 oid) { - if (fid_seq_is_mdt0(ostid_seq(oi))) { - if (unlikely(ostid_id(oi) + 1 > IDIF_MAX_OID)) { - CERROR("Bad inc "DOSTID"\n", POSTID(oi)); - return; + if (unlikely(fid_seq_is_igif(fid->f_seq))) { + CERROR("bad IGIF, "DFID"\n", PFID(fid)); + return -EBADF; + } + + if (fid_is_idif(fid)) { + if (oid >= IDIF_MAX_OID) { + CERROR("Bad "LPU64" to set "DFID"\n", + oid, PFID(fid)); + return -EBADF; } - oi->oi.oi_id++; + fid->f_seq = fid_idif_seq(oid, fid_idif_ost_idx(fid)); + fid->f_oid = oid; + fid->f_ver = oid >> 48; } else { - oi->oi_fid.f_oid++; + if (oid > OBIF_MAX_OID) { + CERROR("Bad "LPU64" to set "DFID"\n", + oid, PFID(fid)); + return -EBADF; + } + fid->f_oid = oid; } -} - -static inline void ostid_dec_id(struct ost_id *oi) -{ - if (fid_seq_is_mdt0(ostid_seq(oi))) - oi->oi.oi_id--; - else - oi->oi_fid.f_oid--; + return 0; } /** @@ -738,45 +671,50 @@ static inline void ostid_dec_id(struct ost_id *oi) * struct lu_fid fields without loss. For reference see: * http://arch.lustre.org/index.php?title=Interoperability_fids_zfs */ -static inline int ostid_to_fid(struct lu_fid *fid, struct ost_id *ostid, +static inline int ostid_to_fid(struct lu_fid *fid, const struct ost_id *ostid, __u32 ost_idx) { + __u64 seq = ostid_seq(ostid); + if (ost_idx > 0xffff) { CERROR("bad ost_idx, "DOSTID" ost_idx:%u\n", POSTID(ostid), ost_idx); return -EBADF; } - if (fid_seq_is_mdt0(ostid_seq(ostid))) { + if (fid_seq_is_mdt0(seq)) { + __u64 oid = ostid_id(ostid); + /* This is a "legacy" (old 1.x/2.early) OST object in "group 0" * that we map into the IDIF namespace. It allows up to 2^48 * objects per OST, as this is the object namespace that has * been in production for years. This can handle create rates * of 1M objects/s/OST for 9 years, or combinations thereof. */ - if (ostid_id(ostid) >= IDIF_MAX_OID) { - CERROR("bad MDT0 id, "DOSTID" ost_idx:%u\n", - POSTID(ostid), ost_idx); - return -EBADF; + if (oid >= IDIF_MAX_OID) { + CERROR("bad MDT0 id(1), "DOSTID" ost_idx:%u\n", + POSTID(ostid), ost_idx); + return -EBADF; } - fid->f_seq = fid_idif_seq(ostid_id(ostid), ost_idx); + fid->f_seq = fid_idif_seq(oid, ost_idx); /* truncate to 32 bits by assignment */ - fid->f_oid = ostid_id(ostid); + fid->f_oid = oid; /* in theory, not currently used */ - fid->f_ver = ostid_id(ostid) >> 48; - } else /* if (fid_seq_is_idif(seq) || fid_seq_is_norm(seq)) */ { - /* This is either an IDIF object, which identifies objects across - * all OSTs, or a regular FID. The IDIF namespace maps legacy - * OST objects into the FID namespace. In both cases, we just - * pass the FID through, no conversion needed. */ + fid->f_ver = oid >> 48; + } else if (likely(!fid_seq_is_default(seq))) + /* if (fid_seq_is_idif(seq) || fid_seq_is_norm(seq)) */ { + /* This is either an IDIF object, which identifies objects across + * all OSTs, or a regular FID. 
The IDIF namespace maps legacy + * OST objects into the FID namespace. In both cases, we just + * pass the FID through, no conversion needed. */ if (ostid->oi_fid.f_ver != 0) { - CERROR("bad MDT0 id, "DOSTID" ost_idx:%u\n", + CERROR("bad MDT0 id(2), "DOSTID" ost_idx:%u\n", POSTID(ostid), ost_idx); return -EBADF; } *fid = ostid->oi_fid; } - return 0; + return 0; } /* pack any OST FID into an ostid (id/seq) for the wire/disk */ @@ -799,14 +737,15 @@ static inline int fid_to_ostid(const struct lu_fid *fid, struct ost_id *ostid) } /* Check whether the fid is for LAST_ID */ -static inline int fid_is_last_id(const struct lu_fid *fid) +static inline bool fid_is_last_id(const struct lu_fid *fid) { - return (fid_oid(fid) == 0); + return fid_oid(fid) == 0 && fid_seq(fid) != FID_SEQ_UPDATE_LOG && + fid_seq(fid) != FID_SEQ_UPDATE_LOG_DIR; } /** - * Get inode number from a igif. - * \param fid a igif to get inode number from. + * Get inode number from an igif. + * \param fid an igif to get inode number from. * \return inode number for the igif. */ static inline ino_t lu_igif_ino(const struct lu_fid *fid) @@ -817,8 +756,8 @@ static inline ino_t lu_igif_ino(const struct lu_fid *fid) extern void lustre_swab_ost_id(struct ost_id *oid); /** - * Get inode generation from a igif. - * \param fid a igif to get inode generation from. + * Get inode generation from an igif. + * \param fid an igif to get inode generation from. * \return inode generation for the igif. */ static inline __u32 lu_igif_gen(const struct lu_fid *fid) @@ -868,7 +807,7 @@ static inline void fid_be_to_cpu(struct lu_fid *dst, const struct lu_fid *src) dst->f_ver = be32_to_cpu(fid_ver(src)); } -static inline int fid_is_sane(const struct lu_fid *fid) +static inline bool fid_is_sane(const struct lu_fid *fid) { return fid != NULL && ((fid_seq(fid) >= FID_SEQ_START && fid_ver(fid) == 0) || @@ -876,15 +815,9 @@ static inline int fid_is_sane(const struct lu_fid *fid) fid_seq_is_rsvd(fid_seq(fid))); } -static inline int fid_is_zero(const struct lu_fid *fid) -{ - return fid_seq(fid) == 0 && fid_oid(fid) == 0; -} - extern void lustre_swab_lu_fid(struct lu_fid *fid); -extern void lustre_swab_lu_seq_range(struct lu_seq_range *range); -static inline int lu_fid_eq(const struct lu_fid *f0, const struct lu_fid *f1) +static inline bool lu_fid_eq(const struct lu_fid *f0, const struct lu_fid *f1) { return memcmp(f0, f1, sizeof *f0) == 0; } @@ -909,7 +842,7 @@ static inline int lu_fid_cmp(const struct lu_fid *f0, static inline void ostid_cpu_to_le(const struct ost_id *src_oi, struct ost_id *dst_oi) { - if (fid_seq_is_mdt0(ostid_seq(src_oi))) { + if (fid_seq_is_mdt0(src_oi->oi.oi_seq)) { dst_oi->oi.oi_id = cpu_to_le64(src_oi->oi.oi_id); dst_oi->oi.oi_seq = cpu_to_le64(src_oi->oi.oi_seq); } else { @@ -920,7 +853,7 @@ static inline void ostid_cpu_to_le(const struct ost_id *src_oi, static inline void ostid_le_to_cpu(const struct ost_id *src_oi, struct ost_id *dst_oi) { - if (fid_seq_is_mdt0(ostid_seq(src_oi))) { + if (fid_seq_is_mdt0(src_oi->oi.oi_seq)) { dst_oi->oi.oi_id = le64_to_cpu(src_oi->oi.oi_id); dst_oi->oi.oi_seq = le64_to_cpu(src_oi->oi.oi_seq); } else { @@ -928,6 +861,20 @@ static inline void ostid_le_to_cpu(const struct ost_id *src_oi, } } +struct lu_orphan_rec { + /* The MDT-object's FID referenced by the orphan OST-object */ + struct lu_fid lor_fid; + __u32 lor_uid; + __u32 lor_gid; +}; + +struct lu_orphan_ent { + /* The orphan OST-object's FID */ + struct lu_fid loe_key; + struct lu_orphan_rec loe_rec; +}; +void lustre_swab_orphan_ent(struct 
lu_orphan_ent *ent); + /** @} lu_fid */ /** \defgroup lu_dir lu_dir @@ -944,7 +891,7 @@ enum lu_dirent_attrs { LUDA_TYPE = 0x0002, LUDA_64BITHASH = 0x0004, - /* The following attrs are used for MDT interanl only, + /* The following attrs are used for MDT internal only, * not visible to client */ /* Verify the dirent consistency */ @@ -1051,12 +998,12 @@ static inline struct lu_dirent *lu_dirent_next(struct lu_dirent *ent) return next; } -static inline int lu_dirent_calc_size(int namelen, __u16 attr) +static inline size_t lu_dirent_calc_size(size_t namelen, __u16 attr) { - int size; + size_t size; - if (attr & LUDA_TYPE) { - const unsigned align = sizeof(struct luda_type) - 1; + if (attr & LUDA_TYPE) { + const size_t align = sizeof(struct luda_type) - 1; size = (sizeof(struct lu_dirent) + namelen + align) & ~align; size += sizeof(struct luda_type); } else @@ -1065,15 +1012,6 @@ static inline int lu_dirent_calc_size(int namelen, __u16 attr) return (size + 7) & ~7; } -static inline int lu_dirent_size(struct lu_dirent *ent) -{ - if (le16_to_cpu(ent->lde_reclen) == 0) { - return lu_dirent_calc_size(le16_to_cpu(ent->lde_namelen), - le32_to_cpu(ent->lde_attrs)); - } - return le16_to_cpu(ent->lde_reclen); -} - #define MDS_DIR_END_OFF 0xfffffffffffffffeULL /** @@ -1098,21 +1036,21 @@ struct lustre_handle { }; #define DEAD_HANDLE_MAGIC 0xdeadbeefcafebabeULL -static inline int lustre_handle_is_used(struct lustre_handle *lh) +static inline bool lustre_handle_is_used(const struct lustre_handle *lh) { - return lh->cookie != 0ull; + return lh->cookie != 0; } -static inline int lustre_handle_equal(const struct lustre_handle *lh1, - const struct lustre_handle *lh2) +static inline bool lustre_handle_equal(const struct lustre_handle *lh1, + const struct lustre_handle *lh2) { - return lh1->cookie == lh2->cookie; + return lh1->cookie == lh2->cookie; } static inline void lustre_handle_copy(struct lustre_handle *tgt, - struct lustre_handle *src) + const struct lustre_handle *src) { - tgt->cookie = src->cookie; + tgt->cookie = src->cookie; } /* flags for lm_flags */ @@ -1136,15 +1074,16 @@ struct lustre_msg_v2 { /* without gss, ptlrpc_body is put at the first buffer. 
*/ #define PTLRPC_NUM_VERSIONS 4 -#define JOBSTATS_JOBID_SIZE 32 /* 32 bytes string */ struct ptlrpc_body_v3 { struct lustre_handle pb_handle; __u32 pb_type; __u32 pb_version; __u32 pb_opc; __u32 pb_status; - __u64 pb_last_xid; - __u64 pb_last_seen; + __u64 pb_last_xid; /* highest replied XID without lower unreplied XID */ + __u16 pb_tag; /* virtual slot idx for multiple modifying RPCs */ + __u16 pb_padding0; + __u32 pb_padding1; __u64 pb_last_committed; __u64 pb_transno; __u32 pb_flags; @@ -1156,9 +1095,12 @@ struct ptlrpc_body_v3 { __u64 pb_slv; /* VBR: pre-versions */ __u64 pb_pre_versions[PTLRPC_NUM_VERSIONS]; + __u64 pb_mbits; /**< match bits for bulk request */ /* padding for future needs */ - __u64 pb_padding[4]; - char pb_jobid[JOBSTATS_JOBID_SIZE]; + __u64 pb_padding64_0; + __u64 pb_padding64_1; + __u64 pb_padding64_2; + char pb_jobid[LUSTRE_JOBID_SIZE]; }; #define ptlrpc_body ptlrpc_body_v3 @@ -1168,8 +1110,10 @@ struct ptlrpc_body_v2 { __u32 pb_version; __u32 pb_opc; __u32 pb_status; - __u64 pb_last_xid; - __u64 pb_last_seen; + __u64 pb_last_xid; /* highest replied XID without lower unreplied XID */ + __u16 pb_tag; /* virtual slot idx for multiple modifying RPCs */ + __u16 pb_padding0; + __u32 pb_padding1; __u64 pb_last_committed; __u64 pb_transno; __u32 pb_flags; @@ -1182,8 +1126,11 @@ struct ptlrpc_body_v2 { __u64 pb_slv; /* VBR: pre-versions */ __u64 pb_pre_versions[PTLRPC_NUM_VERSIONS]; + __u64 pb_mbits; /**< unused in V2 */ /* padding for future needs */ - __u64 pb_padding[4]; + __u64 pb_padding64_0; + __u64 pb_padding64_1; + __u64 pb_padding64_2; }; extern void lustre_swab_ptlrpc_body(struct ptlrpc_body *pb); @@ -1307,7 +1254,13 @@ extern void lustre_swab_ptlrpc_body(struct ptlrpc_body *pb); #define OBD_CONNECT_DISP_STRIPE 0x10000000000000ULL/* create stripe disposition*/ #define OBD_CONNECT_OPEN_BY_FID 0x20000000000000ULL /* open by fid won't pack name in request */ - +#define OBD_CONNECT_LFSCK 0x40000000000000ULL/* support online LFSCK */ +#define OBD_CONNECT_UNLINK_CLOSE 0x100000000000000ULL/* close file in unlink */ +#define OBD_CONNECT_MULTIMODRPCS 0x200000000000000ULL /* support multiple modify + RPCs in parallel */ +#define OBD_CONNECT_DIR_STRIPE 0x400000000000000ULL /* striped DNE dir */ +/** bulk matchbits is sent within ptlrpc_body */ +#define OBD_CONNECT_BULK_MBITS 0x2000000000000000ULL /* XXX README XXX: * Please DO NOT add flag values here before first ensuring that this same * flag value is not in use on some other branch. 
Please clear any such @@ -1340,23 +1293,26 @@ extern void lustre_swab_ptlrpc_body(struct ptlrpc_body *pb); OBD_CONNECT_CANCELSET | OBD_CONNECT_AT | \ OBD_CONNECT_RMT_CLIENT | \ OBD_CONNECT_RMT_CLIENT_FORCE | \ - OBD_CONNECT_BRW_SIZE | OBD_CONNECT_MDS_CAPA | \ - OBD_CONNECT_OSS_CAPA | OBD_CONNECT_MDS_MDS | \ + OBD_CONNECT_BRW_SIZE | OBD_CONNECT_MDS_MDS | \ OBD_CONNECT_FID | LRU_RESIZE_CONNECT_FLAG | \ OBD_CONNECT_VBR | OBD_CONNECT_LOV_V3 | \ - OBD_CONNECT_SOM | OBD_CONNECT_FULL20 | \ + OBD_CONNECT_FULL20 | \ OBD_CONNECT_64BITHASH | OBD_CONNECT_JOBSTATS | \ OBD_CONNECT_EINPROGRESS | \ OBD_CONNECT_LIGHTWEIGHT | OBD_CONNECT_UMASK | \ OBD_CONNECT_LVB_TYPE | OBD_CONNECT_LAYOUTLOCK |\ OBD_CONNECT_PINGLESS | OBD_CONNECT_MAX_EASIZE |\ OBD_CONNECT_FLOCK_DEAD | \ - OBD_CONNECT_DISP_STRIPE) + OBD_CONNECT_DISP_STRIPE | OBD_CONNECT_LFSCK | \ + OBD_CONNECT_OPEN_BY_FID | \ + OBD_CONNECT_DIR_STRIPE | \ + OBD_CONNECT_BULK_MBITS | \ + OBD_CONNECT_MULTIMODRPCS) #define OST_CONNECT_SUPPORTED (OBD_CONNECT_SRVLOCK | OBD_CONNECT_GRANT | \ OBD_CONNECT_REQPORTAL | OBD_CONNECT_VERSION | \ OBD_CONNECT_TRUNCLOCK | OBD_CONNECT_INDEX | \ - OBD_CONNECT_BRW_SIZE | OBD_CONNECT_OSS_CAPA | \ + OBD_CONNECT_BRW_SIZE | \ OBD_CONNECT_CANCELSET | OBD_CONNECT_AT | \ LRU_RESIZE_CONNECT_FLAG | OBD_CONNECT_CKSUM | \ OBD_CONNECT_RMT_CLIENT | \ @@ -1369,46 +1325,22 @@ extern void lustre_swab_ptlrpc_body(struct ptlrpc_body *pb); OBD_CONNECT_JOBSTATS | \ OBD_CONNECT_LIGHTWEIGHT | OBD_CONNECT_LVB_TYPE|\ OBD_CONNECT_LAYOUTLOCK | OBD_CONNECT_FID | \ - OBD_CONNECT_PINGLESS) + OBD_CONNECT_PINGLESS | OBD_CONNECT_LFSCK | \ + OBD_CONNECT_BULK_MBITS) #define ECHO_CONNECT_SUPPORTED (0) #define MGS_CONNECT_SUPPORTED (OBD_CONNECT_VERSION | OBD_CONNECT_AT | \ OBD_CONNECT_FULL20 | OBD_CONNECT_IMP_RECOV | \ - OBD_CONNECT_MNE_SWAB | OBD_CONNECT_PINGLESS) + OBD_CONNECT_MNE_SWAB | OBD_CONNECT_PINGLESS |\ + OBD_CONNECT_BULK_MBITS) /* Features required for this version of the client to work with server */ #define CLIENT_CONNECT_MDT_REQD (OBD_CONNECT_IBITS | OBD_CONNECT_FID | \ OBD_CONNECT_FULL20) -#define OBD_OCD_VERSION(major,minor,patch,fix) (((major)<<24) + ((minor)<<16) +\ - ((patch)<<8) + (fix)) -#define OBD_OCD_VERSION_MAJOR(version) ((int)((version)>>24)&255) -#define OBD_OCD_VERSION_MINOR(version) ((int)((version)>>16)&255) -#define OBD_OCD_VERSION_PATCH(version) ((int)((version)>>8)&255) -#define OBD_OCD_VERSION_FIX(version) ((int)(version)&255) - /* This structure is used for both request and reply. * * If we eventually have separate connect data for different types, which we * almost certainly will, then perhaps we stick a union in here. 
*/ -struct obd_connect_data_v1 { - __u64 ocd_connect_flags; /* OBD_CONNECT_* per above */ - __u32 ocd_version; /* lustre release version number */ - __u32 ocd_grant; /* initial cache grant amount (bytes) */ - __u32 ocd_index; /* LOV index to connect to */ - __u32 ocd_brw_size; /* Maximum BRW size in bytes, must be 2^n */ - __u64 ocd_ibits_known; /* inode bits this client understands */ - __u8 ocd_blocksize; /* log2 of the backend filesystem blocksize */ - __u8 ocd_inodespace; /* log2 of the per-inode space consumption */ - __u16 ocd_grant_extent; /* per-extent grant overhead, in 1K blocks */ - __u32 ocd_unused; /* also fix lustre_swab_connect */ - __u64 ocd_transno; /* first transno from client to be replayed */ - __u32 ocd_group; /* MDS group on OST */ - __u32 ocd_cksum_types; /* supported checksum algorithms */ - __u32 ocd_max_easize; /* How big LOV EA can be on MDS */ - __u32 ocd_instance; /* also fix lustre_swab_connect */ - __u64 ocd_maxbytes; /* Maximum stripe size in bytes */ -}; - struct obd_connect_data { __u64 ocd_connect_flags; /* OBD_CONNECT_* per above */ __u32 ocd_version; /* lustre release version number */ @@ -1430,7 +1362,9 @@ struct obd_connect_data { * if the corresponding flag in ocd_connect_flags is set. Accessing * any field after ocd_maxbytes on the receiver without a valid flag * may result in out-of-bound memory access and kernel oops. */ - __u64 padding1; /* added 2.1.0. also fix lustre_swab_connect */ + __u16 ocd_maxmodrpcs; /* Maximum modify RPCs in parallel */ + __u16 padding0; /* added 2.1.0. also fix lustre_swab_connect */ + __u32 padding1; /* added 2.1.0. also fix lustre_swab_connect */ __u64 padding2; /* added 2.1.0. also fix lustre_swab_connect */ __u64 padding3; /* added 2.1.0. also fix lustre_swab_connect */ __u64 padding4; /* added 2.1.0. also fix lustre_swab_connect */ @@ -1491,7 +1425,7 @@ typedef enum { OST_STATFS = 13, OST_SYNC = 16, OST_SET_INFO = 17, - OST_QUOTACHECK = 18, + OST_QUOTACHECK = 18, /* not used since 2.4 */ OST_QUOTACTL = 19, OST_QUOTA_ADJUST_QUNIT = 20, /* not used since 2.4 */ OST_LAST_OPC @@ -1533,10 +1467,23 @@ enum obdo_flags { OBD_FL_LOCAL_MASK = 0xF0000000, }; -#define LOV_MAGIC_V1 0x0BD10BD0 -#define LOV_MAGIC LOV_MAGIC_V1 -#define LOV_MAGIC_JOIN_V1 0x0BD20BD0 -#define LOV_MAGIC_V3 0x0BD30BD0 +/* + * All LOV EA magics should have the same postfix, if some new version + * Lustre instroduces new LOV EA magic, then when down-grade to an old + * Lustre, even though the old version system does not recognizes such + * new magic, it still can distinguish the corrupted cases by checking + * the magic's postfix. 
+ */ +#define LOV_MAGIC_MAGIC 0x0BD0 +#define LOV_MAGIC_MASK 0xFFFF + +#define LOV_MAGIC_V1 (0x0BD10000 | LOV_MAGIC_MAGIC) +#define LOV_MAGIC_JOIN_V1 (0x0BD20000 | LOV_MAGIC_MAGIC) +#define LOV_MAGIC_V3 (0x0BD30000 | LOV_MAGIC_MAGIC) +#define LOV_MAGIC_MIGRATE (0x0BD40000 | LOV_MAGIC_MAGIC) +/* reserved for specifying OSTs */ +#define LOV_MAGIC_SPECIFIC (0x0BD50000 | LOV_MAGIC_MAGIC) +#define LOV_MAGIC LOV_MAGIC_V1 /* * magic for fully defined striping @@ -1553,14 +1500,6 @@ enum obdo_flags { #define LOV_MAGIC_V1_DEF 0x0CD10BD0 #define LOV_MAGIC_V3_DEF 0x0CD30BD0 -#define LOV_PATTERN_RAID0 0x001 /* stripes are used round-robin */ -#define LOV_PATTERN_RAID1 0x002 /* stripes are mirrors of each other */ -#define LOV_PATTERN_FIRST 0x100 /* first stripe is not in round-robin */ -#define LOV_PATTERN_CMOBD 0x200 - -#define LOV_PATTERN_F_MASK 0xffff0000 -#define LOV_PATTERN_F_RELEASED 0x80000000 /* HSM released file */ - #define lov_pattern(pattern) (pattern & ~LOV_PATTERN_F_MASK) #define lov_pattern_flags(pattern) (pattern & LOV_PATTERN_F_MASK) @@ -1619,25 +1558,30 @@ static inline void lmm_oi_set_seq(struct ost_id *oi, __u64 seq) oi->oi.oi_seq = seq; } -static inline __u64 lmm_oi_id(struct ost_id *oi) +static inline void lmm_oi_set_id(struct ost_id *oi, __u64 oid) +{ + oi->oi.oi_id = oid; +} + +static inline __u64 lmm_oi_id(const struct ost_id *oi) { return oi->oi.oi_id; } -static inline __u64 lmm_oi_seq(struct ost_id *oi) +static inline __u64 lmm_oi_seq(const struct ost_id *oi) { return oi->oi.oi_seq; } static inline void lmm_oi_le_to_cpu(struct ost_id *dst_oi, - struct ost_id *src_oi) + const struct ost_id *src_oi) { dst_oi->oi.oi_id = le64_to_cpu(src_oi->oi.oi_id); dst_oi->oi.oi_seq = le64_to_cpu(src_oi->oi.oi_seq); } static inline void lmm_oi_cpu_to_le(struct ost_id *dst_oi, - struct ost_id *src_oi) + const struct ost_id *src_oi) { dst_oi->oi.oi_id = cpu_to_le64(src_oi->oi.oi_id); dst_oi->oi.oi_seq = cpu_to_le64(src_oi->oi.oi_seq); @@ -1648,22 +1592,36 @@ static inline void lmm_oi_cpu_to_le(struct ost_id *dst_oi, #define MAX_MD_SIZE (sizeof(struct lov_mds_md) + 4 * sizeof(struct lov_ost_data)) #define MIN_MD_SIZE (sizeof(struct lov_mds_md) + 1 * sizeof(struct lov_ost_data)) +/* This is the default MDT reply size allocated, should the striping be bigger, + * it will be reallocated in mdt_fix_reply. + * 100 stripes is a bit less than 2.5k of data */ +#define DEF_REP_MD_SIZE (sizeof(struct lov_mds_md) + \ + 100 * sizeof(struct lov_ost_data)) + #define XATTR_NAME_ACL_ACCESS "system.posix_acl_access" #define XATTR_NAME_ACL_DEFAULT "system.posix_acl_default" #define XATTR_USER_PREFIX "user." #define XATTR_TRUSTED_PREFIX "trusted." #define XATTR_SECURITY_PREFIX "security." -#define XATTR_LUSTRE_PREFIX "lustre." #define XATTR_NAME_LOV "trusted.lov" #define XATTR_NAME_LMA "trusted.lma" #define XATTR_NAME_LMV "trusted.lmv" +#define XATTR_NAME_DEFAULT_LMV "trusted.dmv" #define XATTR_NAME_LINK "trusted.link" #define XATTR_NAME_FID "trusted.fid" #define XATTR_NAME_VERSION "trusted.version" #define XATTR_NAME_SOM "trusted.som" #define XATTR_NAME_HSM "trusted.hsm" -#define XATTR_NAME_LFSCK_NAMESPACE "trusted.lfsck_namespace" +#define XATTR_NAME_LFSCK_BITMAP "trusted.lfsck_bitmap" +#define XATTR_NAME_DUMMY "trusted.dummy" + +#if LUSTRE_VERSION_CODE < OBD_OCD_VERSION(2, 8, 53, 0) +# define XATTR_NAME_LFSCK_NAMESPACE_OLD "trusted.lfsck_namespace" +#endif + +#define XATTR_NAME_LFSCK_NAMESPACE "trusted.lfsck_ns" +#define XATTR_NAME_MAX_LEN 32 /* increase this, if there is longer name. 
*/ struct lov_mds_md_v3 { /* LOV EA mds/wire data (little-endian) */ __u32 lmm_magic; /* magic number = LOV_MAGIC_V3 */ @@ -1673,7 +1631,7 @@ struct lov_mds_md_v3 { /* LOV EA mds/wire data (little-endian) */ /* lmm_stripe_count used to be __u32 */ __u16 lmm_stripe_count; /* num stripes in use for this object */ __u16 lmm_layout_gen; /* layout generation number */ - char lmm_pool_name[LOV_MAXPOOLNAME]; /* must be 32bit aligned */ + char lmm_pool_name[LOV_MAXPOOLNAME + 1]; /* must be 32bit aligned */ struct lov_ost_data_v1 lmm_objects[0]; /* per-stripe data */ }; @@ -1687,6 +1645,30 @@ static inline __u32 lov_mds_md_size(__u16 stripes, __u32 lmm_magic) stripes * sizeof(struct lov_ost_data_v1); } +static inline __u32 +lov_mds_md_max_stripe_count(size_t buf_size, __u32 lmm_magic) +{ + switch (lmm_magic) { + case LOV_MAGIC_V1: { + struct lov_mds_md_v1 lmm; + + if (buf_size < sizeof(lmm)) + return 0; + + return (buf_size - sizeof(lmm)) / sizeof(lmm.lmm_objects[0]); + } + case LOV_MAGIC_V3: { + struct lov_mds_md_v3 lmm; + + if (buf_size < sizeof(lmm)) + return 0; + + return (buf_size - sizeof(lmm)) / sizeof(lmm.lmm_objects[0]); + } + default: + return 0; + } +} #define OBD_MD_FLID (0x00000001ULL) /* object ID */ #define OBD_MD_FLATIME (0x00000002ULL) /* access time */ @@ -1710,7 +1692,7 @@ static inline __u32 lov_mds_md_size(__u16 stripes, __u32 lmm_magic) #define OBD_MD_FLCKSUM (0x00100000ULL) /* bulk data checksum */ #define OBD_MD_FLQOS (0x00200000ULL) /* quality of service stats */ /*#define OBD_MD_FLOSCOPQ (0x00400000ULL) osc opaque data, never used */ -#define OBD_MD_FLCOOKIE (0x00800000ULL) /* log cancellation cookie */ +/* OBD_MD_FLCOOKIE (0x00800000ULL) obsolete in 2.8 */ #define OBD_MD_FLGROUP (0x01000000ULL) /* group */ #define OBD_MD_FLFID (0x02000000ULL) /* ->ost write inline fid */ #define OBD_MD_FLEPOCH (0x04000000ULL) /* ->ost write with ioepoch */ @@ -1747,7 +1729,10 @@ static inline __u32 lov_mds_md_size(__u16 stripes, __u32 lmm_magic) #define OBD_MD_FLRMTRGETFACL (0x0008000000000000ULL) /* lfs rgetfacl case */ #define OBD_MD_FLDATAVERSION (0x0010000000000000ULL) /* iversion sum */ -#define OBD_MD_FLRELEASED (0x0020000000000000ULL) /* file released */ +#define OBD_MD_CLOSE_INTENT_EXECED (0x0020000000000000ULL) /* close intent + executed */ + +#define OBD_MD_DEFAULT_MEA (0x0040000000000000ULL) /* default MEA */ #define OBD_MD_FLGETATTR (OBD_MD_FLID | OBD_MD_FLATIME | OBD_MD_FLMTIME | \ OBD_MD_FLCTIME | OBD_MD_FLSIZE | OBD_MD_FLBLKSZ | \ @@ -1801,7 +1786,7 @@ extern void lustre_swab_obd_statfs (struct obd_statfs *os); * space for unstable pages; asking * it to sync quickly */ -#define OBD_OBJECT_EOF 0xffffffffffffffffULL +#define OBD_OBJECT_EOF LUSTRE_EOF #define OST_MIN_PRECREATE 32 #define OST_MAX_PRECREATE 20000 @@ -1824,12 +1809,12 @@ extern void lustre_swab_obd_ioobj (struct obd_ioobj *ioo); /* multiple of 8 bytes => can array */ struct niobuf_remote { - __u64 offset; - __u32 len; - __u32 flags; + __u64 rnb_offset; + __u32 rnb_len; + __u32 rnb_flags; }; -extern void lustre_swab_niobuf_remote (struct niobuf_remote *nbr); +void lustre_swab_niobuf_remote(struct niobuf_remote *nbr); /* lock value block communicated between the filter and llite */ @@ -1844,25 +1829,25 @@ extern void lustre_swab_niobuf_remote (struct niobuf_remote *nbr); #define OST_LVB_GET_ERR(blocks) (int)(blocks - OST_LVB_ERR_INIT) struct ost_lvb_v1 { - __u64 lvb_size; - obd_time lvb_mtime; - obd_time lvb_atime; - obd_time lvb_ctime; - __u64 lvb_blocks; + __u64 lvb_size; + __s64 lvb_mtime; + __s64 lvb_atime; + 
__s64 lvb_ctime; + __u64 lvb_blocks; }; extern void lustre_swab_ost_lvb_v1(struct ost_lvb_v1 *lvb); struct ost_lvb { - __u64 lvb_size; - obd_time lvb_mtime; - obd_time lvb_atime; - obd_time lvb_ctime; - __u64 lvb_blocks; - __u32 lvb_mtime_ns; - __u32 lvb_atime_ns; - __u32 lvb_ctime_ns; - __u32 lvb_padding; + __u64 lvb_size; + __s64 lvb_mtime; + __s64 lvb_atime; + __s64 lvb_ctime; + __u64 lvb_blocks; + __u32 lvb_mtime_ns; + __u32 lvb_atime_ns; + __u32 lvb_ctime_ns; + __u32 lvb_padding; }; extern void lustre_swab_ost_lvb(struct ost_lvb *lvb); @@ -1872,15 +1857,15 @@ extern void lustre_swab_ost_lvb(struct ost_lvb *lvb); */ #ifndef QUOTABLOCK_BITS -#define QUOTABLOCK_BITS 10 +# define QUOTABLOCK_BITS LUSTRE_QUOTABLOCK_BITS #endif #ifndef QUOTABLOCK_SIZE -#define QUOTABLOCK_SIZE (1 << QUOTABLOCK_BITS) +# define QUOTABLOCK_SIZE LUSTRE_QUOTABLOCK_SIZE #endif #ifndef toqb -#define toqb(x) (((x) + QUOTABLOCK_SIZE - 1) >> QUOTABLOCK_BITS) +# define toqb lustre_stoqb #endif /* The lquota_id structure is an union of all the possible identifier types that @@ -1906,12 +1891,6 @@ struct obd_quotactl { extern void lustre_swab_obd_quotactl(struct obd_quotactl *q); -#define Q_QUOTACHECK 0x800100 /* deprecated as of 2.4 */ -#define Q_INITQUOTA 0x800101 /* deprecated as of 2.4 */ -#define Q_GETOINFO 0x800102 /* get obd quota info */ -#define Q_GETOQUOTA 0x800103 /* get obd quotas */ -#define Q_FINVALIDATE 0x800104 /* deprecated as of 2.4 */ - #define Q_COPY(out, in, member) (out)->member = (in)->member #define QCTL_COPY(out, in) \ @@ -2061,17 +2040,17 @@ typedef enum { MDS_DISCONNECT = 39, MDS_GETSTATUS = 40, MDS_STATFS = 41, - MDS_PIN = 42, - MDS_UNPIN = 43, + MDS_PIN = 42, /* obsolete, never used in a release */ + MDS_UNPIN = 43, /* obsolete, never used in a release */ MDS_SYNC = 44, - MDS_DONE_WRITING = 45, + MDS_DONE_WRITING = 45, /* obsolete since 2.8.0 */ MDS_SET_INFO = 46, - MDS_QUOTACHECK = 47, + MDS_QUOTACHECK = 47, /* not used since 2.4 */ MDS_QUOTACTL = 48, MDS_GETXATTR = 49, MDS_SETXATTR = 50, /* obsolete, now it's MDS_REINT op */ MDS_WRITEPAGE = 51, - MDS_IS_SUBDIR = 52, + MDS_IS_SUBDIR = 52, /* obsolete, never used in a release */ MDS_GET_INFO = 53, MDS_HSM_STATE_GET = 54, MDS_HSM_STATE_SET = 55, @@ -2089,11 +2068,11 @@ typedef enum { /* opcodes for object update */ typedef enum { - UPDATE_OBJ = 1000, - UPDATE_LAST_OPC + OUT_UPDATE = 1000, + OUT_UPDATE_LAST_OPC } update_cmd_t; -#define UPDATE_FIRST_OPC UPDATE_OBJ +#define OUT_UPDATE_FIRST_OPC OUT_UPDATE /* * Do not exceed 63 @@ -2108,7 +2087,7 @@ typedef enum { REINT_OPEN = 6, REINT_SETXATTR = 7, REINT_RMENTRY = 8, -// REINT_WRITE = 9, + REINT_MIGRATE = 9, REINT_MAX } mds_reint_t, mdt_reint_t; @@ -2127,6 +2106,7 @@ extern void lustre_swab_generic_32s (__u32 *val); #define DISP_OPEN_LOCK 0x02000000 #define DISP_OPEN_LEASE 0x04000000 #define DISP_OPEN_STRIPE 0x08000000 +#define DISP_OPEN_DENY 0x10000000 /* INODE LOCK PARTS */ #define MDS_INODELOCK_LOOKUP 0x000001 /* For namespace, dentry etc, and also @@ -2169,26 +2149,6 @@ enum { #define MDS_STATUS_CONN 1 #define MDS_STATUS_LOV 2 -/* mdt_thread_info.mti_flags. */ -enum md_op_flags { - /* The flag indicates Size-on-MDS attributes are changed. */ - MF_SOM_CHANGE = (1 << 0), - /* Flags indicates an epoch opens or closes. */ - MF_EPOCH_OPEN = (1 << 1), - MF_EPOCH_CLOSE = (1 << 2), - MF_MDC_CANCEL_FID1 = (1 << 3), - MF_MDC_CANCEL_FID2 = (1 << 4), - MF_MDC_CANCEL_FID3 = (1 << 5), - MF_MDC_CANCEL_FID4 = (1 << 6), - /* There is a pending attribute update. 
*/ - MF_SOM_AU = (1 << 7), - /* Cancel OST locks while getattr OST attributes. */ - MF_GETATTR_LOCK = (1 << 8), - MF_GET_MDT_IDX = (1 << 9), -}; - -#define MF_SOM_LOCAL_FLAGS (MF_SOM_CHANGE | MF_EPOCH_OPEN | MF_EPOCH_CLOSE) - #define LUSTRE_BFLAG_UNCOMMITTED_WRITES 0x1 /* these should be identical to their EXT4_*_FL counterparts, they are @@ -2235,51 +2195,51 @@ enum md_transient_state { }; struct mdt_body { - struct lu_fid fid1; - struct lu_fid fid2; - struct lustre_handle handle; - __u64 valid; - __u64 size; /* Offset, in the case of MDS_READPAGE */ - obd_time mtime; - obd_time atime; - obd_time ctime; - __u64 blocks; /* XID, in the case of MDS_READPAGE */ - __u64 ioepoch; - __u64 t_state; /* transient file state defined in - * enum md_transient_state - * was "ino" until 2.4.0 */ - __u32 fsuid; - __u32 fsgid; - __u32 capability; - __u32 mode; - __u32 uid; - __u32 gid; - __u32 flags; /* from vfs for pin/unpin, LUSTRE_BFLAG close */ - __u32 rdev; - __u32 nlink; /* #bytes to read in the case of MDS_READPAGE */ - __u32 unused2; /* was "generation" until 2.4.0 */ - __u32 suppgid; - __u32 eadatasize; - __u32 aclsize; - __u32 max_mdsize; - __u32 max_cookiesize; - __u32 uid_h; /* high 32-bits of uid, for FUID */ - __u32 gid_h; /* high 32-bits of gid, for FUID */ - __u32 padding_5; /* also fix lustre_swab_mdt_body */ - __u64 padding_6; - __u64 padding_7; - __u64 padding_8; - __u64 padding_9; - __u64 padding_10; + struct lu_fid mbo_fid1; + struct lu_fid mbo_fid2; + struct lustre_handle mbo_handle; + __u64 mbo_valid; + __u64 mbo_size; /* Offset, in the case of MDS_READPAGE */ + __s64 mbo_mtime; + __s64 mbo_atime; + __s64 mbo_ctime; + __u64 mbo_blocks; /* XID, in the case of MDS_READPAGE */ + __u64 mbo_ioepoch; + __u64 mbo_t_state; /* transient file state defined in + * enum md_transient_state + * was "ino" until 2.4.0 */ + __u32 mbo_fsuid; + __u32 mbo_fsgid; + __u32 mbo_capability; + __u32 mbo_mode; + __u32 mbo_uid; + __u32 mbo_gid; + __u32 mbo_flags; + __u32 mbo_rdev; + __u32 mbo_nlink; /* #bytes to read in the case of MDS_READPAGE */ + __u32 mbo_unused2; /* was "generation" until 2.4.0 */ + __u32 mbo_suppgid; + __u32 mbo_eadatasize; + __u32 mbo_aclsize; + __u32 mbo_max_mdsize; + __u32 mbo_unused3; /* was max_cookiesize until 2.8 */ + __u32 mbo_uid_h; /* high 32-bits of uid, for FUID */ + __u32 mbo_gid_h; /* high 32-bits of gid, for FUID */ + __u32 mbo_padding_5; /* also fix lustre_swab_mdt_body */ + __u64 mbo_padding_6; + __u64 mbo_padding_7; + __u64 mbo_padding_8; + __u64 mbo_padding_9; + __u64 mbo_padding_10; }; /* 216 */ extern void lustre_swab_mdt_body (struct mdt_body *b); struct mdt_ioepoch { - struct lustre_handle handle; - __u64 ioepoch; - __u32 flags; - __u32 padding; + struct lustre_handle mio_handle; + __u64 mio_unused1; /* was ioepoch */ + __u32 mio_unused2; /* was flags */ + __u32 mio_padding; }; extern void lustre_swab_mdt_ioepoch (struct mdt_ioepoch *b); @@ -2325,9 +2285,9 @@ struct mdt_rec_setattr { __u32 sa_gid; __u64 sa_size; __u64 sa_blocks; - obd_time sa_mtime; - obd_time sa_atime; - obd_time sa_ctime; + __s64 sa_mtime; + __s64 sa_atime; + __s64 sa_ctime; __u32 sa_attr_flags; __u32 sa_mode; __u32 sa_bias; /* some operation flags */ @@ -2368,12 +2328,9 @@ extern void lustre_swab_mdt_rec_setattr (struct mdt_rec_setattr *sa); #define MDS_FMODE_CLOSED 00000000 #define MDS_FMODE_EXEC 00000004 -/* IO Epoch is opened on a closed file. */ -#define MDS_FMODE_EPOCH 01000000 -/* IO Epoch is opened on a file truncate. 
*/ -#define MDS_FMODE_TRUNC 02000000 -/* Size-on-MDS Attribute Update is pending. */ -#define MDS_FMODE_SOM 04000000 +/* MDS_FMODE_EPOCH 01000000 obsolete since 2.8.0 */ +/* MDS_FMODE_TRUNC 02000000 obsolete since 2.8.0 */ +/* MDS_FMODE_SOM 04000000 obsolete since 2.8.0 */ #define MDS_OPEN_CREATED 00000010 #define MDS_OPEN_CROSS 00000020 @@ -2408,29 +2365,18 @@ extern void lustre_swab_mdt_rec_setattr (struct mdt_rec_setattr *sa); */ #define MDS_OPEN_RELEASE 02000000000000ULL /* Open the file for HSM release */ -/* permission for create non-directory file */ -#define MAY_CREATE (1 << 7) -/* permission for create directory file */ -#define MAY_LINK (1 << 8) -/* permission for delete from the directory */ -#define MAY_UNLINK (1 << 9) -/* source's permission for rename */ -#define MAY_RENAME_SRC (1 << 10) -/* target's permission for rename */ -#define MAY_RENAME_TAR (1 << 11) -/* part (parent's) VTX permission check */ -#define MAY_VTX_PART (1 << 12) -/* full VTX permission check */ -#define MAY_VTX_FULL (1 << 13) -/* lfs rgetfacl permission check */ -#define MAY_RGETFACL (1 << 14) +/* lustre internal open flags, which should not be set from user space */ +#define MDS_OPEN_FL_INTERNAL (MDS_OPEN_HAS_EA | MDS_OPEN_HAS_OBJS | \ + MDS_OPEN_OWNEROVERRIDE | MDS_OPEN_LOCK | \ + MDS_OPEN_BY_FID | MDS_OPEN_LEASE | \ + MDS_OPEN_RELEASE) enum mds_op_bias { MDS_CHECK_SPLIT = 1 << 0, MDS_CROSS_REF = 1 << 1, MDS_VTX_BYPASS = 1 << 2, MDS_PERM_BYPASS = 1 << 3, - MDS_SOM = 1 << 4, +/* MDS_SOM = 1 << 4, obsolete since 2.8.0 */ MDS_QUOTA_IGNORE = 1 << 5, /* Was MDS_CLOSE_CLEANUP (1 << 6), No more used */ MDS_KEEP_ORPHAN = 1 << 7, @@ -2439,6 +2385,8 @@ enum mds_op_bias { MDS_CREATE_VOLATILE = 1 << 10, MDS_OWNEROVERRIDE = 1 << 11, MDS_HSM_RELEASE = 1 << 12, + MDS_RENAME_MIGRATE = 1 << 13, + MDS_CLOSE_LAYOUT_SWAP = 1 << 14, }; /* instance of mdt_reint_rec */ @@ -2456,7 +2404,7 @@ struct mdt_rec_create { struct lu_fid cr_fid1; struct lu_fid cr_fid2; struct lustre_handle cr_old_handle; /* handle in case of open replay */ - obd_time cr_time; + __s64 cr_time; __u64 cr_rdev; __u64 cr_ioepoch; __u64 cr_padding_1; /* rr_blocks */ @@ -2496,7 +2444,7 @@ struct mdt_rec_link { __u32 lk_suppgid2_h; struct lu_fid lk_fid1; struct lu_fid lk_fid2; - obd_time lk_time; + __s64 lk_time; __u64 lk_padding_1; /* rr_atime */ __u64 lk_padding_2; /* rr_ctime */ __u64 lk_padding_3; /* rr_size */ @@ -2523,7 +2471,7 @@ struct mdt_rec_unlink { __u32 ul_suppgid2_h; struct lu_fid ul_fid1; struct lu_fid ul_fid2; - obd_time ul_time; + __s64 ul_time; __u64 ul_padding_2; /* rr_atime */ __u64 ul_padding_3; /* rr_ctime */ __u64 ul_padding_4; /* rr_size */ @@ -2550,7 +2498,7 @@ struct mdt_rec_rename { __u32 rn_suppgid2_h; struct lu_fid rn_fid1; struct lu_fid rn_fid2; - obd_time rn_time; + __s64 rn_time; __u64 rn_padding_1; /* rr_atime */ __u64 rn_padding_2; /* rr_ctime */ __u64 rn_padding_3; /* rr_size */ @@ -2580,7 +2528,7 @@ struct mdt_rec_setxattr { __u32 sx_padding_2; __u32 sx_padding_3; __u64 sx_valid; - obd_time sx_time; + __s64 sx_time; __u64 sx_padding_5; /* rr_ctime */ __u64 sx_padding_6; /* rr_size */ __u64 sx_padding_7; /* rr_blocks */ @@ -2613,9 +2561,9 @@ struct mdt_rec_reint { __u32 rr_suppgid2_h; struct lu_fid rr_fid1; struct lu_fid rr_fid2; - obd_time rr_mtime; - obd_time rr_atime; - obd_time rr_ctime; + __s64 rr_mtime; + __s64 rr_atime; + __s64 rr_ctime; __u64 rr_size; __u64 rr_blocks; __u32 rr_bias; @@ -2633,7 +2581,7 @@ struct lmv_desc { __u32 ld_tgt_count; /* how many MDS's */ __u32 ld_active_tgt_count; /* how many active */ __u32 
ld_default_stripe_count; /* how many objects are used */ - __u32 ld_pattern; /* default MEA_MAGIC_* */ + __u32 ld_pattern; /* default hash pattern */ __u64 ld_default_hash_size; __u64 ld_padding_1; /* also fix lustre_swab_lmv_desc */ __u32 ld_padding_2; /* also fix lustre_swab_lmv_desc */ @@ -2645,25 +2593,125 @@ struct lmv_desc { extern void lustre_swab_lmv_desc (struct lmv_desc *ld); -/* TODO: lmv_stripe_md should contain mds capabilities for all slave fids */ -struct lmv_stripe_md { - __u32 mea_magic; - __u32 mea_count; - __u32 mea_master; - __u32 mea_padding; - char mea_pool_name[LOV_MAXPOOLNAME]; - struct lu_fid mea_ids[0]; +/* LMV layout EA, and it will be stored both in master and slave object */ +struct lmv_mds_md_v1 { + __u32 lmv_magic; + __u32 lmv_stripe_count; + __u32 lmv_master_mdt_index; /* On master object, it is master + * MDT index, on slave object, it + * is stripe index of the slave obj */ + __u32 lmv_hash_type; /* dir stripe policy, i.e. indicate + * which hash function to be used, + * Note: only lower 16 bits is being + * used for now. Higher 16 bits will + * be used to mark the object status, + * for example migrating or dead. */ + __u32 lmv_layout_version; /* Used for directory restriping */ + __u32 lmv_padding1; + __u64 lmv_padding2; + __u64 lmv_padding3; + char lmv_pool_name[LOV_MAXPOOLNAME + 1]; /* pool name */ + struct lu_fid lmv_stripe_fids[0]; /* FIDs for each stripe */ +}; + +#define LMV_MAGIC_V1 0x0CD20CD0 /* normal stripe lmv magic */ +#define LMV_MAGIC LMV_MAGIC_V1 + +/* #define LMV_USER_MAGIC 0x0CD30CD0 */ +#define LMV_MAGIC_STRIPE 0x0CD40CD0 /* magic for dir sub_stripe */ + +/* Right now only the lower part(0-16bits) of lmv_hash_type is being used, + * and the higher part will be the flag to indicate the status of object, + * for example the object is being migrated. And the hash function + * might be interpreted differently with different flags. */ +#define LMV_HASH_TYPE_MASK 0x0000ffff + +#define LMV_HASH_FLAG_MIGRATION 0x80000000 +#define LMV_HASH_FLAG_DEAD 0x40000000 +#define LMV_HASH_FLAG_BAD_TYPE 0x20000000 + +/* The striped directory has ever lost its master LMV EA, then LFSCK + * re-generated it. This flag is used to indicate such case. It is an + * on-disk flag. 
*/ +#define LMV_HASH_FLAG_LOST_LMV 0x10000000 + +/** + * The FNV-1a hash algorithm is as follows: + * hash = FNV_offset_basis + * for each octet_of_data to be hashed + * hash = hash XOR octet_of_data + * hash = hash × FNV_prime + * return hash + * http://en.wikipedia.org/wiki/Fowler–Noll–Vo_hash_function#FNV-1a_hash + * + * http://www.isthe.com/chongo/tech/comp/fnv/index.html#FNV-reference-source + * FNV_prime is 2^40 + 2^8 + 0xb3 = 0x100000001b3ULL + **/ +#define LUSTRE_FNV_1A_64_PRIME 0x100000001b3ULL +#define LUSTRE_FNV_1A_64_OFFSET_BIAS 0xcbf29ce484222325ULL +static inline __u64 lustre_hash_fnv_1a_64(const void *buf, size_t size) +{ + __u64 hash = LUSTRE_FNV_1A_64_OFFSET_BIAS; + const unsigned char *p = buf; + size_t i; + + for (i = 0; i < size; i++) { + hash ^= p[i]; + hash *= LUSTRE_FNV_1A_64_PRIME; + } + + return hash; +} + +union lmv_mds_md { + __u32 lmv_magic; + struct lmv_mds_md_v1 lmv_md_v1; + struct lmv_user_md lmv_user_md; }; -extern void lustre_swab_lmv_stripe_md(struct lmv_stripe_md *mea); +extern void lustre_swab_lmv_mds_md(union lmv_mds_md *lmm); + +static inline int lmv_mds_md_size(int stripe_count, unsigned int lmm_magic) +{ + switch (lmm_magic) { + case LMV_MAGIC_V1:{ + struct lmv_mds_md_v1 *lmm1; + + return sizeof(*lmm1) + stripe_count * + sizeof(lmm1->lmv_stripe_fids[0]); + } + default: + return -EINVAL; + } +} -#define MEA_MAGIC_LAST_CHAR 0xb2221ca1 -#define MEA_MAGIC_ALL_CHARS 0xb222a11c -#define MEA_MAGIC_HASH_SEGMENT 0xb222a11b +static inline int lmv_mds_md_stripe_count_get(const union lmv_mds_md *lmm) +{ + switch (le32_to_cpu(lmm->lmv_magic)) { + case LMV_MAGIC_V1: + return le32_to_cpu(lmm->lmv_md_v1.lmv_stripe_count); + case LMV_USER_MAGIC: + return le32_to_cpu(lmm->lmv_user_md.lum_stripe_count); + default: + return -EINVAL; + } +} -#define MAX_HASH_SIZE_32 0x7fffffffUL -#define MAX_HASH_SIZE 0x7fffffffffffffffULL -#define MAX_HASH_HIGHEST_BIT 0x1000000000000000ULL +static inline int lmv_mds_md_stripe_count_set(union lmv_mds_md *lmm, + unsigned int stripe_count) +{ + switch (le32_to_cpu(lmm->lmv_magic)) { + case LMV_MAGIC_V1: + lmm->lmv_md_v1.lmv_stripe_count = cpu_to_le32(stripe_count); + break; + case LMV_USER_MAGIC: + lmm->lmv_user_md.lum_stripe_count = cpu_to_le32(stripe_count); + break; + default: + return -EINVAL; + } + return 0; +} enum fld_rpc_opc { FLD_QUERY = 900, @@ -2689,6 +2737,14 @@ enum fld_op { FLD_LOOKUP = 2, }; +/* LFSCK opcodes */ +typedef enum { + LFSCK_NOTIFY = 1101, + LFSCK_QUERY = 1102, + LFSCK_LAST_OPC, + LFSCK_FIRST_OPC = LFSCK_NOTIFY +} lfsck_cmd_t; + /* * LOV data structures */ @@ -2699,6 +2755,8 @@ enum fld_op { * protocol, this will limit the max number of OSTs per LOV */ #define LOV_DESC_MAGIC 0xB0CCDE5C +#define LOV_DESC_QOS_MAXAGE_DEFAULT 5 /* Seconds */ +#define LOV_DESC_STRIPE_SIZE_DEFAULT (1 << LNET_MTU_BITS) /* LOV settings descriptor (should only contain static info) */ struct lov_desc { @@ -2746,10 +2804,10 @@ struct ldlm_res_id { extern void lustre_swab_ldlm_res_id (struct ldlm_res_id *id); -static inline int ldlm_res_eq(const struct ldlm_res_id *res0, - const struct ldlm_res_id *res1) +static inline bool ldlm_res_eq(const struct ldlm_res_id *res0, + const struct ldlm_res_id *res1) { - return !memcmp(res0, res1, sizeof(*res0)); + return memcmp(res0, res1, sizeof(*res0)) == 0; } /* lock types */ @@ -2784,17 +2842,17 @@ struct ldlm_extent { __u64 gid; }; -static inline int ldlm_extent_overlap(struct ldlm_extent *ex1, - struct ldlm_extent *ex2) +static inline int ldlm_extent_overlap(const struct ldlm_extent *ex1, + const 
struct ldlm_extent *ex2) { - return (ex1->start <= ex2->end) && (ex2->start <= ex1->end); + return ex1->start <= ex2->end && ex2->start <= ex1->end; } /* check if @ex1 contains @ex2 */ -static inline int ldlm_extent_contain(struct ldlm_extent *ex1, - struct ldlm_extent *ex2) +static inline int ldlm_extent_contain(const struct ldlm_extent *ex1, + const struct ldlm_extent *ex2) { - return (ex1->start <= ex2->start) && (ex1->end >= ex2->end); + return ex1->start <= ex2->start && ex1->end >= ex2->end; } struct ldlm_inodebits { @@ -2970,14 +3028,14 @@ extern void lustre_swab_mgs_config_res(struct mgs_config_res *body); #define CM_START_SKIP (CM_START | CM_SKIP) struct cfg_marker { - __u32 cm_step; /* aka config version */ - __u32 cm_flags; - __u32 cm_vers; /* lustre release version number */ - __u32 cm_padding; /* 64 bit align */ - obd_time cm_createtime; /*when this record was first created */ - obd_time cm_canceltime; /*when this record is no longer valid*/ - char cm_tgtname[MTI_NAME_MAXLEN]; - char cm_comment[MTI_NAME_MAXLEN]; + __u32 cm_step; /* aka config version */ + __u32 cm_flags; + __u32 cm_vers; /* lustre release version number */ + __u32 cm_padding; /* 64 bit align */ + __s64 cm_createtime; /*when this record was first created */ + __s64 cm_canceltime; /*when this record is no longer valid*/ + char cm_tgtname[MTI_NAME_MAXLEN]; + char cm_comment[MTI_NAME_MAXLEN]; }; extern void lustre_swab_cfg_marker(struct cfg_marker *marker, @@ -2990,13 +3048,37 @@ extern void lustre_swab_cfg_marker(struct cfg_marker *marker, typedef enum { OBD_PING = 400, OBD_LOG_CANCEL, - OBD_QC_CALLBACK, + OBD_QC_CALLBACK, /* not used since 2.4 */ OBD_IDX_READ, OBD_LAST_OPC } obd_cmd_t; #define OBD_FIRST_OPC OBD_PING -/* catalog of log objects */ +/** + * llog contexts indices. + * + * There is compatibility problem with indexes below, they are not + * continuous and must keep their numbers for compatibility needs. + * See LU-5218 for details. 
+ */ +enum llog_ctxt_id { + LLOG_CONFIG_ORIG_CTXT = 0, + LLOG_CONFIG_REPL_CTXT = 1, + LLOG_MDS_OST_ORIG_CTXT = 2, + LLOG_MDS_OST_REPL_CTXT = 3, /* kept just to avoid re-assignment */ + LLOG_SIZE_ORIG_CTXT = 4, + LLOG_SIZE_REPL_CTXT = 5, + LLOG_TEST_ORIG_CTXT = 8, + LLOG_TEST_REPL_CTXT = 9, /* kept just to avoid re-assignment */ + LLOG_CHANGELOG_ORIG_CTXT = 12, /**< changelog generation on mdd */ + LLOG_CHANGELOG_REPL_CTXT = 13, /**< changelog access on clients */ + /* for multiple changelog consumers */ + LLOG_CHANGELOG_USER_ORIG_CTXT = 14, + LLOG_AGENT_ORIG_CTXT = 15, /**< agent requests generation on cdt */ + LLOG_UPDATELOG_ORIG_CTXT = 16, /* update log */ + LLOG_UPDATELOG_REPL_CTXT = 17, /* update log */ + LLOG_MAX_CTXTS +}; /** Identifier for a single log object */ struct llog_logid { @@ -3037,6 +3119,7 @@ typedef enum { CHANGELOG_REC = LLOG_OP_MAGIC | 0x60000, CHANGELOG_USER_REC = LLOG_OP_MAGIC | 0x70000, HSM_AGENT_REC = LLOG_OP_MAGIC | 0x80000, + UPDATE_REC = LLOG_OP_MAGIC | 0xa0000, LLOG_HDR_MAGIC = LLOG_OP_MAGIC | 0x45539, LLOG_LOGID_MAGIC = LLOG_OP_MAGIC | 0x4553b, } llog_op_type; @@ -3068,6 +3151,12 @@ struct llog_rec_tail { (rec->lrh_len - sizeof(struct llog_rec_hdr) - \ sizeof(struct llog_rec_tail)) +static inline void *rec_tail(struct llog_rec_hdr *rec) +{ + return (void *)((char *)rec + rec->lrh_len - + sizeof(struct llog_rec_tail)); +} + struct llog_logid_rec { struct llog_rec_hdr lid_hdr; struct llog_logid lid_id; @@ -3079,16 +3168,16 @@ struct llog_logid_rec { struct llog_unlink_rec { struct llog_rec_hdr lur_hdr; - obd_id lur_oid; - obd_count lur_oseq; - obd_count lur_count; + __u64 lur_oid; + __u32 lur_oseq; + __u32 lur_count; struct llog_rec_tail lur_tail; } __attribute__((packed)); struct llog_unlink64_rec { struct llog_rec_hdr lur_hdr; struct lu_fid lur_fid; - obd_count lur_count; /* to destroy the lost precreated */ + __u32 lur_count; /* to destroy the lost precreated */ __u32 lur_padding1; __u64 lur_padding2; __u64 lur_padding3; @@ -3102,7 +3191,7 @@ struct llog_setattr64_rec { __u32 lsr_uid_h; __u32 lsr_gid; __u32 lsr_gid_h; - __u64 lsr_padding; + __u64 lsr_valid; struct llog_rec_tail lsr_tail; } __attribute__((packed)); @@ -3122,8 +3211,10 @@ struct llog_size_change_rec { #define CHANGELOG_MINMASK (1 << CL_MARK) /** bits covering all \a changelog_rec_type's */ #define CHANGELOG_ALLMASK 0XFFFFFFFF -/** default \a changelog_rec_type mask */ -#define CHANGELOG_DEFMASK CHANGELOG_ALLMASK & ~(1 << CL_ATIME | 1 << CL_CLOSE) +/** default \a changelog_rec_type mask. Allow all of them, except + * CL_ATIME since it can really be time consuming, and not necessary + * under normal use. 
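For context on the record framing that the new rec_tail() helper earlier in this hunk relies on, here is a stand-alone sketch: every llog record is lrh_len bytes long and ends with a llog_rec_tail that mirrors the header's length and index. The local structures below only mimic the real llog_rec_hdr/llog_rec_tail declared elsewhere in this header, and the field names are assumptions made for the sketch:

#include <stdio.h>
#include <stdlib.h>

/* Local stand-ins for llog_rec_hdr/llog_rec_tail (names assumed). */
struct rec_hdr  { unsigned int lrh_len, lrh_index, lrh_type, lrh_id; };
struct rec_tail { unsigned int lrt_len, lrt_index; };

/* Same pointer arithmetic as the rec_tail() helper above: the tail
 * sits lrh_len bytes after the start of the record, minus its own
 * size. */
static struct rec_tail *tail_of(struct rec_hdr *rec)
{
	return (struct rec_tail *)((char *)rec + rec->lrh_len -
				   sizeof(struct rec_tail));
}

int main(void)
{
	size_t payload = 64;	/* hypothetical record payload size */
	size_t len = sizeof(struct rec_hdr) + payload +
		     sizeof(struct rec_tail);
	struct rec_hdr *rec = calloc(1, len);
	struct rec_tail *tail;

	if (rec == NULL)
		return 1;
	rec->lrh_len = len;
	rec->lrh_index = 1;
	tail = tail_of(rec);
	/* A well-formed record mirrors its length and index in the tail. */
	tail->lrt_len = rec->lrh_len;
	tail->lrt_index = rec->lrh_index;
	printf("record is %u bytes, tail at offset %zu\n",
	       rec->lrh_len, (size_t)((char *)tail - (char *)rec));
	free(rec);
	return 0;
}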
*/ +#define CHANGELOG_DEFMASK (CHANGELOG_ALLMASK & ~(1 << CL_ATIME)) /* changelog llog name, needed by client replicators */ #define CHANGELOG_CATALOG "changelog_catalog" @@ -3135,15 +3226,9 @@ struct changelog_setinfo { /** changelog record */ struct llog_changelog_rec { - struct llog_rec_hdr cr_hdr; - struct changelog_rec cr; - struct llog_rec_tail cr_tail; /**< for_sizezof_only */ -} __attribute__((packed)); - -struct llog_changelog_ext_rec { - struct llog_rec_hdr cr_hdr; - struct changelog_ext_rec cr; - struct llog_rec_tail cr_tail; /**< for_sizezof_only */ + struct llog_rec_hdr cr_hdr; + struct changelog_rec cr; /**< Variable length field */ + struct llog_rec_tail cr_do_not_use; /**< for_sizeof_only */ } __attribute__((packed)); #define CHANGELOG_USER_PREFIX "cl" @@ -3164,7 +3249,7 @@ enum agent_req_status { ARS_SUCCEED, }; -static inline char *agent_req_status2name(enum agent_req_status ars) +static inline const char *agent_req_status2name(enum agent_req_status ars) { switch (ars) { case ARS_WAITING: @@ -3217,38 +3302,56 @@ struct llog_gen_rec { struct llog_rec_tail lgr_tail; }; -/* On-disk header structure of each log object, stored in little endian order */ -#define LLOG_CHUNK_SIZE 8192 -#define LLOG_HEADER_SIZE (96) -#define LLOG_BITMAP_BYTES (LLOG_CHUNK_SIZE - LLOG_HEADER_SIZE) - -#define LLOG_MIN_REC_SIZE (24) /* round(llog_rec_hdr + llog_rec_tail) */ - /* flags for the logs */ enum llog_flag { LLOG_F_ZAP_WHEN_EMPTY = 0x1, LLOG_F_IS_CAT = 0x2, LLOG_F_IS_PLAIN = 0x4, + LLOG_F_EXT_JOBID = 0x8, + + LLOG_F_EXT_MASK = LLOG_F_EXT_JOBID, }; +/* On-disk header structure of each log object, stored in little endian order */ +#define LLOG_MIN_CHUNK_SIZE 8192 +#define LLOG_HEADER_SIZE (96) /* sizeof (llog_log_hdr) + sizeof(llh_tail) + * - sizeof(llh_bitmap) */ +#define LLOG_BITMAP_BYTES (LLOG_MIN_CHUNK_SIZE - LLOG_HEADER_SIZE) +#define LLOG_MIN_REC_SIZE (24) /* round(llog_rec_hdr + llog_rec_tail) */ + struct llog_log_hdr { - struct llog_rec_hdr llh_hdr; - obd_time llh_timestamp; - __u32 llh_count; - __u32 llh_bitmap_offset; - __u32 llh_size; - __u32 llh_flags; - __u32 llh_cat_idx; - /* for a catalog the first plain slot is next to it */ - struct obd_uuid llh_tgtuuid; - __u32 llh_reserved[LLOG_HEADER_SIZE/sizeof(__u32) - 23]; - __u32 llh_bitmap[LLOG_BITMAP_BYTES/sizeof(__u32)]; - struct llog_rec_tail llh_tail; + struct llog_rec_hdr llh_hdr; + __s64 llh_timestamp; + __u32 llh_count; + __u32 llh_bitmap_offset; + __u32 llh_size; + __u32 llh_flags; + __u32 llh_cat_idx; + /* for a catalog the first plain slot is next to it */ + struct obd_uuid llh_tgtuuid; + __u32 llh_reserved[LLOG_HEADER_SIZE/sizeof(__u32)-23]; + /* These fields must always be at the end of the llog_log_hdr. + * Note: llh_bitmap size is variable because llog chunk size could be + * bigger than LLOG_MIN_CHUNK_SIZE, i.e. sizeof(llog_log_hdr) > 8192 + * bytes, and the real size is stored in llh_hdr.lrh_len, which means + * llh_tail should only be refered by LLOG_HDR_TAIL(). + * But this structure is also used by client/server llog interface + * (see llog_client.c), it will be kept in its original way to avoid + * compatiblity issue. 
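A small sketch of the arithmetic behind the variable-sized header described above, mirroring the LLOG_HDR_TAIL() and LLOG_HDR_BITMAP_SIZE() macros defined just below. All numbers are hypothetical (in real code the bitmap offset and tail size come from the structure itself); the point is only that both the tail position and the usable bitmap bits are derived from llh_hdr.lrh_len once a chunk may be larger than LLOG_MIN_CHUNK_SIZE:

#include <stdio.h>

int main(void)
{
	unsigned int chunk_size = 32768; /* hypothetical llh_hdr.lrh_len */
	unsigned int bitmap_off = 88;	 /* hypothetical llh_bitmap_offset */
	unsigned int tail_size = 8;	 /* hypothetical sizeof(llog_rec_tail) */

	/* Mirrors LLOG_HDR_TAIL(): the tail is at the very end of the
	 * chunk, wherever that end happens to be. */
	unsigned int tail_off = chunk_size - tail_size;

	/* Mirrors LLOG_HDR_BITMAP_SIZE(): everything between the bitmap
	 * offset and the tail is bitmap, one bit per record slot. */
	unsigned int bitmap_bits = (chunk_size - bitmap_off - tail_size) * 8;

	printf("tail at byte %u, up to %u records per chunk\n",
	       tail_off, bitmap_bits);
	return 0;
}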
*/ + __u32 llh_bitmap[LLOG_BITMAP_BYTES / sizeof(__u32)]; + struct llog_rec_tail llh_tail; } __attribute__((packed)); - -#define LLOG_BITMAP_SIZE(llh) (__u32)((llh->llh_hdr.lrh_len - \ - llh->llh_bitmap_offset - \ - sizeof(llh->llh_tail)) * 8) +#undef LLOG_HEADER_SIZE +#undef LLOG_BITMAP_BYTES + +#define LLOG_HDR_BITMAP_SIZE(llh) (__u32)((llh->llh_hdr.lrh_len - \ + llh->llh_bitmap_offset - \ + sizeof(llh->llh_tail)) * 8) +#define LLOG_HDR_BITMAP(llh) (__u32 *)((char *)(llh) + \ + (llh)->llh_bitmap_offset) +#define LLOG_HDR_TAIL(llh) ((struct llog_rec_tail *)((char *)llh + \ + llh->llh_hdr.lrh_len - \ + sizeof(llh->llh_tail))) /** log cookies are used to reference a specific log file and a record therein */ struct llog_cookie { @@ -3291,37 +3394,38 @@ struct llogd_conn_body { /* Note: 64-bit types are 64-bit aligned in structure */ struct obdo { - obd_valid o_valid; /* hot fields in this obdo */ - struct ost_id o_oi; - obd_id o_parent_seq; - obd_size o_size; /* o_size-o_blocks == ost_lvb */ - obd_time o_mtime; - obd_time o_atime; - obd_time o_ctime; - obd_blocks o_blocks; /* brw: cli sent cached bytes */ - obd_size o_grant; - - /* 32-bit fields start here: keep an even number of them via padding */ - obd_blksize o_blksize; /* optimal IO blocksize */ - obd_mode o_mode; /* brw: cli sent cache remain */ - obd_uid o_uid; - obd_gid o_gid; - obd_flag o_flags; - obd_count o_nlink; /* brw: checksum */ - obd_count o_parent_oid; - obd_count o_misc; /* brw: o_dropped */ - - __u64 o_ioepoch; /* epoch in ost writes */ - __u32 o_stripe_idx; /* holds stripe idx */ - __u32 o_parent_ver; - struct lustre_handle o_handle; /* brw: lock handle to prolong - * locks */ - struct llog_cookie o_lcookie; /* destroy: unlink cookie from - * MDS */ + __u64 o_valid; /* hot fields in this obdo */ + struct ost_id o_oi; + __u64 o_parent_seq; + __u64 o_size; /* o_size-o_blocks == ost_lvb */ + __s64 o_mtime; + __s64 o_atime; + __s64 o_ctime; + __u64 o_blocks; /* brw: cli sent cached bytes */ + __u64 o_grant; + + /* 32-bit fields start here: keep an even number of them via padding */ + __u32 o_blksize; /* optimal IO blocksize */ + __u32 o_mode; /* brw: cli sent cache remain */ + __u32 o_uid; + __u32 o_gid; + __u32 o_flags; + __u32 o_nlink; /* brw: checksum */ + __u32 o_parent_oid; + __u32 o_misc; /* brw: o_dropped */ + + __u64 o_ioepoch; /* epoch in ost writes */ + __u32 o_stripe_idx; /* holds stripe idx */ + __u32 o_parent_ver; + struct lustre_handle o_handle; /* brw: lock handle to prolong + * locks */ + struct llog_cookie o_lcookie; /* destroy: unlink cookie from + * MDS, obsolete in 2.8, reused + * in OSP */ __u32 o_uid_h; __u32 o_gid_h; - __u64 o_data_version; /* getattr: sum of iversion for + __u64 o_data_version; /* getattr: sum of iversion for * each stripe. 
* brw: grant space consumed on * the client for the write */ @@ -3336,7 +3440,65 @@ struct obdo { #define o_cksum o_nlink #define o_grant_used o_data_version -static inline void lustre_set_wire_obdo(struct obd_connect_data *ocd, +struct lfsck_request { + __u32 lr_event; + __u32 lr_index; + __u32 lr_flags; + __u32 lr_valid; + union { + __u32 lr_speed; + __u32 lr_status; + __u32 lr_type; + }; + __u16 lr_version; + __u16 lr_active; + __u16 lr_param; + __u16 lr_async_windows; + __u32 lr_flags2; + struct lu_fid lr_fid; + struct lu_fid lr_fid2; + struct lu_fid lr_fid3; + __u64 lr_padding_1; + __u64 lr_padding_2; +}; + +void lustre_swab_lfsck_request(struct lfsck_request *lr); + +struct lfsck_reply { + __u32 lr_status; + __u32 lr_padding_1; + __u64 lr_padding_2; +}; + +void lustre_swab_lfsck_reply(struct lfsck_reply *lr); + +enum lfsck_events { + LE_LASTID_REBUILDING = 1, + LE_LASTID_REBUILT = 2, + LE_PHASE1_DONE = 3, + LE_PHASE2_DONE = 4, + LE_START = 5, + LE_STOP = 6, + LE_QUERY = 7, + LE_FID_ACCESSED = 8, + LE_PEER_EXIT = 9, + LE_CONDITIONAL_DESTROY = 10, + LE_PAIRS_VERIFY = 11, + LE_SKIP_NLINK_DECLARE = 13, + LE_SKIP_NLINK = 14, + LE_SET_LMV_MASTER = 15, + LE_SET_LMV_SLAVE = 16, +}; + +enum lfsck_event_flags { + LEF_TO_OST = 0x00000001, + LEF_FROM_OST = 0x00000002, + LEF_SET_LMV_HASH = 0x00000004, + LEF_SET_LMV_ALL = 0x00000008, + LEF_RECHECK_NAME_HASH = 0x00000010, +}; + +static inline void lustre_set_wire_obdo(const struct obd_connect_data *ocd, struct obdo *wobdo, const struct obdo *lobdo) { @@ -3354,11 +3516,11 @@ static inline void lustre_set_wire_obdo(struct obd_connect_data *ocd, } } -static inline void lustre_get_wire_obdo(struct obd_connect_data *ocd, +static inline void lustre_get_wire_obdo(const struct obd_connect_data *ocd, struct obdo *lobdo, const struct obdo *wobdo) { - obd_flag local_flags = 0; + __u32 local_flags = 0; if (lobdo->o_valid & OBD_MD_FLFLAGS) local_flags = lobdo->o_flags & OBD_FL_LOCAL_MASK; @@ -3390,20 +3552,22 @@ struct ost_body { /* Key for FIEMAP to be used in get_info calls */ struct ll_fiemap_info_key { - char name[8]; - struct obdo oa; - struct ll_user_fiemap fiemap; + char lfik_name[8]; + struct obdo lfik_oa; + struct fiemap lfik_fiemap; }; extern void lustre_swab_ost_body (struct ost_body *b); -extern void lustre_swab_ost_last_id(obd_id *id); -extern void lustre_swab_fiemap(struct ll_user_fiemap *fiemap); +extern void lustre_swab_ost_last_id(__u64 *id); +extern void lustre_swab_fiemap(struct fiemap *fiemap); extern void lustre_swab_lov_user_md_v1(struct lov_user_md_v1 *lum); extern void lustre_swab_lov_user_md_v3(struct lov_user_md_v3 *lum); extern void lustre_swab_lov_user_md_objects(struct lov_user_ost_data *lod, int stripe_count); extern void lustre_swab_lov_mds_md(struct lov_mds_md *lmm); +void lustre_print_user_md(unsigned int level, struct lov_user_md *lum, + const char *msg); /* llog_swab.c */ extern void lustre_swab_llogd_body (struct llogd_body *d); @@ -3475,6 +3639,7 @@ enum idx_info_flags { II_FL_VARKEY = 1 << 1, /* keys can be of variable size */ II_FL_VARREC = 1 << 2, /* records can be of variable size */ II_FL_NONUNQ = 1 << 3, /* index supports non-unique keys */ + II_FL_NOKEY = 1 << 4, /* client doesn't care about key */ }; #define LIP_MAGIC 0x8A6D6B6C @@ -3564,19 +3729,14 @@ enum { #define CAPA_OPC_MDS_DEFAULT ~CAPA_OPC_OSS_ONLY #define CAPA_OPC_OSS_DEFAULT ~(CAPA_OPC_MDS_ONLY | CAPA_OPC_OSS_ONLY) -/* MDS capability covers object capability for operations of body r/w - * (dir readpage/sendpage), index lookup/insert/delete and meta data 
r/w, - * while OSS capability only covers object capability for operations of - * oss data(file content) r/w/truncate. - */ -static inline int capa_for_mds(struct lustre_capa *c) +static inline bool lovea_slot_is_dummy(const struct lov_ost_data_v1 *obj) { - return (c->lc_opc & CAPA_OPC_INDEX_LOOKUP) != 0; -} + /* zero area does not care about the bytes-order. */ + if (obj->l_ost_oi.oi.oi_id == 0 && obj->l_ost_oi.oi.oi_seq == 0 && + obj->l_ost_idx == 0 && obj->l_ost_gen == 0) + return true; -static inline int capa_for_oss(struct lustre_capa *c) -{ - return (c->lc_opc & CAPA_OPC_INDEX_LOOKUP) == 0; + return false; } /* lustre_capa::lc_hmac_alg */ @@ -3629,6 +3789,14 @@ struct getinfo_fid2path { void lustre_swab_fid2path (struct getinfo_fid2path *gf); +/** path2parent request/reply structures */ +struct getparent { + struct lu_fid gp_fid; /**< parent FID */ + __u32 gp_linkno; /**< hardlink number */ + __u32 gp_name_size; /**< size of the name field */ + char gp_name[0]; /**< zero-terminated link name */ +} __attribute__((packed)); + enum { LAYOUT_INTENT_ACCESS = 0, LAYOUT_INTENT_READ = 1, @@ -3675,90 +3843,240 @@ extern void lustre_swab_hsm_user_item(struct hsm_user_item *hui); extern void lustre_swab_hsm_request(struct hsm_request *hr); /** - * These are object update opcode under UPDATE_OBJ, which is currently - * being used by cross-ref operations between MDT. + * OUT_UPDATE RPC Format * * During the cross-ref operation, the Master MDT, which the client send the * request to, will disassembly the operation into object updates, then OSP * will send these updates to the remote MDT to be executed. * - * Update request format - * magic: UPDATE_BUFFER_MAGIC_V1 - * Count: How many updates in the req. - * bufs[0] : following are packets of object. - * update[0]: - * type: object_update_op, the op code of update - * fid: The object fid of the update. - * lens/bufs: other parameters of the update. - * update[1]: - * type: object_update_op, the op code of update - * fid: The object fid of the update. - * lens/bufs: other parameters of the update. - * .......... - * update[7]: type: object_update_op, the op code of update - * fid: The object fid of the update. - * lens/bufs: other parameters of the update. - * Current 8 maxim updates per object update request. + * An UPDATE_OBJ RPC does a list of updates. Each update belongs to an + * operation and does a type of modification to an object. * - ******************************************************************* - * update reply format: + * Request Format * - * ur_version: UPDATE_REPLY_V1 - * ur_count: The count of the reply, which is usually equal - * to the number of updates in the request. - * ur_lens: The reply lengths of each object update. + * update_buf + * update (1st) + * update (2nd) + * ... + * update (ub_count-th) * - * replies: 1st update reply [4bytes_ret: other body] - * 2nd update reply [4bytes_ret: other body] - * ..... - * nth update reply [4bytes_ret: other body] + * ub_count must be less than or equal to UPDATE_PER_RPC_MAX. * - * For each reply of the update, the format would be - * result(4 bytes):Other stuff + * Reply Format + * + * update_reply + * rc [+ buffers] (1st) + * rc [+ buffers] (2st) + * ... + * rc [+ buffers] (nr_count-th) + * + * ur_count must be less than or equal to UPDATE_PER_RPC_MAX and should usually + * be equal to ub_count. + */ + +/** + * Type of each update, if adding/deleting update, please also update + * update_opcode in lustre/target/out_lib.c. 
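To make the request format described above concrete, here is a sketch that walks every update packed into an object_update_request using the object_update_request_get() helper defined further down in this hunk. It assumes the OUT definitions from this header are visible to user-space code; endian conversion and buffer validation are deliberately omitted:

#include <stdio.h>
/* Assumes object_update_request, object_update and
 * object_update_request_get() from this header are in scope. */

static void out_dump_request(const struct object_update_request *our)
{
	unsigned int i;

	for (i = 0; i < our->ourq_count; i++) {
		struct object_update *update;
		size_t size = 0;

		update = object_update_request_get(our, i, &size);
		if (update == NULL)	/* index out of range */
			break;
		/* ou_type is one of enum update_type (OUT_CREATE, ...) */
		printf("update %u: type %u, %u params, %zu bytes\n",
		       i, (unsigned int)update->ou_type,
		       (unsigned int)update->ou_params_count, size);
	}
}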
*/ +enum update_type { + OUT_START = 0, + OUT_CREATE = 1, + OUT_DESTROY = 2, + OUT_REF_ADD = 3, + OUT_REF_DEL = 4, + OUT_ATTR_SET = 5, + OUT_ATTR_GET = 6, + OUT_XATTR_SET = 7, + OUT_XATTR_GET = 8, + OUT_INDEX_LOOKUP = 9, + OUT_INDEX_INSERT = 10, + OUT_INDEX_DELETE = 11, + OUT_WRITE = 12, + OUT_XATTR_DEL = 13, + OUT_PUNCH = 14, + OUT_READ = 15, + OUT_NOOP = 16, + OUT_LAST +}; + +enum update_flag { + UPDATE_FL_OST = 0x00000001, /* op from OST (not MDT) */ + UPDATE_FL_SYNC = 0x00000002, /* commit before replying */ + UPDATE_FL_COMMITTED = 0x00000004, /* op committed globally */ + UPDATE_FL_NOLOG = 0x00000008 /* for idempotent updates */ +}; + +struct object_update_param { + __u16 oup_len; /* length of this parameter */ + __u16 oup_padding; + __u32 oup_padding2; + char oup_buf[0]; +}; + +static inline size_t +object_update_param_size(const struct object_update_param *param) +{ + return cfs_size_round(sizeof(*param) + param->oup_len); +} + +/* object update */ +struct object_update { + __u16 ou_type; /* enum update_type */ + __u16 ou_params_count; /* update parameters count */ + __u32 ou_master_index; /* master MDT/OST index */ + __u32 ou_flags; /* enum update_flag */ + __u32 ou_padding1; /* padding 1 */ + __u64 ou_batchid; /* op transno on master */ + struct lu_fid ou_fid; /* object to be updated */ + struct object_update_param ou_params[0]; /* update params */ +}; + +#define UPDATE_REQUEST_MAGIC_V1 0xBDDE0001 +#define UPDATE_REQUEST_MAGIC_V2 0xBDDE0002 +#define UPDATE_REQUEST_MAGIC UPDATE_REQUEST_MAGIC_V2 +/* Hold object_updates sending to the remote OUT in single RPC */ +struct object_update_request { + __u32 ourq_magic; + __u16 ourq_count; /* number of ourq_updates[] */ + __u16 ourq_padding; + struct object_update ourq_updates[0]; +}; + +#define OUT_UPDATE_HEADER_MAGIC 0xBDDF0001 +/* Header for updates request between MDTs */ +struct out_update_header { + __u32 ouh_magic; + __u32 ouh_count; +}; + +struct out_update_buffer { + __u32 oub_size; + __u32 oub_padding; +}; + +void lustre_swab_object_update(struct object_update *ou); +void lustre_swab_object_update_request(struct object_update_request *our); +void lustre_swab_out_update_header(struct out_update_header *ouh); +void lustre_swab_out_update_buffer(struct out_update_buffer *oub); + +static inline size_t +object_update_params_size(const struct object_update *update) +{ + const struct object_update_param *param; + size_t total_size = 0; + unsigned int i; + + param = &update->ou_params[0]; + for (i = 0; i < update->ou_params_count; i++) { + size_t size = object_update_param_size(param); + + param = (struct object_update_param *)((char *)param + size); + total_size += size; + } + + return total_size; +} + +static inline size_t +object_update_size(const struct object_update *update) +{ + return offsetof(struct object_update, ou_params[0]) + + object_update_params_size(update); +} + +static inline struct object_update * +object_update_request_get(const struct object_update_request *our, + unsigned int index, size_t *size) +{ + void *ptr; + unsigned int i; + + if (index >= our->ourq_count) + return NULL; -#define UPDATE_MAX_OPS 10 -#define UPDATE_BUFFER_MAGIC_V1 0xBDDE0001 -#define UPDATE_BUFFER_MAGIC UPDATE_BUFFER_MAGIC_V1 -#define UPDATE_BUF_COUNT 8 -enum object_update_op { - OBJ_CREATE = 1, - OBJ_DESTROY = 2, - OBJ_REF_ADD = 3, - OBJ_REF_DEL = 4, - OBJ_ATTR_SET = 5, - OBJ_ATTR_GET = 6, - OBJ_XATTR_SET = 7, - OBJ_XATTR_GET = 8, - OBJ_INDEX_LOOKUP = 9, - OBJ_INDEX_INSERT = 10, - OBJ_INDEX_DELETE = 11, - OBJ_LAST -}; - -struct update { - __u32 
u_type; - __u32 u_batchid; - struct lu_fid u_fid; - __u32 u_lens[UPDATE_BUF_COUNT]; - __u32 u_bufs[0]; -}; - -struct update_buf { - __u32 ub_magic; - __u32 ub_count; - __u32 ub_bufs[0]; -}; - -#define UPDATE_REPLY_V1 0x00BD0001 -struct update_reply { - __u32 ur_version; - __u32 ur_count; - __u32 ur_lens[0]; -}; - -void lustre_swab_update_buf(struct update_buf *ub); -void lustre_swab_update_reply_buf(struct update_reply *ur); + ptr = (void *)&our->ourq_updates[0]; + for (i = 0; i < index; i++) + ptr += object_update_size(ptr); + + if (size != NULL) + *size = object_update_size(ptr); + + return ptr; +} + + +/* the result of object update */ +struct object_update_result { + __u32 our_rc; + __u16 our_datalen; + __u16 our_padding; + __u32 our_data[0]; +}; + +#define UPDATE_REPLY_MAGIC_V1 0x00BD0001 +#define UPDATE_REPLY_MAGIC_V2 0x00BD0002 +#define UPDATE_REPLY_MAGIC UPDATE_REPLY_MAGIC_V2 +/* Hold object_update_results being replied from the remote OUT. */ +struct object_update_reply { + __u32 ourp_magic; + __u16 ourp_count; + __u16 ourp_padding; + __u16 ourp_lens[0]; +}; + +void lustre_swab_object_update_result(struct object_update_result *our); +void lustre_swab_object_update_reply(struct object_update_reply *our); + +static inline struct object_update_result * +object_update_result_get(const struct object_update_reply *reply, + unsigned int index, size_t *size) +{ + __u16 count = reply->ourp_count; + unsigned int i; + void *ptr; + + if (index >= count) + return NULL; + + ptr = (char *)reply + + cfs_size_round(offsetof(struct object_update_reply, + ourp_lens[count])); + for (i = 0; i < index; i++) { + if (reply->ourp_lens[i] == 0) + return NULL; + + ptr += cfs_size_round(reply->ourp_lens[i]); + } + + if (size != NULL) + *size = reply->ourp_lens[index]; + + return ptr; +} + +/* read update result */ +struct out_read_reply { + __u32 orr_size; + __u32 orr_padding; + __u64 orr_offset; + char orr_data[0]; +}; + +static inline void orr_cpu_to_le(struct out_read_reply *orr_dst, + const struct out_read_reply *orr_src) +{ + orr_dst->orr_size = cpu_to_le32(orr_src->orr_size); + orr_dst->orr_padding = cpu_to_le32(orr_src->orr_padding); + orr_dst->orr_offset = cpu_to_le64(orr_dst->orr_offset); +} + +static inline void orr_le_to_cpu(struct out_read_reply *orr_dst, + const struct out_read_reply *orr_src) +{ + orr_dst->orr_size = le32_to_cpu(orr_src->orr_size); + orr_dst->orr_padding = le32_to_cpu(orr_src->orr_padding); + orr_dst->orr_offset = le64_to_cpu(orr_dst->orr_offset); +} /** layout swap request structure * fid1 and fid2 are in mdt_body @@ -3778,5 +4096,66 @@ struct close_data { void lustre_swab_close_data(struct close_data *data); +struct update_ops; +void lustre_swab_update_ops(struct update_ops *uops, unsigned int op_count); + +/* Update llog format */ +struct update_op { + struct lu_fid uop_fid; + __u16 uop_type; + __u16 uop_param_count; + __u16 uop_params_off[0]; +}; + +struct update_ops { + struct update_op uops_op[0]; +}; + +struct update_params { + struct object_update_param up_params[0]; +}; + +enum update_records_flag { + UPDATE_RECORD_CONTINUE = 1 >> 0, +}; +/* + * This is the update record format used to store the updates in + * disk. All updates of the operation will be stored in ur_ops. + * All of parameters for updates of the operation will be stored + * in ur_params. 
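One detail worth flagging in the hunk above: in orr_cpu_to_le() and orr_le_to_cpu() the orr_offset conversion reads from orr_dst, while orr_size and orr_padding both read from orr_src. This looks like a copy-and-paste slip. A corrected sketch, assuming the intent is to convert the source field like the other two, would be:

static inline void orr_cpu_to_le(struct out_read_reply *orr_dst,
				 const struct out_read_reply *orr_src)
{
	orr_dst->orr_size = cpu_to_le32(orr_src->orr_size);
	orr_dst->orr_padding = cpu_to_le32(orr_src->orr_padding);
	orr_dst->orr_offset = cpu_to_le64(orr_src->orr_offset); /* was orr_dst */
}

static inline void orr_le_to_cpu(struct out_read_reply *orr_dst,
				 const struct out_read_reply *orr_src)
{
	orr_dst->orr_size = le32_to_cpu(orr_src->orr_size);
	orr_dst->orr_padding = le32_to_cpu(orr_src->orr_padding);
	orr_dst->orr_offset = le64_to_cpu(orr_src->orr_offset); /* was orr_dst */
}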
+ To save space in the record, parameters in ur_ops only + remember their offset in ur_params, so as to avoid storing duplicate + parameters in ur_params; this can save a lot of space for + operations like creating a striped directory. + */ +struct update_records { + __u64 ur_master_transno; + __u64 ur_batchid; + __u32 ur_flags; + /* If the operation includes multiple updates, then ur_index + * means the index of the update inside the whole updates. */ + __u32 ur_index; + __u32 ur_update_count; + __u32 ur_param_count; + struct update_ops ur_ops; + /* Note ur_ops has a variable size, so comment out + * the following ur_params, in case someone uses it directly + * update_records->ur_params + * + * struct update_params ur_params; + */ +}; + +struct llog_update_record { + struct llog_rec_hdr lur_hdr; + struct update_records lur_update_rec; + /* Note ur_update_rec has a variable size, so comment out + * the following ur_tail, in case someone uses it directly + * + * struct llog_rec_tail lur_tail; + */ +}; + + #endif /** @} lustreidl */
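Finally, a sketch of how a consumer might walk the variable-sized layout described above: ur_ops holds ur_update_count variable-length update_op entries, and the update_params area begins immediately after the last one, with each op's uop_params_off[] identifying its parameters inside that area. The helper names below are invented for illustration, and any rounding or alignment applied by the real packing code is ignored:

#include <stddef.h>
/* Assumes struct update_op, update_ops, update_params and
 * update_records from this header are in scope. */

/* Size of one variable-length update_op: the fixed part plus its
 * uop_params_off[] array. */
static inline size_t example_update_op_size(const struct update_op *op)
{
	return offsetof(struct update_op, uop_params_off) +
	       op->uop_param_count * sizeof(op->uop_params_off[0]);
}

/* The update_params area starts right after the last of the
 * ur_update_count update_op entries held in ur_ops. */
static inline struct update_params *
example_update_records_params(struct update_records *ur)
{
	char *p = (char *)ur->ur_ops.uops_op;
	unsigned int i;

	for (i = 0; i < ur->ur_update_count; i++) {
		struct update_op *op = (struct update_op *)p;

		p += example_update_op_size(op);
	}
	return (struct update_params *)p;
}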