X-Git-Url: https://git.whamcloud.com/?p=fs%2Flustre-release.git;a=blobdiff_plain;f=lustre%2Finclude%2Flustre%2Flustre_idl.h;h=174fd4226f9fc05466efba4b728731fc5d8e77a9;hp=553d6cc658a57e741e8047bde0dcc5ff06c794f5;hb=9ead7830a599e94abd819c94f41ef4e6fb06d289;hpb=4af3ab1945fd1ac6cc9870d72734c37a000a0999 diff --git a/lustre/include/lustre/lustre_idl.h b/lustre/include/lustre/lustre_idl.h index 553d6cc..174fd42 100644 --- a/lustre/include/lustre/lustre_idl.h +++ b/lustre/include/lustre/lustre_idl.h @@ -27,7 +27,7 @@ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved. * Use is subject to license terms. * - * Copyright (c) 2011, 2012, Intel Corporation. + * Copyright (c) 2011, 2013, Intel Corporation. */ /* * This file is part of Lustre, http://www.lustre.org/ @@ -98,6 +98,8 @@ /* Defn's shared with user-space. */ #include +#include + /* * GENERAL STUFF */ @@ -188,17 +190,67 @@ typedef __u32 obd_count; * Same structure is used in fld module where lsr_index field holds mdt id * of the home mdt. */ - -#define LU_SEQ_RANGE_MDT 0x0 -#define LU_SEQ_RANGE_OST 0x1 - struct lu_seq_range { - __u64 lsr_start; - __u64 lsr_end; - __u32 lsr_index; - __u32 lsr_flags; + __u64 lsr_start; + __u64 lsr_end; + __u32 lsr_index; + __u32 lsr_flags; }; +#define LU_SEQ_RANGE_MDT 0x0 +#define LU_SEQ_RANGE_OST 0x1 +#define LU_SEQ_RANGE_ANY 0x3 + +#define LU_SEQ_RANGE_MASK 0x3 + +static inline unsigned fld_range_type(const struct lu_seq_range *range) +{ + return range->lsr_flags & LU_SEQ_RANGE_MASK; +} + +static inline int fld_range_is_ost(const struct lu_seq_range *range) +{ + return fld_range_type(range) == LU_SEQ_RANGE_OST; +} + +static inline int fld_range_is_mdt(const struct lu_seq_range *range) +{ + return fld_range_type(range) == LU_SEQ_RANGE_MDT; +} + +/** + * This all range is only being used when fld client sends fld query request, + * but it does not know whether the seq is MDT or OST, so it will send req + * with ALL type, which means either seq type gotten from lookup can be + * expected. + */ +static inline unsigned fld_range_is_any(const struct lu_seq_range *range) +{ + return fld_range_type(range) == LU_SEQ_RANGE_ANY; +} + +static inline void fld_range_set_type(struct lu_seq_range *range, + unsigned flags) +{ + LASSERT(!(flags & ~LU_SEQ_RANGE_MASK)); + range->lsr_flags |= flags; +} + +static inline void fld_range_set_mdt(struct lu_seq_range *range) +{ + fld_range_set_type(range, LU_SEQ_RANGE_MDT); +} + +static inline void fld_range_set_ost(struct lu_seq_range *range) +{ + fld_range_set_type(range, LU_SEQ_RANGE_OST); +} + +static inline void fld_range_set_any(struct lu_seq_range *range) +{ + fld_range_set_type(range, LU_SEQ_RANGE_ANY); +} + /** * returns width of given range \a r */ @@ -253,11 +305,11 @@ static inline int range_compare_loc(const struct lu_seq_range *r1, #define DRANGE "[%#16.16"LPF64"x-%#16.16"LPF64"x):%x:%s" -#define PRANGE(range) \ - (range)->lsr_start, \ - (range)->lsr_end, \ - (range)->lsr_index, \ - (range)->lsr_flags == LU_SEQ_RANGE_MDT ? "mdt" : "ost" +#define PRANGE(range) \ + (range)->lsr_start, \ + (range)->lsr_end, \ + (range)->lsr_index, \ + fld_range_is_mdt(range) ? 
"mdt" : "ost" /** \defgroup lu_fid lu_fid @@ -280,40 +332,15 @@ enum lma_compat { */ enum lma_incompat { LMAI_RELEASED = 0x0000001, /* file is released */ + LMAI_AGENT = 0x00000002, /* agent inode */ + LMAI_REMOTE_PARENT = 0x00000004, /* the parent of the object + is on the remote MDT */ }; -#define LMA_INCOMPAT_SUPP 0x0 - -/** - * Following struct for MDT attributes, that will be kept inode's EA. - * Introduced in 2.0 release (please see b15993, for details) - */ -struct lustre_mdt_attrs { - /** - * Bitfield for supported data in this structure. From enum lma_compat. - * lma_self_fid and lma_flags are always available. - */ - __u32 lma_compat; - /** - * Per-file incompat feature list. Lustre version should support all - * flags set in this field. The supported feature mask is available in - * LMA_INCOMPAT_SUPP. - */ - __u32 lma_incompat; - /** FID of this inode */ - struct lu_fid lma_self_fid; - /** mdt/ost type, others */ - __u64 lma_flags; -}; - -/** - * Prior to 2.4, the LMA structure also included SOM attributes which has since - * been moved to a dedicated xattr - */ -#define LMA_OLD_SIZE (sizeof(struct lustre_mdt_attrs) + 4 * sizeof(__u64)) +#define LMA_INCOMPAT_SUPP (LMAI_AGENT | LMAI_REMOTE_PARENT) extern void lustre_lma_swab(struct lustre_mdt_attrs *lma); extern void lustre_lma_init(struct lustre_mdt_attrs *lma, - const struct lu_fid *fid); + const struct lu_fid *fid, __u32 incompat); /** * SOM on-disk attributes stored in a separate xattr. */ @@ -358,6 +385,8 @@ extern void lustre_hsm_swab(struct hsm_attrs *attrs); * fid constants */ enum { + /** LASTID file has zero OID */ + LUSTRE_FID_LASTID_OID = 0UL, /** initial fid id value */ LUSTRE_FID_INIT_OID = 1UL }; @@ -399,29 +428,35 @@ static inline obd_id fid_ver_oid(const struct lu_fid *fid) * http://arch.lustre.org/index.php?title=Interoperability_fids_zfs#NEW.0 */ enum fid_seq { - FID_SEQ_OST_MDT0 = 0, - FID_SEQ_LLOG = 1, - FID_SEQ_ECHO = 2, - FID_SEQ_OST_MDT1 = 3, - FID_SEQ_OST_MAX = 9, /* Max MDT count before OST_on_FID */ - FID_SEQ_RSVD = 11, - FID_SEQ_IGIF = 12, - FID_SEQ_IGIF_MAX = 0x0ffffffffULL, - FID_SEQ_IDIF = 0x100000000ULL, - FID_SEQ_IDIF_MAX = 0x1ffffffffULL, - /* Normal FID sequence starts from this value, i.e. 1<<33 */ - FID_SEQ_START = 0x200000000ULL, + FID_SEQ_OST_MDT0 = 0, + FID_SEQ_LLOG = 1, /* unnamed llogs */ + FID_SEQ_ECHO = 2, + FID_SEQ_OST_MDT1 = 3, + FID_SEQ_OST_MAX = 9, /* Max MDT count before OST_on_FID */ + FID_SEQ_LLOG_NAME = 10, /* named llogs */ + FID_SEQ_RSVD = 11, + FID_SEQ_IGIF = 12, + FID_SEQ_IGIF_MAX = 0x0ffffffffULL, + FID_SEQ_IDIF = 0x100000000ULL, + FID_SEQ_IDIF_MAX = 0x1ffffffffULL, + /* Normal FID sequence starts from this value, i.e. 1<<33 */ + FID_SEQ_START = 0x200000000ULL, /* sequence for local pre-defined FIDs listed in local_oid */ - FID_SEQ_LOCAL_FILE = 0x200000001ULL, - FID_SEQ_DOT_LUSTRE = 0x200000002ULL, + FID_SEQ_LOCAL_FILE = 0x200000001ULL, + FID_SEQ_DOT_LUSTRE = 0x200000002ULL, /* sequence is used for local named objects FIDs generated * by local_object_storage library */ - FID_SEQ_LOCAL_NAME = 0x200000003ULL, - FID_SEQ_SPECIAL = 0x200000004ULL, - FID_SEQ_QUOTA = 0x200000005ULL, - FID_SEQ_QUOTA_GLB = 0x200000006ULL, - FID_SEQ_NORMAL = 0x200000400ULL, - FID_SEQ_LOV_DEFAULT= 0xffffffffffffffffULL + FID_SEQ_LOCAL_NAME = 0x200000003ULL, + /* Because current FLD will only cache the fid sequence, instead + * of oid on the client side, if the FID needs to be exposed to + * clients sides, it needs to make sure all of fids under one + * sequence will be located in one MDT. 
*/ + FID_SEQ_SPECIAL = 0x200000004ULL, + FID_SEQ_QUOTA = 0x200000005ULL, + FID_SEQ_QUOTA_GLB = 0x200000006ULL, + FID_SEQ_ROOT = 0x200000007ULL, /* Located on MDT0 */ + FID_SEQ_NORMAL = 0x200000400ULL, + FID_SEQ_LOV_DEFAULT = 0xffffffffffffffffULL }; #define OBIF_OID_MAX_BITS 32 @@ -470,7 +505,8 @@ static inline int fid_seq_is_llog(obd_seq seq) static inline int fid_is_llog(const struct lu_fid *fid) { - return fid_seq_is_llog(fid_seq(fid)); + /* file with OID == 0 is not llog but contains last oid */ + return fid_seq_is_llog(fid_seq(fid)) && fid_oid(fid) > 0; } static inline int fid_seq_is_rsvd(const __u64 seq) @@ -478,11 +514,44 @@ static inline int fid_seq_is_rsvd(const __u64 seq) return (seq > FID_SEQ_OST_MDT0 && seq <= FID_SEQ_RSVD); }; +static inline int fid_seq_is_special(const __u64 seq) +{ + return seq == FID_SEQ_SPECIAL; +}; + +static inline int fid_seq_is_local_file(const __u64 seq) +{ + return seq == FID_SEQ_LOCAL_FILE || + seq == FID_SEQ_LOCAL_NAME; +}; + +static inline int fid_seq_is_root(const __u64 seq) +{ + return seq == FID_SEQ_ROOT; +} + +static inline int fid_seq_is_dot(const __u64 seq) +{ + return seq == FID_SEQ_DOT_LUSTRE; +} + +static inline int fid_seq_is_default(const __u64 seq) +{ + return seq == FID_SEQ_LOV_DEFAULT; +} + static inline int fid_is_mdt0(const struct lu_fid *fid) { return fid_seq_is_mdt0(fid_seq(fid)); } +static inline void lu_root_fid(struct lu_fid *fid) +{ + fid->f_seq = FID_SEQ_ROOT; + fid->f_oid = 1; + fid->f_ver = 0; +} + /** * Check if a fid is igif or not. * \param fid the fid to be tested. @@ -513,10 +582,10 @@ static inline int fid_is_idif(const struct lu_fid *fid) return fid_seq_is_idif(fid_seq(fid)); } -struct ost_id { - obd_id oi_id; - obd_seq oi_seq; -}; +static inline int fid_is_local_file(const struct lu_fid *fid) +{ + return fid_seq_is_local_file(fid_seq(fid)); +} static inline int fid_seq_is_norm(const __u64 seq) { @@ -547,150 +616,180 @@ static inline __u32 fid_idif_ost_idx(const struct lu_fid *fid) return (fid_seq(fid) >> 16) & 0xffff; } -/* unpack an ostid (id/seq) from a wire/disk structure into an IDIF FID */ -static inline void ostid_idif_unpack(struct ost_id *ostid, - struct lu_fid *fid, __u32 ost_idx) +/* extract OST sequence (group) from a wire ost_id (id/seq) pair */ +static inline obd_seq ostid_seq(const struct ost_id *ostid) { - fid->f_seq = fid_idif_seq(ostid->oi_id, ost_idx); - fid->f_oid = ostid->oi_id; /* truncate to 32 bits by assignment */ - fid->f_ver = ostid->oi_id >> 48; /* in theory, not currently used */ -} + if (fid_seq_is_mdt0(ostid->oi.oi_seq)) + return FID_SEQ_OST_MDT0; -/* unpack an ostid (id/seq) from a wire/disk structure into a non-IDIF FID */ -static inline void ostid_fid_unpack(struct ost_id *ostid, struct lu_fid *fid) -{ - fid->f_seq = ostid->oi_seq; - fid->f_oid = ostid->oi_id; /* truncate to 32 bits by assignment */ - fid->f_ver = ostid->oi_id >> 32; /* in theory, not currently used */ + if (fid_seq_is_default(ostid->oi.oi_seq)) + return FID_SEQ_LOV_DEFAULT; + + if (fid_is_idif(&ostid->oi_fid)) + return FID_SEQ_OST_MDT0; + + return fid_seq(&ostid->oi_fid); } -/* Unpack an OST object id/seq (group) into a FID. This is needed for - * converting all obdo, lmm, lsm, etc. 64-bit id/seq pairs into proper - * FIDs. Note that if an id/seq is already in FID/IDIF format it will - * be passed through unchanged. Only legacy OST objects in "group 0" - * will be mapped into the IDIF namespace so that they can fit into the - * struct lu_fid fields without loss. 
For reference see: - * http://arch.lustre.org/index.php?title=Interoperability_fids_zfs - */ -static inline int fid_ostid_unpack(struct lu_fid *fid, struct ost_id *ostid, - __u32 ost_idx) +/* extract OST objid from a wire ost_id (id/seq) pair */ +static inline obd_id ostid_id(const struct ost_id *ostid) { - if (ost_idx > 0xffff) { - CERROR("bad ost_idx, seq:"LPU64" id:"LPU64" ost_idx:%u\n", - ostid->oi_seq, ostid->oi_id, ost_idx); - return -EBADF; - } + if (fid_seq_is_mdt0(ostid_seq(ostid))) + return ostid->oi.oi_id & IDIF_OID_MASK; - if (fid_seq_is_mdt0(ostid->oi_seq)) { - /* This is a "legacy" (old 1.x/2.early) OST object in "group 0" - * that we map into the IDIF namespace. It allows up to 2^48 - * objects per OST, as this is the object namespace that has - * been in production for years. This can handle create rates - * of 1M objects/s/OST for 9 years, or combinations thereof. */ - if (ostid->oi_id >= IDIF_MAX_OID) { - CERROR("bad MDT0 id, seq:"LPU64" id:"LPU64" ost_idx:%u\n", - ostid->oi_seq, ostid->oi_id, ost_idx); - return -EBADF; - } - ostid_idif_unpack(ostid, fid, ost_idx); - - } else if (fid_seq_is_rsvd(ostid->oi_seq)) { - /* These are legacy OST objects for LLOG/ECHO and CMD testing. - * We only support 2^32 objects in these groups, and cannot - * uniquely identify them in the system (i.e. they are the - * duplicated on all OSTs), but this is not strictly required - * for the old object protocol, which has a separate ost_idx. */ - if (ostid->oi_id >= 0xffffffffULL) { - CERROR("bad RSVD id, seq:"LPU64" id:"LPU64" ost_idx:%u\n", - ostid->oi_seq, ostid->oi_id, ost_idx); - return -EBADF; - } - ostid_fid_unpack(ostid, fid); - - } else if (unlikely(fid_seq_is_igif(ostid->oi_seq))) { - /* This is an MDT inode number, which should never collide with - * proper OST object IDs, and is probably a broken filesystem */ - CERROR("bad IGIF, seq:"LPU64" id:"LPU64" ost_idx:%u\n", - ostid->oi_seq, ostid->oi_id, ost_idx); - return -EBADF; - - } else /* if (fid_seq_is_idif(seq) || fid_seq_is_norm(seq)) */ { - /* This is either an IDIF object, which identifies objects across - * all OSTs, or a regular FID. The IDIF namespace maps legacy - * OST objects into the FID namespace. In both cases, we just - * pass the FID through, no conversion needed. 
*/ - ostid_fid_unpack(ostid, fid); - } + if (fid_is_idif(&ostid->oi_fid)) + return fid_idif_id(fid_seq(&ostid->oi_fid), + fid_oid(&ostid->oi_fid), 0); - return 0; + return fid_oid(&ostid->oi_fid); } -/* pack an IDIF FID into an ostid (id/seq) for the wire/disk */ -static inline void ostid_idif_pack(const struct lu_fid *fid, - struct ost_id *ostid) +static inline void ostid_set_seq(struct ost_id *oi, __u64 seq) { - ostid->oi_seq = FID_SEQ_OST_MDT0; - ostid->oi_id = fid_idif_id(fid->f_seq, fid->f_oid, fid->f_ver); + if (fid_seq_is_mdt0(seq) || fid_seq_is_default(seq)) { + oi->oi.oi_seq = seq; + } else { + oi->oi_fid.f_seq = seq; + /* Note: if f_oid + f_ver is zero, we need init it + * to be 1, otherwise, ostid_seq will treat this + * as old ostid (oi_seq == 0) */ + if (oi->oi_fid.f_oid == 0 && oi->oi_fid.f_ver == 0) + oi->oi_fid.f_oid = LUSTRE_FID_INIT_OID; + } } -/* pack a non-IDIF FID into an ostid (id/seq) for the wire/disk */ -static inline void ostid_fid_pack(const struct lu_fid *fid, - struct ost_id *ostid) +static inline void ostid_set_seq_mdt0(struct ost_id *oi) { - ostid->oi_seq = fid_seq(fid); - ostid->oi_id = fid_ver_oid(fid); + ostid_set_seq(oi, FID_SEQ_OST_MDT0); } -/* pack any OST FID into an ostid (id/seq) for the wire/disk */ -static inline int fid_ostid_pack(const struct lu_fid *fid, - struct ost_id *ostid) +static inline void ostid_set_seq_echo(struct ost_id *oi) { - if (unlikely(fid_seq_is_igif(fid->f_seq))) { - CERROR("bad IGIF, "DFID"\n", PFID(fid)); - return -EBADF; - } - - if (fid_is_idif(fid)) - ostid_idif_pack(fid, ostid); - else - ostid_fid_pack(fid, ostid); - - return 0; + ostid_set_seq(oi, FID_SEQ_ECHO); } -/* extract OST sequence (group) from a wire ost_id (id/seq) pair */ -static inline obd_seq ostid_seq(struct ost_id *ostid) +static inline void ostid_set_seq_llog(struct ost_id *oi) { - if (unlikely(fid_seq_is_igif(ostid->oi_seq))) - CWARN("bad IGIF, oi_seq: "LPU64" oi_id: "LPX64"\n", - ostid->oi_seq, ostid->oi_id); + ostid_set_seq(oi, FID_SEQ_LLOG); +} + +/** + * Note: we need check oi_seq to decide where to set oi_id, + * so oi_seq should always be set ahead of oi_id. + */ +static inline void ostid_set_id(struct ost_id *oi, __u64 oid) +{ + if (fid_seq_is_mdt0(ostid_seq(oi))) { + if (oid >= IDIF_MAX_OID) { + CERROR("Bad "LPU64" to set "DOSTID"\n", + oid, POSTID(oi)); + return; + } + oi->oi.oi_id = oid; + } else { + if (oid > OBIF_MAX_OID) { + CERROR("Bad "LPU64" to set "DOSTID"\n", + oid, POSTID(oi)); + return; + } + oi->oi_fid.f_oid = oid; + } +} + +static inline void ostid_inc_id(struct ost_id *oi) +{ + if (fid_seq_is_mdt0(ostid_seq(oi))) { + if (unlikely(ostid_id(oi) + 1 > IDIF_MAX_OID)) { + CERROR("Bad inc "DOSTID"\n", POSTID(oi)); + return; + } + oi->oi.oi_id++; + } else { + oi->oi_fid.f_oid++; + } +} + +static inline void ostid_dec_id(struct ost_id *oi) +{ + if (fid_seq_is_mdt0(ostid_seq(oi))) + oi->oi.oi_id--; + else + oi->oi_fid.f_oid--; +} - if (unlikely(fid_seq_is_idif(ostid->oi_seq))) - return FID_SEQ_OST_MDT0; +/** + * Unpack an OST object id/seq (group) into a FID. This is needed for + * converting all obdo, lmm, lsm, etc. 64-bit id/seq pairs into proper + * FIDs. Note that if an id/seq is already in FID/IDIF format it will + * be passed through unchanged. Only legacy OST objects in "group 0" + * will be mapped into the IDIF namespace so that they can fit into the + * struct lu_fid fields without loss. 
For reference see: + * http://arch.lustre.org/index.php?title=Interoperability_fids_zfs + */ +static inline int ostid_to_fid(struct lu_fid *fid, struct ost_id *ostid, + __u32 ost_idx) +{ + if (ost_idx > 0xffff) { + CERROR("bad ost_idx, "DOSTID" ost_idx:%u\n", POSTID(ostid), + ost_idx); + return -EBADF; + } + + if (fid_seq_is_mdt0(ostid_seq(ostid))) { + /* This is a "legacy" (old 1.x/2.early) OST object in "group 0" + * that we map into the IDIF namespace. It allows up to 2^48 + * objects per OST, as this is the object namespace that has + * been in production for years. This can handle create rates + * of 1M objects/s/OST for 9 years, or combinations thereof. */ + if (ostid_id(ostid) >= IDIF_MAX_OID) { + CERROR("bad MDT0 id, "DOSTID" ost_idx:%u\n", + POSTID(ostid), ost_idx); + return -EBADF; + } + fid->f_seq = fid_idif_seq(ostid_id(ostid), ost_idx); + /* truncate to 32 bits by assignment */ + fid->f_oid = ostid_id(ostid); + /* in theory, not currently used */ + fid->f_ver = ostid_id(ostid) >> 48; + } else /* if (fid_seq_is_idif(seq) || fid_seq_is_norm(seq)) */ { + /* This is either an IDIF object, which identifies objects across + * all OSTs, or a regular FID. The IDIF namespace maps legacy + * OST objects into the FID namespace. In both cases, we just + * pass the FID through, no conversion needed. */ + if (ostid->oi_fid.f_ver != 0) { + CERROR("bad MDT0 id, "DOSTID" ost_idx:%u\n", + POSTID(ostid), ost_idx); + return -EBADF; + } + *fid = ostid->oi_fid; + } - return ostid->oi_seq; + return 0; } -/* extract OST objid from a wire ost_id (id/seq) pair */ -static inline obd_id ostid_id(struct ost_id *ostid) +/* pack any OST FID into an ostid (id/seq) for the wire/disk */ +static inline int fid_to_ostid(const struct lu_fid *fid, struct ost_id *ostid) { - if (ostid->oi_seq == FID_SEQ_OST_MDT0) - return ostid->oi_id & IDIF_OID_MASK; - - if (fid_seq_is_rsvd(ostid->oi_seq)) - return ostid->oi_id & OBIF_OID_MASK; + if (unlikely(fid_seq_is_igif(fid->f_seq))) { + CERROR("bad IGIF, "DFID"\n", PFID(fid)); + return -EBADF; + } - if (fid_seq_is_idif(ostid->oi_seq)) - return fid_idif_id(ostid->oi_seq, ostid->oi_id, 0); + if (fid_is_idif(fid)) { + ostid_set_seq_mdt0(ostid); + ostid_set_id(ostid, fid_idif_id(fid_seq(fid), fid_oid(fid), + fid_ver(fid))); + } else { + ostid->oi_fid = *fid; + } - return ostid->oi_id; + return 0; } /* Check whether the fid is for LAST_ID */ static inline int fid_is_last_id(const struct lu_fid *fid) { - return (fid_is_idif(fid) || fid_is_norm(fid) || fid_is_echo(fid)) && - fid_oid(fid) == 0; + return (fid_oid(fid) == 0); } /** @@ -703,6 +802,8 @@ static inline ino_t lu_igif_ino(const struct lu_fid *fid) return fid_seq(fid); } +extern void lustre_swab_ost_id(struct ost_id *oid); + /** * Get inode generation from a igif. * \param fid a igif to get inode generation from. 
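
A minimal usage sketch (not part of this change; it assumes the surrounding Lustre kernel environment and the function name is hypothetical) showing how the new ostid accessors above are meant to be called for a legacy "group 0" OST object:

/* Illustrative only -- not part of this patch. */
static inline int example_mdt0_objid_to_fid(__u64 objid, __u32 ost_idx,
					    struct lu_fid *fid)
{
	struct ost_id oi;

	/* The sequence must be set before the id, because ostid_set_id()
	 * looks at the stored seq to decide which union member to fill. */
	ostid_set_seq_mdt0(&oi);
	ostid_set_id(&oi, objid);	/* objid must be below IDIF_MAX_OID */

	/* Legacy "group 0" objects are mapped into the IDIF namespace:
	 * the OST index is encoded in f_seq and the low 32 bits of the
	 * object id end up in f_oid. */
	return ostid_to_fid(fid, &oi, ost_idx);
}
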
@@ -775,8 +876,8 @@ static inline int fid_is_sane(const struct lu_fid *fid) { return fid != NULL && ((fid_seq(fid) >= FID_SEQ_START && fid_ver(fid) == 0) || - fid_is_igif(fid) || fid_is_idif(fid) || - fid_seq_is_rsvd(fid_seq(fid))); + fid_is_igif(fid) || fid_is_idif(fid) || + fid_seq_is_rsvd(fid_seq(fid))); } static inline int fid_is_zero(const struct lu_fid *fid) @@ -812,6 +913,28 @@ static inline int lu_fid_cmp(const struct lu_fid *f0, __diff_normalize(fid_ver(f0), fid_ver(f1)); } +static inline void ostid_cpu_to_le(const struct ost_id *src_oi, + struct ost_id *dst_oi) +{ + if (fid_seq_is_mdt0(ostid_seq(src_oi))) { + dst_oi->oi.oi_id = cpu_to_le64(src_oi->oi.oi_id); + dst_oi->oi.oi_seq = cpu_to_le64(src_oi->oi.oi_seq); + } else { + fid_cpu_to_le(&dst_oi->oi_fid, &src_oi->oi_fid); + } +} + +static inline void ostid_le_to_cpu(const struct ost_id *src_oi, + struct ost_id *dst_oi) +{ + if (fid_seq_is_mdt0(ostid_seq(src_oi))) { + dst_oi->oi.oi_id = le64_to_cpu(src_oi->oi.oi_id); + dst_oi->oi.oi_seq = le64_to_cpu(src_oi->oi.oi_seq); + } else { + fid_le_to_cpu(&dst_oi->oi_fid, &src_oi->oi_fid); + } +} + /** @} lu_fid */ /** \defgroup lu_dir lu_dir @@ -1191,8 +1314,9 @@ extern void lustre_swab_ptlrpc_body(struct ptlrpc_body *pb); * Please DO NOT add flag values here before first ensuring that this same * flag value is not in use on some other branch. Please clear any such * changes with senior engineers before starting to use a new flag. Then, - * submit a small patch against EVERY branch that ONLY adds the new flag - * and updates obd_connect_names[] for lprocfs_rd_connect_flags(), so it + * submit a small patch against EVERY branch that ONLY adds the new flag, + * updates obd_connect_names[] for lprocfs_rd_connect_flags(), adds the + * flag to check_obd_connect_data(), and updates wiretests accordingly, so it * can be approved and landed easily to reserve the flag for future use. */ /* The MNE_SWAB flag is overloading the MDS_MDS bit only for the MGS @@ -1266,11 +1390,11 @@ extern void lustre_swab_ptlrpc_body(struct ptlrpc_body *pb); * If we eventually have separate connect data for different types, which we * almost certainly will, then perhaps we stick a union in here. 
*/ struct obd_connect_data_v1 { - __u64 ocd_connect_flags; /* OBD_CONNECT_* per above */ - __u32 ocd_version; /* lustre release version number */ - __u32 ocd_grant; /* initial cache grant amount (bytes) */ - __u32 ocd_index; /* LOV index to connect to */ - __u32 ocd_brw_size; /* Maximum BRW size in bytes */ + __u64 ocd_connect_flags; /* OBD_CONNECT_* per above */ + __u32 ocd_version; /* lustre release version number */ + __u32 ocd_grant; /* initial cache grant amount (bytes) */ + __u32 ocd_index; /* LOV index to connect to */ + __u32 ocd_brw_size; /* Maximum BRW size in bytes, must be 2^n */ __u64 ocd_ibits_known; /* inode bits this client understands */ __u8 ocd_blocksize; /* log2 of the backend filesystem blocksize */ __u8 ocd_inodespace; /* log2 of the per-inode space consumption */ @@ -1285,11 +1409,11 @@ struct obd_connect_data_v1 { }; struct obd_connect_data { - __u64 ocd_connect_flags; /* OBD_CONNECT_* per above */ - __u32 ocd_version; /* lustre release version number */ - __u32 ocd_grant; /* initial cache grant amount (bytes) */ - __u32 ocd_index; /* LOV index to connect to */ - __u32 ocd_brw_size; /* Maximum BRW size in bytes */ + __u64 ocd_connect_flags; /* OBD_CONNECT_* per above */ + __u32 ocd_version; /* lustre release version number */ + __u32 ocd_grant; /* initial cache grant amount (bytes) */ + __u32 ocd_index; /* LOV index to connect to */ + __u32 ocd_brw_size; /* Maximum BRW size in bytes */ __u64 ocd_ibits_known; /* inode bits this client understands */ __u8 ocd_blocksize; /* log2 of the backend filesystem blocksize */ __u8 ocd_inodespace; /* log2 of the per-inode space consumption */ @@ -1426,32 +1550,96 @@ enum obdo_flags { #define LOV_MAGIC_V1_DEF 0x0CD10BD0 #define LOV_MAGIC_V3_DEF 0x0CD30BD0 -#define LOV_PATTERN_RAID0 0x001 /* stripes are used round-robin */ -#define LOV_PATTERN_RAID1 0x002 /* stripes are mirrors of each other */ -#define LOV_PATTERN_FIRST 0x100 /* first stripe is not in round-robin */ -#define LOV_PATTERN_CMOBD 0x200 +#define LOV_PATTERN_RAID0 0x001 /* stripes are used round-robin */ +#define LOV_PATTERN_RAID1 0x002 /* stripes are mirrors of each other */ +#define LOV_PATTERN_FIRST 0x100 /* first stripe is not in round-robin */ +#define LOV_PATTERN_CMOBD 0x200 + +#define LOV_PATTERN_F_MASK 0xffff0000 +#define LOV_PATTERN_F_RELEASED 0x80000000 /* HSM released file */ + +#define lov_pattern(pattern) (pattern & ~LOV_PATTERN_F_MASK) +#define lov_pattern_flags(pattern) (pattern & LOV_PATTERN_F_MASK) #define lov_ost_data lov_ost_data_v1 struct lov_ost_data_v1 { /* per-stripe data structure (little-endian)*/ - __u64 l_object_id; /* OST object ID */ - __u64 l_object_seq; /* OST object seq number */ - __u32 l_ost_gen; /* generation of this l_ost_idx */ - __u32 l_ost_idx; /* OST index in LOV (lov_tgt_desc->tgts) */ + struct ost_id l_ost_oi; /* OST object ID */ + __u32 l_ost_gen; /* generation of this l_ost_idx */ + __u32 l_ost_idx; /* OST index in LOV (lov_tgt_desc->tgts) */ }; #define lov_mds_md lov_mds_md_v1 struct lov_mds_md_v1 { /* LOV EA mds/wire data (little-endian) */ - __u32 lmm_magic; /* magic number = LOV_MAGIC_V1 */ - __u32 lmm_pattern; /* LOV_PATTERN_RAID0, LOV_PATTERN_RAID1 */ - __u64 lmm_object_id; /* LOV object ID */ - __u64 lmm_object_seq; /* LOV object seq number */ - __u32 lmm_stripe_size; /* size of stripe in bytes */ - /* lmm_stripe_count used to be __u32 */ - __u16 lmm_stripe_count; /* num stripes in use for this object */ - __u16 lmm_layout_gen; /* layout generation number */ - struct lov_ost_data_v1 lmm_objects[0]; /* per-stripe data 
*/ + __u32 lmm_magic; /* magic number = LOV_MAGIC_V1 */ + __u32 lmm_pattern; /* LOV_PATTERN_RAID0, LOV_PATTERN_RAID1 */ + struct ost_id lmm_oi; /* LOV object ID */ + __u32 lmm_stripe_size; /* size of stripe in bytes */ + /* lmm_stripe_count used to be __u32 */ + __u16 lmm_stripe_count; /* num stripes in use for this object */ + __u16 lmm_layout_gen; /* layout generation number */ + struct lov_ost_data_v1 lmm_objects[0]; /* per-stripe data */ }; +/** + * Sigh, because pre-2.4 uses + * struct lov_mds_md_v1 { + * ........ + * __u64 lmm_object_id; + * __u64 lmm_object_seq; + * ...... + * } + * to identify the LOV(MDT) object, and lmm_object_seq will + * be normal_fid, which make it hard to combine these conversion + * to ostid_to FID. so we will do lmm_oi/fid conversion separately + * + * We can tell the lmm_oi by this way, + * 1.8: lmm_object_id = {inode}, lmm_object_gr = 0 + * 2.1: lmm_object_id = {oid < 128k}, lmm_object_seq = FID_SEQ_NORMAL + * 2.4: lmm_oi.f_seq = FID_SEQ_NORMAL, lmm_oi.f_oid = {oid < 128k}, + * lmm_oi.f_ver = 0 + * + * But currently lmm_oi/lsm_oi does not have any "real" usages, + * except for printing some information, and the user can always + * get the real FID from LMA, besides this multiple case check might + * make swab more complicate. So we will keep using id/seq for lmm_oi. + */ + +static inline void fid_to_lmm_oi(const struct lu_fid *fid, + struct ost_id *oi) +{ + oi->oi.oi_id = fid_oid(fid); + oi->oi.oi_seq = fid_seq(fid); +} + +static inline void lmm_oi_set_seq(struct ost_id *oi, __u64 seq) +{ + oi->oi.oi_seq = seq; +} + +static inline __u64 lmm_oi_id(struct ost_id *oi) +{ + return oi->oi.oi_id; +} + +static inline __u64 lmm_oi_seq(struct ost_id *oi) +{ + return oi->oi.oi_seq; +} + +static inline void lmm_oi_le_to_cpu(struct ost_id *dst_oi, + struct ost_id *src_oi) +{ + dst_oi->oi.oi_id = le64_to_cpu(src_oi->oi.oi_id); + dst_oi->oi.oi_seq = le64_to_cpu(src_oi->oi.oi_seq); +} + +static inline void lmm_oi_cpu_to_le(struct ost_id *dst_oi, + struct ost_id *src_oi) +{ + dst_oi->oi.oi_id = cpu_to_le64(src_oi->oi.oi_id); + dst_oi->oi.oi_seq = cpu_to_le64(src_oi->oi.oi_seq); +} + /* extern void lustre_swab_lov_mds_md(struct lov_mds_md *llm); */ #define MAX_MD_SIZE (sizeof(struct lov_mds_md) + 4 * sizeof(struct lov_ost_data)) @@ -1474,20 +1662,28 @@ struct lov_mds_md_v1 { /* LOV EA mds/wire data (little-endian) */ #define XATTR_NAME_HSM "trusted.hsm" #define XATTR_NAME_LFSCK_NAMESPACE "trusted.lfsck_namespace" - struct lov_mds_md_v3 { /* LOV EA mds/wire data (little-endian) */ - __u32 lmm_magic; /* magic number = LOV_MAGIC_V3 */ - __u32 lmm_pattern; /* LOV_PATTERN_RAID0, LOV_PATTERN_RAID1 */ - __u64 lmm_object_id; /* LOV object ID */ - __u64 lmm_object_seq; /* LOV object seq number */ - __u32 lmm_stripe_size; /* size of stripe in bytes */ - /* lmm_stripe_count used to be __u32 */ - __u16 lmm_stripe_count; /* num stripes in use for this object */ - __u16 lmm_layout_gen; /* layout generation number */ - char lmm_pool_name[LOV_MAXPOOLNAME]; /* must be 32bit aligned */ - struct lov_ost_data_v1 lmm_objects[0]; /* per-stripe data */ + __u32 lmm_magic; /* magic number = LOV_MAGIC_V3 */ + __u32 lmm_pattern; /* LOV_PATTERN_RAID0, LOV_PATTERN_RAID1 */ + struct ost_id lmm_oi; /* LOV object ID */ + __u32 lmm_stripe_size; /* size of stripe in bytes */ + /* lmm_stripe_count used to be __u32 */ + __u16 lmm_stripe_count; /* num stripes in use for this object */ + __u16 lmm_layout_gen; /* layout generation number */ + char lmm_pool_name[LOV_MAXPOOLNAME]; /* must be 32bit aligned */ + 
struct lov_ost_data_v1 lmm_objects[0]; /* per-stripe data */ }; +static inline __u32 lov_mds_md_size(__u16 stripes, __u32 lmm_magic) +{ + if (lmm_magic == LOV_MAGIC_V3) + return sizeof(struct lov_mds_md_v3) + + stripes * sizeof(struct lov_ost_data_v1); + else + return sizeof(struct lov_mds_md_v1) + + stripes * sizeof(struct lov_ost_data_v1); +} + #define OBD_MD_FLID (0x00000001ULL) /* object ID */ #define OBD_MD_FLATIME (0x00000002ULL) /* access time */ @@ -1602,13 +1798,18 @@ extern void lustre_swab_obd_statfs (struct obd_statfs *os); #define OST_MAX_PRECREATE 20000 struct obd_ioobj { - struct ost_id ioo_oid; - __u32 ioo_type; - __u32 ioo_bufcnt; + struct ost_id ioo_oid; /* object ID, if multi-obj BRW */ + __u32 ioo_max_brw; /* low 16 bits were o_mode before 2.4, + * now (PTLRPC_BULK_OPS_COUNT - 1) in + * high 16 bits in 2.4 and later */ + __u32 ioo_bufcnt; /* number of niobufs for this object */ }; -#define ioo_id ioo_oid.oi_id -#define ioo_seq ioo_oid.oi_seq +#define IOOBJ_MAX_BRW_BITS 16 +#define IOOBJ_TYPE_MASK ((1U << IOOBJ_MAX_BRW_BITS) - 1) +#define ioobj_max_brw_get(ioo) (((ioo)->ioo_max_brw >> IOOBJ_MAX_BRW_BITS) + 1) +#define ioobj_max_brw_set(ioo, num) \ +do { (ioo)->ioo_max_brw = ((num) - 1) << IOOBJ_MAX_BRW_BITS; } while (0) extern void lustre_swab_obd_ioobj (struct obd_ioobj *ioo); @@ -2363,29 +2564,29 @@ struct mdt_rec_setxattr { * rr_padding_x fields, then update lustre_swab_mdt_rec_reint() also. */ struct mdt_rec_reint { - __u32 rr_opcode; - __u32 rr_cap; - __u32 rr_fsuid; - __u32 rr_fsuid_h; - __u32 rr_fsgid; - __u32 rr_fsgid_h; - __u32 rr_suppgid1; - __u32 rr_suppgid1_h; - __u32 rr_suppgid2; - __u32 rr_suppgid2_h; - struct lu_fid rr_fid1; - struct lu_fid rr_fid2; - obd_time rr_mtime; - obd_time rr_atime; - obd_time rr_ctime; - __u64 rr_size; - __u64 rr_blocks; - __u32 rr_bias; - __u32 rr_mode; - __u32 rr_flags; - __u32 rr_padding_2; /* also fix lustre_swab_mdt_rec_reint */ - __u32 rr_padding_3; /* also fix lustre_swab_mdt_rec_reint */ - __u32 rr_padding_4; /* also fix lustre_swab_mdt_rec_reint */ + __u32 rr_opcode; + __u32 rr_cap; + __u32 rr_fsuid; + __u32 rr_fsuid_h; + __u32 rr_fsgid; + __u32 rr_fsgid_h; + __u32 rr_suppgid1; + __u32 rr_suppgid1_h; + __u32 rr_suppgid2; + __u32 rr_suppgid2_h; + struct lu_fid rr_fid1; + struct lu_fid rr_fid2; + obd_time rr_mtime; + obd_time rr_atime; + obd_time rr_ctime; + __u64 rr_size; + __u64 rr_blocks; + __u32 rr_bias; + __u32 rr_mode; + __u32 rr_flags; + __u32 rr_flags_h; + __u32 rr_umask; + __u32 rr_padding_4; /* also fix lustre_swab_mdt_rec_reint */ }; extern void lustre_swab_mdt_rec_reint(struct mdt_rec_reint *rr); @@ -2751,8 +2952,7 @@ typedef enum { /** Identifier for a single log object */ struct llog_logid { - __u64 lgl_oid; - __u64 lgl_oseq; + struct ost_id lgl_oi; __u32 lgl_ogen; } __attribute__((packed)); @@ -2848,8 +3048,7 @@ struct llog_unlink64_rec { struct llog_setattr64_rec { struct llog_rec_hdr lsr_hdr; - obd_id lsr_oid; - obd_seq lsr_oseq; + struct ost_id lsr_oi; __u32 lsr_uid; __u32 lsr_uid_h; __u32 lsr_gid; @@ -2998,7 +3197,7 @@ struct llogd_conn_body { /* Note: 64-bit types are 64-bit aligned in structure */ struct obdo { obd_valid o_valid; /* hot fields in this obdo */ - struct ost_id o_oi; + struct ost_id o_oi; obd_id o_parent_seq; obd_size o_size; /* o_size-o_blocks == ost_lvb */ obd_time o_mtime; @@ -3036,21 +3235,31 @@ struct obdo { __u64 o_padding_6; }; -#define o_id o_oi.oi_id -#define o_seq o_oi.oi_seq #define o_dirty o_blocks #define o_undirty o_mode #define o_dropped o_misc #define o_cksum o_nlink 
#define o_grant_used o_data_version -static inline void lustre_set_wire_obdo(struct obdo *wobdo, struct obdo *lobdo) +static inline void lustre_set_wire_obdo(struct obd_connect_data *ocd, + struct obdo *wobdo, struct obdo *lobdo) { memcpy(wobdo, lobdo, sizeof(*lobdo)); - wobdo->o_flags &= ~OBD_FL_LOCAL_MASK; + wobdo->o_flags &= ~OBD_FL_LOCAL_MASK; + if (ocd == NULL) + return; + + if (unlikely(!(ocd->ocd_connect_flags & OBD_CONNECT_FID)) && + fid_seq_is_echo(ostid_seq(&lobdo->o_oi))) { + /* Currently OBD_FL_OSTID will only be used when 2.4 echo + * client communicate with pre-2.4 server */ + wobdo->o_oi.oi.oi_id = fid_oid(&lobdo->o_oi.oi_fid); + wobdo->o_oi.oi.oi_seq = fid_seq(&lobdo->o_oi.oi_fid); + } } -static inline void lustre_get_wire_obdo(struct obdo *lobdo, struct obdo *wobdo) +static inline void lustre_get_wire_obdo(struct obd_connect_data *ocd, + struct obdo *lobdo, struct obdo *wobdo) { obd_flag local_flags = 0; @@ -3059,12 +3268,22 @@ static inline void lustre_get_wire_obdo(struct obdo *lobdo, struct obdo *wobdo) LASSERT(!(wobdo->o_flags & OBD_FL_LOCAL_MASK)); - memcpy(lobdo, wobdo, sizeof(*lobdo)); - if (local_flags != 0) { - lobdo->o_valid |= OBD_MD_FLFLAGS; - lobdo->o_flags &= ~OBD_FL_LOCAL_MASK; - lobdo->o_flags |= local_flags; - } + memcpy(lobdo, wobdo, sizeof(*lobdo)); + if (local_flags != 0) { + lobdo->o_valid |= OBD_MD_FLFLAGS; + lobdo->o_flags &= ~OBD_FL_LOCAL_MASK; + lobdo->o_flags |= local_flags; + } + if (ocd == NULL) + return; + + if (unlikely(!(ocd->ocd_connect_flags & OBD_CONNECT_FID)) && + fid_seq_is_echo(wobdo->o_oi.oi.oi_seq)) { + /* see above */ + lobdo->o_oi.oi_fid.f_seq = wobdo->o_oi.oi.oi_seq; + lobdo->o_oi.oi_fid.f_oid = wobdo->o_oi.oi.oi_id; + lobdo->o_oi.oi_fid.f_ver = 0; + } } extern void lustre_swab_obdo (struct obdo *o); @@ -3096,6 +3315,7 @@ extern void lustre_swab_llogd_body (struct llogd_body *d); extern void lustre_swab_llog_hdr (struct llog_log_hdr *h); extern void lustre_swab_llogd_conn_body (struct llogd_conn_body *d); extern void lustre_swab_llog_rec(struct llog_rec_hdr *rec); +extern void lustre_swab_llog_id(struct llog_logid *lid); struct lustre_cfg; extern void lustre_swab_lustre_cfg(struct lustre_cfg *lcfg); @@ -3423,7 +3643,7 @@ enum object_update_op { struct update { __u32 u_type; - __u32 u_padding; + __u32 u_batchid; struct lu_fid u_fid; __u32 u_lens[UPDATE_BUF_COUNT]; __u32 u_bufs[0];
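
A usage sketch for the lu_seq_range type helpers introduced near the top of this change (not part of the patch; the function name is hypothetical and the Lustre kernel environment is assumed). lsr_flags now carries a range type that should be set through fld_range_set_*(), which ORs flags in, so the field should start from zero, and queried through fld_range_is_*():

/* Illustrative only -- not part of this patch. */
static inline void example_init_mdt_range(struct lu_seq_range *range,
					  __u64 start, __u64 end,
					  __u32 mdt_index)
{
	range->lsr_start = start;
	range->lsr_end   = end;
	range->lsr_index = mdt_index;
	range->lsr_flags = 0;		/* fld_range_set_type() ORs flags in */
	fld_range_set_mdt(range);

	LASSERT(fld_range_is_mdt(range));
	/* An FLD client that does not know whether a sequence belongs to
	 * an MDT or an OST would call fld_range_set_any() instead and
	 * accept either type in the lookup reply. */
}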
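
With the new LOV pattern flag bits, the high 16 bits of lmm_pattern are flags (currently only LOV_PATTERN_F_RELEASED for HSM), so callers should mask with lov_pattern()/lov_pattern_flags() rather than comparing lmm_pattern directly. A sketch (not part of the patch, hypothetical helper name, lmm_pattern read as little-endian per the struct comment):

/* Illustrative only -- not part of this patch. */
static inline int example_lmm_is_released(const struct lov_mds_md_v1 *lmm)
{
	__u32 pattern = le32_to_cpu(lmm->lmm_pattern);

	/* lov_pattern(pattern) yields the base layout (e.g.
	 * LOV_PATTERN_RAID0); the flag bits are tested separately. */
	return (lov_pattern_flags(pattern) & LOV_PATTERN_F_RELEASED) != 0;
}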
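
The obd_ioobj change replaces ioo_type with ioo_max_brw, where the number of bulk transfers per RPC is stored as (count - 1) in the high 16 bits, so that a zero-filled field from an older peer still decodes as one bulk transfer. A sketch of the accessors (not part of the patch, hypothetical caller):

/* Illustrative only -- not part of this patch. */
static inline void example_fill_ioobj(struct obd_ioobj *ioo,
				      const struct ost_id *oi,
				      __u32 niobufs, __u32 bulk_count)
{
	ioo->ioo_oid    = *oi;
	ioo->ioo_bufcnt = niobufs;

	/* Stored as (bulk_count - 1) << 16; ioobj_max_brw_get() undoes
	 * this, so a zero-filled field reads back as 1. */
	ioobj_max_brw_set(ioo, bulk_count);
	LASSERT(ioobj_max_brw_get(ioo) == bulk_count);
}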
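
Finally, lustre_set_wire_obdo()/lustre_get_wire_obdo() now take the peer's obd_connect_data so that a 2.4 echo client can fall back to the pre-2.4 oi_id/oi_seq layout when the peer does not advertise OBD_CONNECT_FID. A sketch of the two directions (not part of the patch, hypothetical wrappers; note that the wire/local argument order differs between the two helpers, and ocd may be NULL when no connect data is available):

/* Illustrative only -- not part of this patch. */
static inline void example_obdo_to_wire(struct obd_connect_data *ocd,
					struct obdo *wobdo,
					struct obdo *lobdo)
{
	/* Local -> wire: masks OBD_FL_LOCAL_MASK flags and, if the peer
	 * lacks OBD_CONNECT_FID and o_oi is in the ECHO sequence, converts
	 * the FID form back to the old oi_id/oi_seq layout. */
	lustre_set_wire_obdo(ocd, wobdo, lobdo);
}

static inline void example_obdo_from_wire(struct obd_connect_data *ocd,
					  struct obdo *lobdo,
					  struct obdo *wobdo)
{
	/* Wire -> local: keeps any local-only flags already set in *lobdo
	 * across the copy from the wire and applies the reverse
	 * echo-client conversion when OBD_CONNECT_FID is not set. */
	lustre_get_wire_obdo(ocd, lobdo, wobdo);
}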