X-Git-Url: https://git.whamcloud.com/?p=fs%2Flustre-release.git;a=blobdiff_plain;f=lustre%2Finclude%2Flustre%2Flustre_idl.h;h=adf30951af00eee5ca87910b936729f08a64d5f3;hp=64168082e0d157e39a1fa42510924a18e8839d49;hb=27815a0611a2e315a9a7696a20c2f257d48aeb7e;hpb=d00aa67e61944a59accfb43d277ad4dd8d85f16d diff --git a/lustre/include/lustre/lustre_idl.h b/lustre/include/lustre/lustre_idl.h index 6416808..adf3095 100644 --- a/lustre/include/lustre/lustre_idl.h +++ b/lustre/include/lustre/lustre_idl.h @@ -27,7 +27,7 @@ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved. * Use is subject to license terms. * - * Copyright (c) 2011, 2012, Whamcloud, Inc. + * Copyright (c) 2011, 2013, Intel Corporation. */ /* * This file is part of Lustre, http://www.lustre.org/ @@ -91,13 +91,15 @@ #ifndef _LUSTRE_IDL_H_ #define _LUSTRE_IDL_H_ -#if !defined(LASSERT) && !defined(LPU64) -#include /* for LASSERT, LPUX64, etc */ +#if !defined(LPU64) +#include /* for LPUX64, etc */ #endif /* Defn's shared with user-space. */ #include +#include + /* * GENERAL STUFF */ @@ -129,8 +131,7 @@ //#define PTLBD_BULK_PORTAL 21 #define MDS_SETATTR_PORTAL 22 #define MDS_READPAGE_PORTAL 23 -#define MDS_MDS_PORTAL 24 - +#define OUT_PORTAL 24 #define MGC_REPLY_PORTAL 25 #define MGS_REQUEST_PORTAL 26 #define MGS_REPLY_PORTAL 27 @@ -188,17 +189,66 @@ typedef __u32 obd_count; * Same structure is used in fld module where lsr_index field holds mdt id * of the home mdt. */ - -#define LU_SEQ_RANGE_MDT 0x0 -#define LU_SEQ_RANGE_OST 0x1 - struct lu_seq_range { - __u64 lsr_start; - __u64 lsr_end; - __u32 lsr_index; - __u32 lsr_flags; + __u64 lsr_start; + __u64 lsr_end; + __u32 lsr_index; + __u32 lsr_flags; }; +#define LU_SEQ_RANGE_MDT 0x0 +#define LU_SEQ_RANGE_OST 0x1 +#define LU_SEQ_RANGE_ANY 0x3 + +#define LU_SEQ_RANGE_MASK 0x3 + +static inline unsigned fld_range_type(const struct lu_seq_range *range) +{ + return range->lsr_flags & LU_SEQ_RANGE_MASK; +} + +static inline int fld_range_is_ost(const struct lu_seq_range *range) +{ + return fld_range_type(range) == LU_SEQ_RANGE_OST; +} + +static inline int fld_range_is_mdt(const struct lu_seq_range *range) +{ + return fld_range_type(range) == LU_SEQ_RANGE_MDT; +} + +/** + * This all range is only being used when fld client sends fld query request, + * but it does not know whether the seq is MDT or OST, so it will send req + * with ALL type, which means either seq type gotten from lookup can be + * expected. 
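 *
 * A minimal caller-side sketch (hypothetical flow; all helpers are the
 * ones defined in this header): initialize the range, mark it ANY
 * before the query, then test the type the server filled in:
 *
 *	struct lu_seq_range range;
 *
 *	range_init(&range);
 *	fld_range_set_any(&range);
 *	(send the FLD query; the server sets the real type)
 *	if (fld_range_is_mdt(&range))
 *		(the sequence is served by an MDT)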
+ */ +static inline unsigned fld_range_is_any(const struct lu_seq_range *range) +{ + return fld_range_type(range) == LU_SEQ_RANGE_ANY; +} + +static inline void fld_range_set_type(struct lu_seq_range *range, + unsigned flags) +{ + range->lsr_flags |= flags; +} + +static inline void fld_range_set_mdt(struct lu_seq_range *range) +{ + fld_range_set_type(range, LU_SEQ_RANGE_MDT); +} + +static inline void fld_range_set_ost(struct lu_seq_range *range) +{ + fld_range_set_type(range, LU_SEQ_RANGE_OST); +} + +static inline void fld_range_set_any(struct lu_seq_range *range) +{ + fld_range_set_type(range, LU_SEQ_RANGE_ANY); +} + /** * returns width of given range \a r */ @@ -214,7 +264,7 @@ static inline __u64 range_space(const struct lu_seq_range *range) static inline void range_init(struct lu_seq_range *range) { - range->lsr_start = range->lsr_end = range->lsr_index = 0; + memset(range, 0, sizeof(*range)); } /** @@ -253,11 +303,11 @@ static inline int range_compare_loc(const struct lu_seq_range *r1, #define DRANGE "[%#16.16"LPF64"x-%#16.16"LPF64"x):%x:%s" -#define PRANGE(range) \ - (range)->lsr_start, \ - (range)->lsr_end, \ - (range)->lsr_index, \ - (range)->lsr_flags == LU_SEQ_RANGE_MDT ? "mdt" : "ost" +#define PRANGE(range) \ + (range)->lsr_start, \ + (range)->lsr_end, \ + (range)->lsr_index, \ + fld_range_is_mdt(range) ? "mdt" : "ost" /** \defgroup lu_fid lu_fid @@ -265,106 +315,80 @@ static inline int range_compare_loc(const struct lu_seq_range *r1, /** * Flags for lustre_mdt_attrs::lma_compat and lustre_mdt_attrs::lma_incompat. + * Deprecated since HSM and SOM attributes are now stored in separate on-disk + * xattr. */ enum lma_compat { - LMAC_HSM = 0x00000001, - LMAC_SOM = 0x00000002, + LMAC_HSM = 0x00000001, + LMAC_SOM = 0x00000002, + LMAC_NOT_IN_OI = 0x00000004, /* the object does NOT need OI mapping */ + LMAC_FID_ON_OST = 0x00000008, /* For OST-object, its OI mapping is + * under /O//d. */ }; /** * Masks for all features that should be supported by a Lustre version to * access a specific file. * This information is stored in lustre_mdt_attrs::lma_incompat. - * - * NOTE: No incompat feature should be added before bug #17670 is landed. */ -#define LMA_INCOMPAT_SUPP 0x0 - +enum lma_incompat { + LMAI_RELEASED = 0x00000001, /* file is released */ + LMAI_AGENT = 0x00000002, /* agent inode */ + LMAI_REMOTE_PARENT = 0x00000004, /* the parent of the object + is on the remote MDT */ +}; +#define LMA_INCOMPAT_SUPP (LMAI_AGENT | LMAI_REMOTE_PARENT) + +extern void lustre_lma_swab(struct lustre_mdt_attrs *lma); +extern void lustre_lma_init(struct lustre_mdt_attrs *lma, + const struct lu_fid *fid, + __u32 compat, __u32 incompat); /** - * Following struct for MDT attributes, that will be kept inode's EA. - * Introduced in 2.0 release (please see b15993, for details) + * SOM on-disk attributes stored in a separate xattr. */ -struct lustre_mdt_attrs { - /** - * Bitfield for supported data in this structure. From enum lma_compat. - * lma_self_fid and lma_flags are always available. - */ - __u32 lma_compat; - /** - * Per-file incompat feature list. Lustre version should support all - * flags set in this field. The supported feature mask is available in - * LMA_INCOMPAT_SUPP. 
- */ - __u32 lma_incompat; - /** FID of this inode */ - struct lu_fid lma_self_fid; - /** mdt/ost type, others */ - __u64 lma_flags; - /* IO Epoch SOM attributes belongs to */ - __u64 lma_ioepoch; - /** total file size in objects */ - __u64 lma_som_size; - /** total fs blocks in objects */ - __u64 lma_som_blocks; - /** mds mount id the size is valid for */ - __u64 lma_som_mountid; -}; +struct som_attrs { + /** Bitfield for supported data in this structure. For future use. */ + __u32 som_compat; -/** - * Fill \a lma with its first content. - * Only fid is stored. - */ -static inline void lustre_lma_init(struct lustre_mdt_attrs *lma, - const struct lu_fid *fid) -{ - lma->lma_compat = 0; - lma->lma_incompat = 0; - memcpy(&lma->lma_self_fid, fid, sizeof(*fid)); - lma->lma_flags = 0; - lma->lma_ioepoch = 0; - lma->lma_som_size = 0; - lma->lma_som_blocks = 0; - lma->lma_som_mountid = 0; + /** Incompat feature list. The supported feature mask is availabe in + * SOM_INCOMPAT_SUPP */ + __u32 som_incompat; - /* If a field is added in struct lustre_mdt_attrs, zero it explicitly - * and change the test below. */ - LASSERT(sizeof(*lma) == - (offsetof(struct lustre_mdt_attrs, lma_som_mountid) + - sizeof(lma->lma_som_mountid))); + /** IO Epoch SOM attributes belongs to */ + __u64 som_ioepoch; + /** total file size in objects */ + __u64 som_size; + /** total fs blocks in objects */ + __u64 som_blocks; + /** mds mount id the size is valid for */ + __u64 som_mountid; }; +extern void lustre_som_swab(struct som_attrs *attrs); -extern void lustre_swab_lu_fid(struct lu_fid *fid); +#define SOM_INCOMPAT_SUPP 0x0 /** - * Swab, if needed, lustre_mdt_attr struct to on-disk format. - * Otherwise, do not touch it. + * HSM on-disk attributes stored in a separate xattr. */ -static inline void lustre_lma_swab(struct lustre_mdt_attrs *lma) -{ - /* Use LUSTRE_MSG_MAGIC to detect local endianess. */ - if (LUSTRE_MSG_MAGIC != cpu_to_le32(LUSTRE_MSG_MAGIC)) { - __swab32s(&lma->lma_compat); - __swab32s(&lma->lma_incompat); - lustre_swab_lu_fid(&lma->lma_self_fid); - __swab64s(&lma->lma_flags); - __swab64s(&lma->lma_ioepoch); - __swab64s(&lma->lma_som_size); - __swab64s(&lma->lma_som_blocks); - __swab64s(&lma->lma_som_mountid); - } -}; - -/* This is the maximum number of MDTs allowed in CMD testing until such - * a time that FID-on-OST is implemented. This is due to the limitations - * of packing non-0-MDT numbers into the FID SEQ namespace. Once FID-on-OST - * is implemented this limit will be virtually unlimited. */ -#define MAX_MDT_COUNT 8 +struct hsm_attrs { + /** Bitfield for supported data in this structure. For future use. 
*/ + __u32 hsm_compat; + /** HSM flags, see hsm_flags enum below */ + __u32 hsm_flags; + /** backend archive id associated with the file */ + __u64 hsm_arch_id; + /** version associated with the last archiving, if any */ + __u64 hsm_arch_ver; +}; +extern void lustre_hsm_swab(struct hsm_attrs *attrs); /** * fid constants */ enum { + /** LASTID file has zero OID */ + LUSTRE_FID_LASTID_OID = 0UL, /** initial fid id value */ LUSTRE_FID_INIT_OID = 1UL }; @@ -406,30 +430,35 @@ static inline obd_id fid_ver_oid(const struct lu_fid *fid) * http://arch.lustre.org/index.php?title=Interoperability_fids_zfs#NEW.0 */ enum fid_seq { - FID_SEQ_OST_MDT0 = 0, - FID_SEQ_LLOG = 1, - FID_SEQ_ECHO = 2, - FID_SEQ_OST_MDT1 = 3, - FID_SEQ_OST_MAX = 9, /* Max MDT count before OST_on_FID */ - FID_SEQ_RSVD = 11, - FID_SEQ_IGIF = 12, - FID_SEQ_IGIF_MAX = 0x0ffffffffULL, - FID_SEQ_IDIF = 0x100000000ULL, - FID_SEQ_IDIF_MAX = 0x1ffffffffULL, - /* Normal FID sequence starts from this value, i.e. 1<<33 */ - FID_SEQ_START = 0x200000000ULL, + FID_SEQ_OST_MDT0 = 0, + FID_SEQ_LLOG = 1, /* unnamed llogs */ + FID_SEQ_ECHO = 2, + FID_SEQ_OST_MDT1 = 3, + FID_SEQ_OST_MAX = 9, /* Max MDT count before OST_on_FID */ + FID_SEQ_LLOG_NAME = 10, /* named llogs */ + FID_SEQ_RSVD = 11, + FID_SEQ_IGIF = 12, + FID_SEQ_IGIF_MAX = 0x0ffffffffULL, + FID_SEQ_IDIF = 0x100000000ULL, + FID_SEQ_IDIF_MAX = 0x1ffffffffULL, + /* Normal FID sequence starts from this value, i.e. 1<<33 */ + FID_SEQ_START = 0x200000000ULL, /* sequence for local pre-defined FIDs listed in local_oid */ - FID_SEQ_LOCAL_FILE = 0x200000001ULL, - FID_SEQ_DOT_LUSTRE = 0x200000002ULL, - /* XXX 0x200000003ULL is reserved for FID_SEQ_LLOG_OBJ */ + FID_SEQ_LOCAL_FILE = 0x200000001ULL, + FID_SEQ_DOT_LUSTRE = 0x200000002ULL, /* sequence is used for local named objects FIDs generated * by local_object_storage library */ - FID_SEQ_SPECIAL = 0x200000004ULL, - FID_SEQ_QUOTA = 0x200000005ULL, - FID_SEQ_QUOTA_GLB = 0x200000006ULL, - FID_SEQ_LOCAL_NAME = 0x200000007ULL, - FID_SEQ_NORMAL = 0x200000400ULL, - FID_SEQ_LOV_DEFAULT= 0xffffffffffffffffULL + FID_SEQ_LOCAL_NAME = 0x200000003ULL, + /* Because current FLD will only cache the fid sequence, instead + * of oid on the client side, if the FID needs to be exposed to + * clients sides, it needs to make sure all of fids under one + * sequence will be located in one MDT. 
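 *
 * E.g. FID_SEQ_ROOT below resolves to MDT0 once in the FLD, after
 * which every FID under that sequence is known to live on MDT0
 * without any further per-object lookup.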
*/ + FID_SEQ_SPECIAL = 0x200000004ULL, + FID_SEQ_QUOTA = 0x200000005ULL, + FID_SEQ_QUOTA_GLB = 0x200000006ULL, + FID_SEQ_ROOT = 0x200000007ULL, /* Located on MDT0 */ + FID_SEQ_NORMAL = 0x200000400ULL, + FID_SEQ_LOV_DEFAULT = 0xffffffffffffffffULL }; #define OBIF_OID_MAX_BITS 32 @@ -456,27 +485,75 @@ static inline int fid_seq_is_mdt0(obd_seq seq) return (seq == FID_SEQ_OST_MDT0); } -static inline int fid_seq_is_cmd(const __u64 seq) +static inline int fid_seq_is_mdt(const __u64 seq) { - return (seq >= FID_SEQ_OST_MDT1 && seq <= FID_SEQ_OST_MAX); + return seq == FID_SEQ_OST_MDT0 || seq >= FID_SEQ_NORMAL; }; -static inline int fid_seq_is_mdt(const __u64 seq) +static inline int fid_seq_is_echo(obd_seq seq) { - return seq == FID_SEQ_OST_MDT0 || - (seq >= FID_SEQ_OST_MDT1 && seq <= FID_SEQ_OST_MAX); -}; + return (seq == FID_SEQ_ECHO); +} + +static inline int fid_is_echo(const struct lu_fid *fid) +{ + return fid_seq_is_echo(fid_seq(fid)); +} + +static inline int fid_seq_is_llog(obd_seq seq) +{ + return (seq == FID_SEQ_LLOG); +} + +static inline int fid_is_llog(const struct lu_fid *fid) +{ + /* file with OID == 0 is not llog but contains last oid */ + return fid_seq_is_llog(fid_seq(fid)) && fid_oid(fid) > 0; +} static inline int fid_seq_is_rsvd(const __u64 seq) { return (seq > FID_SEQ_OST_MDT0 && seq <= FID_SEQ_RSVD); }; +static inline int fid_seq_is_special(const __u64 seq) +{ + return seq == FID_SEQ_SPECIAL; +}; + +static inline int fid_seq_is_local_file(const __u64 seq) +{ + return seq == FID_SEQ_LOCAL_FILE || + seq == FID_SEQ_LOCAL_NAME; +}; + +static inline int fid_seq_is_root(const __u64 seq) +{ + return seq == FID_SEQ_ROOT; +} + +static inline int fid_seq_is_dot(const __u64 seq) +{ + return seq == FID_SEQ_DOT_LUSTRE; +} + +static inline int fid_seq_is_default(const __u64 seq) +{ + return seq == FID_SEQ_LOV_DEFAULT; +} + static inline int fid_is_mdt0(const struct lu_fid *fid) { return fid_seq_is_mdt0(fid_seq(fid)); } +static inline void lu_root_fid(struct lu_fid *fid) +{ + fid->f_seq = FID_SEQ_ROOT; + fid->f_oid = 1; + fid->f_ver = 0; +} + /** * Check if a fid is igif or not. * \param fid the fid to be tested. 
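 *
 * IGIF layout, as built by lu_igif_build() further below (sketch):
 * f_seq carries the inode number (FID_SEQ_IGIF <= ino <= 0xffffffff),
 * f_oid the generation, and f_ver is always 0:
 *
 *	fid->f_seq = ino;
 *	fid->f_oid = gen;
 *	fid->f_ver = 0;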
@@ -507,10 +584,10 @@ static inline int fid_is_idif(const struct lu_fid *fid) return fid_seq_is_idif(fid_seq(fid)); } -struct ost_id { - obd_id oi_id; - obd_seq oi_seq; -}; +static inline int fid_is_local_file(const struct lu_fid *fid) +{ + return fid_seq_is_local_file(fid_seq(fid)); +} static inline int fid_seq_is_norm(const __u64 seq) { @@ -534,143 +611,191 @@ static inline obd_id fid_idif_id(obd_seq seq, __u32 oid, __u32 ver) return ((__u64)ver << 48) | ((seq & 0xffff) << 32) | oid; } -/* unpack an ostid (id/seq) from a wire/disk structure into an IDIF FID */ -static inline void ostid_idif_unpack(struct ost_id *ostid, - struct lu_fid *fid, __u32 ost_idx) +static inline __u32 idif_ost_idx(obd_seq seq) { - fid->f_seq = fid_idif_seq(ostid->oi_id, ost_idx); - fid->f_oid = ostid->oi_id; /* truncate to 32 bits by assignment */ - fid->f_ver = ostid->oi_id >> 48; /* in theory, not currently used */ + return (seq >> 16) & 0xffff; } -/* unpack an ostid (id/seq) from a wire/disk structure into a non-IDIF FID */ -static inline void ostid_fid_unpack(struct ost_id *ostid, struct lu_fid *fid) +/* extract ost index from IDIF FID */ +static inline __u32 fid_idif_ost_idx(const struct lu_fid *fid) { - fid->f_seq = ostid->oi_seq; - fid->f_oid = ostid->oi_id; /* truncate to 32 bits by assignment */ - fid->f_ver = ostid->oi_id >> 32; /* in theory, not currently used */ + return idif_ost_idx(fid_seq(fid)); } -/* Unpack an OST object id/seq (group) into a FID. This is needed for - * converting all obdo, lmm, lsm, etc. 64-bit id/seq pairs into proper - * FIDs. Note that if an id/seq is already in FID/IDIF format it will - * be passed through unchanged. Only legacy OST objects in "group 0" - * will be mapped into the IDIF namespace so that they can fit into the - * struct lu_fid fields without loss. For reference see: - * http://arch.lustre.org/index.php?title=Interoperability_fids_zfs - */ -static inline int fid_ostid_unpack(struct lu_fid *fid, struct ost_id *ostid, - __u32 ost_idx) +/* extract OST sequence (group) from a wire ost_id (id/seq) pair */ +static inline obd_seq ostid_seq(const struct ost_id *ostid) { - if (ost_idx > 0xffff) { - CERROR("bad ost_idx, seq:"LPU64" id:"LPU64" ost_idx:%u\n", - ostid->oi_seq, ostid->oi_id, ost_idx); - return -EBADF; - } + if (fid_seq_is_mdt0(ostid->oi.oi_seq)) + return FID_SEQ_OST_MDT0; - if (fid_seq_is_mdt0(ostid->oi_seq)) { - /* This is a "legacy" (old 1.x/2.early) OST object in "group 0" - * that we map into the IDIF namespace. It allows up to 2^48 - * objects per OST, as this is the object namespace that has - * been in production for years. This can handle create rates - * of 1M objects/s/OST for 9 years, or combinations thereof. */ - if (ostid->oi_id >= IDIF_MAX_OID) { - CERROR("bad MDT0 id, seq:"LPU64" id:"LPU64" ost_idx:%u\n", - ostid->oi_seq, ostid->oi_id, ost_idx); - return -EBADF; - } - ostid_idif_unpack(ostid, fid, ost_idx); - - } else if (fid_seq_is_rsvd(ostid->oi_seq)) { - /* These are legacy OST objects for LLOG/ECHO and CMD testing. - * We only support 2^32 objects in these groups, and cannot - * uniquely identify them in the system (i.e. they are the - * duplicated on all OSTs), but this is not strictly required - * for the old object protocol, which has a separate ost_idx. 
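 *
 * Worked example for the IDIF helpers above (sketch): legacy object
 * id 0x1234 on ost_idx 5 packs as f_seq = 0x100000000 | (5 << 16) =
 * 0x100050000, f_oid = 0x1234, f_ver = 0, so that
 * idif_ost_idx(0x100050000) recovers index 5.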
*/ - if (ostid->oi_id >= 0xffffffffULL) { - CERROR("bad RSVD id, seq:"LPU64" id:"LPU64" ost_idx:%u\n", - ostid->oi_seq, ostid->oi_id, ost_idx); - return -EBADF; - } - ostid_fid_unpack(ostid, fid); - - } else if (unlikely(fid_seq_is_igif(ostid->oi_seq))) { - /* This is an MDT inode number, which should never collide with - * proper OST object IDs, and is probably a broken filesystem */ - CERROR("bad IGIF, seq:"LPU64" id:"LPU64" ost_idx:%u\n", - ostid->oi_seq, ostid->oi_id, ost_idx); - return -EBADF; - - } else /* if (fid_seq_is_idif(seq) || fid_seq_is_norm(seq)) */ { - /* This is either an IDIF object, which identifies objects across - * all OSTs, or a regular FID. The IDIF namespace maps legacy - * OST objects into the FID namespace. In both cases, we just - * pass the FID through, no conversion needed. */ - ostid_fid_unpack(ostid, fid); - } + if (fid_seq_is_default(ostid->oi.oi_seq)) + return FID_SEQ_LOV_DEFAULT; - return 0; + if (fid_is_idif(&ostid->oi_fid)) + return FID_SEQ_OST_MDT0; + + return fid_seq(&ostid->oi_fid); } -/* pack an IDIF FID into an ostid (id/seq) for the wire/disk */ -static inline void ostid_idif_pack(const struct lu_fid *fid, - struct ost_id *ostid) +/* extract OST objid from a wire ost_id (id/seq) pair */ +static inline obd_id ostid_id(const struct ost_id *ostid) { - ostid->oi_seq = FID_SEQ_OST_MDT0; - ostid->oi_id = fid_idif_id(fid->f_seq, fid->f_oid, fid->f_ver); + if (fid_seq_is_mdt0(ostid_seq(ostid))) + return ostid->oi.oi_id & IDIF_OID_MASK; + + if (fid_is_idif(&ostid->oi_fid)) + return fid_idif_id(fid_seq(&ostid->oi_fid), + fid_oid(&ostid->oi_fid), 0); + + return fid_oid(&ostid->oi_fid); } -/* pack a non-IDIF FID into an ostid (id/seq) for the wire/disk */ -static inline void ostid_fid_pack(const struct lu_fid *fid, - struct ost_id *ostid) -{ - ostid->oi_seq = fid_seq(fid); - ostid->oi_id = fid_ver_oid(fid); +static inline void ostid_set_seq(struct ost_id *oi, __u64 seq) +{ + if (fid_seq_is_mdt0(seq) || fid_seq_is_default(seq)) { + oi->oi.oi_seq = seq; + } else { + oi->oi_fid.f_seq = seq; + /* Note: if f_oid + f_ver is zero, we need init it + * to be 1, otherwise, ostid_seq will treat this + * as old ostid (oi_seq == 0) */ + if (oi->oi_fid.f_oid == 0 && oi->oi_fid.f_ver == 0) + oi->oi_fid.f_oid = LUSTRE_FID_INIT_OID; + } } -/* pack any OST FID into an ostid (id/seq) for the wire/disk */ -static inline int fid_ostid_pack(const struct lu_fid *fid, - struct ost_id *ostid) +static inline void ostid_set_seq_mdt0(struct ost_id *oi) { - if (unlikely(fid_seq_is_igif(fid->f_seq))) { - CERROR("bad IGIF, "DFID"\n", PFID(fid)); - return -EBADF; - } - - if (fid_is_idif(fid)) - ostid_idif_pack(fid, ostid); - else - ostid_fid_pack(fid, ostid); + ostid_set_seq(oi, FID_SEQ_OST_MDT0); +} - return 0; +static inline void ostid_set_seq_echo(struct ost_id *oi) +{ + ostid_set_seq(oi, FID_SEQ_ECHO); } -/* extract OST sequence (group) from a wire ost_id (id/seq) pair */ -static inline obd_seq ostid_seq(struct ost_id *ostid) +static inline void ostid_set_seq_llog(struct ost_id *oi) { - if (unlikely(fid_seq_is_igif(ostid->oi_seq))) - CWARN("bad IGIF, oi_seq: "LPU64" oi_id: "LPX64"\n", - ostid->oi_seq, ostid->oi_id); + ostid_set_seq(oi, FID_SEQ_LLOG); +} - if (unlikely(fid_seq_is_idif(ostid->oi_seq))) - return FID_SEQ_OST_MDT0; +/** + * Note: we need check oi_seq to decide where to set oi_id, + * so oi_seq should always be set ahead of oi_id. 
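 *
 * Minimal sketch of the required order (hypothetical caller):
 *
 *	struct ost_id oi;
 *
 *	memset(&oi, 0, sizeof(oi));
 *	ostid_set_seq_mdt0(&oi);	(or ostid_set_seq(&oi, seq))
 *	ostid_set_id(&oi, oid);		(now routed to oi_id or f_oid)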
+ */ +static inline void ostid_set_id(struct ost_id *oi, __u64 oid) +{ + if (fid_seq_is_mdt0(ostid_seq(oi))) { + if (oid >= IDIF_MAX_OID) { + CERROR("Bad "LPU64" to set "DOSTID"\n", + oid, POSTID(oi)); + return; + } + oi->oi.oi_id = oid; + } else { + if (oid > OBIF_MAX_OID) { + CERROR("Bad "LPU64" to set "DOSTID"\n", + oid, POSTID(oi)); + return; + } + oi->oi_fid.f_oid = oid; + } +} - return ostid->oi_seq; +static inline void ostid_inc_id(struct ost_id *oi) +{ + if (fid_seq_is_mdt0(ostid_seq(oi))) { + if (unlikely(ostid_id(oi) + 1 > IDIF_MAX_OID)) { + CERROR("Bad inc "DOSTID"\n", POSTID(oi)); + return; + } + oi->oi.oi_id++; + } else { + oi->oi_fid.f_oid++; + } } -/* extract OST objid from a wire ost_id (id/seq) pair */ -static inline obd_id ostid_id(struct ost_id *ostid) +static inline void ostid_dec_id(struct ost_id *oi) { - if (ostid->oi_seq == FID_SEQ_OST_MDT0) - return ostid->oi_id & IDIF_OID_MASK; + if (fid_seq_is_mdt0(ostid_seq(oi))) + oi->oi.oi_id--; + else + oi->oi_fid.f_oid--; +} + +/** + * Unpack an OST object id/seq (group) into a FID. This is needed for + * converting all obdo, lmm, lsm, etc. 64-bit id/seq pairs into proper + * FIDs. Note that if an id/seq is already in FID/IDIF format it will + * be passed through unchanged. Only legacy OST objects in "group 0" + * will be mapped into the IDIF namespace so that they can fit into the + * struct lu_fid fields without loss. For reference see: + * http://arch.lustre.org/index.php?title=Interoperability_fids_zfs + */ +static inline int ostid_to_fid(struct lu_fid *fid, struct ost_id *ostid, + __u32 ost_idx) +{ + if (ost_idx > 0xffff) { + CERROR("bad ost_idx, "DOSTID" ost_idx:%u\n", POSTID(ostid), + ost_idx); + return -EBADF; + } + + if (fid_seq_is_mdt0(ostid_seq(ostid))) { + /* This is a "legacy" (old 1.x/2.early) OST object in "group 0" + * that we map into the IDIF namespace. It allows up to 2^48 + * objects per OST, as this is the object namespace that has + * been in production for years. This can handle create rates + * of 1M objects/s/OST for 9 years, or combinations thereof. */ + if (ostid_id(ostid) >= IDIF_MAX_OID) { + CERROR("bad MDT0 id, "DOSTID" ost_idx:%u\n", + POSTID(ostid), ost_idx); + return -EBADF; + } + fid->f_seq = fid_idif_seq(ostid_id(ostid), ost_idx); + /* truncate to 32 bits by assignment */ + fid->f_oid = ostid_id(ostid); + /* in theory, not currently used */ + fid->f_ver = ostid_id(ostid) >> 48; + } else /* if (fid_seq_is_idif(seq) || fid_seq_is_norm(seq)) */ { + /* This is either an IDIF object, which identifies objects across + * all OSTs, or a regular FID. The IDIF namespace maps legacy + * OST objects into the FID namespace. In both cases, we just + * pass the FID through, no conversion needed. 
*/ + if (ostid->oi_fid.f_ver != 0) { + CERROR("bad MDT0 id, "DOSTID" ost_idx:%u\n", + POSTID(ostid), ost_idx); + return -EBADF; + } + *fid = ostid->oi_fid; + } - if (fid_seq_is_rsvd(ostid->oi_seq)) - return ostid->oi_id & OBIF_OID_MASK; + return 0; +} - if (fid_seq_is_idif(ostid->oi_seq)) - return fid_idif_id(ostid->oi_seq, ostid->oi_id, 0); +/* pack any OST FID into an ostid (id/seq) for the wire/disk */ +static inline int fid_to_ostid(const struct lu_fid *fid, struct ost_id *ostid) +{ + if (unlikely(fid_seq_is_igif(fid->f_seq))) { + CERROR("bad IGIF, "DFID"\n", PFID(fid)); + return -EBADF; + } + + if (fid_is_idif(fid)) { + ostid_set_seq_mdt0(ostid); + ostid_set_id(ostid, fid_idif_id(fid_seq(fid), fid_oid(fid), + fid_ver(fid))); + } else { + ostid->oi_fid = *fid; + } + + return 0; +} - return ostid->oi_id; +/* Check whether the fid is for LAST_ID */ +static inline int fid_is_last_id(const struct lu_fid *fid) +{ + return (fid_oid(fid) == 0); } /** @@ -683,20 +808,7 @@ static inline ino_t lu_igif_ino(const struct lu_fid *fid) return fid_seq(fid); } -/** - * Build igif from the inode number/generation. - */ -#define LU_IGIF_BUILD(fid, ino, gen) \ -do { \ - fid->f_seq = ino; \ - fid->f_oid = gen; \ - fid->f_ver = 0; \ -} while(0) -static inline void lu_igif_build(struct lu_fid *fid, __u32 ino, __u32 gen) -{ - LU_IGIF_BUILD(fid, ino, gen); - LASSERT(fid_is_igif(fid)); -} +extern void lustre_swab_ost_id(struct ost_id *oid); /** * Get inode generation from a igif. @@ -708,65 +820,54 @@ static inline __u32 lu_igif_gen(const struct lu_fid *fid) return fid_oid(fid); } +/** + * Build igif from the inode number/generation. + */ +static inline void lu_igif_build(struct lu_fid *fid, __u32 ino, __u32 gen) +{ + fid->f_seq = ino; + fid->f_oid = gen; + fid->f_ver = 0; +} + /* * Fids are transmitted across network (in the sender byte-ordering), * and stored on disk in big-endian order. 
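 *
 * Conversion sketch (caller side; the transport/storage step itself
 * is elided):
 *
 *	struct lu_fid le_fid;
 *
 *	fid_cpu_to_le(&le_fid, fid);	(little-endian copy of *fid)
 *	fid_le_to_cpu(fid, &le_fid);	(back to CPU byte order)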
*/ static inline void fid_cpu_to_le(struct lu_fid *dst, const struct lu_fid *src) { - /* check that all fields are converted */ - CLASSERT(sizeof *src == - sizeof fid_seq(src) + - sizeof fid_oid(src) + sizeof fid_ver(src)); - LASSERTF(fid_is_igif(src) || fid_ver(src) == 0, DFID"\n", PFID(src)); - dst->f_seq = cpu_to_le64(fid_seq(src)); - dst->f_oid = cpu_to_le32(fid_oid(src)); - dst->f_ver = cpu_to_le32(fid_ver(src)); + dst->f_seq = cpu_to_le64(fid_seq(src)); + dst->f_oid = cpu_to_le32(fid_oid(src)); + dst->f_ver = cpu_to_le32(fid_ver(src)); } static inline void fid_le_to_cpu(struct lu_fid *dst, const struct lu_fid *src) { - /* check that all fields are converted */ - CLASSERT(sizeof *src == - sizeof fid_seq(src) + - sizeof fid_oid(src) + sizeof fid_ver(src)); - dst->f_seq = le64_to_cpu(fid_seq(src)); - dst->f_oid = le32_to_cpu(fid_oid(src)); - dst->f_ver = le32_to_cpu(fid_ver(src)); - LASSERTF(fid_is_igif(dst) || fid_ver(dst) == 0, DFID"\n", PFID(dst)); + dst->f_seq = le64_to_cpu(fid_seq(src)); + dst->f_oid = le32_to_cpu(fid_oid(src)); + dst->f_ver = le32_to_cpu(fid_ver(src)); } static inline void fid_cpu_to_be(struct lu_fid *dst, const struct lu_fid *src) { - /* check that all fields are converted */ - CLASSERT(sizeof *src == - sizeof fid_seq(src) + - sizeof fid_oid(src) + sizeof fid_ver(src)); - LASSERTF(fid_is_igif(src) || fid_ver(src) == 0, DFID"\n", PFID(src)); - dst->f_seq = cpu_to_be64(fid_seq(src)); - dst->f_oid = cpu_to_be32(fid_oid(src)); - dst->f_ver = cpu_to_be32(fid_ver(src)); + dst->f_seq = cpu_to_be64(fid_seq(src)); + dst->f_oid = cpu_to_be32(fid_oid(src)); + dst->f_ver = cpu_to_be32(fid_ver(src)); } static inline void fid_be_to_cpu(struct lu_fid *dst, const struct lu_fid *src) { - /* check that all fields are converted */ - CLASSERT(sizeof *src == - sizeof fid_seq(src) + - sizeof fid_oid(src) + sizeof fid_ver(src)); - dst->f_seq = be64_to_cpu(fid_seq(src)); - dst->f_oid = be32_to_cpu(fid_oid(src)); - dst->f_ver = be32_to_cpu(fid_ver(src)); - LASSERTF(fid_is_igif(dst) || fid_ver(dst) == 0, DFID"\n", PFID(dst)); + dst->f_seq = be64_to_cpu(fid_seq(src)); + dst->f_oid = be32_to_cpu(fid_oid(src)); + dst->f_ver = be32_to_cpu(fid_ver(src)); } static inline int fid_is_sane(const struct lu_fid *fid) { - return - fid != NULL && - ((fid_seq(fid) >= FID_SEQ_START && fid_oid(fid) != 0 - && fid_ver(fid) == 0) || - fid_is_igif(fid) || fid_seq_is_rsvd(fid_seq(fid))); + return fid != NULL && + ((fid_seq(fid) >= FID_SEQ_START && fid_ver(fid) == 0) || + fid_is_igif(fid) || fid_is_idif(fid) || + fid_seq_is_rsvd(fid_seq(fid))); } static inline int fid_is_zero(const struct lu_fid *fid) @@ -777,17 +878,9 @@ static inline int fid_is_zero(const struct lu_fid *fid) extern void lustre_swab_lu_fid(struct lu_fid *fid); extern void lustre_swab_lu_seq_range(struct lu_seq_range *range); -static inline int lu_fid_eq(const struct lu_fid *f0, - const struct lu_fid *f1) +static inline int lu_fid_eq(const struct lu_fid *f0, const struct lu_fid *f1) { - /* Check that there is no alignment padding. 
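 *
 * (the memcmp() below is only correct because struct lu_fid is one
 * __u64 followed by two __u32s with no padding; adding a field would
 * silently break this comparison)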
*/ - CLASSERT(sizeof *f0 == - sizeof f0->f_seq + sizeof f0->f_oid + sizeof f0->f_ver); - LASSERTF((fid_is_igif(f0) || fid_is_idif(f0)) || - fid_ver(f0) == 0, DFID, PFID(f0)); - LASSERTF((fid_is_igif(f1) || fid_is_idif(f1)) || - fid_ver(f1) == 0, DFID, PFID(f1)); - return memcmp(f0, f1, sizeof *f0) == 0; + return memcmp(f0, f1, sizeof *f0) == 0; } #define __diff_normalize(val0, val1) \ @@ -807,6 +900,28 @@ static inline int lu_fid_cmp(const struct lu_fid *f0, __diff_normalize(fid_ver(f0), fid_ver(f1)); } +static inline void ostid_cpu_to_le(const struct ost_id *src_oi, + struct ost_id *dst_oi) +{ + if (fid_seq_is_mdt0(ostid_seq(src_oi))) { + dst_oi->oi.oi_id = cpu_to_le64(src_oi->oi.oi_id); + dst_oi->oi.oi_seq = cpu_to_le64(src_oi->oi.oi_seq); + } else { + fid_cpu_to_le(&dst_oi->oi_fid, &src_oi->oi_fid); + } +} + +static inline void ostid_le_to_cpu(const struct ost_id *src_oi, + struct ost_id *dst_oi) +{ + if (fid_seq_is_mdt0(ostid_seq(src_oi))) { + dst_oi->oi.oi_id = le64_to_cpu(src_oi->oi.oi_id); + dst_oi->oi.oi_seq = le64_to_cpu(src_oi->oi.oi_seq); + } else { + fid_le_to_cpu(&dst_oi->oi_fid, &src_oi->oi_fid); + } +} + /** @} lu_fid */ /** \defgroup lu_dir lu_dir @@ -819,11 +934,27 @@ static inline int lu_fid_cmp(const struct lu_fid *f0, * enumeration. */ enum lu_dirent_attrs { - LUDA_FID = 0x0001, - LUDA_TYPE = 0x0002, - LUDA_64BITHASH = 0x0004, + LUDA_FID = 0x0001, + LUDA_TYPE = 0x0002, + LUDA_64BITHASH = 0x0004, + + /* The following attrs are used for MDT interanl only, + * not visible to client */ + + /* Verify the dirent consistency */ + LUDA_VERIFY = 0x8000, + /* Only check but not repair the dirent inconsistency */ + LUDA_VERIFY_DRYRUN = 0x4000, + /* The dirent has been repaired, or to be repaired (dryrun). */ + LUDA_REPAIR = 0x2000, + /* The system is upgraded, has beed or to be repaired (dryrun). */ + LUDA_UPGRADE = 0x1000, + /* Ignore this record, go to next directly. */ + LUDA_IGNORE = 0x0800, }; +#define LU_DIRENT_ATTRS_MASK 0xf800 + /** * Layout of readdir pages, as transmitted on wire. */ @@ -943,16 +1074,16 @@ static inline int lu_dirent_size(struct lu_dirent *ent) * MDS_READPAGE page size * * This is the directory page size packed in MDS_READPAGE RPC. - * It's different than CFS_PAGE_SIZE because the client needs to + * It's different than PAGE_CACHE_SIZE because the client needs to * access the struct lu_dirpage header packed at the beginning of * the "page" and without this there isn't any way to know find the - * lu_dirpage header is if client and server CFS_PAGE_SIZE differ. + * lu_dirpage header is if client and server PAGE_CACHE_SIZE differ. */ #define LU_PAGE_SHIFT 12 #define LU_PAGE_SIZE (1UL << LU_PAGE_SHIFT) #define LU_PAGE_MASK (~(LU_PAGE_SIZE - 1)) -#define LU_PAGE_COUNT (1 << (CFS_PAGE_SHIFT - LU_PAGE_SHIFT)) +#define LU_PAGE_COUNT (1 << (PAGE_CACHE_SHIFT - LU_PAGE_SHIFT)) /** @} lu_dir */ @@ -1130,7 +1261,7 @@ extern void lustre_swab_ptlrpc_body(struct ptlrpc_body *pb); #define OBD_CONNECT_RMT_CLIENT 0x10000ULL /*Remote client */ #define OBD_CONNECT_RMT_CLIENT_FORCE 0x20000ULL /*Remote client by force */ #define OBD_CONNECT_BRW_SIZE 0x40000ULL /*Max bytes per rpc */ -#define OBD_CONNECT_QUOTA64 0x80000ULL /*64bit qunit_data.qd_count */ +#define OBD_CONNECT_QUOTA64 0x80000ULL /*Not used since 2.4 */ #define OBD_CONNECT_MDS_CAPA 0x100000ULL /*MDS capability */ #define OBD_CONNECT_OSS_CAPA 0x200000ULL /*OSS capability */ #define OBD_CONNECT_CANCELSET 0x400000ULL /*Early batched cancels. 
*/ @@ -1139,7 +1270,7 @@ extern void lustre_swab_ptlrpc_body(struct ptlrpc_body *pb); #define OBD_CONNECT_LRU_RESIZE 0x2000000ULL /*LRU resize feature. */ #define OBD_CONNECT_MDS_MDS 0x4000000ULL /*MDS-MDS connection */ #define OBD_CONNECT_REAL 0x8000000ULL /*real connection */ -#define OBD_CONNECT_CHANGE_QS 0x10000000ULL /*shrink/enlarge qunit */ +#define OBD_CONNECT_CHANGE_QS 0x10000000ULL /*Not used since 2.4 */ #define OBD_CONNECT_CKSUM 0x20000000ULL /*support several cksum algos*/ #define OBD_CONNECT_FID 0x40000000ULL /*FID is supported by server */ #define OBD_CONNECT_VBR 0x80000000ULL /*version based recovery */ @@ -1159,15 +1290,22 @@ extern void lustre_swab_ptlrpc_body(struct ptlrpc_body *pb); * RPC error properly */ #define OBD_CONNECT_GRANT_PARAM 0x100000000000ULL/* extra grant params used for * finer space reservation */ -#define OBD_CONNECT_NANOSEC_TIME 0x200000000000ULL /* nanosecond timestamps */ +#define OBD_CONNECT_FLOCK_OWNER 0x200000000000ULL /* for the fixed 1.8 + * policy and 2.x server */ #define OBD_CONNECT_LVB_TYPE 0x400000000000ULL /* variable type of LVB */ +#define OBD_CONNECT_NANOSEC_TIME 0x800000000000ULL /* nanosecond timestamps */ #define OBD_CONNECT_LIGHTWEIGHT 0x1000000000000ULL/* lightweight connection */ +#define OBD_CONNECT_SHORTIO 0x2000000000000ULL/* short io */ +#define OBD_CONNECT_PINGLESS 0x4000000000000ULL/* pings not required */ +#define OBD_CONNECT_FLOCK_DEAD 0x8000000000000ULL/* improved flock deadlock detection */ + /* XXX README XXX: * Please DO NOT add flag values here before first ensuring that this same * flag value is not in use on some other branch. Please clear any such * changes with senior engineers before starting to use a new flag. Then, - * submit a small patch against EVERY branch that ONLY adds the new flag - * and updates obd_connect_names[] for lprocfs_rd_connect_flags(), so it + * submit a small patch against EVERY branch that ONLY adds the new flag, + * updates obd_connect_names[] for lprocfs_rd_connect_flags(), adds the + * flag to check_obd_connect_data(), and updates wiretests accordingly, so it * can be approved and landed easily to reserve the flag for future use. 
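 *
 * Consumer-side sketch (assuming the usual export accessor; shown
 * for illustration only):
 *
 *	if (exp_connect_flags(exp) & OBD_CONNECT_PINGLESS)
 *		(the client may suppress periodic pings)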
*/ /* The MNE_SWAB flag is overloading the MDS_MDS bit only for the MGS @@ -1198,27 +1336,33 @@ extern void lustre_swab_ptlrpc_body(struct ptlrpc_body *pb); OBD_CONNECT_FID | LRU_RESIZE_CONNECT_FLAG | \ OBD_CONNECT_VBR | OBD_CONNECT_LOV_V3 | \ OBD_CONNECT_SOM | OBD_CONNECT_FULL20 | \ - OBD_CONNECT_64BITHASH | \ - OBD_CONNECT_EINPROGRESS | OBD_CONNECT_JOBSTATS) + OBD_CONNECT_64BITHASH | OBD_CONNECT_JOBSTATS | \ + OBD_CONNECT_EINPROGRESS | \ + OBD_CONNECT_LIGHTWEIGHT | OBD_CONNECT_UMASK | \ + OBD_CONNECT_LVB_TYPE | OBD_CONNECT_LAYOUTLOCK |\ + OBD_CONNECT_PINGLESS | OBD_CONNECT_MAX_EASIZE |\ + OBD_CONNECT_FLOCK_DEAD) #define OST_CONNECT_SUPPORTED (OBD_CONNECT_SRVLOCK | OBD_CONNECT_GRANT | \ OBD_CONNECT_REQPORTAL | OBD_CONNECT_VERSION | \ OBD_CONNECT_TRUNCLOCK | OBD_CONNECT_INDEX | \ - OBD_CONNECT_BRW_SIZE | OBD_CONNECT_QUOTA64 | \ + OBD_CONNECT_BRW_SIZE | OBD_CONNECT_OSS_CAPA | \ OBD_CONNECT_CANCELSET | OBD_CONNECT_AT | \ LRU_RESIZE_CONNECT_FLAG | OBD_CONNECT_CKSUM | \ - OBD_CONNECT_CHANGE_QS | \ - OBD_CONNECT_OSS_CAPA | \ OBD_CONNECT_RMT_CLIENT | \ OBD_CONNECT_RMT_CLIENT_FORCE | OBD_CONNECT_VBR | \ OBD_CONNECT_MDS | OBD_CONNECT_SKIP_ORPHAN | \ OBD_CONNECT_GRANT_SHRINK | OBD_CONNECT_FULL20 | \ OBD_CONNECT_64BITHASH | OBD_CONNECT_MAXBYTES | \ OBD_CONNECT_MAX_EASIZE | \ - OBD_CONNECT_EINPROGRESS | OBD_CONNECT_JOBSTATS) + OBD_CONNECT_EINPROGRESS | \ + OBD_CONNECT_JOBSTATS | \ + OBD_CONNECT_LIGHTWEIGHT | OBD_CONNECT_LVB_TYPE|\ + OBD_CONNECT_LAYOUTLOCK | OBD_CONNECT_FID | \ + OBD_CONNECT_PINGLESS) #define ECHO_CONNECT_SUPPORTED (0) #define MGS_CONNECT_SUPPORTED (OBD_CONNECT_VERSION | OBD_CONNECT_AT | \ OBD_CONNECT_FULL20 | OBD_CONNECT_IMP_RECOV | \ - OBD_CONNECT_MNE_SWAB) + OBD_CONNECT_MNE_SWAB | OBD_CONNECT_PINGLESS) /* Features required for this version of the client to work with server */ #define CLIENT_CONNECT_MDT_REQD (OBD_CONNECT_IBITS | OBD_CONNECT_FID | \ @@ -1236,11 +1380,11 @@ extern void lustre_swab_ptlrpc_body(struct ptlrpc_body *pb); * If we eventually have separate connect data for different types, which we * almost certainly will, then perhaps we stick a union in here. 
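 *
 * Negotiation sketch (server side, hedged): the reply carries the
 * intersection of what the client requested and what the target
 * supports, e.g.
 *
 *	data->ocd_connect_flags &= MDT_CONNECT_SUPPORTED;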
*/ struct obd_connect_data_v1 { - __u64 ocd_connect_flags; /* OBD_CONNECT_* per above */ - __u32 ocd_version; /* lustre release version number */ - __u32 ocd_grant; /* initial cache grant amount (bytes) */ - __u32 ocd_index; /* LOV index to connect to */ - __u32 ocd_brw_size; /* Maximum BRW size in bytes */ + __u64 ocd_connect_flags; /* OBD_CONNECT_* per above */ + __u32 ocd_version; /* lustre release version number */ + __u32 ocd_grant; /* initial cache grant amount (bytes) */ + __u32 ocd_index; /* LOV index to connect to */ + __u32 ocd_brw_size; /* Maximum BRW size in bytes, must be 2^n */ __u64 ocd_ibits_known; /* inode bits this client understands */ __u8 ocd_blocksize; /* log2 of the backend filesystem blocksize */ __u8 ocd_inodespace; /* log2 of the per-inode space consumption */ @@ -1255,11 +1399,11 @@ struct obd_connect_data_v1 { }; struct obd_connect_data { - __u64 ocd_connect_flags; /* OBD_CONNECT_* per above */ - __u32 ocd_version; /* lustre release version number */ - __u32 ocd_grant; /* initial cache grant amount (bytes) */ - __u32 ocd_index; /* LOV index to connect to */ - __u32 ocd_brw_size; /* Maximum BRW size in bytes */ + __u64 ocd_connect_flags; /* OBD_CONNECT_* per above */ + __u32 ocd_version; /* lustre release version number */ + __u32 ocd_grant; /* initial cache grant amount (bytes) */ + __u32 ocd_index; /* LOV index to connect to */ + __u32 ocd_brw_size; /* Maximum BRW size in bytes */ __u64 ocd_ibits_known; /* inode bits this client understands */ __u8 ocd_blocksize; /* log2 of the backend filesystem blocksize */ __u8 ocd_inodespace; /* log2 of the per-inode space consumption */ @@ -1338,7 +1482,7 @@ typedef enum { OST_SET_INFO = 17, OST_QUOTACHECK = 18, OST_QUOTACTL = 19, - OST_QUOTA_ADJUST_QUNIT = 20, + OST_QUOTA_ADJUST_QUNIT = 20, /* not used since 2.4 */ OST_LAST_OPC } ost_cmd_t; #define OST_FIRST_OPC OST_REPLY @@ -1366,6 +1510,7 @@ enum obdo_flags { * clients prior than 2.2 */ OBD_FL_RECOV_RESEND = 0x00080000, /* recoverable resent */ OBD_FL_NOSPC_BLK = 0x00100000, /* no more block space on OST */ + OBD_FL_FLUSH = 0x00200000, /* flush pages on the OST */ /* Note that while these checksum values are currently separate bits, * in 2.x we can actually allow all values from 1-31 if we wanted. */ @@ -1381,32 +1526,111 @@ enum obdo_flags { #define LOV_MAGIC_JOIN_V1 0x0BD20BD0 #define LOV_MAGIC_V3 0x0BD30BD0 -#define LOV_PATTERN_RAID0 0x001 /* stripes are used round-robin */ -#define LOV_PATTERN_RAID1 0x002 /* stripes are mirrors of each other */ -#define LOV_PATTERN_FIRST 0x100 /* first stripe is not in round-robin */ -#define LOV_PATTERN_CMOBD 0x200 +/* + * magic for fully defined striping + * the idea is that we should have different magics for striping "hints" + * (struct lov_user_md_v[13]) and defined ready-to-use striping (struct + * lov_mds_md_v[13]). at the moment the magics are used in wire protocol, + * we can't just change it w/o long way preparation, but we still need a + * mechanism to allow LOD to differentiate hint versus ready striping. 
+ * so, at the moment we do a trick: MDT knows what to expect from request + * depending on the case (replay uses ready striping, non-replay req uses + * hints), so MDT replaces magic with appropriate one and now LOD can + * easily understand what's inside -bzzz + */ +#define LOV_MAGIC_V1_DEF 0x0CD10BD0 +#define LOV_MAGIC_V3_DEF 0x0CD30BD0 + +#define LOV_PATTERN_RAID0 0x001 /* stripes are used round-robin */ +#define LOV_PATTERN_RAID1 0x002 /* stripes are mirrors of each other */ +#define LOV_PATTERN_FIRST 0x100 /* first stripe is not in round-robin */ +#define LOV_PATTERN_CMOBD 0x200 + +#define LOV_PATTERN_F_MASK 0xffff0000 +#define LOV_PATTERN_F_RELEASED 0x80000000 /* HSM released file */ + +#define lov_pattern(pattern) (pattern & ~LOV_PATTERN_F_MASK) +#define lov_pattern_flags(pattern) (pattern & LOV_PATTERN_F_MASK) #define lov_ost_data lov_ost_data_v1 struct lov_ost_data_v1 { /* per-stripe data structure (little-endian)*/ - __u64 l_object_id; /* OST object ID */ - __u64 l_object_seq; /* OST object seq number */ - __u32 l_ost_gen; /* generation of this l_ost_idx */ - __u32 l_ost_idx; /* OST index in LOV (lov_tgt_desc->tgts) */ + struct ost_id l_ost_oi; /* OST object ID */ + __u32 l_ost_gen; /* generation of this l_ost_idx */ + __u32 l_ost_idx; /* OST index in LOV (lov_tgt_desc->tgts) */ }; #define lov_mds_md lov_mds_md_v1 struct lov_mds_md_v1 { /* LOV EA mds/wire data (little-endian) */ - __u32 lmm_magic; /* magic number = LOV_MAGIC_V1 */ - __u32 lmm_pattern; /* LOV_PATTERN_RAID0, LOV_PATTERN_RAID1 */ - __u64 lmm_object_id; /* LOV object ID */ - __u64 lmm_object_seq; /* LOV object seq number */ - __u32 lmm_stripe_size; /* size of stripe in bytes */ - /* lmm_stripe_count used to be __u32 */ - __u16 lmm_stripe_count; /* num stripes in use for this object */ - __u16 lmm_layout_gen; /* layout generation number */ - struct lov_ost_data_v1 lmm_objects[0]; /* per-stripe data */ + __u32 lmm_magic; /* magic number = LOV_MAGIC_V1 */ + __u32 lmm_pattern; /* LOV_PATTERN_RAID0, LOV_PATTERN_RAID1 */ + struct ost_id lmm_oi; /* LOV object ID */ + __u32 lmm_stripe_size; /* size of stripe in bytes */ + /* lmm_stripe_count used to be __u32 */ + __u16 lmm_stripe_count; /* num stripes in use for this object */ + __u16 lmm_layout_gen; /* layout generation number */ + struct lov_ost_data_v1 lmm_objects[0]; /* per-stripe data */ }; +/** + * Sigh, because pre-2.4 uses + * struct lov_mds_md_v1 { + * ........ + * __u64 lmm_object_id; + * __u64 lmm_object_seq; + * ...... + * } + * to identify the LOV(MDT) object, and lmm_object_seq will + * be normal_fid, which make it hard to combine these conversion + * to ostid_to FID. so we will do lmm_oi/fid conversion separately + * + * We can tell the lmm_oi by this way, + * 1.8: lmm_object_id = {inode}, lmm_object_gr = 0 + * 2.1: lmm_object_id = {oid < 128k}, lmm_object_seq = FID_SEQ_NORMAL + * 2.4: lmm_oi.f_seq = FID_SEQ_NORMAL, lmm_oi.f_oid = {oid < 128k}, + * lmm_oi.f_ver = 0 + * + * But currently lmm_oi/lsm_oi does not have any "real" usages, + * except for printing some information, and the user can always + * get the real FID from LMA, besides this multiple case check might + * make swab more complicate. So we will keep using id/seq for lmm_oi. 
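 *
 * Resulting fill/wire sketch (hypothetical MDT-side caller, lmm
 * pointing at a struct lov_mds_md):
 *
 *	struct ost_id oi;
 *
 *	fid_to_lmm_oi(fid, &oi);		(id/seq form, CPU order)
 *	lmm_oi_cpu_to_le(&lmm->lmm_oi, &oi);	(little-endian for disk/wire)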
+ */ + +static inline void fid_to_lmm_oi(const struct lu_fid *fid, + struct ost_id *oi) +{ + oi->oi.oi_id = fid_oid(fid); + oi->oi.oi_seq = fid_seq(fid); +} + +static inline void lmm_oi_set_seq(struct ost_id *oi, __u64 seq) +{ + oi->oi.oi_seq = seq; +} + +static inline __u64 lmm_oi_id(struct ost_id *oi) +{ + return oi->oi.oi_id; +} + +static inline __u64 lmm_oi_seq(struct ost_id *oi) +{ + return oi->oi.oi_seq; +} + +static inline void lmm_oi_le_to_cpu(struct ost_id *dst_oi, + struct ost_id *src_oi) +{ + dst_oi->oi.oi_id = le64_to_cpu(src_oi->oi.oi_id); + dst_oi->oi.oi_seq = le64_to_cpu(src_oi->oi.oi_seq); +} + +static inline void lmm_oi_cpu_to_le(struct ost_id *dst_oi, + struct ost_id *src_oi) +{ + dst_oi->oi.oi_id = cpu_to_le64(src_oi->oi.oi_id); + dst_oi->oi.oi_seq = cpu_to_le64(src_oi->oi.oi_seq); +} + /* extern void lustre_swab_lov_mds_md(struct lov_mds_md *llm); */ #define MAX_MD_SIZE (sizeof(struct lov_mds_md) + 4 * sizeof(struct lov_ost_data)) @@ -1425,20 +1649,31 @@ struct lov_mds_md_v1 { /* LOV EA mds/wire data (little-endian) */ #define XATTR_NAME_LINK "trusted.link" #define XATTR_NAME_FID "trusted.fid" #define XATTR_NAME_VERSION "trusted.version" - +#define XATTR_NAME_SOM "trusted.som" +#define XATTR_NAME_HSM "trusted.hsm" +#define XATTR_NAME_LFSCK_NAMESPACE "trusted.lfsck_namespace" struct lov_mds_md_v3 { /* LOV EA mds/wire data (little-endian) */ - __u32 lmm_magic; /* magic number = LOV_MAGIC_V3 */ - __u32 lmm_pattern; /* LOV_PATTERN_RAID0, LOV_PATTERN_RAID1 */ - __u64 lmm_object_id; /* LOV object ID */ - __u64 lmm_object_seq; /* LOV object seq number */ - __u32 lmm_stripe_size; /* size of stripe in bytes */ - /* lmm_stripe_count used to be __u32 */ - __u16 lmm_stripe_count; /* num stripes in use for this object */ - __u16 lmm_layout_gen; /* layout generation number */ - char lmm_pool_name[LOV_MAXPOOLNAME]; /* must be 32bit aligned */ - struct lov_ost_data_v1 lmm_objects[0]; /* per-stripe data */ -}; + __u32 lmm_magic; /* magic number = LOV_MAGIC_V3 */ + __u32 lmm_pattern; /* LOV_PATTERN_RAID0, LOV_PATTERN_RAID1 */ + struct ost_id lmm_oi; /* LOV object ID */ + __u32 lmm_stripe_size; /* size of stripe in bytes */ + /* lmm_stripe_count used to be __u32 */ + __u16 lmm_stripe_count; /* num stripes in use for this object */ + __u16 lmm_layout_gen; /* layout generation number */ + char lmm_pool_name[LOV_MAXPOOLNAME]; /* must be 32bit aligned */ + struct lov_ost_data_v1 lmm_objects[0]; /* per-stripe data */ +}; + +static inline __u32 lov_mds_md_size(__u16 stripes, __u32 lmm_magic) +{ + if (lmm_magic == LOV_MAGIC_V3) + return sizeof(struct lov_mds_md_v3) + + stripes * sizeof(struct lov_ost_data_v1); + else + return sizeof(struct lov_mds_md_v1) + + stripes * sizeof(struct lov_ost_data_v1); +} #define OBD_MD_FLID (0x00000001ULL) /* object ID */ @@ -1477,7 +1712,7 @@ struct lov_mds_md_v3 { /* LOV EA mds/wire data (little-endian) */ #define OBD_MD_MDS (0x0000000100000000ULL) /* where an inode lives on */ #define OBD_MD_REINT (0x0000000200000000ULL) /* reintegrate oa */ #define OBD_MD_MEA (0x0000000400000000ULL) /* CMD split EA */ -#define OBD_MD_MDTIDX (0x0000000800000000ULL) /* Get MDT index */ +#define OBD_MD_TSTATE (0x0000000800000000ULL) /* transient state field */ #define OBD_MD_FLXATTR (0x0000001000000000ULL) /* xattr */ #define OBD_MD_FLXATTRLS (0x0000002000000000ULL) /* xattr list */ @@ -1489,7 +1724,9 @@ struct lov_mds_md_v3 { /* LOV EA mds/wire data (little-endian) */ #define OBD_MD_FLCKSPLIT (0x0000080000000000ULL) /* Check split on server */ #define OBD_MD_FLCROSSREF 
(0x0000100000000000ULL) /* Cross-ref case */ #define OBD_MD_FLGETATTRLOCK (0x0000200000000000ULL) /* Get IOEpoch attributes - * under lock */ + * under lock; for xattr + * requests means the + * client holds the lock */ #define OBD_MD_FLOBJCOUNT (0x0000400000000000ULL) /* for multiple destroy */ #define OBD_MD_FLRMTLSETFACL (0x0001000000000000ULL) /* lfs lsetfacl case */ @@ -1498,6 +1735,7 @@ struct lov_mds_md_v3 { /* LOV EA mds/wire data (little-endian) */ #define OBD_MD_FLRMTRGETFACL (0x0008000000000000ULL) /* lfs rgetfacl case */ #define OBD_MD_FLDATAVERSION (0x0010000000000000ULL) /* iversion sum */ +#define OBD_MD_FLRELEASED (0x0020000000000000ULL) /* file released */ #define OBD_MD_FLGETATTR (OBD_MD_FLID | OBD_MD_FLATIME | OBD_MD_FLMTIME | \ OBD_MD_FLCTIME | OBD_MD_FLSIZE | OBD_MD_FLBLKSZ | \ @@ -1505,9 +1743,26 @@ struct lov_mds_md_v3 { /* LOV EA mds/wire data (little-endian) */ OBD_MD_FLGID | OBD_MD_FLFLAGS | OBD_MD_FLNLINK | \ OBD_MD_FLGENER | OBD_MD_FLRDEV | OBD_MD_FLGROUP) +#define OBD_MD_FLXATTRALL (OBD_MD_FLXATTR | OBD_MD_FLXATTRLS) + /* don't forget obdo_fid which is way down at the bottom so it can * come after the definition of llog_cookie */ +enum hss_valid { + HSS_SETMASK = 0x01, + HSS_CLEARMASK = 0x02, + HSS_ARCHIVE_ID = 0x04, +}; + +struct hsm_state_set { + __u32 hss_valid; + __u32 hss_archive_id; + __u64 hss_setmask; + __u64 hss_clearmask; +}; + +extern void lustre_swab_hsm_user_state(struct hsm_user_state *hus); +extern void lustre_swab_hsm_state_set(struct hsm_state_set *hss); extern void lustre_swab_obd_statfs (struct obd_statfs *os); @@ -1529,6 +1784,10 @@ extern void lustre_swab_obd_statfs (struct obd_statfs *os); #define OBD_BRW_MEMALLOC 0x800 /* Client runs in the "kswapd" context */ #define OBD_BRW_OVER_USRQUOTA 0x1000 /* Running out of user quota */ #define OBD_BRW_OVER_GRPQUOTA 0x2000 /* Running out of group quota */ +#define OBD_BRW_SOFT_SYNC 0x4000 /* This flag notifies the server + * that the client is running low on + * space for unstable pages; asking + * it to sync quickly */ #define OBD_OBJECT_EOF 0xffffffffffffffffULL @@ -1536,12 +1795,19 @@ extern void lustre_swab_obd_statfs (struct obd_statfs *os); #define OST_MAX_PRECREATE 20000 struct obd_ioobj { - obd_id ioo_id; - obd_seq ioo_seq; - __u32 ioo_type; - __u32 ioo_bufcnt; + struct ost_id ioo_oid; /* object ID, if multi-obj BRW */ + __u32 ioo_max_brw; /* low 16 bits were o_mode before 2.4, + * now (PTLRPC_BULK_OPS_COUNT - 1) in + * high 16 bits in 2.4 and later */ + __u32 ioo_bufcnt; /* number of niobufs for this object */ }; +#define IOOBJ_MAX_BRW_BITS 16 +#define IOOBJ_TYPE_MASK ((1U << IOOBJ_MAX_BRW_BITS) - 1) +#define ioobj_max_brw_get(ioo) (((ioo)->ioo_max_brw >> IOOBJ_MAX_BRW_BITS) + 1) +#define ioobj_max_brw_set(ioo, num) \ +do { (ioo)->ioo_max_brw = ((num) - 1) << IOOBJ_MAX_BRW_BITS; } while (0) + extern void lustre_swab_obd_ioobj (struct obd_ioobj *ioo); /* multiple of 8 bytes => can array */ @@ -1565,59 +1831,271 @@ extern void lustre_swab_niobuf_remote (struct niobuf_remote *nbr); do { blocks = OST_LVB_ERR_INIT + rc; } while (0) #define OST_LVB_GET_ERR(blocks) (int)(blocks - OST_LVB_ERR_INIT) +struct ost_lvb_v1 { + __u64 lvb_size; + obd_time lvb_mtime; + obd_time lvb_atime; + obd_time lvb_ctime; + __u64 lvb_blocks; +}; + +extern void lustre_swab_ost_lvb_v1(struct ost_lvb_v1 *lvb); + struct ost_lvb { - __u64 lvb_size; - obd_time lvb_mtime; - obd_time lvb_atime; - obd_time lvb_ctime; - __u64 lvb_blocks; + __u64 lvb_size; + obd_time lvb_mtime; + obd_time lvb_atime; + obd_time lvb_ctime; + 
__u64 lvb_blocks; + __u32 lvb_mtime_ns; + __u32 lvb_atime_ns; + __u32 lvb_ctime_ns; + __u32 lvb_padding; +}; + +extern void lustre_swab_ost_lvb(struct ost_lvb *lvb); + +/* + * lquota data structures + */ + +#ifndef QUOTABLOCK_BITS +#define QUOTABLOCK_BITS 10 +#endif + +#ifndef QUOTABLOCK_SIZE +#define QUOTABLOCK_SIZE (1 << QUOTABLOCK_BITS) +#endif + +#ifndef toqb +#define toqb(x) (((x) + QUOTABLOCK_SIZE - 1) >> QUOTABLOCK_BITS) +#endif + +/* The lquota_id structure is an union of all the possible identifier types that + * can be used with quota, this includes: + * - 64-bit user ID + * - 64-bit group ID + * - a FID which can be used for per-directory quota in the future */ +union lquota_id { + struct lu_fid qid_fid; /* FID for per-directory quota */ + __u64 qid_uid; /* user identifier */ + __u64 qid_gid; /* group identifier */ +}; + +/* quotactl management */ +struct obd_quotactl { + __u32 qc_cmd; + __u32 qc_type; /* see Q_* flag below */ + __u32 qc_id; + __u32 qc_stat; + struct obd_dqinfo qc_dqinfo; + struct obd_dqblk qc_dqblk; +}; + +extern void lustre_swab_obd_quotactl(struct obd_quotactl *q); + +#define Q_QUOTACHECK 0x800100 /* deprecated as of 2.4 */ +#define Q_INITQUOTA 0x800101 /* deprecated as of 2.4 */ +#define Q_GETOINFO 0x800102 /* get obd quota info */ +#define Q_GETOQUOTA 0x800103 /* get obd quotas */ +#define Q_FINVALIDATE 0x800104 /* deprecated as of 2.4 */ + +#define Q_COPY(out, in, member) (out)->member = (in)->member + +#define QCTL_COPY(out, in) \ +do { \ + Q_COPY(out, in, qc_cmd); \ + Q_COPY(out, in, qc_type); \ + Q_COPY(out, in, qc_id); \ + Q_COPY(out, in, qc_stat); \ + Q_COPY(out, in, qc_dqinfo); \ + Q_COPY(out, in, qc_dqblk); \ +} while (0) + +/* Body of quota request used for quota acquire/release RPCs between quota + * master (aka QMT) and slaves (ak QSD). */ +struct quota_body { + struct lu_fid qb_fid; /* FID of global index packing the pool ID + * and type (data or metadata) as well as + * the quota type (user or group). */ + union lquota_id qb_id; /* uid or gid or directory FID */ + __u32 qb_flags; /* see below */ + __u32 qb_padding; + __u64 qb_count; /* acquire/release count (kbytes/inodes) */ + __u64 qb_usage; /* current slave usage (kbytes/inodes) */ + __u64 qb_slv_ver; /* slave index file version */ + struct lustre_handle qb_lockh; /* per-ID lock handle */ + struct lustre_handle qb_glb_lockh; /* global lock handle */ + __u64 qb_padding1[4]; +}; + +/* When the quota_body is used in the reply of quota global intent + * lock (IT_QUOTA_CONN) reply, qb_fid contains slave index file FID. 
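 *
 * Acquire-request sketch (slave side, hedged; glb_idx_fid, uid and
 * count are placeholders, the QUOTA_DQACQ_FL_* flags are defined
 * just below):
 *
 *	struct quota_body qbody;
 *
 *	memset(&qbody, 0, sizeof(qbody));
 *	qbody.qb_fid = glb_idx_fid;	(FID of the global index)
 *	qbody.qb_id.qid_uid = uid;
 *	qbody.qb_flags = QUOTA_DQACQ_FL_ACQ;
 *	qbody.qb_count = count;		(kbytes or inodes)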
*/ +#define qb_slv_fid qb_fid +/* qb_usage is the current qunit (in kbytes/inodes) when quota_body is used in + * quota reply */ +#define qb_qunit qb_usage + +#define QUOTA_DQACQ_FL_ACQ 0x1 /* acquire quota */ +#define QUOTA_DQACQ_FL_PREACQ 0x2 /* pre-acquire */ +#define QUOTA_DQACQ_FL_REL 0x4 /* release quota */ +#define QUOTA_DQACQ_FL_REPORT 0x8 /* report usage */ + +extern void lustre_swab_quota_body(struct quota_body *b); + +/* Quota types currently supported */ +enum { + LQUOTA_TYPE_USR = 0x00, /* maps to USRQUOTA */ + LQUOTA_TYPE_GRP = 0x01, /* maps to GRPQUOTA */ + LQUOTA_TYPE_MAX }; +/* There are 2 different resource types on which a quota limit can be enforced: + * - inodes on the MDTs + * - blocks on the OSTs */ +enum { + LQUOTA_RES_MD = 0x01, /* skip 0 to avoid null oid in FID */ + LQUOTA_RES_DT = 0x02, + LQUOTA_LAST_RES, + LQUOTA_FIRST_RES = LQUOTA_RES_MD +}; +#define LQUOTA_NR_RES (LQUOTA_LAST_RES - LQUOTA_FIRST_RES + 1) + +/* + * Space accounting support + * Format of an accounting record, providing disk usage information for a given + * user or group + */ +struct lquota_acct_rec { /* 16 bytes */ + __u64 bspace; /* current space in use */ + __u64 ispace; /* current # inodes in use */ +}; + +/* + * Global quota index support + * Format of a global record, providing global quota settings for a given quota + * identifier + */ +struct lquota_glb_rec { /* 32 bytes */ + __u64 qbr_hardlimit; /* quota hard limit, in #inodes or kbytes */ + __u64 qbr_softlimit; /* quota soft limit, in #inodes or kbytes */ + __u64 qbr_time; /* grace time, in seconds */ + __u64 qbr_granted; /* how much is granted to slaves, in #inodes or + * kbytes */ +}; + +/* + * Slave index support + * Format of a slave record, recording how much space is granted to a given + * slave + */ +struct lquota_slv_rec { /* 8 bytes */ + __u64 qsr_granted; /* space granted to the slave for the key=ID, + * in #inodes or kbytes */ +}; + +/* Data structures associated with the quota locks */ + +/* Glimpse descriptor used for the index & per-ID quota locks */ +struct ldlm_gl_lquota_desc { + union lquota_id gl_id; /* quota ID subject to the glimpse */ + __u64 gl_flags; /* see LQUOTA_FL* below */ + __u64 gl_ver; /* new index version */ + __u64 gl_hardlimit; /* new hardlimit or qunit value */ + __u64 gl_softlimit; /* new softlimit */ + __u64 gl_time; + __u64 gl_pad2; +}; +#define gl_qunit gl_hardlimit /* current qunit value used when + * glimpsing per-ID quota locks */ + +/* quota glimpse flags */ +#define LQUOTA_FL_EDQUOT 0x1 /* user/group out of quota space on QMT */ + +/* LVB used with quota (global and per-ID) locks */ +struct lquota_lvb { + __u64 lvb_flags; /* see LQUOTA_FL* above */ + __u64 lvb_id_may_rel; /* space that might be released later */ + __u64 lvb_id_rel; /* space released by the slave for this ID */ + __u64 lvb_id_qunit; /* current qunit value */ + __u64 lvb_pad1; +}; + +extern void lustre_swab_lquota_lvb(struct lquota_lvb *lvb); + +/* LVB used with global quota lock */ +#define lvb_glb_ver lvb_id_may_rel /* current version of the global index */ + +/* op codes */ +typedef enum { + QUOTA_DQACQ = 601, + QUOTA_DQREL = 602, + QUOTA_LAST_OPC +} quota_cmd_t; +#define QUOTA_FIRST_OPC QUOTA_DQACQ + /* * MDS REQ RECORDS */ /* opcodes */ typedef enum { - MDS_GETATTR = 33, - MDS_GETATTR_NAME = 34, - MDS_CLOSE = 35, - MDS_REINT = 36, - MDS_READPAGE = 37, - MDS_CONNECT = 38, - MDS_DISCONNECT = 39, - MDS_GETSTATUS = 40, - MDS_STATFS = 41, - MDS_PIN = 42, - MDS_UNPIN = 43, - MDS_SYNC = 44, - MDS_DONE_WRITING = 45, - MDS_SET_INFO 
= 46, - MDS_QUOTACHECK = 47, - MDS_QUOTACTL = 48, - MDS_GETXATTR = 49, - MDS_SETXATTR = 50, /* obsolete, now it's MDS_REINT op */ - MDS_WRITEPAGE = 51, - MDS_IS_SUBDIR = 52, - MDS_GET_INFO = 53, - MDS_LAST_OPC + MDS_GETATTR = 33, + MDS_GETATTR_NAME = 34, + MDS_CLOSE = 35, + MDS_REINT = 36, + MDS_READPAGE = 37, + MDS_CONNECT = 38, + MDS_DISCONNECT = 39, + MDS_GETSTATUS = 40, + MDS_STATFS = 41, + MDS_PIN = 42, + MDS_UNPIN = 43, + MDS_SYNC = 44, + MDS_DONE_WRITING = 45, + MDS_SET_INFO = 46, + MDS_QUOTACHECK = 47, + MDS_QUOTACTL = 48, + MDS_GETXATTR = 49, + MDS_SETXATTR = 50, /* obsolete, now it's MDS_REINT op */ + MDS_WRITEPAGE = 51, + MDS_IS_SUBDIR = 52, + MDS_GET_INFO = 53, + MDS_HSM_STATE_GET = 54, + MDS_HSM_STATE_SET = 55, + MDS_HSM_ACTION = 56, + MDS_HSM_PROGRESS = 57, + MDS_HSM_REQUEST = 58, + MDS_HSM_CT_REGISTER = 59, + MDS_HSM_CT_UNREGISTER = 60, + MDS_SWAP_LAYOUTS = 61, + MDS_LAST_OPC } mds_cmd_t; #define MDS_FIRST_OPC MDS_GETATTR + +/* opcodes for object update */ +typedef enum { + UPDATE_OBJ = 1000, + UPDATE_LAST_OPC +} update_cmd_t; + +#define UPDATE_FIRST_OPC UPDATE_OBJ + /* * Do not exceed 63 */ typedef enum { - REINT_SETATTR = 1, - REINT_CREATE = 2, - REINT_LINK = 3, - REINT_UNLINK = 4, - REINT_RENAME = 5, - REINT_OPEN = 6, - REINT_SETXATTR = 7, -// REINT_CLOSE = 8, + REINT_SETATTR = 1, + REINT_CREATE = 2, + REINT_LINK = 3, + REINT_UNLINK = 4, + REINT_RENAME = 5, + REINT_OPEN = 6, + REINT_SETXATTR = 7, + REINT_RMENTRY = 8, // REINT_WRITE = 9, REINT_MAX } mds_reint_t, mdt_reint_t; @@ -1635,17 +2113,17 @@ extern void lustre_swab_generic_32s (__u32 *val); #define DISP_ENQ_OPEN_REF 0x00800000 #define DISP_ENQ_CREATE_REF 0x01000000 #define DISP_OPEN_LOCK 0x02000000 +#define DISP_OPEN_LEASE 0x04000000 /* INODE LOCK PARTS */ #define MDS_INODELOCK_LOOKUP 0x000001 /* dentry, mode, owner, group */ #define MDS_INODELOCK_UPDATE 0x000002 /* size, links, timestamps */ #define MDS_INODELOCK_OPEN 0x000004 /* For opened files */ #define MDS_INODELOCK_LAYOUT 0x000008 /* for layout */ +#define MDS_INODELOCK_PERM 0x000010 /* for permission */ +#define MDS_INODELOCK_XATTR 0x000020 /* extended attributes */ -/* Do not forget to increase MDS_INODELOCK_MAXSHIFT when adding new bits - * XXX: MDS_INODELOCK_MAXSHIFT should be increased to 3 once the layout lock is - * supported */ -#define MDS_INODELOCK_MAXSHIFT 2 +#define MDS_INODELOCK_MAXSHIFT 5 /* This FULL lock is useful to take on unlink sort of operations */ #define MDS_INODELOCK_FULL ((1<<(MDS_INODELOCK_MAXSHIFT+1))-1) @@ -1658,6 +2136,8 @@ enum { LUSTRE_RES_ID_SEQ_OFF = 0, LUSTRE_RES_ID_VER_OID_OFF = 1, LUSTRE_RES_ID_WAS_VER_OFF = 2, /* see note above */ + LUSTRE_RES_ID_QUOTA_SEQ_OFF = 2, + LUSTRE_RES_ID_QUOTA_VER_OID_OFF = 3, LUSTRE_RES_ID_HSH_OFF = 3 }; @@ -1666,19 +2146,20 @@ enum { /* mdt_thread_info.mti_flags. */ enum md_op_flags { - /* The flag indicates Size-on-MDS attributes are changed. */ - MF_SOM_CHANGE = (1 << 0), - /* Flags indicates an epoch opens or closes. */ - MF_EPOCH_OPEN = (1 << 1), - MF_EPOCH_CLOSE = (1 << 2), - MF_MDC_CANCEL_FID1 = (1 << 3), - MF_MDC_CANCEL_FID2 = (1 << 4), - MF_MDC_CANCEL_FID3 = (1 << 5), - MF_MDC_CANCEL_FID4 = (1 << 6), - /* There is a pending attribute update. */ - MF_SOM_AU = (1 << 7), - /* Cancel OST locks while getattr OST attributes. */ - MF_GETATTR_LOCK = (1 << 8), + /* The flag indicates Size-on-MDS attributes are changed. */ + MF_SOM_CHANGE = (1 << 0), + /* Flags indicates an epoch opens or closes. 
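 *
 * (MF_EPOCH_OPEN marks the start of an IO epoch, MF_EPOCH_CLOSE the
 * matching close)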
+	MF_EPOCH_OPEN		= (1 << 1),
+	MF_EPOCH_CLOSE		= (1 << 2),
+	MF_MDC_CANCEL_FID1	= (1 << 3),
+	MF_MDC_CANCEL_FID2	= (1 << 4),
+	MF_MDC_CANCEL_FID3	= (1 << 5),
+	MF_MDC_CANCEL_FID4	= (1 << 6),
+	/* There is a pending attribute update. */
+	MF_SOM_AU		= (1 << 7),
+	/* Cancel OST locks while getattr OST attributes. */
+	MF_GETATTR_LOCK		= (1 << 8),
+	MF_GET_MDT_IDX		= (1 << 9),
 };

 #define MF_SOM_LOCAL_FLAGS (MF_SOM_CHANGE | MF_EPOCH_OPEN | MF_EPOCH_CLOSE)

@@ -1723,6 +2204,11 @@ static inline int ll_inode_to_ext_flags(int iflags)
 }
 #endif

+/* 64 possible states */
+enum md_transient_state {
+	MS_RESTORE	= (1 << 0),	/* restore is running */
+};
+
 struct mdt_body {
 	struct lu_fid  fid1;
 	struct lu_fid  fid2;
@@ -1734,7 +2220,9 @@ struct mdt_body {
 	obd_time        ctime;
 	__u64           blocks; /* XID, in the case of MDS_READPAGE */
 	__u64           ioepoch;
-	__u64           ino;
+	__u64		t_state; /* transient file state defined in
+				  * enum md_transient_state
+				  * was "ino" until 2.4.0 */
 	__u32           fsuid;
 	__u32           fsgid;
 	__u32           capability;
@@ -1744,7 +2232,7 @@ struct mdt_body {
 	__u32           flags; /* from vfs for pin/unpin, LUSTRE_BFLAG close */
 	__u32           rdev;
 	__u32           nlink; /* #bytes to read in the case of MDS_READPAGE */
-	__u32           generation;
+	__u32		unused2; /* was "generation" until 2.4.0 */
 	__u32           suppgid;
 	__u32           eadatasize;
 	__u32           aclsize;
@@ -1771,167 +2259,6 @@ struct mdt_ioepoch {

 extern void lustre_swab_mdt_ioepoch (struct mdt_ioepoch *b);

-/* The lquota_id structure is an union of all the possible identifier types that
- * can be used with quota, this includes:
- * - 64-bit user ID
- * - 64-bit group ID
- * - a FID which can be used for per-directory quota in the future */
-union lquota_id {
-	struct lu_fid	qid_fid; /* FID for per-directory quota */
-	__u64		qid_uid; /* user identifier */
-	__u64		qid_gid; /* group identifier */
-};
-
-#define Q_QUOTACHECK	0x800100
-#define Q_INITQUOTA	0x800101 /* init slave limits */
-#define Q_GETOINFO	0x800102 /* get obd quota info */
-#define Q_GETOQUOTA	0x800103 /* get obd quotas */
-#define Q_FINVALIDATE	0x800104 /* invalidate operational quotas */
-
-#define Q_TYPEMATCH(id, type) \
-        ((id) == (type) || (id) == UGQUOTA)
-
-#define Q_TYPESET(oqc, type) Q_TYPEMATCH((oqc)->qc_type, type)
-
-#define Q_GETOCMD(oqc) \
-        ((oqc)->qc_cmd == Q_GETOINFO || (oqc)->qc_cmd == Q_GETOQUOTA)
-
-#define QCTL_COPY(out, in)              \
-do {                                    \
-        Q_COPY(out, in, qc_cmd);        \
-        Q_COPY(out, in, qc_type);       \
-        Q_COPY(out, in, qc_id);         \
-        Q_COPY(out, in, qc_stat);       \
-        Q_COPY(out, in, qc_dqinfo);     \
-        Q_COPY(out, in, qc_dqblk);      \
-} while (0)
-
-struct obd_quotactl {
-	__u32			qc_cmd;
-	__u32			qc_type;
-	__u32			qc_id;
-	__u32			qc_stat;
-	struct obd_dqinfo	qc_dqinfo;
-	struct obd_dqblk	qc_dqblk;
-};
-
-extern void lustre_swab_obd_quotactl(struct obd_quotactl *q);
-
-#define QUOTA_DQACQ_FL_ACQ	0x1  /* acquire quota */
-#define QUOTA_DQACQ_FL_PREACQ	0x2  /* pre-acquire */
-#define QUOTA_DQACQ_FL_REL	0x4  /* release quota */
-#define QUOTA_DQACQ_FL_REPORT	0x8  /* report usage */
-
-struct quota_body {
-	struct lu_fid	qb_fid;     /* FID of global index packing the pool ID
-				     * and type (data or metadata) as well as
-				     * the quota type (user or group). */
-	union lquota_id	qb_id;      /* uid or gid or directory FID */
-	__u32		qb_flags;   /* see above */
-	__u32		qb_padding;
-	__u64		qb_count;   /* acquire/release count (kbytes/inodes) */
-	__u64		qb_usage;   /* current slave usage (kbytes/inodes) */
-	__u64		qb_slv_ver; /* slave index file version */
-	struct lustre_handle	qb_lockh;     /* per-ID lock handle */
-	struct lustre_handle	qb_glb_lockh; /* global lock handle */
-	__u64		qb_padding1[4];
-};
-
-/* When the quota_body is used in the reply of quota global intent
- * lock (IT_QUOTA_CONN) reply, qb_fid contains slave index file FID. */
-#define qb_slv_fid	qb_fid
-/* qb_usage is the current qunit (in kbytes/inodes) when quota_body is used in
- * acquire reply */
-#define qb_qunit	qb_usage
-
-extern void lustre_swab_quota_body(struct quota_body *b);
-
-struct quota_adjust_qunit {
-	__u32		qaq_flags;
-	__u32		qaq_id;
-	__u64		qaq_bunit_sz;
-	__u64		qaq_iunit_sz;
-	__u64		padding1;
-};
-extern void lustre_swab_quota_adjust_qunit(struct quota_adjust_qunit *q);
-
-/* Quota types currently supported */
-enum {
-	LQUOTA_TYPE_USR	= 0x00, /* maps to USRQUOTA */
-	LQUOTA_TYPE_GRP	= 0x01, /* maps to GRPQUOTA */
-	LQUOTA_TYPE_MAX
-};
-
-/* There are 2 different resource types on which a quota limit can be enforced:
- * - inodes on the MDTs
- * - blocks on the OSTs */
-enum {
-	LQUOTA_RES_MD		= 0x01, /* skip 0 to avoid null oid in FID */
-	LQUOTA_RES_DT		= 0x02,
-	LQUOTA_LAST_RES,
-	LQUOTA_FIRST_RES	= LQUOTA_RES_MD
-};
-#define LQUOTA_NR_RES (LQUOTA_LAST_RES - LQUOTA_FIRST_RES + 1)
-
-/*
- * Space accounting support
- * Format of an accounting record, providing disk usage information for a given
- * user or group
- */
-struct lquota_acct_rec { /* 16 bytes */
-	__u64	bspace;  /* current space in use */
-	__u64	ispace;  /* current # inodes in use */
-};
-
-/*
- * Global quota index support
- * Format of a global record, providing global quota settings for a given quota
- * identifier
- */
-struct lquota_glb_rec { /* 32 bytes */
-	__u64	qbr_hardlimit; /* quota hard limit, in #inodes or kbytes */
-	__u64	qbr_softlimit; /* quota soft limit, in #inodes or kbytes */
-	__u64	qbr_time;      /* grace time, in seconds */
-	__u64	qbr_granted;   /* how much is granted to slaves, in #inodes
-				* or kbytes */
-};
-
-/*
- * Slave index support
- * Format of a slave record, recording how much space is granted to a given
- * slave
- */
-struct lquota_slv_rec { /* 8 bytes */
-	__u64	qsr_granted; /* space granted to the slave for the key=ID,
-			      * in #inodes or kbytes */
-};
-
-/* flags is shared among quota structures */
-#define LQUOTA_FLAGS_GRP	1UL  /* 0 is user, 1 is group */
-#define LQUOTA_FLAGS_BLK	2UL  /* 0 is inode, 1 is block */
-#define LQUOTA_FLAGS_ADJBLK	4UL  /* adjust the block qunit size */
-#define LQUOTA_FLAGS_ADJINO	8UL  /* adjust the inode qunit size */
-#define LQUOTA_FLAGS_CHG_QS	16UL /* indicate whether it has capability of
-				      * OBD_CONNECT_CHANGE_QS */
-#define LQUOTA_FLAGS_RECOVERY	32UL /* recovery is going on a uid/gid */
-#define LQUOTA_FLAGS_SETQUOTA	64UL /* being setquota on a uid/gid */
-
-/* flags is specific for quota_adjust_qunit */
-#define LQUOTA_QAQ_CREATE_LQS	(1UL << 31) /* when it is set, need create lqs */
-
-/* the status of lqs_flags in struct lustre_qunit_size */
-#define LQUOTA_QUNIT_FLAGS (LQUOTA_FLAGS_GRP | LQUOTA_FLAGS_BLK)
-
-#define QAQ_IS_GRP(qaq)		((qaq)->qaq_flags & LQUOTA_FLAGS_GRP)
-#define QAQ_IS_ADJBLK(qaq)	((qaq)->qaq_flags & LQUOTA_FLAGS_ADJBLK)
-#define QAQ_IS_ADJINO(qaq)	((qaq)->qaq_flags & LQUOTA_FLAGS_ADJINO)
-#define QAQ_IS_CREATE_LQS(qaq)	((qaq)->qaq_flags & LQUOTA_QAQ_CREATE_LQS)
-
-#define QAQ_SET_GRP(qaq)	((qaq)->qaq_flags |= LQUOTA_FLAGS_GRP)
-#define QAQ_SET_ADJBLK(qaq)	((qaq)->qaq_flags |= LQUOTA_FLAGS_ADJBLK)
-#define QAQ_SET_ADJINO(qaq)	((qaq)->qaq_flags |= LQUOTA_FLAGS_ADJINO)
-#define QAQ_SET_CREATE_LQS(qaq) ((qaq)->qaq_flags |= LQUOTA_QAQ_CREATE_LQS)
-
 /* permissions for md_perm.mp_perm */
 enum {
 	CFS_SETUID_PERM = 0x01,
@@ -1978,7 +2305,7 @@ struct mdt_rec_setattr {
 	obd_time        sa_ctime;
 	__u32           sa_attr_flags;
 	__u32           sa_mode;
-	__u32           sa_padding_2;
+	__u32		sa_bias;	/* some operation flags */
 	__u32           sa_padding_3;
 	__u32           sa_padding_4;
 	__u32           sa_padding_5;
@@ -2041,17 +2368,20 @@ extern void lustre_swab_mdt_rec_setattr (struct mdt_rec_setattr *sa);
 					   * anymore, reserve this flags
 					   * just for preventing such bit
 					   * to be reused. */
-#define MDS_CREATE_RMT_ACL    01000000000 /* indicate create on remote server
-					   * with default ACL */
-#define MDS_CREATE_SLAVE_OBJ  02000000000 /* indicate create slave object
-					   * actually, this is for create, not
-					   * conflict with other open flags */
+
 #define MDS_OPEN_LOCK         04000000000 /* This open requires open lock */
 #define MDS_OPEN_HAS_EA      010000000000 /* specify object create pattern */
 #define MDS_OPEN_HAS_OBJS    020000000000 /* Just set the EA the obj exist */
 #define MDS_OPEN_NORESTORE  0100000000000ULL /* Do not restore file at open */
 #define MDS_OPEN_NEWSTRIPE  0200000000000ULL /* New stripe needed (restripe or
 					      * hsm restore) */
+#define MDS_OPEN_VOLATILE   0400000000000ULL /* File is volatile = created
+						unlinked */
+#define MDS_OPEN_LEASE     01000000000000ULL /* Open the file and grant lease
+					      * delegation; succeeds if it is
+					      * not being opened in a
+					      * conflicting mode.
+					      */
+#define MDS_OPEN_RELEASE   02000000000000ULL /* Open the file for HSM release */

 /* permission for create non-directory file */
 #define MAY_CREATE      (1 << 7)
@@ -2070,17 +2400,20 @@ extern void lustre_swab_mdt_rec_setattr (struct mdt_rec_setattr *sa);
 /* lfs rgetfacl permission check */
 #define MAY_RGETFACL    (1 << 14)

-enum {
-	MDS_CHECK_SPLIT		= 1 << 0,
-	MDS_CROSS_REF		= 1 << 1,
-	MDS_VTX_BYPASS		= 1 << 2,
-	MDS_PERM_BYPASS		= 1 << 3,
-	MDS_SOM			= 1 << 4,
-	MDS_QUOTA_IGNORE	= 1 << 5,
-	MDS_CLOSE_CLEANUP	= 1 << 6,
-	MDS_KEEP_ORPHAN		= 1 << 7,
-	MDS_RECOV_OPEN		= 1 << 8,
-	MDS_UNLINK_DESTROY	= 1 << 9, /* Destory ost object in mdd_unlink */
+enum mds_op_bias {
+	MDS_CHECK_SPLIT		= 1 << 0,
+	MDS_CROSS_REF		= 1 << 1,
+	MDS_VTX_BYPASS		= 1 << 2,
+	MDS_PERM_BYPASS		= 1 << 3,
+	MDS_SOM			= 1 << 4,
+	MDS_QUOTA_IGNORE	= 1 << 5,
+	/* Was MDS_CLOSE_CLEANUP (1 << 6), no longer used */
+	MDS_KEEP_ORPHAN		= 1 << 7,
+	MDS_RECOV_OPEN		= 1 << 8,
+	MDS_DATA_MODIFIED	= 1 << 9,
+	MDS_CREATE_VOLATILE	= 1 << 10,
+	MDS_OWNEROVERRIDE	= 1 << 11,
+	MDS_HSM_RELEASE		= 1 << 12,
 };

 /* instance of mdt_reint_rec */
@@ -2109,7 +2442,7 @@ struct mdt_rec_create {
 	 * extend cr_flags size without breaking 1.8 compat */
 	__u32           cr_flags_l;     /* for use with open, low  32 bits  */
 	__u32           cr_flags_h;     /* for use with open, high 32 bits */
-	__u32           cr_padding_3;   /* rr_padding_3 */
+	__u32		cr_umask;	/* umask for create */
 	__u32           cr_padding_4;   /* rr_padding_4 */
 };

@@ -2243,29 +2576,29 @@ struct mdt_rec_setxattr {
  * rr_padding_x fields, then update lustre_swab_mdt_rec_reint() also.
 */
 struct mdt_rec_reint {
-	__u32           rr_opcode;
-	__u32           rr_cap;
-	__u32           rr_fsuid;
-	__u32           rr_fsuid_h;
-	__u32           rr_fsgid;
-	__u32           rr_fsgid_h;
-	__u32           rr_suppgid1;
-	__u32           rr_suppgid1_h;
-	__u32           rr_suppgid2;
-	__u32           rr_suppgid2_h;
-	struct lu_fid   rr_fid1;
-	struct lu_fid   rr_fid2;
-	obd_time        rr_mtime;
-	obd_time        rr_atime;
-	obd_time        rr_ctime;
-	__u64           rr_size;
-	__u64           rr_blocks;
-	__u32           rr_bias;
-	__u32           rr_mode;
-	__u32           rr_flags;
-	__u32           rr_padding_2; /* also fix lustre_swab_mdt_rec_reint */
-	__u32           rr_padding_3; /* also fix lustre_swab_mdt_rec_reint */
-	__u32           rr_padding_4; /* also fix lustre_swab_mdt_rec_reint */
+	__u32		rr_opcode;
+	__u32		rr_cap;
+	__u32		rr_fsuid;
+	__u32		rr_fsuid_h;
+	__u32		rr_fsgid;
+	__u32		rr_fsgid_h;
+	__u32		rr_suppgid1;
+	__u32		rr_suppgid1_h;
+	__u32		rr_suppgid2;
+	__u32		rr_suppgid2_h;
+	struct lu_fid	rr_fid1;
+	struct lu_fid	rr_fid2;
+	obd_time	rr_mtime;
+	obd_time	rr_atime;
+	obd_time	rr_ctime;
+	__u64		rr_size;
+	__u64		rr_blocks;
+	__u32		rr_bias;
+	__u32		rr_mode;
+	__u32		rr_flags;
+	__u32		rr_flags_h;
+	__u32		rr_umask;
+	__u32		rr_padding_4; /* also fix lustre_swab_mdt_rec_reint */
 };

 extern void lustre_swab_mdt_rec_reint(struct mdt_rec_reint *rr);
@@ -2375,6 +2708,10 @@ struct ldlm_res_id {
 	__u64 name[RES_NAME_SIZE];
 };

+#define DLDLMRES	"["LPX64":"LPX64":"LPX64"]."LPX64
+#define PLDLMRES(res)	(res)->lr_name.name[0], (res)->lr_name.name[1], \
+			(res)->lr_name.name[2], (res)->lr_name.name[3]
+
 extern void lustre_swab_ldlm_res_id (struct ldlm_res_id *id);

 static inline int ldlm_res_eq(const struct ldlm_res_id *res0,
@@ -2454,14 +2791,11 @@ typedef union {

 extern void lustre_swab_ldlm_policy_data (ldlm_wire_policy_data_t *d);

-/* Similarly to ldlm_wire_policy_data_t, there is one common swabber for all
- * LVB types. As a result, any new LVB structure must match the fields of the
- * ost_lvb structure. */
-union ldlm_wire_lvb {
-	struct ost_lvb	l_ost;
+union ldlm_gl_desc {
+	struct ldlm_gl_lquota_desc	lquota_desc;
 };

-extern void lustre_swab_lvb(union ldlm_wire_lvb *);
+extern void lustre_swab_gl_desc(union ldlm_gl_desc *);

 struct ldlm_intent {
 	__u64 opc;
@@ -2520,6 +2854,9 @@ struct ldlm_reply {

 extern void lustre_swab_ldlm_reply (struct ldlm_reply *r);

+#define ldlm_flags_to_wire(flags)    ((__u32)(flags))
+#define ldlm_flags_from_wire(flags)  ((__u64)(flags))
+
 /*
  * Opcodes for mountconf (mgs and mgc)
  */
@@ -2631,8 +2968,7 @@ typedef enum {

 /** Identifier for a single log object */
 struct llog_logid {
-	__u64			lgl_oid;
-	__u64			lgl_oseq;
+	struct ost_id		lgl_oi;
 	__u32			lgl_ogen;
 } __attribute__((packed));

@@ -2668,6 +3004,7 @@ typedef enum {
 	/* LLOG_JOIN_REC = LLOG_OP_MAGIC | 0x50000, obsolete 1.8.0 */
 	CHANGELOG_REC	= LLOG_OP_MAGIC | 0x60000,
 	CHANGELOG_USER_REC = LLOG_OP_MAGIC | 0x70000,
+	HSM_AGENT_REC	= LLOG_OP_MAGIC | 0x80000,
 	LLOG_HDR_MAGIC	= LLOG_OP_MAGIC | 0x45539,
 	LLOG_LOGID_MAGIC = LLOG_OP_MAGIC | 0x4553b,
 } llog_op_type;
@@ -2691,6 +3028,14 @@ struct llog_rec_tail {
 	__u32 lrt_index;
 };

+/* Record data follows just after the record header */
+#define REC_DATA(ptr) \
+	((void *)((char *)ptr + sizeof(struct llog_rec_hdr)))
+
+#define REC_DATA_LEN(rec) \
+	(rec->lrh_len - sizeof(struct llog_rec_hdr) - \
+	 sizeof(struct llog_rec_tail))
+
 struct llog_logid_rec {
 	struct llog_rec_hdr	lid_hdr;
 	struct llog_logid	lid_id;
@@ -2720,8 +3065,7 @@ struct llog_unlink64_rec {

 struct llog_setattr64_rec {
 	struct llog_rec_hdr	lsr_hdr;
-	obd_id			lsr_oid;
-	obd_seq			lsr_oseq;
+	struct ost_id		lsr_oi;
 	__u32			lsr_uid;
 	__u32			lsr_uid_h;
 	__u32			lsr_gid;
@@ -2780,6 +3124,52 @@ struct llog_changelog_user_rec {
 	struct llog_rec_tail	cur_tail;
 } __attribute__((packed));

+enum agent_req_status {
+	ARS_WAITING,
+	ARS_STARTED,
+	ARS_FAILED,
+	ARS_CANCELED,
+	ARS_SUCCEED,
+};
+
+static inline char *agent_req_status2name(enum agent_req_status ars)
+{
+	switch (ars) {
+	case ARS_WAITING:
+		return "WAITING";
+	case ARS_STARTED:
+		return "STARTED";
+	case ARS_FAILED:
+		return "FAILED";
+	case ARS_CANCELED:
+		return "CANCELED";
+	case ARS_SUCCEED:
+		return "SUCCEED";
+	default:
+		return "UNKNOWN";
+	}
+}
+
+static inline bool agent_req_in_final_state(enum agent_req_status ars)
+{
+	return ((ars == ARS_SUCCEED) || (ars == ARS_FAILED) ||
+		(ars == ARS_CANCELED));
+}
+
+struct llog_agent_req_rec {
+	struct llog_rec_hdr	arr_hdr;	/**< record header */
+	__u32			arr_status;	/**< status of the request */
+						/* must match enum
+						 * agent_req_status */
+	__u32			arr_archive_id;	/**< backend archive number */
+	__u64			arr_flags;	/**< req flags */
+	__u64			arr_compound_id;/**< compound cookie */
+	__u64			arr_req_create;	/**< req. creation time */
+	__u64			arr_req_change;	/**< req. status change time */
+	struct hsm_action_item	arr_hai;	/**< req. to the agent */
+	struct llog_rec_tail	arr_tail;	/**< record tail, for sizeof only */
+} __attribute__((packed));
+
 /* Old llog gen for compatibility */
 struct llog_gen {
 	__u64 mnt_cnt;
@@ -2870,7 +3260,7 @@ struct llogd_conn_body {

 /* Note: 64-bit types are 64-bit aligned in structure */
 struct obdo {
 	obd_valid		o_valid;	/* hot fields in this obdo */
-	struct ost_id           o_oi;
+	struct ost_id		o_oi;
 	obd_id			o_parent_seq;
 	obd_size		o_size;	 /* o_size-o_blocks == ost_lvb */
 	obd_time		o_mtime;
@@ -2887,7 +3277,8 @@ struct obdo {
 	obd_flag		o_flags;
 	obd_count		o_nlink;	/* brw: checksum */
 	obd_count		o_parent_oid;
-	obd_count               o_misc;         /* brw: o_dropped */
+	obd_count		o_misc;		/* brw: o_dropped */
+	__u64			o_ioepoch;	/* epoch in ost writes */
 	__u32			o_stripe_idx;	/* holds stripe idx */
 	__u32			o_parent_ver;
@@ -2895,47 +3286,67 @@ struct obdo {
 					 * locks */
 	struct llog_cookie	o_lcookie;	/* destroy: unlink cookie from
 						 * MDS */
-	__u32                   o_uid_h;
-	__u32                   o_gid_h;
+	__u32			o_uid_h;
+	__u32			o_gid_h;

-	__u64                   o_data_version; /* getattr: sum of iversion for
-                                                 * each stripe.
-                                                 * brw: grant space consumed on
-                                                 * the client for the write */
-	__u64                   o_padding_4;
-	__u64                   o_padding_5;
-	__u64                   o_padding_6;
+	__u64			o_data_version; /* getattr: sum of iversion for
+						 * each stripe.
+						 * brw: grant space consumed on
+						 * the client for the write */
+	__u64			o_padding_4;
+	__u64			o_padding_5;
+	__u64			o_padding_6;
 };

-#define o_id     o_oi.oi_id
-#define o_seq    o_oi.oi_seq
 #define o_dirty   o_blocks
 #define o_undirty o_mode
 #define o_dropped o_misc
 #define o_cksum   o_nlink
 #define o_grant_used o_data_version

-static inline void lustre_set_wire_obdo(struct obdo *wobdo, struct obdo *lobdo)
-{
-	memcpy(wobdo, lobdo, sizeof(*lobdo));
-	wobdo->o_flags &= ~OBD_FL_LOCAL_MASK;
+static inline void lustre_set_wire_obdo(struct obd_connect_data *ocd,
+					struct obdo *wobdo,
+					const struct obdo *lobdo)
+{
+	*wobdo = *lobdo;
+	wobdo->o_flags &= ~OBD_FL_LOCAL_MASK;
+	if (ocd == NULL)
+		return;
+
+	if (unlikely(!(ocd->ocd_connect_flags & OBD_CONNECT_FID)) &&
+	    fid_seq_is_echo(ostid_seq(&lobdo->o_oi))) {
+		/* Currently OBD_FL_OSTID will only be used when a 2.4 echo
+		 * client communicates with a pre-2.4 server */
+		wobdo->o_oi.oi.oi_id = fid_oid(&lobdo->o_oi.oi_fid);
+		wobdo->o_oi.oi.oi_seq = fid_seq(&lobdo->o_oi.oi_fid);
+	}
 }

-static inline void lustre_get_wire_obdo(struct obdo *lobdo, struct obdo *wobdo)
+static inline void lustre_get_wire_obdo(struct obd_connect_data *ocd,
+					struct obdo *lobdo,
+					const struct obdo *wobdo)
 {
 	obd_flag local_flags = 0;

 	if (lobdo->o_valid & OBD_MD_FLFLAGS)
 		local_flags = lobdo->o_flags & OBD_FL_LOCAL_MASK;

-	LASSERT(!(wobdo->o_flags & OBD_FL_LOCAL_MASK));
-
-	memcpy(lobdo, wobdo, sizeof(*lobdo));
-	if (local_flags != 0) {
-		lobdo->o_valid |= OBD_MD_FLFLAGS;
-		lobdo->o_flags &= ~OBD_FL_LOCAL_MASK;
-		lobdo->o_flags |= local_flags;
-	}
+	*lobdo = *wobdo;
+	if (local_flags != 0) {
+		lobdo->o_valid |= OBD_MD_FLFLAGS;
+		lobdo->o_flags &= ~OBD_FL_LOCAL_MASK;
+		lobdo->o_flags |= local_flags;
+	}
+	if (ocd == NULL)
+		return;
+
+	if (unlikely(!(ocd->ocd_connect_flags & OBD_CONNECT_FID)) &&
+	    fid_seq_is_echo(wobdo->o_oi.oi.oi_seq)) {
+		/* see above */
+		lobdo->o_oi.oi_fid.f_seq = wobdo->o_oi.oi.oi_seq;
+		lobdo->o_oi.oi_fid.f_oid = wobdo->o_oi.oi.oi_id;
+		lobdo->o_oi.oi_fid.f_ver = 0;
+	}
 }

 extern void lustre_swab_obdo (struct obdo *o);
@@ -2967,6 +3378,7 @@ extern void lustre_swab_llogd_body (struct llogd_body *d);
 extern void lustre_swab_llog_hdr (struct llog_log_hdr *h);
 extern void lustre_swab_llogd_conn_body (struct llogd_conn_body *d);
 extern void lustre_swab_llog_rec(struct llog_rec_hdr *rec);
+extern void lustre_swab_llog_id(struct llog_logid *lid);

 struct lustre_cfg;
 extern void lustre_swab_lustre_cfg(struct lustre_cfg *lcfg);
@@ -3053,6 +3465,7 @@ struct lu_idxpage {
 	 * For the time being, we only support fixed-size key & record. */
 	char	lip_entries[0];
 };
+extern void lustre_swab_lip_header(struct lu_idxpage *lip);

 #define LIP_HDR_SIZE (offsetof(struct lu_idxpage, lip_entries))

@@ -3063,67 +3476,6 @@ union lu_page {
 	char		 lp_array[LU_PAGE_SIZE];
 };

-/* this will be used when OBD_CONNECT_CHANGE_QS is set */
-struct qunit_data {
-	/**
-	 * ID appiles to (uid, gid)
-	 */
-	__u32	qd_id;
-	/**
-	 * LQUOTA_FLAGS_* affect the responding bits
-	 */
-	__u32	qd_flags;
-	/**
-	 * acquire/release count (bytes for block quota)
-	 */
-	__u64	qd_count;
-	/**
-	 * when a master returns the reply to a slave, it will
-	 * contain the current corresponding qunit size
-	 */
-	__u64	qd_qunit;
-	__u64	padding;
-};
-
-#define QDATA_IS_GRP(qdata)        ((qdata)->qd_flags & LQUOTA_FLAGS_GRP)
-#define QDATA_IS_BLK(qdata)        ((qdata)->qd_flags & LQUOTA_FLAGS_BLK)
-#define QDATA_IS_ADJBLK(qdata)     ((qdata)->qd_flags & LQUOTA_FLAGS_ADJBLK)
-#define QDATA_IS_ADJINO(qdata)     ((qdata)->qd_flags & LQUOTA_FLAGS_ADJINO)
-#define QDATA_IS_CHANGE_QS(qdata)  ((qdata)->qd_flags & LQUOTA_FLAGS_CHG_QS)
-
-#define QDATA_SET_GRP(qdata)       ((qdata)->qd_flags |= LQUOTA_FLAGS_GRP)
-#define QDATA_SET_BLK(qdata)       ((qdata)->qd_flags |= LQUOTA_FLAGS_BLK)
-#define QDATA_SET_ADJBLK(qdata)    ((qdata)->qd_flags |= LQUOTA_FLAGS_ADJBLK)
-#define QDATA_SET_ADJINO(qdata)    ((qdata)->qd_flags |= LQUOTA_FLAGS_ADJINO)
-#define QDATA_SET_CHANGE_QS(qdata) ((qdata)->qd_flags |= LQUOTA_FLAGS_CHG_QS)
-
-#define QDATA_CLR_GRP(qdata)       ((qdata)->qd_flags &= ~LQUOTA_FLAGS_GRP)
-#define QDATA_CLR_CHANGE_QS(qdata) ((qdata)->qd_flags &= ~LQUOTA_FLAGS_CHG_QS)
-
-extern void lustre_swab_qdata(struct qunit_data *d);
-extern struct qunit_data *quota_get_qdata(void *req, int is_req, int is_exp);
-extern int quota_copy_qdata(void *request, struct qunit_data *qdata,
-			    int is_req, int is_exp);
-
-typedef enum {
-	QUOTA_DQACQ	= 601,
-	QUOTA_DQREL	= 602,
-	QUOTA_LAST_OPC
-} quota_cmd_t;
-#define QUOTA_FIRST_OPC	QUOTA_DQACQ
-
-#define QUOTA_REQUEST   1
-#define QUOTA_REPLY     0
-#define QUOTA_EXPORT    1
-#define QUOTA_IMPORT    0
-
-/* quota check function */
-#define QUOTA_RET_OK           0 /**< return successfully */
-#define QUOTA_RET_NOQUOTA      1 /**< not support quota */
-#define QUOTA_RET_NOLIMIT      2 /**< quota limit isn't set */
-#define QUOTA_RET_ACQUOTA      4 /**< need to acquire extra quota */
-
-
 /* security opcodes */
 typedef enum {
 	SEC_CTX_INIT		= 801,
@@ -3245,6 +3597,154 @@ struct getinfo_fid2path {

 void lustre_swab_fid2path (struct getinfo_fid2path *gf);

+enum {
+	LAYOUT_INTENT_ACCESS	= 0,
+	LAYOUT_INTENT_READ	= 1,
+	LAYOUT_INTENT_WRITE	= 2,
+	LAYOUT_INTENT_GLIMPSE	= 3,
+	LAYOUT_INTENT_TRUNC	= 4,
+	LAYOUT_INTENT_RELEASE	= 5,
+	LAYOUT_INTENT_RESTORE	= 6
+};
+
+/* enqueue layout lock with intent */
+struct layout_intent {
+	__u32 li_opc; /* intent operation for enqueue, read, write etc */
+	__u32 li_flags;
+	__u64 li_start;
+	__u64 li_end;
+};
+
+void lustre_swab_layout_intent(struct layout_intent *li);
+
+/**
+ * On the wire version of hsm_progress structure.
+ *
+ * Contains the userspace hsm_progress and some internal fields.
+ */
+struct hsm_progress_kernel {
+	/* Field taken from struct hsm_progress */
+	lustre_fid		hpk_fid;
+	__u64			hpk_cookie;
+	struct hsm_extent	hpk_extent;
+	__u16			hpk_flags;
+	__u16			hpk_errval; /* positive val */
+	__u32			hpk_padding1;
+	/* Additional fields */
+	__u64			hpk_data_version;
+	__u64			hpk_padding2;
+} __attribute__((packed));
+
+extern void lustre_swab_hsm_user_state(struct hsm_user_state *hus);
+extern void lustre_swab_hsm_current_action(struct hsm_current_action *action);
+extern void lustre_swab_hsm_progress_kernel(struct hsm_progress_kernel *hpk);
+extern void lustre_swab_hsm_user_item(struct hsm_user_item *hui);
+extern void lustre_swab_hsm_request(struct hsm_request *hr);
+
+/**
+ * These are the object update opcodes under UPDATE_OBJ, currently used by
+ * cross-ref operations between MDTs.
+ *
+ * During a cross-ref operation, the master MDT (the MDT the client sends the
+ * request to) will disassemble the operation into object updates, then OSP
+ * will send these updates to the remote MDT to be executed.
+ *
+ * Update request format
+ * magic:	UPDATE_BUFFER_MAGIC_V1
+ * count:	how many updates are packed in the req.
+ * bufs[0]:	the packed object updates follow.
+ * update[0]:
+ *		type: object_update_op, the op code of the update
+ *		fid:  the object fid of the update.
+ *		lens/bufs: other parameters of the update.
+ * update[1]:
+ *		type: object_update_op, the op code of the update
+ *		fid:  the object fid of the update.
+ *		lens/bufs: other parameters of the update.
+ * ..........
+ * update[7]:
+ *		type: object_update_op, the op code of the update
+ *		fid:  the object fid of the update.
+ *		lens/bufs: other parameters of the update.
+ * Currently at most 8 updates are packed per object update request
+ * (see the illustrative packing sketch appended at the end of this diff).
+ *
+ *******************************************************************
+ * update reply format:
+ *
+ * ur_version:	UPDATE_REPLY_V1
+ * ur_count:	the count of replies, usually equal to the number of
+ *		updates in the request.
+ * ur_lens:	the reply length of each object update.
+ *
+ * replies:	1st update reply  [4bytes_ret: other body]
+ *		2nd update reply  [4bytes_ret: other body]
+ *		.....
+ *		nth update reply  [4bytes_ret: other body]
+ *
+ * For each update reply, the format is:
+ *		result (4 bytes) : other body
+ */
+
+#define UPDATE_MAX_OPS		10
+#define UPDATE_BUFFER_MAGIC_V1	0xBDDE0001
+#define UPDATE_BUFFER_MAGIC	UPDATE_BUFFER_MAGIC_V1
+#define UPDATE_BUF_COUNT	8
+enum object_update_op {
+	OBJ_CREATE		= 1,
+	OBJ_DESTROY		= 2,
+	OBJ_REF_ADD		= 3,
+	OBJ_REF_DEL		= 4,
+	OBJ_ATTR_SET		= 5,
+	OBJ_ATTR_GET		= 6,
+	OBJ_XATTR_SET		= 7,
+	OBJ_XATTR_GET		= 8,
+	OBJ_INDEX_LOOKUP	= 9,
+	OBJ_INDEX_INSERT	= 10,
+	OBJ_INDEX_DELETE	= 11,
+	OBJ_LAST
+};
+
+struct update {
+	__u32		u_type;
+	__u32		u_batchid;
+	struct lu_fid	u_fid;
+	__u32		u_lens[UPDATE_BUF_COUNT];
+	__u32		u_bufs[0];
+};
+
+struct update_buf {
+	__u32	ub_magic;
+	__u32	ub_count;
+	__u32	ub_bufs[0];
+};
+
+#define UPDATE_REPLY_V1	0x00BD0001
+struct update_reply {
+	__u32	ur_version;
+	__u32	ur_count;
+	__u32	ur_lens[0];
+};
+
+void lustre_swab_update_buf(struct update_buf *ub);
+void lustre_swab_update_reply_buf(struct update_reply *ur);
+
+/** layout swap request structure
+ * fid1 and fid2 are in mdt_body
+ */
+struct mdc_swap_layouts {
+	__u64	msl_flags;
+} __packed;
+
+void lustre_swab_swap_layouts(struct mdc_swap_layouts *msl);
+
+struct close_data {
+	struct lustre_handle	cd_handle;
+	struct lu_fid		cd_fid;
+	__u64			cd_data_version;
+	__u64			cd_reserved[8];
+};
+
+void lustre_swab_close_data(struct close_data *data);

 #endif
 /** @} lustreidl */
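
Editor's appendix: the sketches below are stand-alone user-space C illustrations of a few wire definitions touched by this diff. Each one uses simplified mirror structs (uint32_t/uint64_t instead of __u32/__u64, and only the fields the example needs), so none of them is the kernel wire layout or the in-tree implementation; they only demonstrate how the definitions above fit together.

First, the DQACQ flag handling and the qb_qunit alias: a slave sets QUOTA_DQACQ_FL_ACQ in the request body, and on the reply the same qb_usage slot carries back the master's current qunit. The struct and values here are illustrative only.

/* gcc -std=gnu99 -o dqacq_sketch dqacq_sketch.c */
#include <stdint.h>
#include <stdio.h>

#define QUOTA_DQACQ_FL_ACQ	0x1	/* acquire quota */
#define QUOTA_DQACQ_FL_PREACQ	0x2	/* pre-acquire */
#define QUOTA_DQACQ_FL_REL	0x4	/* release quota */
#define QUOTA_DQACQ_FL_REPORT	0x8	/* report usage */

/* reduced stand-in for struct quota_body; NOT the wire layout */
struct qb_sketch {
	uint32_t qb_flags;
	uint64_t qb_count;	/* acquire/release count (kbytes/inodes) */
	uint64_t qb_usage;	/* slave usage in the request */
};
#define qb_qunit qb_usage	/* same field aliasing as in the header */

int main(void)
{
	struct qb_sketch req = {
		.qb_flags = QUOTA_DQACQ_FL_ACQ | QUOTA_DQACQ_FL_PREACQ,
		.qb_count = 1024,	/* kbytes wanted (made-up value) */
		.qb_usage = 2048,	/* current slave usage */
	};
	struct qb_sketch rep = req;

	rep.qb_qunit = 4096;	/* master advertises its current qunit */
	if (req.qb_flags & QUOTA_DQACQ_FL_ACQ)
		printf("acquire %llu kbytes, new qunit %llu\n",
		       (unsigned long long)req.qb_count,
		       (unsigned long long)rep.qb_qunit);
	return 0;
}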
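A companion sketch for the quota glimpse LVB: after a glimpse callback on a per-ID quota lock, a slave would test LQUOTA_FL_EDQUOT in lvb_flags to learn that the master has flagged this ID as out of quota. The helper name and the reduced struct are assumptions for illustration.

#include <stdbool.h>
#include <stdint.h>

#define LQUOTA_FL_EDQUOT 0x1	/* user/group out of quota space on QMT */

/* reduced stand-in for struct lquota_lvb: only the flag word is kept */
struct lquota_lvb_sketch {
	uint64_t lvb_flags;
};

/* hypothetical helper: is this ID over quota according to the master? */
static bool id_is_edquot(const struct lquota_lvb_sketch *lvb)
{
	return (lvb->lvb_flags & LQUOTA_FL_EDQUOT) != 0;
}

int main(void)
{
	struct lquota_lvb_sketch lvb = { .lvb_flags = LQUOTA_FL_EDQUOT };

	return id_is_edquot(&lvb) ? 0 : 1;	/* 0 means "over quota" here */
}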
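The MDS_INODELOCK_MAXSHIFT bump from 2 to 5 is worth a quick arithmetic check: MDS_INODELOCK_FULL expands to (1 << (5 + 1)) - 1 = 0x3f, which is exactly the OR of the six inodelock bits now defined (LOOKUP through XATTR). This sketch just verifies that identity.

#include <assert.h>
#include <stdio.h>

#define MDS_INODELOCK_LOOKUP	0x000001
#define MDS_INODELOCK_UPDATE	0x000002
#define MDS_INODELOCK_OPEN	0x000004
#define MDS_INODELOCK_LAYOUT	0x000008
#define MDS_INODELOCK_PERM	0x000010
#define MDS_INODELOCK_XATTR	0x000020
#define MDS_INODELOCK_MAXSHIFT	5
#define MDS_INODELOCK_FULL	((1 << (MDS_INODELOCK_MAXSHIFT + 1)) - 1)

int main(void)
{
	/* (1 << 6) - 1 == 0x3f: FULL covers every defined bit, no more */
	assert(MDS_INODELOCK_FULL ==
	       (MDS_INODELOCK_LOOKUP | MDS_INODELOCK_UPDATE |
		MDS_INODELOCK_OPEN | MDS_INODELOCK_LAYOUT |
		MDS_INODELOCK_PERM | MDS_INODELOCK_XATTR));
	printf("MDS_INODELOCK_FULL = %#x\n", MDS_INODELOCK_FULL);
	return 0;
}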
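The mdt_body change replaces the unused "ino" field with t_state, a bitmask of enum md_transient_state. A client-side check for a file under HSM restore would look roughly like this; the reduced struct and helper name are hypothetical.

#include <stdbool.h>
#include <stdint.h>

enum md_transient_state {
	MS_RESTORE = (1 << 0),	/* restore is running */
};

/* stand-in for struct mdt_body, keeping only the field used here */
struct mdt_body_sketch {
	uint64_t t_state;	/* was "ino" until 2.4.0 */
};

static bool file_is_being_restored(const struct mdt_body_sketch *body)
{
	return (body->t_state & MS_RESTORE) != 0;
}

int main(void)
{
	struct mdt_body_sketch body = { .t_state = MS_RESTORE };

	return file_is_being_restored(&body) ? 0 : 1;
}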
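The new DLDLMRES/PLDLMRES pair is a printf format/argument combo for dumping a DLM resource name. In the kernel the format is built from LPX64 and PLDLMRES takes a resource (hence (res)->lr_name.name[...]); this user-space spelling uses PRIx64 and passes the res_id directly, and the sample name values are arbitrary.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define RES_NAME_SIZE 4

struct ldlm_res_id {
	uint64_t name[RES_NAME_SIZE];
};

/* user-space equivalent of the header's DLDLMRES/PLDLMRES pair */
#define DLDLMRES "[%#" PRIx64 ":%#" PRIx64 ":%#" PRIx64 "].%#" PRIx64
#define PLDLMRES(res) (res)->name[0], (res)->name[1], \
		      (res)->name[2], (res)->name[3]

int main(void)
{
	/* arbitrary example: FID-derived resource name */
	struct ldlm_res_id res = { .name = { 0x200000007ULL, 0x1, 0, 0 } };

	printf("resource " DLDLMRES "\n", PLDLMRES(&res));
	return 0;
}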
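The REC_DATA/REC_DATA_LEN macros locate an llog record's payload between its header and tail. This sketch mirrors them over simplified header/tail structs (the in-tree llog_rec_hdr has more context around it); note the macro arguments are parenthesized here for hygiene, which the header's versions omit.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* simplified mirrors of the llog record framing structs */
struct llog_rec_hdr {
	uint32_t lrh_len;
	uint32_t lrh_index;
	uint32_t lrh_type;
	uint32_t lrh_id;
};

struct llog_rec_tail {
	uint32_t lrt_len;
	uint32_t lrt_index;
};

#define REC_DATA(ptr) \
	((void *)((char *)(ptr) + sizeof(struct llog_rec_hdr)))
#define REC_DATA_LEN(rec) \
	((rec)->lrh_len - sizeof(struct llog_rec_hdr) - \
	 sizeof(struct llog_rec_tail))

int main(void)
{
	/* build a record carrying an 8-byte payload */
	uint32_t len = sizeof(struct llog_rec_hdr) + 8 +
		       sizeof(struct llog_rec_tail);
	struct llog_rec_hdr *rec = calloc(1, len);

	rec->lrh_len = len;
	memcpy(REC_DATA(rec), "payload", 8);	/* 7 chars + NUL */
	printf("payload bytes: %zu\n", (size_t)REC_DATA_LEN(rec));
	free(rec);
	return 0;
}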
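The agent_req_status helpers from the HSM llog additions can be exercised directly; this user-space copy is const-qualified (the header returns a plain char *) and the main() just walks every state.

#include <stdbool.h>
#include <stdio.h>

enum agent_req_status {
	ARS_WAITING,
	ARS_STARTED,
	ARS_FAILED,
	ARS_CANCELED,
	ARS_SUCCEED,
};

static const char *agent_req_status2name(enum agent_req_status ars)
{
	switch (ars) {
	case ARS_WAITING:	return "WAITING";
	case ARS_STARTED:	return "STARTED";
	case ARS_FAILED:	return "FAILED";
	case ARS_CANCELED:	return "CANCELED";
	case ARS_SUCCEED:	return "SUCCEED";
	default:		return "UNKNOWN";
	}
}

static bool agent_req_in_final_state(enum agent_req_status ars)
{
	return ars == ARS_SUCCEED || ars == ARS_FAILED || ars == ARS_CANCELED;
}

int main(void)
{
	enum agent_req_status s;

	/* WAITING/STARTED are live; FAILED/CANCELED/SUCCEED are final */
	for (s = ARS_WAITING; s <= ARS_SUCCEED; s++)
		printf("%-8s final=%d\n", agent_req_status2name(s),
		       agent_req_in_final_state(s));
	return 0;
}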
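The reworked lustre_get_wire_obdo() has one subtle step: local-only flag bits already held in the destination obdo must survive the wholesale copy from the wire obdo. This sketch isolates just that flag-preservation logic with made-up constant values (the real OBD_MD_FLFLAGS/OBD_FL_LOCAL_MASK values differ) and none of the OSTID/FID translation.

#include <stdint.h>
#include <stdio.h>

/* stand-in constants; the real values in the header differ */
#define OBD_MD_FLFLAGS		0x1ULL
#define OBD_FL_LOCAL_MASK	0xf0000000U

struct obdo_sketch {
	uint64_t o_valid;
	uint32_t o_flags;
};

static void get_wire_obdo(struct obdo_sketch *lobdo,
			  const struct obdo_sketch *wobdo)
{
	uint32_t local_flags = 0;

	/* save local-only bits before they are clobbered */
	if (lobdo->o_valid & OBD_MD_FLFLAGS)
		local_flags = lobdo->o_flags & OBD_FL_LOCAL_MASK;

	*lobdo = *wobdo;			/* wholesale copy */

	if (local_flags != 0) {
		lobdo->o_valid |= OBD_MD_FLFLAGS;
		lobdo->o_flags &= ~OBD_FL_LOCAL_MASK;
		lobdo->o_flags |= local_flags;	/* restore local bits */
	}
}

int main(void)
{
	struct obdo_sketch l = { .o_valid = OBD_MD_FLFLAGS,
				 .o_flags = 0x10000000 };
	struct obdo_sketch w = { .o_valid = 0, .o_flags = 0x1 };

	get_wire_obdo(&l, &w);
	printf("o_flags after copy: %#x\n", l.o_flags);	/* 0x10000001 */
	return 0;
}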
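Finally, the packing sketch referenced from the UPDATE_OBJ format comment above. It lays out one update inside an update_buf following the described framing (magic, count, then packed struct update entries with per-parameter lengths). The real packing helpers live outside this header, so everything below — the FID value, the parameter, and the layout decisions beyond what the comment states — is an assumption; the zero-length arrays are the same GNU extension the header uses.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define UPDATE_BUFFER_MAGIC_V1	0xBDDE0001
#define UPDATE_BUF_COUNT	8

struct lu_fid {
	uint64_t f_seq;
	uint32_t f_oid;
	uint32_t f_ver;
};

struct update {
	uint32_t	u_type;		/* an object_update_op value */
	uint32_t	u_batchid;
	struct lu_fid	u_fid;
	uint32_t	u_lens[UPDATE_BUF_COUNT];
	uint32_t	u_bufs[0];	/* parameter data follows */
};

struct update_buf {
	uint32_t	ub_magic;
	uint32_t	ub_count;
	uint32_t	ub_bufs[0];	/* packed struct update entries */
};

int main(void)
{
	const char param[] = "name";	/* hypothetical parameter buffer */
	size_t ulen = sizeof(struct update) + sizeof(param);
	size_t blen = sizeof(struct update_buf) + ulen;
	struct update_buf *ub = calloc(1, blen);
	struct update *u = (struct update *)ub->ub_bufs;

	ub->ub_magic = UPDATE_BUFFER_MAGIC_V1;
	ub->ub_count = 1;		/* at most 8 per the comment above */
	u->u_type = 3;			/* OBJ_REF_ADD in the enum above */
	u->u_fid.f_seq = 0x200000007ULL;
	u->u_fid.f_oid = 1;
	u->u_lens[0] = sizeof(param);
	memcpy(u->u_bufs, param, sizeof(param));

	printf("packed %u update(s) in %zu bytes, magic %#x\n",
	       ub->ub_count, blen, ub->ub_magic);
	free(ub);
	return 0;
}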