X-Git-Url: https://git.whamcloud.com/?p=fs%2Flustre-release.git;a=blobdiff_plain;f=lustre%2Finclude%2Flustre%2Flustre_idl.h;h=7e82994d1559d986f0aa06467f900dfe3b97598d;hp=efcca73d2651e562bb701183ce0b26fe6d54527b;hb=2bc5bcb7efa247fcd8cc65d013ffc9f6c33dd788;hpb=57d8d7f152b1a1cabc103193f859bd927b3fbe9c diff --git a/lustre/include/lustre/lustre_idl.h b/lustre/include/lustre/lustre_idl.h index efcca73..7e82994 100644 --- a/lustre/include/lustre/lustre_idl.h +++ b/lustre/include/lustre/lustre_idl.h @@ -27,7 +27,7 @@ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved. * Use is subject to license terms. * - * Copyright (c) 2011, 2013, Intel Corporation. + * Copyright (c) 2011, 2014, Intel Corporation. */ /* * This file is part of Lustre, http://www.lustre.org/ @@ -91,12 +91,11 @@ #ifndef _LUSTRE_IDL_H_ #define _LUSTRE_IDL_H_ -#if !defined(LASSERT) && !defined(LPU64) -#include /* for LASSERT, LPUX64, etc */ -#endif - -/* Defn's shared with user-space. */ -#include +#include /* for LPUX64, etc */ +#include +#include /* Defn's shared with user-space. */ +#include +#include /* * GENERAL STUFF @@ -129,8 +128,7 @@ //#define PTLBD_BULK_PORTAL 21 #define MDS_SETATTR_PORTAL 22 #define MDS_READPAGE_PORTAL 23 -#define MDS_MDS_PORTAL 24 - +#define OUT_PORTAL 24 #define MGC_REPLY_PORTAL 25 #define MGS_REQUEST_PORTAL 26 #define MGS_REPLY_PORTAL 27 @@ -166,8 +164,7 @@ #define LUSTRE_LOG_VERSION 0x00050000 #define LUSTRE_MGS_VERSION 0x00060000 -typedef __u32 mdsno_t; -typedef __u64 seqno_t; +/* TODO: All obd_* typedefs will be removed in last patch in series */ typedef __u64 obd_id; typedef __u64 obd_seq; typedef __s64 obd_time; @@ -195,6 +192,12 @@ struct lu_seq_range { __u32 lsr_flags; }; +struct lu_seq_range_array { + __u32 lsra_count; + __u32 lsra_padding; + struct lu_seq_range lsra_lsr[0]; +}; + #define LU_SEQ_RANGE_MDT 0x0 #define LU_SEQ_RANGE_OST 0x1 #define LU_SEQ_RANGE_ANY 0x3 @@ -206,12 +209,12 @@ static inline unsigned fld_range_type(const struct lu_seq_range *range) return range->lsr_flags & LU_SEQ_RANGE_MASK; } -static inline int fld_range_is_ost(const struct lu_seq_range *range) +static inline bool fld_range_is_ost(const struct lu_seq_range *range) { return fld_range_type(range) == LU_SEQ_RANGE_OST; } -static inline int fld_range_is_mdt(const struct lu_seq_range *range) +static inline bool fld_range_is_mdt(const struct lu_seq_range *range) { return fld_range_type(range) == LU_SEQ_RANGE_MDT; } @@ -230,7 +233,6 @@ static inline unsigned fld_range_is_any(const struct lu_seq_range *range) static inline void fld_range_set_type(struct lu_seq_range *range, unsigned flags) { - LASSERT(!(flags & ~LU_SEQ_RANGE_MASK)); range->lsr_flags |= flags; } @@ -255,7 +257,7 @@ static inline void fld_range_set_any(struct lu_seq_range *range) static inline __u64 range_space(const struct lu_seq_range *range) { - return range->lsr_end - range->lsr_start; + return range->lsr_end - range->lsr_start; } /** @@ -264,33 +266,32 @@ static inline __u64 range_space(const struct lu_seq_range *range) static inline void range_init(struct lu_seq_range *range) { - range->lsr_start = range->lsr_end = range->lsr_index = 0; + memset(range, 0, sizeof(*range)); } /** * check if given seq id \a s is within given range \a r */ -static inline int range_within(const struct lu_seq_range *range, - __u64 s) +static inline bool range_within(const struct lu_seq_range *range, + __u64 s) { - return s >= range->lsr_start && s < range->lsr_end; + return s >= range->lsr_start && s < range->lsr_end; } -static inline int 
range_is_sane(const struct lu_seq_range *range) +static inline bool range_is_sane(const struct lu_seq_range *range) { - return (range->lsr_end >= range->lsr_start); + return range->lsr_end >= range->lsr_start; } -static inline int range_is_zero(const struct lu_seq_range *range) +static inline bool range_is_zero(const struct lu_seq_range *range) { - return (range->lsr_start == 0 && range->lsr_end == 0); + return range->lsr_start == 0 && range->lsr_end == 0; } -static inline int range_is_exhausted(const struct lu_seq_range *range) - +static inline bool range_is_exhausted(const struct lu_seq_range *range) { - return range_space(range) == 0; + return range_space(range) == 0; } /* return 0 if two range have the same location */ @@ -319,8 +320,11 @@ static inline int range_compare_loc(const struct lu_seq_range *r1, * xattr. */ enum lma_compat { - LMAC_HSM = 0x00000001, - LMAC_SOM = 0x00000002, + LMAC_HSM = 0x00000001, + LMAC_SOM = 0x00000002, + LMAC_NOT_IN_OI = 0x00000004, /* the object does NOT need OI mapping */ + LMAC_FID_ON_OST = 0x00000008, /* For OST-object, its OI mapping is + * under /O//d. */ }; /** @@ -329,16 +333,18 @@ enum lma_compat { * This information is stored in lustre_mdt_attrs::lma_incompat. */ enum lma_incompat { - LMAI_RELEASED = 0x0000001, /* file is released */ - LMAI_AGENT = 0x00000002, /* agent inode */ - LMAI_REMOTE_PARENT = 0x00000004, /* the parent of the object - is on the remote MDT */ + LMAI_RELEASED = 0x00000001, /* file is released */ + LMAI_AGENT = 0x00000002, /* agent inode */ + LMAI_REMOTE_PARENT = 0x00000004, /* the parent of the object + is on the remote MDT */ + LMAI_STRIPED = 0x00000008, /* striped directory inode */ }; -#define LMA_INCOMPAT_SUPP (LMAI_AGENT | LMAI_REMOTE_PARENT) +#define LMA_INCOMPAT_SUPP (LMAI_AGENT | LMAI_REMOTE_PARENT | LMAI_STRIPED) extern void lustre_lma_swab(struct lustre_mdt_attrs *lma); extern void lustre_lma_init(struct lustre_mdt_attrs *lma, - const struct lu_fid *fid, __u32 incompat); + const struct lu_fid *fid, + __u32 compat, __u32 incompat); /** * SOM on-disk attributes stored in a separate xattr. 
*/ @@ -412,7 +418,7 @@ static inline void fid_zero(struct lu_fid *fid) memset(fid, 0, sizeof(*fid)); } -static inline obd_id fid_ver_oid(const struct lu_fid *fid) +static inline __u64 fid_ver_oid(const struct lu_fid *fid) { return ((__u64)fid_ver(fid) << 32 | fid_oid(fid)); } @@ -453,6 +459,7 @@ enum fid_seq { FID_SEQ_QUOTA = 0x200000005ULL, FID_SEQ_QUOTA_GLB = 0x200000006ULL, FID_SEQ_ROOT = 0x200000007ULL, /* Located on MDT0 */ + FID_SEQ_LAYOUT_RBTREE = 0x200000008ULL, FID_SEQ_NORMAL = 0x200000400ULL, FID_SEQ_LOV_DEFAULT = 0xffffffffffffffffULL }; @@ -472,75 +479,76 @@ enum special_oid { /** OID for FID_SEQ_DOT_LUSTRE */ enum dot_lustre_oid { - FID_OID_DOT_LUSTRE = 1UL, - FID_OID_DOT_LUSTRE_OBF = 2UL, + FID_OID_DOT_LUSTRE = 1UL, + FID_OID_DOT_LUSTRE_OBF = 2UL, + FID_OID_DOT_LUSTRE_LPF = 3UL, }; -static inline int fid_seq_is_mdt0(obd_seq seq) +static inline bool fid_seq_is_mdt0(__u64 seq) { - return (seq == FID_SEQ_OST_MDT0); + return seq == FID_SEQ_OST_MDT0; } -static inline int fid_seq_is_mdt(const __u64 seq) +static inline bool fid_seq_is_mdt(__u64 seq) { return seq == FID_SEQ_OST_MDT0 || seq >= FID_SEQ_NORMAL; }; -static inline int fid_seq_is_echo(obd_seq seq) +static inline bool fid_seq_is_echo(__u64 seq) { - return (seq == FID_SEQ_ECHO); + return seq == FID_SEQ_ECHO; } -static inline int fid_is_echo(const struct lu_fid *fid) +static inline bool fid_is_echo(const struct lu_fid *fid) { return fid_seq_is_echo(fid_seq(fid)); } -static inline int fid_seq_is_llog(obd_seq seq) +static inline bool fid_seq_is_llog(__u64 seq) { - return (seq == FID_SEQ_LLOG); + return seq == FID_SEQ_LLOG; } -static inline int fid_is_llog(const struct lu_fid *fid) +static inline bool fid_is_llog(const struct lu_fid *fid) { /* file with OID == 0 is not llog but contains last oid */ return fid_seq_is_llog(fid_seq(fid)) && fid_oid(fid) > 0; } -static inline int fid_seq_is_rsvd(const __u64 seq) +static inline bool fid_seq_is_rsvd(__u64 seq) { - return (seq > FID_SEQ_OST_MDT0 && seq <= FID_SEQ_RSVD); + return seq > FID_SEQ_OST_MDT0 && seq <= FID_SEQ_RSVD; }; -static inline int fid_seq_is_special(const __u64 seq) +static inline bool fid_seq_is_special(__u64 seq) { return seq == FID_SEQ_SPECIAL; }; -static inline int fid_seq_is_local_file(const __u64 seq) +static inline bool fid_seq_is_local_file(__u64 seq) { return seq == FID_SEQ_LOCAL_FILE || seq == FID_SEQ_LOCAL_NAME; }; -static inline int fid_seq_is_root(const __u64 seq) +static inline bool fid_seq_is_root(__u64 seq) { return seq == FID_SEQ_ROOT; } -static inline int fid_seq_is_dot(const __u64 seq) +static inline bool fid_seq_is_dot(__u64 seq) { return seq == FID_SEQ_DOT_LUSTRE; } -static inline int fid_seq_is_default(const __u64 seq) +static inline bool fid_seq_is_default(__u64 seq) { return seq == FID_SEQ_LOV_DEFAULT; } -static inline int fid_is_mdt0(const struct lu_fid *fid) +static inline bool fid_is_mdt0(const struct lu_fid *fid) { - return fid_seq_is_mdt0(fid_seq(fid)); + return fid_seq_is_mdt0(fid_seq(fid)); } static inline void lu_root_fid(struct lu_fid *fid) @@ -555,14 +563,14 @@ static inline void lu_root_fid(struct lu_fid *fid) * \param fid the fid to be tested. * \return true if the fid is a igif; otherwise false. 
*/ -static inline int fid_seq_is_igif(const __u64 seq) +static inline bool fid_seq_is_igif(__u64 seq) { - return seq >= FID_SEQ_IGIF && seq <= FID_SEQ_IGIF_MAX; + return seq >= FID_SEQ_IGIF && seq <= FID_SEQ_IGIF_MAX; } -static inline int fid_is_igif(const struct lu_fid *fid) +static inline bool fid_is_igif(const struct lu_fid *fid) { - return fid_seq_is_igif(fid_seq(fid)); + return fid_seq_is_igif(fid_seq(fid)); } /** @@ -570,57 +578,66 @@ static inline int fid_is_igif(const struct lu_fid *fid) * \param fid the fid to be tested. * \return true if the fid is a idif; otherwise false. */ -static inline int fid_seq_is_idif(const __u64 seq) +static inline bool fid_seq_is_idif(__u64 seq) { - return seq >= FID_SEQ_IDIF && seq <= FID_SEQ_IDIF_MAX; + return seq >= FID_SEQ_IDIF && seq <= FID_SEQ_IDIF_MAX; } -static inline int fid_is_idif(const struct lu_fid *fid) +static inline bool fid_is_idif(const struct lu_fid *fid) { - return fid_seq_is_idif(fid_seq(fid)); + return fid_seq_is_idif(fid_seq(fid)); } -static inline int fid_is_local_file(const struct lu_fid *fid) +static inline bool fid_is_local_file(const struct lu_fid *fid) { return fid_seq_is_local_file(fid_seq(fid)); } -static inline int fid_seq_is_norm(const __u64 seq) +static inline bool fid_seq_is_norm(__u64 seq) +{ + return (seq >= FID_SEQ_NORMAL); +} + +static inline bool fid_is_norm(const struct lu_fid *fid) { - return (seq >= FID_SEQ_NORMAL); + return fid_seq_is_norm(fid_seq(fid)); } -static inline int fid_is_norm(const struct lu_fid *fid) +static inline int fid_is_layout_rbtree(const struct lu_fid *fid) { - return fid_seq_is_norm(fid_seq(fid)); + return fid_seq(fid) == FID_SEQ_LAYOUT_RBTREE; } /* convert an OST objid into an IDIF FID SEQ number */ -static inline obd_seq fid_idif_seq(obd_id id, __u32 ost_idx) +static inline __u64 fid_idif_seq(__u64 id, __u32 ost_idx) { - return FID_SEQ_IDIF | (ost_idx << 16) | ((id >> 32) & 0xffff); + return FID_SEQ_IDIF | (ost_idx << 16) | ((id >> 32) & 0xffff); } /* convert a packed IDIF FID into an OST objid */ -static inline obd_id fid_idif_id(obd_seq seq, __u32 oid, __u32 ver) +static inline __u64 fid_idif_id(__u64 seq, __u32 oid, __u32 ver) { - return ((__u64)ver << 48) | ((seq & 0xffff) << 32) | oid; + return ((__u64)ver << 48) | ((seq & 0xffff) << 32) | oid; +} + +static inline __u32 idif_ost_idx(__u64 seq) +{ + return (seq >> 16) & 0xffff; } /* extract ost index from IDIF FID */ static inline __u32 fid_idif_ost_idx(const struct lu_fid *fid) { - LASSERT(fid_is_idif(fid)); - return (fid_seq(fid) >> 16) & 0xffff; + return idif_ost_idx(fid_seq(fid)); } /* extract OST sequence (group) from a wire ost_id (id/seq) pair */ -static inline obd_seq ostid_seq(const struct ost_id *ostid) +static inline __u64 ostid_seq(const struct ost_id *ostid) { if (fid_seq_is_mdt0(ostid->oi.oi_seq)) return FID_SEQ_OST_MDT0; - if (fid_seq_is_default(ostid->oi.oi_seq)) + if (unlikely(fid_seq_is_default(ostid->oi.oi_seq))) return FID_SEQ_LOV_DEFAULT; if (fid_is_idif(&ostid->oi_fid)) @@ -630,11 +647,14 @@ static inline obd_seq ostid_seq(const struct ost_id *ostid) } /* extract OST objid from a wire ost_id (id/seq) pair */ -static inline obd_id ostid_id(const struct ost_id *ostid) +static inline __u64 ostid_id(const struct ost_id *ostid) { - if (fid_seq_is_mdt0(ostid_seq(ostid))) + if (fid_seq_is_mdt0(ostid->oi.oi_seq)) return ostid->oi.oi_id & IDIF_OID_MASK; + if (unlikely(fid_seq_is_default(ostid->oi.oi_seq))) + return ostid->oi.oi_id; + if (fid_is_idif(&ostid->oi_fid)) return fid_idif_id(fid_seq(&ostid->oi_fid), 
fid_oid(&ostid->oi_fid), 0); @@ -677,13 +697,23 @@ static inline void ostid_set_seq_llog(struct ost_id *oi) */ static inline void ostid_set_id(struct ost_id *oi, __u64 oid) { - if (fid_seq_is_mdt0(ostid_seq(oi))) { + if (fid_seq_is_mdt0(oi->oi.oi_seq)) { if (oid >= IDIF_MAX_OID) { CERROR("Bad "LPU64" to set "DOSTID"\n", oid, POSTID(oi)); return; } oi->oi.oi_id = oid; + } else if (fid_is_idif(&oi->oi_fid)) { + if (oid >= IDIF_MAX_OID) { + CERROR("Bad "LPU64" to set "DOSTID"\n", + oid, POSTID(oi)); + return; + } + oi->oi_fid.f_seq = fid_idif_seq(oid, + fid_idif_ost_idx(&oi->oi_fid)); + oi->oi_fid.f_oid = oid; + oi->oi_fid.f_ver = oid >> 48; } else { if (oid > OBIF_MAX_OID) { CERROR("Bad "LPU64" to set "DOSTID"\n", @@ -694,25 +724,31 @@ static inline void ostid_set_id(struct ost_id *oi, __u64 oid) } } -static inline void ostid_inc_id(struct ost_id *oi) +static inline int fid_set_id(struct lu_fid *fid, __u64 oid) { - if (fid_seq_is_mdt0(ostid_seq(oi))) { - if (unlikely(ostid_id(oi) + 1 > IDIF_MAX_OID)) { - CERROR("Bad inc "DOSTID"\n", POSTID(oi)); - return; + if (unlikely(fid_seq_is_igif(fid->f_seq))) { + CERROR("bad IGIF, "DFID"\n", PFID(fid)); + return -EBADF; + } + + if (fid_is_idif(fid)) { + if (oid >= IDIF_MAX_OID) { + CERROR("Bad "LPU64" to set "DFID"\n", + oid, PFID(fid)); + return -EBADF; } - oi->oi.oi_id++; + fid->f_seq = fid_idif_seq(oid, fid_idif_ost_idx(fid)); + fid->f_oid = oid; + fid->f_ver = oid >> 48; } else { - oi->oi_fid.f_oid++; + if (oid > OBIF_MAX_OID) { + CERROR("Bad "LPU64" to set "DFID"\n", + oid, PFID(fid)); + return -EBADF; + } + fid->f_oid = oid; } -} - -static inline void ostid_dec_id(struct ost_id *oi) -{ - if (fid_seq_is_mdt0(ostid_seq(oi))) - oi->oi.oi_id--; - else - oi->oi_fid.f_oid--; + return 0; } /** @@ -724,45 +760,50 @@ static inline void ostid_dec_id(struct ost_id *oi) * struct lu_fid fields without loss. For reference see: * http://arch.lustre.org/index.php?title=Interoperability_fids_zfs */ -static inline int ostid_to_fid(struct lu_fid *fid, struct ost_id *ostid, +static inline int ostid_to_fid(struct lu_fid *fid, const struct ost_id *ostid, __u32 ost_idx) { + __u64 seq = ostid_seq(ostid); + if (ost_idx > 0xffff) { CERROR("bad ost_idx, "DOSTID" ost_idx:%u\n", POSTID(ostid), ost_idx); return -EBADF; } - if (fid_seq_is_mdt0(ostid_seq(ostid))) { + if (fid_seq_is_mdt0(seq)) { + __u64 oid = ostid_id(ostid); + /* This is a "legacy" (old 1.x/2.early) OST object in "group 0" * that we map into the IDIF namespace. It allows up to 2^48 * objects per OST, as this is the object namespace that has * been in production for years. This can handle create rates * of 1M objects/s/OST for 9 years, or combinations thereof. */ - if (ostid_id(ostid) >= IDIF_MAX_OID) { - CERROR("bad MDT0 id, "DOSTID" ost_idx:%u\n", - POSTID(ostid), ost_idx); - return -EBADF; + if (oid >= IDIF_MAX_OID) { + CERROR("bad MDT0 id(1), "DOSTID" ost_idx:%u\n", + POSTID(ostid), ost_idx); + return -EBADF; } - fid->f_seq = fid_idif_seq(ostid_id(ostid), ost_idx); + fid->f_seq = fid_idif_seq(oid, ost_idx); /* truncate to 32 bits by assignment */ - fid->f_oid = ostid_id(ostid); + fid->f_oid = oid; /* in theory, not currently used */ - fid->f_ver = ostid_id(ostid) >> 48; - } else /* if (fid_seq_is_idif(seq) || fid_seq_is_norm(seq)) */ { - /* This is either an IDIF object, which identifies objects across - * all OSTs, or a regular FID. The IDIF namespace maps legacy - * OST objects into the FID namespace. In both cases, we just - * pass the FID through, no conversion needed. 
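A minimal standalone sketch of the IDIF mapping used above: a legacy OST object identified by (objid, ost_idx) is packed into a FID with the same shift/mask arithmetic as fid_idif_seq() and fid_idif_id(), and unpacked back without loss. The objid and ost_idx values are made up for illustration, and SKETCH_FID_SEQ_IDIF restates the usual FID_SEQ_IDIF value (0x100000000ULL) rather than quoting this excerpt.

#include <stdio.h>
#include <stdint.h>

#define SKETCH_FID_SEQ_IDIF 0x100000000ULL	/* assumed FID_SEQ_IDIF value */

int main(void)
{
	uint64_t objid   = 0x123456789ULL;	/* made-up OST object id */
	uint32_t ost_idx = 5;			/* made-up OST index */

	/* pack: the sequence carries the OST index and the top 16 bits of
	 * the object id, exactly as fid_idif_seq() does */
	uint64_t seq = SKETCH_FID_SEQ_IDIF | ((uint64_t)ost_idx << 16) |
		       ((objid >> 32) & 0xffff);
	uint32_t oid = (uint32_t)objid;			/* low 32 bits */
	uint32_t ver = (uint32_t)(objid >> 48);		/* currently always 0 */

	/* unpack: rebuild the object id the same way fid_idif_id() does */
	uint64_t back = ((uint64_t)ver << 48) | ((seq & 0xffff) << 32) | oid;

	printf("seq=%#llx oid=%#x ver=%u -> objid=%#llx\n",
	       (unsigned long long)seq, oid, ver, (unsigned long long)back);
	/* prints: seq=0x100050001 oid=0x23456789 ver=0 -> objid=0x123456789 */
	return 0;
}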
*/ + fid->f_ver = oid >> 48; + } else if (likely(!fid_seq_is_default(seq))) + /* if (fid_seq_is_idif(seq) || fid_seq_is_norm(seq)) */ { + /* This is either an IDIF object, which identifies objects across + * all OSTs, or a regular FID. The IDIF namespace maps legacy + * OST objects into the FID namespace. In both cases, we just + * pass the FID through, no conversion needed. */ if (ostid->oi_fid.f_ver != 0) { - CERROR("bad MDT0 id, "DOSTID" ost_idx:%u\n", + CERROR("bad MDT0 id(2), "DOSTID" ost_idx:%u\n", POSTID(ostid), ost_idx); return -EBADF; } *fid = ostid->oi_fid; } - return 0; + return 0; } /* pack any OST FID into an ostid (id/seq) for the wire/disk */ @@ -785,9 +826,9 @@ static inline int fid_to_ostid(const struct lu_fid *fid, struct ost_id *ostid) } /* Check whether the fid is for LAST_ID */ -static inline int fid_is_last_id(const struct lu_fid *fid) +static inline bool fid_is_last_id(const struct lu_fid *fid) { - return (fid_oid(fid) == 0); + return fid_oid(fid) == 0; } /** @@ -828,10 +869,6 @@ static inline void lu_igif_build(struct lu_fid *fid, __u32 ino, __u32 gen) */ static inline void fid_cpu_to_le(struct lu_fid *dst, const struct lu_fid *src) { - /* check that all fields are converted */ - CLASSERT(sizeof *src == - sizeof fid_seq(src) + - sizeof fid_oid(src) + sizeof fid_ver(src)); dst->f_seq = cpu_to_le64(fid_seq(src)); dst->f_oid = cpu_to_le32(fid_oid(src)); dst->f_ver = cpu_to_le32(fid_ver(src)); @@ -839,10 +876,6 @@ static inline void fid_cpu_to_le(struct lu_fid *dst, const struct lu_fid *src) static inline void fid_le_to_cpu(struct lu_fid *dst, const struct lu_fid *src) { - /* check that all fields are converted */ - CLASSERT(sizeof *src == - sizeof fid_seq(src) + - sizeof fid_oid(src) + sizeof fid_ver(src)); dst->f_seq = le64_to_cpu(fid_seq(src)); dst->f_oid = le32_to_cpu(fid_oid(src)); dst->f_ver = le32_to_cpu(fid_ver(src)); @@ -850,10 +883,6 @@ static inline void fid_le_to_cpu(struct lu_fid *dst, const struct lu_fid *src) static inline void fid_cpu_to_be(struct lu_fid *dst, const struct lu_fid *src) { - /* check that all fields are converted */ - CLASSERT(sizeof *src == - sizeof fid_seq(src) + - sizeof fid_oid(src) + sizeof fid_ver(src)); dst->f_seq = cpu_to_be64(fid_seq(src)); dst->f_oid = cpu_to_be32(fid_oid(src)); dst->f_ver = cpu_to_be32(fid_ver(src)); @@ -861,16 +890,12 @@ static inline void fid_cpu_to_be(struct lu_fid *dst, const struct lu_fid *src) static inline void fid_be_to_cpu(struct lu_fid *dst, const struct lu_fid *src) { - /* check that all fields are converted */ - CLASSERT(sizeof *src == - sizeof fid_seq(src) + - sizeof fid_oid(src) + sizeof fid_ver(src)); dst->f_seq = be64_to_cpu(fid_seq(src)); dst->f_oid = be32_to_cpu(fid_oid(src)); dst->f_ver = be32_to_cpu(fid_ver(src)); } -static inline int fid_is_sane(const struct lu_fid *fid) +static inline bool fid_is_sane(const struct lu_fid *fid) { return fid != NULL && ((fid_seq(fid) >= FID_SEQ_START && fid_ver(fid) == 0) || @@ -878,19 +903,11 @@ static inline int fid_is_sane(const struct lu_fid *fid) fid_seq_is_rsvd(fid_seq(fid))); } -static inline int fid_is_zero(const struct lu_fid *fid) -{ - return fid_seq(fid) == 0 && fid_oid(fid) == 0; -} - extern void lustre_swab_lu_fid(struct lu_fid *fid); extern void lustre_swab_lu_seq_range(struct lu_seq_range *range); -static inline int lu_fid_eq(const struct lu_fid *f0, const struct lu_fid *f1) +static inline bool lu_fid_eq(const struct lu_fid *f0, const struct lu_fid *f1) { - /* Check that there is no alignment padding. 
*/ - CLASSERT(sizeof *f0 == - sizeof f0->f_seq + sizeof f0->f_oid + sizeof f0->f_ver); return memcmp(f0, f1, sizeof *f0) == 0; } @@ -911,10 +928,10 @@ static inline int lu_fid_cmp(const struct lu_fid *f0, __diff_normalize(fid_ver(f0), fid_ver(f1)); } -static inline void ostid_cpu_to_le(struct ost_id *src_oi, +static inline void ostid_cpu_to_le(const struct ost_id *src_oi, struct ost_id *dst_oi) { - if (fid_seq_is_mdt0(ostid_seq(src_oi))) { + if (fid_seq_is_mdt0(src_oi->oi.oi_seq)) { dst_oi->oi.oi_id = cpu_to_le64(src_oi->oi.oi_id); dst_oi->oi.oi_seq = cpu_to_le64(src_oi->oi.oi_seq); } else { @@ -922,10 +939,10 @@ static inline void ostid_cpu_to_le(struct ost_id *src_oi, } } -static inline void ostid_le_to_cpu(struct ost_id *src_oi, +static inline void ostid_le_to_cpu(const struct ost_id *src_oi, struct ost_id *dst_oi) { - if (fid_seq_is_mdt0(ostid_seq(src_oi))) { + if (fid_seq_is_mdt0(src_oi->oi.oi_seq)) { dst_oi->oi.oi_id = le64_to_cpu(src_oi->oi.oi_id); dst_oi->oi.oi_seq = le64_to_cpu(src_oi->oi.oi_seq); } else { @@ -933,6 +950,20 @@ static inline void ostid_le_to_cpu(struct ost_id *src_oi, } } +struct lu_orphan_rec { + /* The MDT-object's FID referenced by the orphan OST-object */ + struct lu_fid lor_fid; + __u32 lor_uid; + __u32 lor_gid; +}; + +struct lu_orphan_ent { + /* The orphan OST-object's FID */ + struct lu_fid loe_key; + struct lu_orphan_rec loe_rec; +}; +void lustre_swab_orphan_ent(struct lu_orphan_ent *ent); + /** @} lu_fid */ /** \defgroup lu_dir lu_dir @@ -1056,12 +1087,12 @@ static inline struct lu_dirent *lu_dirent_next(struct lu_dirent *ent) return next; } -static inline int lu_dirent_calc_size(int namelen, __u16 attr) +static inline size_t lu_dirent_calc_size(size_t namelen, __u16 attr) { - int size; + size_t size; - if (attr & LUDA_TYPE) { - const unsigned align = sizeof(struct luda_type) - 1; + if (attr & LUDA_TYPE) { + const size_t align = sizeof(struct luda_type) - 1; size = (sizeof(struct lu_dirent) + namelen + align) & ~align; size += sizeof(struct luda_type); } else @@ -1070,31 +1101,22 @@ static inline int lu_dirent_calc_size(int namelen, __u16 attr) return (size + 7) & ~7; } -static inline int lu_dirent_size(struct lu_dirent *ent) -{ - if (le16_to_cpu(ent->lde_reclen) == 0) { - return lu_dirent_calc_size(le16_to_cpu(ent->lde_namelen), - le32_to_cpu(ent->lde_attrs)); - } - return le16_to_cpu(ent->lde_reclen); -} - #define MDS_DIR_END_OFF 0xfffffffffffffffeULL /** * MDS_READPAGE page size * * This is the directory page size packed in MDS_READPAGE RPC. - * It's different than CFS_PAGE_SIZE because the client needs to + * It's different than PAGE_CACHE_SIZE because the client needs to * access the struct lu_dirpage header packed at the beginning of * the "page" and without this there isn't any way to know find the - * lu_dirpage header is if client and server CFS_PAGE_SIZE differ. + * lu_dirpage header is if client and server PAGE_CACHE_SIZE differ. 
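A small worked example of the point made in the readpage comment above: because LU_PAGE_SHIFT is fixed at 12 (see the defines that follow), a client whose kernel uses a larger page-cache page simply holds several 4 KiB lu_dirpage units per cached page, which is what the LU_PAGE_COUNT formula computes. The page-cache shifts used below (12 and 16) are example values, not taken from this header.

#include <stdio.h>

#define SKETCH_LU_PAGE_SHIFT 12	/* matches LU_PAGE_SHIFT below */

int main(void)
{
	unsigned int cache_shifts[] = { 12, 16 };	/* example kernels */
	unsigned int i;

	for (i = 0; i < sizeof(cache_shifts) / sizeof(cache_shifts[0]); i++) {
		unsigned int shift = cache_shifts[i];
		/* same formula as LU_PAGE_COUNT */
		unsigned int lu_pages = 1u << (shift - SKETCH_LU_PAGE_SHIFT);

		printf("%2u KiB cache page -> %2u lu_dirpage unit(s)\n",
		       (1u << shift) >> 10, lu_pages);
	}
	return 0;	/* prints 1 unit for 4 KiB pages, 16 for 64 KiB pages */
}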
*/ #define LU_PAGE_SHIFT 12 #define LU_PAGE_SIZE (1UL << LU_PAGE_SHIFT) #define LU_PAGE_MASK (~(LU_PAGE_SIZE - 1)) -#define LU_PAGE_COUNT (1 << (CFS_PAGE_SHIFT - LU_PAGE_SHIFT)) +#define LU_PAGE_COUNT (1 << (PAGE_CACHE_SHIFT - LU_PAGE_SHIFT)) /** @} lu_dir */ @@ -1103,21 +1125,21 @@ struct lustre_handle { }; #define DEAD_HANDLE_MAGIC 0xdeadbeefcafebabeULL -static inline int lustre_handle_is_used(struct lustre_handle *lh) +static inline bool lustre_handle_is_used(const struct lustre_handle *lh) { - return lh->cookie != 0ull; + return lh->cookie != 0; } -static inline int lustre_handle_equal(const struct lustre_handle *lh1, - const struct lustre_handle *lh2) +static inline bool lustre_handle_equal(const struct lustre_handle *lh1, + const struct lustre_handle *lh2) { - return lh1->cookie == lh2->cookie; + return lh1->cookie == lh2->cookie; } static inline void lustre_handle_copy(struct lustre_handle *tgt, - struct lustre_handle *src) + const struct lustre_handle *src) { - tgt->cookie = src->cookie; + tgt->cookie = src->cookie; } /* flags for lm_flags */ @@ -1141,7 +1163,6 @@ struct lustre_msg_v2 { /* without gss, ptlrpc_body is put at the first buffer. */ #define PTLRPC_NUM_VERSIONS 4 -#define JOBSTATS_JOBID_SIZE 32 /* 32 bytes string */ struct ptlrpc_body_v3 { struct lustre_handle pb_handle; __u32 pb_type; @@ -1163,7 +1184,7 @@ struct ptlrpc_body_v3 { __u64 pb_pre_versions[PTLRPC_NUM_VERSIONS]; /* padding for future needs */ __u64 pb_padding[4]; - char pb_jobid[JOBSTATS_JOBID_SIZE]; + char pb_jobid[LUSTRE_JOBID_SIZE]; }; #define ptlrpc_body ptlrpc_body_v3 @@ -1308,6 +1329,14 @@ extern void lustre_swab_ptlrpc_body(struct ptlrpc_body *pb); #define OBD_CONNECT_LIGHTWEIGHT 0x1000000000000ULL/* lightweight connection */ #define OBD_CONNECT_SHORTIO 0x2000000000000ULL/* short io */ #define OBD_CONNECT_PINGLESS 0x4000000000000ULL/* pings not required */ +#define OBD_CONNECT_FLOCK_DEAD 0x8000000000000ULL/* improved flock deadlock detection */ +#define OBD_CONNECT_DISP_STRIPE 0x10000000000000ULL/* create stripe disposition*/ +#define OBD_CONNECT_OPEN_BY_FID 0x20000000000000ULL /* open by fid won't pack + name in request */ +#define OBD_CONNECT_LFSCK 0x40000000000000ULL/* support online LFSCK */ +#define OBD_CONNECT_UNLINK_CLOSE 0x100000000000000ULL/* close file in unlink */ +#define OBD_CONNECT_DIR_STRIPE 0x400000000000000ULL /* striped DNE dir */ + /* XXX README XXX: * Please DO NOT add flag values here before first ensuring that this same * flag value is not in use on some other branch. 
Please clear any such @@ -1349,7 +1378,12 @@ extern void lustre_swab_ptlrpc_body(struct ptlrpc_body *pb); OBD_CONNECT_EINPROGRESS | \ OBD_CONNECT_LIGHTWEIGHT | OBD_CONNECT_UMASK | \ OBD_CONNECT_LVB_TYPE | OBD_CONNECT_LAYOUTLOCK |\ - OBD_CONNECT_PINGLESS) + OBD_CONNECT_PINGLESS | OBD_CONNECT_MAX_EASIZE |\ + OBD_CONNECT_FLOCK_DEAD | \ + OBD_CONNECT_DISP_STRIPE | OBD_CONNECT_LFSCK | \ + OBD_CONNECT_OPEN_BY_FID | \ + OBD_CONNECT_DIR_STRIPE) + #define OST_CONNECT_SUPPORTED (OBD_CONNECT_SRVLOCK | OBD_CONNECT_GRANT | \ OBD_CONNECT_REQPORTAL | OBD_CONNECT_VERSION | \ OBD_CONNECT_TRUNCLOCK | OBD_CONNECT_INDEX | \ @@ -1366,7 +1400,7 @@ extern void lustre_swab_ptlrpc_body(struct ptlrpc_body *pb); OBD_CONNECT_JOBSTATS | \ OBD_CONNECT_LIGHTWEIGHT | OBD_CONNECT_LVB_TYPE|\ OBD_CONNECT_LAYOUTLOCK | OBD_CONNECT_FID | \ - OBD_CONNECT_PINGLESS) + OBD_CONNECT_PINGLESS | OBD_CONNECT_LFSCK) #define ECHO_CONNECT_SUPPORTED (0) #define MGS_CONNECT_SUPPORTED (OBD_CONNECT_VERSION | OBD_CONNECT_AT | \ OBD_CONNECT_FULL20 | OBD_CONNECT_IMP_RECOV | \ @@ -1376,13 +1410,6 @@ extern void lustre_swab_ptlrpc_body(struct ptlrpc_body *pb); #define CLIENT_CONNECT_MDT_REQD (OBD_CONNECT_IBITS | OBD_CONNECT_FID | \ OBD_CONNECT_FULL20) -#define OBD_OCD_VERSION(major,minor,patch,fix) (((major)<<24) + ((minor)<<16) +\ - ((patch)<<8) + (fix)) -#define OBD_OCD_VERSION_MAJOR(version) ((int)((version)>>24)&255) -#define OBD_OCD_VERSION_MINOR(version) ((int)((version)>>16)&255) -#define OBD_OCD_VERSION_PATCH(version) ((int)((version)>>8)&255) -#define OBD_OCD_VERSION_FIX(version) ((int)(version)&255) - /* This structure is used for both request and reply. * * If we eventually have separate connect data for different types, which we @@ -1518,6 +1545,8 @@ enum obdo_flags { * clients prior than 2.2 */ OBD_FL_RECOV_RESEND = 0x00080000, /* recoverable resent */ OBD_FL_NOSPC_BLK = 0x00100000, /* no more block space on OST */ + OBD_FL_FLUSH = 0x00200000, /* flush pages on the OST */ + OBD_FL_SHORT_IO = 0x00400000, /* short io request */ /* Note that while these checksum values are currently separate bits, * in 2.x we can actually allow all values from 1-31 if we wanted. */ @@ -1528,10 +1557,23 @@ enum obdo_flags { OBD_FL_LOCAL_MASK = 0xF0000000, }; -#define LOV_MAGIC_V1 0x0BD10BD0 -#define LOV_MAGIC LOV_MAGIC_V1 -#define LOV_MAGIC_JOIN_V1 0x0BD20BD0 -#define LOV_MAGIC_V3 0x0BD30BD0 +/* + * All LOV EA magics should have the same postfix, if some new version + * Lustre instroduces new LOV EA magic, then when down-grade to an old + * Lustre, even though the old version system does not recognizes such + * new magic, it still can distinguish the corrupted cases by checking + * the magic's postfix. 
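A brief sketch of the down-grade scenario the LOV EA comment above describes: an older Lustre that only recognizes LOV_MAGIC_V1/V3 can still separate a well-formed LOV EA written by a newer release from a corrupted one by testing the 0x0BD0 postfix. The SKETCH_* constants restate the LOV_MAGIC_* definitions that follow in this hunk; the sample values are made up.

#include <stdio.h>
#include <stdint.h>

#define SKETCH_LOV_MAGIC_MAGIC 0x0BD0
#define SKETCH_LOV_MAGIC_MASK  0xFFFF
#define SKETCH_LOV_MAGIC_V1    (0x0BD10000 | SKETCH_LOV_MAGIC_MAGIC)
#define SKETCH_LOV_MAGIC_V3    (0x0BD30000 | SKETCH_LOV_MAGIC_MAGIC)

static const char *classify_lov_magic(uint32_t magic)
{
	if (magic == SKETCH_LOV_MAGIC_V1 || magic == SKETCH_LOV_MAGIC_V3)
		return "known LOV EA version";
	if ((magic & SKETCH_LOV_MAGIC_MASK) == SKETCH_LOV_MAGIC_MAGIC)
		return "well-formed LOV EA from a newer release";
	return "corrupted LOV EA";
}

int main(void)
{
	/* 0x0BD50BD0 is LOV_MAGIC_SPECIFIC, unknown to an older client */
	uint32_t samples[] = { 0x0BD10BD0, 0x0BD50BD0, 0xdeadbeef };
	unsigned int i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("%#x: %s\n", samples[i], classify_lov_magic(samples[i]));
	return 0;
}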
+ */ +#define LOV_MAGIC_MAGIC 0x0BD0 +#define LOV_MAGIC_MASK 0xFFFF + +#define LOV_MAGIC_V1 (0x0BD10000 | LOV_MAGIC_MAGIC) +#define LOV_MAGIC_JOIN_V1 (0x0BD20000 | LOV_MAGIC_MAGIC) +#define LOV_MAGIC_V3 (0x0BD30000 | LOV_MAGIC_MAGIC) +#define LOV_MAGIC_MIGRATE (0x0BD40000 | LOV_MAGIC_MAGIC) +/* reserved for specifying OSTs */ +#define LOV_MAGIC_SPECIFIC (0x0BD50000 | LOV_MAGIC_MAGIC) +#define LOV_MAGIC LOV_MAGIC_V1 /* * magic for fully defined striping @@ -1548,10 +1590,8 @@ enum obdo_flags { #define LOV_MAGIC_V1_DEF 0x0CD10BD0 #define LOV_MAGIC_V3_DEF 0x0CD30BD0 -#define LOV_PATTERN_RAID0 0x001 /* stripes are used round-robin */ -#define LOV_PATTERN_RAID1 0x002 /* stripes are mirrors of each other */ -#define LOV_PATTERN_FIRST 0x100 /* first stripe is not in round-robin */ -#define LOV_PATTERN_CMOBD 0x200 +#define lov_pattern(pattern) (pattern & ~LOV_PATTERN_F_MASK) +#define lov_pattern_flags(pattern) (pattern & LOV_PATTERN_F_MASK) #define lov_ost_data lov_ost_data_v1 struct lov_ost_data_v1 { /* per-stripe data structure (little-endian)*/ @@ -1608,25 +1648,30 @@ static inline void lmm_oi_set_seq(struct ost_id *oi, __u64 seq) oi->oi.oi_seq = seq; } -static inline __u64 lmm_oi_id(struct ost_id *oi) +static inline void lmm_oi_set_id(struct ost_id *oi, __u64 oid) +{ + oi->oi.oi_id = oid; +} + +static inline __u64 lmm_oi_id(const struct ost_id *oi) { return oi->oi.oi_id; } -static inline __u64 lmm_oi_seq(struct ost_id *oi) +static inline __u64 lmm_oi_seq(const struct ost_id *oi) { return oi->oi.oi_seq; } static inline void lmm_oi_le_to_cpu(struct ost_id *dst_oi, - struct ost_id *src_oi) + const struct ost_id *src_oi) { dst_oi->oi.oi_id = le64_to_cpu(src_oi->oi.oi_id); dst_oi->oi.oi_seq = le64_to_cpu(src_oi->oi.oi_seq); } static inline void lmm_oi_cpu_to_le(struct ost_id *dst_oi, - struct ost_id *src_oi) + const struct ost_id *src_oi) { dst_oi->oi.oi_id = cpu_to_le64(src_oi->oi.oi_id); dst_oi->oi.oi_seq = cpu_to_le64(src_oi->oi.oi_seq); @@ -1637,22 +1682,35 @@ static inline void lmm_oi_cpu_to_le(struct ost_id *dst_oi, #define MAX_MD_SIZE (sizeof(struct lov_mds_md) + 4 * sizeof(struct lov_ost_data)) #define MIN_MD_SIZE (sizeof(struct lov_mds_md) + 1 * sizeof(struct lov_ost_data)) +/* This is the default MDT reply size allocated, should the striping be bigger, + * it will be reallocated in mdt_fix_reply. + * 100 stripes is a bit less than 2.5k of data */ +#define DEF_REP_MD_SIZE (sizeof(struct lov_mds_md) + \ + 100 * sizeof(struct lov_ost_data)) + #define XATTR_NAME_ACL_ACCESS "system.posix_acl_access" #define XATTR_NAME_ACL_DEFAULT "system.posix_acl_default" #define XATTR_USER_PREFIX "user." #define XATTR_TRUSTED_PREFIX "trusted." #define XATTR_SECURITY_PREFIX "security." -#define XATTR_LUSTRE_PREFIX "lustre." #define XATTR_NAME_LOV "trusted.lov" #define XATTR_NAME_LMA "trusted.lma" #define XATTR_NAME_LMV "trusted.lmv" +#define XATTR_NAME_DEFAULT_LMV "trusted.dmv" #define XATTR_NAME_LINK "trusted.link" #define XATTR_NAME_FID "trusted.fid" #define XATTR_NAME_VERSION "trusted.version" #define XATTR_NAME_SOM "trusted.som" #define XATTR_NAME_HSM "trusted.hsm" -#define XATTR_NAME_LFSCK_NAMESPACE "trusted.lfsck_namespace" +#define XATTR_NAME_LFSCK_BITMAP "trusted.lfsck_bitmap" + +#if LUSTRE_VERSION_CODE < OBD_OCD_VERSION(2, 8, 53, 0) +# define XATTR_NAME_LFSCK_NAMESPACE_OLD "trusted.lfsck_namespace" +#endif + +#define XATTR_NAME_LFSCK_NAMESPACE "trusted.lfsck_ns" +#define XATTR_NAME_MAX_LEN 32 /* increase this, if there is longer name. 
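A back-of-envelope check of the "100 stripes is a bit less than 2.5k of data" remark next to DEF_REP_MD_SIZE above, assuming the usual wire sizes of 32 bytes for struct lov_mds_md_v1 and 24 bytes for struct lov_ost_data_v1 (those sizes are not quoted in this excerpt).

#include <stdio.h>

int main(void)
{
	unsigned int lmm_hdr = 32;	/* assumed sizeof(struct lov_mds_md_v1) */
	unsigned int per_ost = 24;	/* assumed sizeof(struct lov_ost_data_v1) */
	unsigned int stripes = 100;
	unsigned int total   = lmm_hdr + stripes * per_ost;

	printf("DEF_REP_MD_SIZE ~ %u bytes (%.2f KiB)\n", total, total / 1024.0);
	/* ~2432 bytes, i.e. just under 2.5 KiB, as the comment says */
	return 0;
}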
*/ struct lov_mds_md_v3 { /* LOV EA mds/wire data (little-endian) */ __u32 lmm_magic; /* magic number = LOV_MAGIC_V3 */ @@ -1662,10 +1720,45 @@ struct lov_mds_md_v3 { /* LOV EA mds/wire data (little-endian) */ /* lmm_stripe_count used to be __u32 */ __u16 lmm_stripe_count; /* num stripes in use for this object */ __u16 lmm_layout_gen; /* layout generation number */ - char lmm_pool_name[LOV_MAXPOOLNAME]; /* must be 32bit aligned */ + char lmm_pool_name[LOV_MAXPOOLNAME + 1]; /* must be 32bit aligned */ struct lov_ost_data_v1 lmm_objects[0]; /* per-stripe data */ }; +static inline __u32 lov_mds_md_size(__u16 stripes, __u32 lmm_magic) +{ + if (lmm_magic == LOV_MAGIC_V3) + return sizeof(struct lov_mds_md_v3) + + stripes * sizeof(struct lov_ost_data_v1); + else + return sizeof(struct lov_mds_md_v1) + + stripes * sizeof(struct lov_ost_data_v1); +} + +static inline __u32 +lov_mds_md_max_stripe_count(size_t buf_size, __u32 lmm_magic) +{ + switch (lmm_magic) { + case LOV_MAGIC_V1: { + struct lov_mds_md_v1 lmm; + + if (buf_size < sizeof(lmm)) + return 0; + + return (buf_size - sizeof(lmm)) / sizeof(lmm.lmm_objects[0]); + } + case LOV_MAGIC_V3: { + struct lov_mds_md_v3 lmm; + + if (buf_size < sizeof(lmm)) + return 0; + + return (buf_size - sizeof(lmm)) / sizeof(lmm.lmm_objects[0]); + } + default: + return 0; + } +} + #define OBD_MD_FLID (0x00000001ULL) /* object ID */ #define OBD_MD_FLATIME (0x00000002ULL) /* access time */ #define OBD_MD_FLMTIME (0x00000004ULL) /* data modification time */ @@ -1702,10 +1795,7 @@ struct lov_mds_md_v3 { /* LOV EA mds/wire data (little-endian) */ #define OBD_MD_MDS (0x0000000100000000ULL) /* where an inode lives on */ #define OBD_MD_REINT (0x0000000200000000ULL) /* reintegrate oa */ #define OBD_MD_MEA (0x0000000400000000ULL) /* CMD split EA */ - -/* OBD_MD_MDTIDX is used to get MDT index, but it is never been used overwire, - * and it is already obsolete since 2.3 */ -/* #define OBD_MD_MDTIDX (0x0000000800000000ULL) */ +#define OBD_MD_TSTATE (0x0000000800000000ULL) /* transient state field */ #define OBD_MD_FLXATTR (0x0000001000000000ULL) /* xattr */ #define OBD_MD_FLXATTRLS (0x0000002000000000ULL) /* xattr list */ @@ -1717,7 +1807,9 @@ struct lov_mds_md_v3 { /* LOV EA mds/wire data (little-endian) */ #define OBD_MD_FLCKSPLIT (0x0000080000000000ULL) /* Check split on server */ #define OBD_MD_FLCROSSREF (0x0000100000000000ULL) /* Cross-ref case */ #define OBD_MD_FLGETATTRLOCK (0x0000200000000000ULL) /* Get IOEpoch attributes - * under lock */ + * under lock; for xattr + * requests means the + * client holds the lock */ #define OBD_MD_FLOBJCOUNT (0x0000400000000000ULL) /* for multiple destroy */ #define OBD_MD_FLRMTLSETFACL (0x0001000000000000ULL) /* lfs lsetfacl case */ @@ -1726,6 +1818,9 @@ struct lov_mds_md_v3 { /* LOV EA mds/wire data (little-endian) */ #define OBD_MD_FLRMTRGETFACL (0x0008000000000000ULL) /* lfs rgetfacl case */ #define OBD_MD_FLDATAVERSION (0x0010000000000000ULL) /* iversion sum */ +#define OBD_MD_FLRELEASED (0x0020000000000000ULL) /* file released */ + +#define OBD_MD_DEFAULT_MEA (0x0040000000000000ULL) /* default MEA */ #define OBD_MD_FLGETATTR (OBD_MD_FLID | OBD_MD_FLATIME | OBD_MD_FLMTIME | \ OBD_MD_FLCTIME | OBD_MD_FLSIZE | OBD_MD_FLBLKSZ | \ @@ -1733,6 +1828,8 @@ struct lov_mds_md_v3 { /* LOV EA mds/wire data (little-endian) */ OBD_MD_FLGID | OBD_MD_FLFLAGS | OBD_MD_FLNLINK | \ OBD_MD_FLGENER | OBD_MD_FLRDEV | OBD_MD_FLGROUP) +#define OBD_MD_FLXATTRALL (OBD_MD_FLXATTR | OBD_MD_FLXATTRLS) + /* don't forget obdo_fid which is way down at the 
bottom so it can * come after the definition of llog_cookie */ @@ -1772,8 +1869,12 @@ extern void lustre_swab_obd_statfs (struct obd_statfs *os); #define OBD_BRW_MEMALLOC 0x800 /* Client runs in the "kswapd" context */ #define OBD_BRW_OVER_USRQUOTA 0x1000 /* Running out of user quota */ #define OBD_BRW_OVER_GRPQUOTA 0x2000 /* Running out of group quota */ +#define OBD_BRW_SOFT_SYNC 0x4000 /* This flag notifies the server + * that the client is running low on + * space for unstable pages; asking + * it to sync quickly */ -#define OBD_OBJECT_EOF 0xffffffffffffffffULL +#define OBD_OBJECT_EOF LUSTRE_EOF #define OST_MIN_PRECREATE 32 #define OST_MAX_PRECREATE 20000 @@ -1796,12 +1897,12 @@ extern void lustre_swab_obd_ioobj (struct obd_ioobj *ioo); /* multiple of 8 bytes => can array */ struct niobuf_remote { - __u64 offset; - __u32 len; - __u32 flags; + __u64 rnb_offset; + __u32 rnb_len; + __u32 rnb_flags; }; -extern void lustre_swab_niobuf_remote (struct niobuf_remote *nbr); +void lustre_swab_niobuf_remote(struct niobuf_remote *nbr); /* lock value block communicated between the filter and llite */ @@ -1816,25 +1917,25 @@ extern void lustre_swab_niobuf_remote (struct niobuf_remote *nbr); #define OST_LVB_GET_ERR(blocks) (int)(blocks - OST_LVB_ERR_INIT) struct ost_lvb_v1 { - __u64 lvb_size; - obd_time lvb_mtime; - obd_time lvb_atime; - obd_time lvb_ctime; - __u64 lvb_blocks; + __u64 lvb_size; + __s64 lvb_mtime; + __s64 lvb_atime; + __s64 lvb_ctime; + __u64 lvb_blocks; }; extern void lustre_swab_ost_lvb_v1(struct ost_lvb_v1 *lvb); struct ost_lvb { - __u64 lvb_size; - obd_time lvb_mtime; - obd_time lvb_atime; - obd_time lvb_ctime; - __u64 lvb_blocks; - __u32 lvb_mtime_ns; - __u32 lvb_atime_ns; - __u32 lvb_ctime_ns; - __u32 lvb_padding; + __u64 lvb_size; + __s64 lvb_mtime; + __s64 lvb_atime; + __s64 lvb_ctime; + __u64 lvb_blocks; + __u32 lvb_mtime_ns; + __u32 lvb_atime_ns; + __u32 lvb_ctime_ns; + __u32 lvb_padding; }; extern void lustre_swab_ost_lvb(struct ost_lvb *lvb); @@ -1844,15 +1945,15 @@ extern void lustre_swab_ost_lvb(struct ost_lvb *lvb); */ #ifndef QUOTABLOCK_BITS -#define QUOTABLOCK_BITS 10 +# define QUOTABLOCK_BITS LUSTRE_QUOTABLOCK_BITS #endif #ifndef QUOTABLOCK_SIZE -#define QUOTABLOCK_SIZE (1 << QUOTABLOCK_BITS) +# define QUOTABLOCK_SIZE LUSTRE_QUOTABLOCK_SIZE #endif #ifndef toqb -#define toqb(x) (((x) + QUOTABLOCK_SIZE - 1) >> QUOTABLOCK_BITS) +# define toqb lustre_stoqb #endif /* The lquota_id structure is an union of all the possible identifier types that @@ -1878,12 +1979,6 @@ struct obd_quotactl { extern void lustre_swab_obd_quotactl(struct obd_quotactl *q); -#define Q_QUOTACHECK 0x800100 /* deprecated as of 2.4 */ -#define Q_INITQUOTA 0x800101 /* deprecated as of 2.4 */ -#define Q_GETOINFO 0x800102 /* get obd quota info */ -#define Q_GETOQUOTA 0x800103 /* get obd quotas */ -#define Q_FINVALIDATE 0x800104 /* deprecated as of 2.4 */ - #define Q_COPY(out, in, member) (out)->member = (in)->member #define QCTL_COPY(out, in) \ @@ -2033,8 +2128,8 @@ typedef enum { MDS_DISCONNECT = 39, MDS_GETSTATUS = 40, MDS_STATFS = 41, - MDS_PIN = 42, - MDS_UNPIN = 43, + MDS_PIN = 42, /* obsolete, never used in a release */ + MDS_UNPIN = 43, /* obsolete, never used in a release */ MDS_SYNC = 44, MDS_DONE_WRITING = 45, MDS_SET_INFO = 46, @@ -2043,7 +2138,7 @@ typedef enum { MDS_GETXATTR = 49, MDS_SETXATTR = 50, /* obsolete, now it's MDS_REINT op */ MDS_WRITEPAGE = 51, - MDS_IS_SUBDIR = 52, + MDS_IS_SUBDIR = 52, /* obsolete, never used in a release */ MDS_GET_INFO = 53, MDS_HSM_STATE_GET = 54, 
MDS_HSM_STATE_SET = 55, @@ -2061,11 +2156,11 @@ typedef enum { /* opcodes for object update */ typedef enum { - UPDATE_OBJ = 1000, - UPDATE_LAST_OPC + OUT_UPDATE = 1000, + OUT_UPDATE_LAST_OPC } update_cmd_t; -#define UPDATE_FIRST_OPC UPDATE_OBJ +#define OUT_UPDATE_FIRST_OPC OUT_UPDATE /* * Do not exceed 63 @@ -2080,7 +2175,7 @@ typedef enum { REINT_OPEN = 6, REINT_SETXATTR = 7, REINT_RMENTRY = 8, -// REINT_WRITE = 9, + REINT_MIGRATE = 9, REINT_MAX } mds_reint_t, mdt_reint_t; @@ -2093,19 +2188,35 @@ extern void lustre_swab_generic_32s (__u32 *val); #define DISP_LOOKUP_POS 0x00000008 #define DISP_OPEN_CREATE 0x00000010 #define DISP_OPEN_OPEN 0x00000020 -#define DISP_ENQ_COMPLETE 0x00400000 +#define DISP_ENQ_COMPLETE 0x00400000 /* obsolete and unused */ #define DISP_ENQ_OPEN_REF 0x00800000 #define DISP_ENQ_CREATE_REF 0x01000000 #define DISP_OPEN_LOCK 0x02000000 +#define DISP_OPEN_LEASE 0x04000000 +#define DISP_OPEN_STRIPE 0x08000000 +#define DISP_OPEN_DENY 0x10000000 /* INODE LOCK PARTS */ -#define MDS_INODELOCK_LOOKUP 0x000001 /* dentry, mode, owner, group */ -#define MDS_INODELOCK_UPDATE 0x000002 /* size, links, timestamps */ -#define MDS_INODELOCK_OPEN 0x000004 /* For opened files */ -#define MDS_INODELOCK_LAYOUT 0x000008 /* for layout */ -#define MDS_INODELOCK_PERM 0x000010 /* for permission */ - -#define MDS_INODELOCK_MAXSHIFT 4 +#define MDS_INODELOCK_LOOKUP 0x000001 /* For namespace, dentry etc, and also + * was used to protect permission (mode, + * owner, group etc) before 2.4. */ +#define MDS_INODELOCK_UPDATE 0x000002 /* size, links, timestamps */ +#define MDS_INODELOCK_OPEN 0x000004 /* For opened files */ +#define MDS_INODELOCK_LAYOUT 0x000008 /* for layout */ + +/* The PERM bit is added int 2.4, and it is used to protect permission(mode, + * owner, group, acl etc), so to separate the permission from LOOKUP lock. + * Because for remote directories(in DNE), these locks will be granted by + * different MDTs(different ldlm namespace). + * + * For local directory, MDT will always grant UPDATE_LOCK|PERM_LOCK together. + * For Remote directory, the master MDT, where the remote directory is, will + * grant UPDATE_LOCK|PERM_LOCK, and the remote MDT, where the name entry is, + * will grant LOOKUP_LOCK. 
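A short sketch of the inodebits combinations described in the comment above, using the MDS_INODELOCK_* values defined just below (LOOKUP 0x01 through XATTR 0x20); it also evaluates the MDS_INODELOCK_FULL formula. The SK_* names are local to the sketch.

#include <stdio.h>

enum {
	SK_LOOKUP = 0x01,
	SK_UPDATE = 0x02,
	SK_OPEN   = 0x04,
	SK_LAYOUT = 0x08,
	SK_PERM   = 0x10,
	SK_XATTR  = 0x20,
	SK_MAXSHIFT = 5,
};

int main(void)
{
	/* same formula as MDS_INODELOCK_FULL: all six bits */
	unsigned int full = (1 << (SK_MAXSHIFT + 1)) - 1;

	/* local directory: UPDATE and PERM are always granted together */
	unsigned int local_master = SK_UPDATE | SK_PERM;
	/* remote (DNE) directory: the MDT holding the name entry only
	 * grants LOOKUP; the master MDT grants UPDATE|PERM */
	unsigned int remote_name_mdt = SK_LOOKUP;

	printf("FULL=0x%02x master=0x%02x name-MDT=0x%02x\n",
	       full, local_master, remote_name_mdt);
	/* FULL=0x3f */
	return 0;
}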
*/ +#define MDS_INODELOCK_PERM 0x000010 +#define MDS_INODELOCK_XATTR 0x000020 /* extended attributes */ + +#define MDS_INODELOCK_MAXSHIFT 5 /* This FULL lock is useful to take on unlink sort of operations */ #define MDS_INODELOCK_FULL ((1<<(MDS_INODELOCK_MAXSHIFT+1))-1) @@ -2186,41 +2297,48 @@ static inline int ll_inode_to_ext_flags(int iflags) } #endif +/* 64 possible states */ +enum md_transient_state { + MS_RESTORE = (1 << 0), /* restore is running */ +}; + struct mdt_body { - struct lu_fid fid1; - struct lu_fid fid2; - struct lustre_handle handle; - __u64 valid; - __u64 size; /* Offset, in the case of MDS_READPAGE */ - obd_time mtime; - obd_time atime; - obd_time ctime; - __u64 blocks; /* XID, in the case of MDS_READPAGE */ - __u64 ioepoch; - __u64 unused1; /* was "ino" until 2.4.0 */ - __u32 fsuid; - __u32 fsgid; - __u32 capability; - __u32 mode; - __u32 uid; - __u32 gid; - __u32 flags; /* from vfs for pin/unpin, LUSTRE_BFLAG close */ - __u32 rdev; - __u32 nlink; /* #bytes to read in the case of MDS_READPAGE */ - __u32 unused2; /* was "generation" until 2.4.0 */ - __u32 suppgid; - __u32 eadatasize; - __u32 aclsize; - __u32 max_mdsize; - __u32 max_cookiesize; - __u32 uid_h; /* high 32-bits of uid, for FUID */ - __u32 gid_h; /* high 32-bits of gid, for FUID */ - __u32 padding_5; /* also fix lustre_swab_mdt_body */ - __u64 padding_6; - __u64 padding_7; - __u64 padding_8; - __u64 padding_9; - __u64 padding_10; + struct lu_fid mbo_fid1; + struct lu_fid mbo_fid2; + struct lustre_handle mbo_handle; + __u64 mbo_valid; + __u64 mbo_size; /* Offset, in the case of MDS_READPAGE */ + __s64 mbo_mtime; + __s64 mbo_atime; + __s64 mbo_ctime; + __u64 mbo_blocks; /* XID, in the case of MDS_READPAGE */ + __u64 mbo_ioepoch; + __u64 mbo_t_state; /* transient file state defined in + * enum md_transient_state + * was "ino" until 2.4.0 */ + __u32 mbo_fsuid; + __u32 mbo_fsgid; + __u32 mbo_capability; + __u32 mbo_mode; + __u32 mbo_uid; + __u32 mbo_gid; + __u32 mbo_flags; + __u32 mbo_rdev; + __u32 mbo_nlink; /* #bytes to read in the case of MDS_READPAGE */ + __u32 mbo_unused2; /* was "generation" until 2.4.0 */ + __u32 mbo_suppgid; + __u32 mbo_eadatasize; + __u32 mbo_aclsize; + __u32 mbo_max_mdsize; + __u32 mbo_max_cookiesize; + __u32 mbo_uid_h; /* high 32-bits of uid, for FUID */ + __u32 mbo_gid_h; /* high 32-bits of gid, for FUID */ + __u32 mbo_padding_5; /* also fix lustre_swab_mdt_body */ + __u64 mbo_padding_6; + __u64 mbo_padding_7; + __u64 mbo_padding_8; + __u64 mbo_padding_9; + __u64 mbo_padding_10; }; /* 216 */ extern void lustre_swab_mdt_body (struct mdt_body *b); @@ -2275,9 +2393,9 @@ struct mdt_rec_setattr { __u32 sa_gid; __u64 sa_size; __u64 sa_blocks; - obd_time sa_mtime; - obd_time sa_atime; - obd_time sa_ctime; + __s64 sa_mtime; + __s64 sa_atime; + __s64 sa_ctime; __u32 sa_attr_flags; __u32 sa_mode; __u32 sa_bias; /* some operation flags */ @@ -2352,6 +2470,17 @@ extern void lustre_swab_mdt_rec_setattr (struct mdt_rec_setattr *sa); * hsm restore) */ #define MDS_OPEN_VOLATILE 0400000000000ULL /* File is volatile = created unlinked */ +#define MDS_OPEN_LEASE 01000000000000ULL /* Open the file and grant lease + * delegation, succeed if it's not + * being opened with conflict mode. 
+ */ +#define MDS_OPEN_RELEASE 02000000000000ULL /* Open the file for HSM release */ + +/* lustre internal open flags, which should not be set from user space */ +#define MDS_OPEN_FL_INTERNAL (MDS_OPEN_HAS_EA | MDS_OPEN_HAS_OBJS | \ + MDS_OPEN_OWNEROVERRIDE | MDS_OPEN_LOCK | \ + MDS_OPEN_BY_FID | MDS_OPEN_LEASE | \ + MDS_OPEN_RELEASE) /* permission for create non-directory file */ #define MAY_CREATE (1 << 7) @@ -2370,19 +2499,21 @@ extern void lustre_swab_mdt_rec_setattr (struct mdt_rec_setattr *sa); /* lfs rgetfacl permission check */ #define MAY_RGETFACL (1 << 14) -enum { +enum mds_op_bias { MDS_CHECK_SPLIT = 1 << 0, MDS_CROSS_REF = 1 << 1, MDS_VTX_BYPASS = 1 << 2, MDS_PERM_BYPASS = 1 << 3, MDS_SOM = 1 << 4, MDS_QUOTA_IGNORE = 1 << 5, - MDS_CLOSE_CLEANUP = 1 << 6, + /* Was MDS_CLOSE_CLEANUP (1 << 6), No more used */ MDS_KEEP_ORPHAN = 1 << 7, MDS_RECOV_OPEN = 1 << 8, MDS_DATA_MODIFIED = 1 << 9, MDS_CREATE_VOLATILE = 1 << 10, MDS_OWNEROVERRIDE = 1 << 11, + MDS_HSM_RELEASE = 1 << 12, + MDS_RENAME_MIGRATE = 1 << 13, }; /* instance of mdt_reint_rec */ @@ -2400,7 +2531,7 @@ struct mdt_rec_create { struct lu_fid cr_fid1; struct lu_fid cr_fid2; struct lustre_handle cr_old_handle; /* handle in case of open replay */ - obd_time cr_time; + __s64 cr_time; __u64 cr_rdev; __u64 cr_ioepoch; __u64 cr_padding_1; /* rr_blocks */ @@ -2440,7 +2571,7 @@ struct mdt_rec_link { __u32 lk_suppgid2_h; struct lu_fid lk_fid1; struct lu_fid lk_fid2; - obd_time lk_time; + __s64 lk_time; __u64 lk_padding_1; /* rr_atime */ __u64 lk_padding_2; /* rr_ctime */ __u64 lk_padding_3; /* rr_size */ @@ -2467,7 +2598,7 @@ struct mdt_rec_unlink { __u32 ul_suppgid2_h; struct lu_fid ul_fid1; struct lu_fid ul_fid2; - obd_time ul_time; + __s64 ul_time; __u64 ul_padding_2; /* rr_atime */ __u64 ul_padding_3; /* rr_ctime */ __u64 ul_padding_4; /* rr_size */ @@ -2494,7 +2625,7 @@ struct mdt_rec_rename { __u32 rn_suppgid2_h; struct lu_fid rn_fid1; struct lu_fid rn_fid2; - obd_time rn_time; + __s64 rn_time; __u64 rn_padding_1; /* rr_atime */ __u64 rn_padding_2; /* rr_ctime */ __u64 rn_padding_3; /* rr_size */ @@ -2524,7 +2655,7 @@ struct mdt_rec_setxattr { __u32 sx_padding_2; __u32 sx_padding_3; __u64 sx_valid; - obd_time sx_time; + __s64 sx_time; __u64 sx_padding_5; /* rr_ctime */ __u64 sx_padding_6; /* rr_size */ __u64 sx_padding_7; /* rr_blocks */ @@ -2557,9 +2688,9 @@ struct mdt_rec_reint { __u32 rr_suppgid2_h; struct lu_fid rr_fid1; struct lu_fid rr_fid2; - obd_time rr_mtime; - obd_time rr_atime; - obd_time rr_ctime; + __s64 rr_mtime; + __s64 rr_atime; + __s64 rr_ctime; __u64 rr_size; __u64 rr_blocks; __u32 rr_bias; @@ -2572,11 +2703,12 @@ struct mdt_rec_reint { extern void lustre_swab_mdt_rec_reint(struct mdt_rec_reint *rr); +/* lmv structures */ struct lmv_desc { __u32 ld_tgt_count; /* how many MDS's */ __u32 ld_active_tgt_count; /* how many active */ __u32 ld_default_stripe_count; /* how many objects are used */ - __u32 ld_pattern; /* default MEA_MAGIC_* */ + __u32 ld_pattern; /* default hash pattern */ __u64 ld_default_hash_size; __u64 ld_padding_1; /* also fix lustre_swab_lmv_desc */ __u32 ld_padding_2; /* also fix lustre_swab_lmv_desc */ @@ -2588,31 +2720,131 @@ struct lmv_desc { extern void lustre_swab_lmv_desc (struct lmv_desc *ld); -/* TODO: lmv_stripe_md should contain mds capabilities for all slave fids */ -struct lmv_stripe_md { - __u32 mea_magic; - __u32 mea_count; - __u32 mea_master; - __u32 mea_padding; - char mea_pool_name[LOV_MAXPOOLNAME]; - struct lu_fid mea_ids[0]; +/* LMV layout EA, and it will be stored both in 
master and slave object */ +struct lmv_mds_md_v1 { + __u32 lmv_magic; + __u32 lmv_stripe_count; + __u32 lmv_master_mdt_index; /* On master object, it is master + * MDT index, on slave object, it + * is stripe index of the slave obj */ + __u32 lmv_hash_type; /* dir stripe policy, i.e. indicate + * which hash function to be used, + * Note: only lower 16 bits is being + * used for now. Higher 16 bits will + * be used to mark the object status, + * for example migrating or dead. */ + __u32 lmv_layout_version; /* Used for directory restriping */ + __u32 lmv_padding1; + __u64 lmv_padding2; + __u64 lmv_padding3; + char lmv_pool_name[LOV_MAXPOOLNAME + 1]; /* pool name */ + struct lu_fid lmv_stripe_fids[0]; /* FIDs for each stripe */ }; -extern void lustre_swab_lmv_stripe_md(struct lmv_stripe_md *mea); +#define LMV_MAGIC_V1 0x0CD20CD0 /* normal stripe lmv magic */ +#define LMV_MAGIC LMV_MAGIC_V1 -/* lmv structures */ -#define MEA_MAGIC_LAST_CHAR 0xb2221ca1 -#define MEA_MAGIC_ALL_CHARS 0xb222a11c -#define MEA_MAGIC_HASH_SEGMENT 0xb222a11b +/* #define LMV_USER_MAGIC 0x0CD30CD0 */ +#define LMV_MAGIC_STRIPE 0x0CD40CD0 /* magic for dir sub_stripe */ + +/* Right now only the lower part(0-16bits) of lmv_hash_type is being used, + * and the higher part will be the flag to indicate the status of object, + * for example the object is being migrated. And the hash function + * might be interpreted differently with different flags. */ +#define LMV_HASH_TYPE_MASK 0x0000ffff + +#define LMV_HASH_FLAG_MIGRATION 0x80000000 +#define LMV_HASH_FLAG_DEAD 0x40000000 +#define LMV_HASH_FLAG_BAD_TYPE 0x20000000 + +/* The striped directory has ever lost its master LMV EA, then LFSCK + * re-generated it. This flag is used to indicate such case. It is an + * on-disk flag. */ +#define LMV_HASH_FLAG_LOST_LMV 0x10000000 + +/** + * The FNV-1a hash algorithm is as follows: + * hash = FNV_offset_basis + * for each octet_of_data to be hashed + * hash = hash XOR octet_of_data + * hash = hash × FNV_prime + * return hash + * http://en.wikipedia.org/wiki/Fowler–Noll–Vo_hash_function#FNV-1a_hash + * + * http://www.isthe.com/chongo/tech/comp/fnv/index.html#FNV-reference-source + * FNV_prime is 2^40 + 2^8 + 0xb3 = 0x100000001b3ULL + **/ +#define LUSTRE_FNV_1A_64_PRIME 0x100000001b3ULL +#define LUSTRE_FNV_1A_64_OFFSET_BIAS 0xcbf29ce484222325ULL +static inline __u64 lustre_hash_fnv_1a_64(const void *buf, size_t size) +{ + __u64 hash = LUSTRE_FNV_1A_64_OFFSET_BIAS; + const unsigned char *p = buf; + size_t i; + + for (i = 0; i < size; i++) { + hash ^= p[i]; + hash *= LUSTRE_FNV_1A_64_PRIME; + } + + return hash; +} + +union lmv_mds_md { + __u32 lmv_magic; + struct lmv_mds_md_v1 lmv_md_v1; + struct lmv_user_md lmv_user_md; +}; + +extern void lustre_swab_lmv_mds_md(union lmv_mds_md *lmm); -#define MAX_HASH_SIZE_32 0x7fffffffUL -#define MAX_HASH_SIZE 0x7fffffffffffffffULL -#define MAX_HASH_HIGHEST_BIT 0x1000000000000000ULL +static inline int lmv_mds_md_size(int stripe_count, unsigned int lmm_magic) +{ + switch (lmm_magic) { + case LMV_MAGIC_V1:{ + struct lmv_mds_md_v1 *lmm1; + + return sizeof(*lmm1) + stripe_count * + sizeof(lmm1->lmv_stripe_fids[0]); + } + default: + return -EINVAL; + } +} + +static inline int lmv_mds_md_stripe_count_get(const union lmv_mds_md *lmm) +{ + switch (le32_to_cpu(lmm->lmv_magic)) { + case LMV_MAGIC_V1: + return le32_to_cpu(lmm->lmv_md_v1.lmv_stripe_count); + case LMV_USER_MAGIC: + return le32_to_cpu(lmm->lmv_user_md.lum_stripe_count); + default: + return -EINVAL; + } +} + +static inline int 
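A usage sketch for the FNV-1a helper above: in a striped directory the hash of an entry name, reduced modulo the stripe count, is what selects the stripe; the final modulo step is an assumption for illustration and is not quoted from this header. The sketch restates the prime and offset basis so it compiles on its own; the file name and stripe count are made up.

#include <stdio.h>
#include <stdint.h>
#include <string.h>

static uint64_t sketch_fnv_1a_64(const void *buf, size_t size)
{
	/* same prime and offset basis as LUSTRE_FNV_1A_64_* above */
	uint64_t hash = 0xcbf29ce484222325ULL;
	const unsigned char *p = buf;
	size_t i;

	for (i = 0; i < size; i++) {
		hash ^= p[i];
		hash *= 0x100000001b3ULL;
	}
	return hash;
}

int main(void)
{
	const char *name = "datafile.0042";	/* hypothetical entry name */
	uint32_t stripe_count = 4;		/* hypothetical stripe count */
	uint64_t hash = sketch_fnv_1a_64(name, strlen(name));

	printf("name '%s' -> hash %#llx -> stripe %u of %u\n",
	       name, (unsigned long long)hash,
	       (unsigned int)(hash % stripe_count), stripe_count);
	return 0;
}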
lmv_mds_md_stripe_count_set(union lmv_mds_md *lmm, + unsigned int stripe_count) +{ + switch (le32_to_cpu(lmm->lmv_magic)) { + case LMV_MAGIC_V1: + lmm->lmv_md_v1.lmv_stripe_count = cpu_to_le32(stripe_count); + break; + case LMV_USER_MAGIC: + lmm->lmv_user_md.lum_stripe_count = cpu_to_le32(stripe_count); + break; + default: + return -EINVAL; + } + return 0; +} enum fld_rpc_opc { - FLD_QUERY = 900, - FLD_LAST_OPC, - FLD_FIRST_OPC = FLD_QUERY + FLD_QUERY = 900, + FLD_READ = 901, + FLD_LAST_OPC, + FLD_FIRST_OPC = FLD_QUERY }; enum seq_rpc_opc { @@ -2626,6 +2858,20 @@ enum seq_op { SEQ_ALLOC_META = 1 }; +enum fld_op { + FLD_CREATE = 0, + FLD_DELETE = 1, + FLD_LOOKUP = 2, +}; + +/* LFSCK opcodes */ +typedef enum { + LFSCK_NOTIFY = 1101, + LFSCK_QUERY = 1102, + LFSCK_LAST_OPC, + LFSCK_FIRST_OPC = LFSCK_NOTIFY +} lfsck_cmd_t; + /* * LOV data structures */ @@ -2636,6 +2882,8 @@ enum seq_op { * protocol, this will limit the max number of OSTs per LOV */ #define LOV_DESC_MAGIC 0xB0CCDE5C +#define LOV_DESC_QOS_MAXAGE_DEFAULT 5 /* Seconds */ +#define LOV_DESC_STRIPE_SIZE_DEFAULT (1 << LNET_MTU_BITS) /* LOV settings descriptor (should only contain static info) */ struct lov_desc { @@ -2677,12 +2925,16 @@ struct ldlm_res_id { __u64 name[RES_NAME_SIZE]; }; +#define DLDLMRES "["LPX64":"LPX64":"LPX64"]."LPX64i +#define PLDLMRES(res) (res)->lr_name.name[0], (res)->lr_name.name[1], \ + (res)->lr_name.name[2], (res)->lr_name.name[3] + extern void lustre_swab_ldlm_res_id (struct ldlm_res_id *id); -static inline int ldlm_res_eq(const struct ldlm_res_id *res0, - const struct ldlm_res_id *res1) +static inline bool ldlm_res_eq(const struct ldlm_res_id *res0, + const struct ldlm_res_id *res1) { - return !memcmp(res0, res1, sizeof(*res0)); + return memcmp(res0, res1, sizeof(*res0)) == 0; } /* lock types */ @@ -2717,17 +2969,17 @@ struct ldlm_extent { __u64 gid; }; -static inline int ldlm_extent_overlap(struct ldlm_extent *ex1, - struct ldlm_extent *ex2) +static inline int ldlm_extent_overlap(const struct ldlm_extent *ex1, + const struct ldlm_extent *ex2) { - return (ex1->start <= ex2->end) && (ex2->start <= ex1->end); + return ex1->start <= ex2->end && ex2->start <= ex1->end; } /* check if @ex1 contains @ex2 */ -static inline int ldlm_extent_contain(struct ldlm_extent *ex1, - struct ldlm_extent *ex2) +static inline int ldlm_extent_contain(const struct ldlm_extent *ex1, + const struct ldlm_extent *ex2) { - return (ex1->start <= ex2->start) && (ex1->end >= ex2->end); + return ex1->start <= ex2->start && ex1->end >= ex2->end; } struct ldlm_inodebits { @@ -2903,14 +3155,14 @@ extern void lustre_swab_mgs_config_res(struct mgs_config_res *body); #define CM_START_SKIP (CM_START | CM_SKIP) struct cfg_marker { - __u32 cm_step; /* aka config version */ - __u32 cm_flags; - __u32 cm_vers; /* lustre release version number */ - __u32 cm_padding; /* 64 bit align */ - obd_time cm_createtime; /*when this record was first created */ - obd_time cm_canceltime; /*when this record is no longer valid*/ - char cm_tgtname[MTI_NAME_MAXLEN]; - char cm_comment[MTI_NAME_MAXLEN]; + __u32 cm_step; /* aka config version */ + __u32 cm_flags; + __u32 cm_vers; /* lustre release version number */ + __u32 cm_padding; /* 64 bit align */ + __s64 cm_createtime; /*when this record was first created */ + __s64 cm_canceltime; /*when this record is no longer valid*/ + char cm_tgtname[MTI_NAME_MAXLEN]; + char cm_comment[MTI_NAME_MAXLEN]; }; extern void lustre_swab_cfg_marker(struct cfg_marker *marker, @@ -2929,7 +3181,29 @@ typedef enum { } obd_cmd_t; #define 
OBD_FIRST_OPC OBD_PING -/* catalog of log objects */ +/** + * llog contexts indices. + * + * There is compatibility problem with indexes below, they are not + * continuous and must keep their numbers for compatibility needs. + * See LU-5218 for details. + */ +enum llog_ctxt_id { + LLOG_CONFIG_ORIG_CTXT = 0, + LLOG_CONFIG_REPL_CTXT = 1, + LLOG_MDS_OST_ORIG_CTXT = 2, + LLOG_MDS_OST_REPL_CTXT = 3, /* kept just to avoid re-assignment */ + LLOG_SIZE_ORIG_CTXT = 4, + LLOG_SIZE_REPL_CTXT = 5, + LLOG_TEST_ORIG_CTXT = 8, + LLOG_TEST_REPL_CTXT = 9, /* kept just to avoid re-assignment */ + LLOG_CHANGELOG_ORIG_CTXT = 12, /**< changelog generation on mdd */ + LLOG_CHANGELOG_REPL_CTXT = 13, /**< changelog access on clients */ + /* for multiple changelog consumers */ + LLOG_CHANGELOG_USER_ORIG_CTXT = 14, + LLOG_AGENT_ORIG_CTXT = 15, /**< agent requests generation on cdt */ + LLOG_MAX_CTXTS +}; /** Identifier for a single log object */ struct llog_logid { @@ -2969,6 +3243,7 @@ typedef enum { /* LLOG_JOIN_REC = LLOG_OP_MAGIC | 0x50000, obsolete 1.8.0 */ CHANGELOG_REC = LLOG_OP_MAGIC | 0x60000, CHANGELOG_USER_REC = LLOG_OP_MAGIC | 0x70000, + HSM_AGENT_REC = LLOG_OP_MAGIC | 0x80000, LLOG_HDR_MAGIC = LLOG_OP_MAGIC | 0x45539, LLOG_LOGID_MAGIC = LLOG_OP_MAGIC | 0x4553b, } llog_op_type; @@ -3000,6 +3275,12 @@ struct llog_rec_tail { (rec->lrh_len - sizeof(struct llog_rec_hdr) - \ sizeof(struct llog_rec_tail)) +static inline void *rec_tail(struct llog_rec_hdr *rec) +{ + return (void *)((char *)rec + rec->lrh_len - + sizeof(struct llog_rec_tail)); +} + struct llog_logid_rec { struct llog_rec_hdr lid_hdr; struct llog_logid lid_id; @@ -3011,16 +3292,16 @@ struct llog_logid_rec { struct llog_unlink_rec { struct llog_rec_hdr lur_hdr; - obd_id lur_oid; - obd_count lur_oseq; - obd_count lur_count; + __u64 lur_oid; + __u32 lur_oseq; + __u32 lur_count; struct llog_rec_tail lur_tail; } __attribute__((packed)); struct llog_unlink64_rec { struct llog_rec_hdr lur_hdr; struct lu_fid lur_fid; - obd_count lur_count; /* to destroy the lost precreated */ + __u32 lur_count; /* to destroy the lost precreated */ __u32 lur_padding1; __u64 lur_padding2; __u64 lur_padding3; @@ -3034,7 +3315,7 @@ struct llog_setattr64_rec { __u32 lsr_uid_h; __u32 lsr_gid; __u32 lsr_gid_h; - __u64 lsr_padding; + __u64 lsr_valid; struct llog_rec_tail lsr_tail; } __attribute__((packed)); @@ -3067,15 +3348,9 @@ struct changelog_setinfo { /** changelog record */ struct llog_changelog_rec { - struct llog_rec_hdr cr_hdr; - struct changelog_rec cr; - struct llog_rec_tail cr_tail; /**< for_sizezof_only */ -} __attribute__((packed)); - -struct llog_changelog_ext_rec { - struct llog_rec_hdr cr_hdr; - struct changelog_ext_rec cr; - struct llog_rec_tail cr_tail; /**< for_sizezof_only */ + struct llog_rec_hdr cr_hdr; + struct changelog_rec cr; /**< Variable length field */ + struct llog_rec_tail cr_do_not_use; /**< for_sizeof_only */ } __attribute__((packed)); #define CHANGELOG_USER_PREFIX "cl" @@ -3088,6 +3363,52 @@ struct llog_changelog_user_rec { struct llog_rec_tail cur_tail; } __attribute__((packed)); +enum agent_req_status { + ARS_WAITING, + ARS_STARTED, + ARS_FAILED, + ARS_CANCELED, + ARS_SUCCEED, +}; + +static inline const char *agent_req_status2name(enum agent_req_status ars) +{ + switch (ars) { + case ARS_WAITING: + return "WAITING"; + case ARS_STARTED: + return "STARTED"; + case ARS_FAILED: + return "FAILED"; + case ARS_CANCELED: + return "CANCELED"; + case ARS_SUCCEED: + return "SUCCEED"; + default: + return "UNKNOWN"; + } +} + +static inline bool 
agent_req_in_final_state(enum agent_req_status ars) +{ + return ((ars == ARS_SUCCEED) || (ars == ARS_FAILED) || + (ars == ARS_CANCELED)); +} + +struct llog_agent_req_rec { + struct llog_rec_hdr arr_hdr; /**< record header */ + __u32 arr_status; /**< status of the request */ + /* must match enum + * agent_req_status */ + __u32 arr_archive_id; /**< backend archive number */ + __u64 arr_flags; /**< req flags */ + __u64 arr_compound_id; /**< compound cookie */ + __u64 arr_req_create; /**< req. creation time */ + __u64 arr_req_change; /**< req. status change time */ + struct hsm_action_item arr_hai; /**< req. to the agent */ + struct llog_rec_tail arr_tail; /**< record tail for_sizezof_only */ +} __attribute__((packed)); + /* Old llog gen for compatibility */ struct llog_gen { __u64 mnt_cnt; @@ -3115,11 +3436,14 @@ enum llog_flag { LLOG_F_ZAP_WHEN_EMPTY = 0x1, LLOG_F_IS_CAT = 0x2, LLOG_F_IS_PLAIN = 0x4, + LLOG_F_EXT_JOBID = 0x8, + + LLOG_F_EXT_MASK = LLOG_F_EXT_JOBID, }; struct llog_log_hdr { - struct llog_rec_hdr llh_hdr; - obd_time llh_timestamp; + struct llog_rec_hdr llh_hdr; + __s64 llh_timestamp; __u32 llh_count; __u32 llh_bitmap_offset; __u32 llh_size; @@ -3177,37 +3501,37 @@ struct llogd_conn_body { /* Note: 64-bit types are 64-bit aligned in structure */ struct obdo { - obd_valid o_valid; /* hot fields in this obdo */ - struct ost_id o_oi; - obd_id o_parent_seq; - obd_size o_size; /* o_size-o_blocks == ost_lvb */ - obd_time o_mtime; - obd_time o_atime; - obd_time o_ctime; - obd_blocks o_blocks; /* brw: cli sent cached bytes */ - obd_size o_grant; - - /* 32-bit fields start here: keep an even number of them via padding */ - obd_blksize o_blksize; /* optimal IO blocksize */ - obd_mode o_mode; /* brw: cli sent cache remain */ - obd_uid o_uid; - obd_gid o_gid; - obd_flag o_flags; - obd_count o_nlink; /* brw: checksum */ - obd_count o_parent_oid; - obd_count o_misc; /* brw: o_dropped */ - - __u64 o_ioepoch; /* epoch in ost writes */ - __u32 o_stripe_idx; /* holds stripe idx */ - __u32 o_parent_ver; - struct lustre_handle o_handle; /* brw: lock handle to prolong - * locks */ - struct llog_cookie o_lcookie; /* destroy: unlink cookie from - * MDS */ + __u64 o_valid; /* hot fields in this obdo */ + struct ost_id o_oi; + __u64 o_parent_seq; + __u64 o_size; /* o_size-o_blocks == ost_lvb */ + __s64 o_mtime; + __s64 o_atime; + __s64 o_ctime; + __u64 o_blocks; /* brw: cli sent cached bytes */ + __u64 o_grant; + + /* 32-bit fields start here: keep an even number of them via padding */ + __u32 o_blksize; /* optimal IO blocksize */ + __u32 o_mode; /* brw: cli sent cache remain */ + __u32 o_uid; + __u32 o_gid; + __u32 o_flags; + __u32 o_nlink; /* brw: checksum */ + __u32 o_parent_oid; + __u32 o_misc; /* brw: o_dropped */ + + __u64 o_ioepoch; /* epoch in ost writes */ + __u32 o_stripe_idx; /* holds stripe idx */ + __u32 o_parent_ver; + struct lustre_handle o_handle; /* brw: lock handle to prolong + * locks */ + struct llog_cookie o_lcookie; /* destroy: unlink cookie from + * MDS */ __u32 o_uid_h; __u32 o_gid_h; - __u64 o_data_version; /* getattr: sum of iversion for + __u64 o_data_version; /* getattr: sum of iversion for * each stripe. 
* brw: grant space consumed on * the client for the write */ @@ -3222,27 +3546,115 @@ struct obdo { #define o_cksum o_nlink #define o_grant_used o_data_version -static inline void lustre_set_wire_obdo(struct obdo *wobdo, struct obdo *lobdo) +struct lfsck_request { + __u32 lr_event; + __u32 lr_index; + __u32 lr_flags; + __u32 lr_valid; + union { + __u32 lr_speed; + __u32 lr_status; + __u32 lr_type; + }; + __u16 lr_version; + __u16 lr_active; + __u16 lr_param; + __u16 lr_async_windows; + union { + __u32 lr_flags2; + __u32 lr_layout_version; + }; + struct lu_fid lr_fid; + struct lu_fid lr_fid2; + union { + struct lu_fid lr_fid3; + char lr_pool_name[LOV_MAXPOOLNAME + 1]; + }; + __u32 lr_stripe_count; + __u32 lr_hash_type; + __u64 lr_padding_3; +}; + +void lustre_swab_lfsck_request(struct lfsck_request *lr); + +struct lfsck_reply { + __u32 lr_status; + __u32 lr_padding_1; + __u64 lr_padding_2; +}; + +void lustre_swab_lfsck_reply(struct lfsck_reply *lr); + +enum lfsck_events { + LE_LASTID_REBUILDING = 1, + LE_LASTID_REBUILT = 2, + LE_PHASE1_DONE = 3, + LE_PHASE2_DONE = 4, + LE_START = 5, + LE_STOP = 6, + LE_QUERY = 7, + LE_FID_ACCESSED = 8, + LE_PEER_EXIT = 9, + LE_CONDITIONAL_DESTROY = 10, + LE_PAIRS_VERIFY = 11, + LE_CREATE_ORPHAN = 12, + LE_SKIP_NLINK_DECLARE = 13, + LE_SKIP_NLINK = 14, + LE_SET_LMV_MASTER = 15, + LE_SET_LMV_SLAVE = 16, +}; + +enum lfsck_event_flags { + LEF_TO_OST = 0x00000001, + LEF_FROM_OST = 0x00000002, + LEF_SET_LMV_HASH = 0x00000004, + LEF_SET_LMV_ALL = 0x00000008, + LEF_RECHECK_NAME_HASH = 0x00000010, +}; + +static inline void lustre_set_wire_obdo(const struct obd_connect_data *ocd, + struct obdo *wobdo, + const struct obdo *lobdo) { - memcpy(wobdo, lobdo, sizeof(*lobdo)); - wobdo->o_flags &= ~OBD_FL_LOCAL_MASK; + *wobdo = *lobdo; + wobdo->o_flags &= ~OBD_FL_LOCAL_MASK; + if (ocd == NULL) + return; + + if (unlikely(!(ocd->ocd_connect_flags & OBD_CONNECT_FID)) && + fid_seq_is_echo(ostid_seq(&lobdo->o_oi))) { + /* Currently OBD_FL_OSTID will only be used when 2.4 echo + * client communicate with pre-2.4 server */ + wobdo->o_oi.oi.oi_id = fid_oid(&lobdo->o_oi.oi_fid); + wobdo->o_oi.oi.oi_seq = fid_seq(&lobdo->o_oi.oi_fid); + } } -static inline void lustre_get_wire_obdo(struct obdo *lobdo, struct obdo *wobdo) +static inline void lustre_get_wire_obdo(const struct obd_connect_data *ocd, + struct obdo *lobdo, + const struct obdo *wobdo) { - obd_flag local_flags = 0; + __u32 local_flags = 0; if (lobdo->o_valid & OBD_MD_FLFLAGS) local_flags = lobdo->o_flags & OBD_FL_LOCAL_MASK; - LASSERT(!(wobdo->o_flags & OBD_FL_LOCAL_MASK)); - - memcpy(lobdo, wobdo, sizeof(*lobdo)); - if (local_flags != 0) { - lobdo->o_valid |= OBD_MD_FLFLAGS; - lobdo->o_flags &= ~OBD_FL_LOCAL_MASK; - lobdo->o_flags |= local_flags; - } + *lobdo = *wobdo; + if (local_flags != 0) { + lobdo->o_valid |= OBD_MD_FLFLAGS; + lobdo->o_flags &= ~OBD_FL_LOCAL_MASK; + lobdo->o_flags |= local_flags; + } + if (ocd == NULL) + return; + + if (unlikely(!(ocd->ocd_connect_flags & OBD_CONNECT_FID)) && + fid_seq_is_echo(wobdo->o_oi.oi.oi_seq)) { + /* see above */ + lobdo->o_oi.oi_fid.f_seq = wobdo->o_oi.oi.oi_seq; + lobdo->o_oi.oi_fid.f_oid = wobdo->o_oi.oi.oi_id; + lobdo->o_oi.oi_fid.f_ver = 0; + } } extern void lustre_swab_obdo (struct obdo *o); @@ -3260,7 +3672,7 @@ struct ll_fiemap_info_key { }; extern void lustre_swab_ost_body (struct ost_body *b); -extern void lustre_swab_ost_last_id(obd_id *id); +extern void lustre_swab_ost_last_id(__u64 *id); extern void lustre_swab_fiemap(struct ll_user_fiemap *fiemap); extern void 
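/*
 * Example (sketch): lustre_set_wire_obdo() and lustre_get_wire_obdo()
 * above are meant to be used as a matching pair around an RPC, with the
 * peer's obd_connect_data, so that an old (pre-OBD_CONNECT_FID) echo peer
 * keeps the legacy id/seq layout in the ostid.  Here 'ocd' is the import's
 * obd_connect_data and 'local' a client-side obdo, both assumed to exist
 * in the caller:
 *
 *	struct obdo wire;
 *
 *	lustre_set_wire_obdo(ocd, &wire, &local);   // local obdo -> wire
 *	// ...pack 'wire' into the request and send it...
 *	lustre_get_wire_obdo(ocd, &local, &wire);   // wire obdo -> local
 *
 * Passing ocd == NULL skips the compatibility conversion; only the
 * OBD_FL_LOCAL_MASK flag handling is applied in that case.
 */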
lustre_swab_lov_user_md_v1(struct lov_user_md_v1 *lum); @@ -3268,6 +3680,8 @@ extern void lustre_swab_lov_user_md_v3(struct lov_user_md_v3 *lum); extern void lustre_swab_lov_user_md_objects(struct lov_user_ost_data *lod, int stripe_count); extern void lustre_swab_lov_mds_md(struct lov_mds_md *lmm); +void lustre_print_user_md(unsigned int level, struct lov_user_md *lum, + const char *msg); /* llog_swab.c */ extern void lustre_swab_llogd_body (struct llogd_body *d); @@ -3339,6 +3753,7 @@ enum idx_info_flags { II_FL_VARKEY = 1 << 1, /* keys can be of variable size */ II_FL_VARREC = 1 << 2, /* records can be of variable size */ II_FL_NONUNQ = 1 << 3, /* index supports non-unique keys */ + II_FL_NOKEY = 1 << 4, /* client doesn't care about key */ }; #define LIP_MAGIC 0x8A6D6B6C @@ -3443,6 +3858,16 @@ static inline int capa_for_oss(struct lustre_capa *c) return (c->lc_opc & CAPA_OPC_INDEX_LOOKUP) == 0; } +static inline bool lovea_slot_is_dummy(const struct lov_ost_data_v1 *obj) +{ + /* zero area does not care about the bytes-order. */ + if (obj->l_ost_oi.oi.oi_id == 0 && obj->l_ost_oi.oi.oi_seq == 0 && + obj->l_ost_idx == 0 && obj->l_ost_gen == 0) + return true; + + return false; +} + /* lustre_capa::lc_hmac_alg */ enum { CAPA_HMAC_ALG_SHA1 = 1, /**< sha1 algorithm */ @@ -3493,6 +3918,14 @@ struct getinfo_fid2path { void lustre_swab_fid2path (struct getinfo_fid2path *gf); +/** path2parent request/reply structures */ +struct getparent { + struct lu_fid gp_fid; /**< parent FID */ + __u32 gp_linkno; /**< hardlink number */ + __u32 gp_name_size; /**< size of the name field */ + char gp_name[0]; /**< zero-terminated link name */ +} __attribute__((packed)); + enum { LAYOUT_INTENT_ACCESS = 0, LAYOUT_INTENT_READ = 1, @@ -3539,90 +3972,188 @@ extern void lustre_swab_hsm_user_item(struct hsm_user_item *hui); extern void lustre_swab_hsm_request(struct hsm_request *hr); /** - * These are object update opcode under UPDATE_OBJ, which is currently - * being used by cross-ref operations between MDT. + * OUT_UPDATE RPC Format * * During the cross-ref operation, the Master MDT, which the client send the * request to, will disassembly the operation into object updates, then OSP * will send these updates to the remote MDT to be executed. * - * Update request format - * magic: UPDATE_BUFFER_MAGIC_V1 - * Count: How many updates in the req. - * bufs[0] : following are packets of object. - * update[0]: - * type: object_update_op, the op code of update - * fid: The object fid of the update. - * lens/bufs: other parameters of the update. - * update[1]: - * type: object_update_op, the op code of update - * fid: The object fid of the update. - * lens/bufs: other parameters of the update. - * .......... - * update[7]: type: object_update_op, the op code of update - * fid: The object fid of the update. - * lens/bufs: other parameters of the update. - * Current 8 maxim updates per object update request. + * An UPDATE_OBJ RPC does a list of updates. Each update belongs to an + * operation and does a type of modification to an object. + * + * Request Format * - ******************************************************************* - * update reply format: + * update_buf + * update (1st) + * update (2nd) + * ... + * update (ub_count-th) * - * ur_version: UPDATE_REPLY_V1 - * ur_count: The count of the reply, which is usually equal - * to the number of updates in the request. - * ur_lens: The reply lengths of each object update. + * ub_count must be less than or equal to UPDATE_PER_RPC_MAX. 
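 *
 * For instance (an illustrative layout only, field values invented), a
 * request carrying two updates would be packed as:
 *
 *   object_update_request { ourq_magic = UPDATE_REQUEST_MAGIC, ourq_count = 2 }
 *     object_update[0] { ou_type = OUT_CREATE,       ou_fid, ou_params[] }
 *     object_update[1] { ou_type = OUT_INDEX_INSERT, ou_fid, ou_params[] }
 *
 * Each update is located by walking object_update_size() from the first
 * one, as object_update_request_get() below does.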
* - * replies: 1st update reply [4bytes_ret: other body] - * 2nd update reply [4bytes_ret: other body] - * ..... - * nth update reply [4bytes_ret: other body] + * Reply Format * - * For each reply of the update, the format would be - * result(4 bytes):Other stuff + * update_reply + * rc [+ buffers] (1st) + * rc [+ buffers] (2st) + * ... + * rc [+ buffers] (nr_count-th) + * + * ur_count must be less than or equal to UPDATE_PER_RPC_MAX and should usually + * be equal to ub_count. */ -#define UPDATE_MAX_OPS 10 -#define UPDATE_BUFFER_MAGIC_V1 0xBDDE0001 -#define UPDATE_BUFFER_MAGIC UPDATE_BUFFER_MAGIC_V1 -#define UPDATE_BUF_COUNT 8 -enum object_update_op { - OBJ_CREATE = 1, - OBJ_DESTROY = 2, - OBJ_REF_ADD = 3, - OBJ_REF_DEL = 4, - OBJ_ATTR_SET = 5, - OBJ_ATTR_GET = 6, - OBJ_XATTR_SET = 7, - OBJ_XATTR_GET = 8, - OBJ_INDEX_LOOKUP = 9, - OBJ_INDEX_INSERT = 10, - OBJ_INDEX_DELETE = 11, - OBJ_LAST -}; - -struct update { - __u32 u_type; - __u32 u_batchid; - struct lu_fid u_fid; - __u32 u_lens[UPDATE_BUF_COUNT]; - __u32 u_bufs[0]; -}; - -struct update_buf { - __u32 ub_magic; - __u32 ub_count; - __u32 ub_bufs[0]; -}; - -#define UPDATE_REPLY_V1 0x00BD0001 -struct update_reply { - __u32 ur_version; - __u32 ur_count; - __u32 ur_lens[0]; -}; - -void lustre_swab_update_buf(struct update_buf *ub); -void lustre_swab_update_reply_buf(struct update_reply *ur); +/** + * Type of each update + */ +enum update_type { + OUT_CREATE = 1, + OUT_DESTROY = 2, + OUT_REF_ADD = 3, + OUT_REF_DEL = 4, + OUT_ATTR_SET = 5, + OUT_ATTR_GET = 6, + OUT_XATTR_SET = 7, + OUT_XATTR_GET = 8, + OUT_INDEX_LOOKUP = 9, + OUT_INDEX_INSERT = 10, + OUT_INDEX_DELETE = 11, + OUT_WRITE = 12, + OUT_XATTR_DEL = 13, + OUT_LAST +}; + +enum update_flag { + UPDATE_FL_OST = 0x00000001, /* op from OST (not MDT) */ + UPDATE_FL_SYNC = 0x00000002, /* commit before replying */ + UPDATE_FL_COMMITTED = 0x00000004, /* op committed globally */ + UPDATE_FL_NOLOG = 0x00000008 /* for idempotent updates */ +}; + +struct object_update_param { + __u16 oup_len; /* length of this parameter */ + __u16 oup_padding; + __u32 oup_padding2; + char oup_buf[0]; +}; + +static inline size_t +object_update_param_size(const struct object_update_param *param) +{ + return cfs_size_round(sizeof(*param) + param->oup_len); +} + +/* object update */ +struct object_update { + __u16 ou_type; /* enum update_type */ + __u16 ou_params_count; /* update parameters count */ + __u32 ou_master_index; /* master MDT/OST index */ + __u32 ou_flags; /* enum update_flag */ + __u32 ou_padding1; /* padding 1 */ + __u64 ou_batchid; /* op transno on master */ + struct lu_fid ou_fid; /* object to be updated */ + struct object_update_param ou_params[0]; /* update params */ +}; + +#define UPDATE_REQUEST_MAGIC_V1 0xBDDE0001 +#define UPDATE_REQUEST_MAGIC_V2 0xBDDE0002 +#define UPDATE_REQUEST_MAGIC UPDATE_REQUEST_MAGIC_V2 +/* Hold object_updates sending to the remote OUT in single RPC */ +struct object_update_request { + __u32 ourq_magic; + __u16 ourq_count; /* number of ourq_updates[] */ + __u16 ourq_padding; + struct object_update ourq_updates[0]; +}; + +void lustre_swab_object_update(struct object_update *ou); +void lustre_swab_object_update_request(struct object_update_request *our); + +static inline size_t +object_update_size(const struct object_update *update) +{ + const struct object_update_param *param; + size_t size; + unsigned int i; + + size = offsetof(struct object_update, ou_params[0]); + for (i = 0; i < update->ou_params_count; i++) { + param = (struct object_update_param *)((char *)update + 
size); + size += object_update_param_size(param); + } + + return size; +} + +static inline struct object_update * +object_update_request_get(const struct object_update_request *our, + unsigned int index, size_t *size) +{ + void *ptr; + unsigned int i; + + if (index >= our->ourq_count) + return NULL; + + ptr = (void *)&our->ourq_updates[0]; + for (i = 0; i < index; i++) + ptr += object_update_size(ptr); + + if (size != NULL) + *size = object_update_size(ptr); + + return ptr; +} + + +/* the result of object update */ +struct object_update_result { + __u32 our_rc; + __u16 our_datalen; + __u16 our_padding; + __u32 our_data[0]; +}; + +#define UPDATE_REPLY_MAGIC_V1 0x00BD0001 +#define UPDATE_REPLY_MAGIC_V2 0x00BD0002 +#define UPDATE_REPLY_MAGIC UPDATE_REPLY_MAGIC_V2 +/* Hold object_update_results being replied from the remote OUT. */ +struct object_update_reply { + __u32 ourp_magic; + __u16 ourp_count; + __u16 ourp_padding; + __u16 ourp_lens[0]; +}; + +void lustre_swab_object_update_result(struct object_update_result *our); +void lustre_swab_object_update_reply(struct object_update_reply *our); + +static inline struct object_update_result * +object_update_result_get(const struct object_update_reply *reply, + unsigned int index, size_t *size) +{ + __u16 count = reply->ourp_count; + unsigned int i; + void *ptr; + + if (index >= count) + return NULL; + + ptr = (char *)reply + + cfs_size_round(offsetof(struct object_update_reply, + ourp_lens[count])); + for (i = 0; i < index; i++) { + if (reply->ourp_lens[i] == 0) + return NULL; + + ptr += cfs_size_round(reply->ourp_lens[i]); + } + + if (size != NULL) + *size = reply->ourp_lens[index]; + + return ptr; +} /** layout swap request structure * fid1 and fid2 are in mdt_body @@ -3633,5 +4164,14 @@ struct mdc_swap_layouts { void lustre_swab_swap_layouts(struct mdc_swap_layouts *msl); +struct close_data { + struct lustre_handle cd_handle; + struct lu_fid cd_fid; + __u64 cd_data_version; + __u64 cd_reserved[8]; +}; + +void lustre_swab_close_data(struct close_data *data); + #endif /** @} lustreidl */