X-Git-Url: https://git.whamcloud.com/?p=fs%2Flustre-release.git;a=blobdiff_plain;f=lustre%2Finclude%2Flustre%2Flustre_idl.h;h=26da4c306f52a88e4286c0701e4c9a823bdafb32;hp=9b7e9414f30cf61384d546b86b09044fc658b628;hb=b425c1f4ab1aa9c914acf2d6b500817259a240e9;hpb=12d2b04f2204bc087f380cb214a29c126f50d709 diff --git a/lustre/include/lustre/lustre_idl.h b/lustre/include/lustre/lustre_idl.h index 9b7e941..26da4c3 100644 --- a/lustre/include/lustre/lustre_idl.h +++ b/lustre/include/lustre/lustre_idl.h @@ -91,14 +91,11 @@ #ifndef _LUSTRE_IDL_H_ #define _LUSTRE_IDL_H_ -#if !defined(LPU64) #include /* for LPUX64, etc */ -#endif - -/* Defn's shared with user-space. */ -#include - +#include +#include /* Defn's shared with user-space. */ #include +#include /* * GENERAL STUFF @@ -167,8 +164,6 @@ #define LUSTRE_LOG_VERSION 0x00050000 #define LUSTRE_MGS_VERSION 0x00060000 -typedef __u32 mdsno_t; -typedef __u64 seqno_t; typedef __u64 obd_id; typedef __u64 obd_seq; typedef __s64 obd_time; @@ -196,6 +191,12 @@ struct lu_seq_range { __u32 lsr_flags; }; +struct lu_seq_range_array { + __u32 lsra_count; + __u32 lsra_padding; + struct lu_seq_range lsra_lsr[0]; +}; + #define LU_SEQ_RANGE_MDT 0x0 #define LU_SEQ_RANGE_OST 0x1 #define LU_SEQ_RANGE_ANY 0x3 @@ -207,12 +208,12 @@ static inline unsigned fld_range_type(const struct lu_seq_range *range) return range->lsr_flags & LU_SEQ_RANGE_MASK; } -static inline int fld_range_is_ost(const struct lu_seq_range *range) +static inline bool fld_range_is_ost(const struct lu_seq_range *range) { return fld_range_type(range) == LU_SEQ_RANGE_OST; } -static inline int fld_range_is_mdt(const struct lu_seq_range *range) +static inline bool fld_range_is_mdt(const struct lu_seq_range *range) { return fld_range_type(range) == LU_SEQ_RANGE_MDT; } @@ -255,7 +256,7 @@ static inline void fld_range_set_any(struct lu_seq_range *range) static inline __u64 range_space(const struct lu_seq_range *range) { - return range->lsr_end - range->lsr_start; + return range->lsr_end - range->lsr_start; } /** @@ -271,26 +272,25 @@ static inline void range_init(struct lu_seq_range *range) * check if given seq id \a s is within given range \a r */ -static inline int range_within(const struct lu_seq_range *range, - __u64 s) +static inline bool range_within(const struct lu_seq_range *range, + __u64 s) { - return s >= range->lsr_start && s < range->lsr_end; + return s >= range->lsr_start && s < range->lsr_end; } -static inline int range_is_sane(const struct lu_seq_range *range) +static inline bool range_is_sane(const struct lu_seq_range *range) { - return (range->lsr_end >= range->lsr_start); + return range->lsr_end >= range->lsr_start; } -static inline int range_is_zero(const struct lu_seq_range *range) +static inline bool range_is_zero(const struct lu_seq_range *range) { - return (range->lsr_start == 0 && range->lsr_end == 0); + return range->lsr_start == 0 && range->lsr_end == 0; } -static inline int range_is_exhausted(const struct lu_seq_range *range) - +static inline bool range_is_exhausted(const struct lu_seq_range *range) { - return range_space(range) == 0; + return range_space(range) == 0; } /* return 0 if two range have the same location */ @@ -336,8 +336,9 @@ enum lma_incompat { LMAI_AGENT = 0x00000002, /* agent inode */ LMAI_REMOTE_PARENT = 0x00000004, /* the parent of the object is on the remote MDT */ + LMAI_STRIPED = 0x00000008, /* striped directory inode */ }; -#define LMA_INCOMPAT_SUPP (LMAI_AGENT | LMAI_REMOTE_PARENT) +#define LMA_INCOMPAT_SUPP (LMAI_AGENT | LMAI_REMOTE_PARENT | LMAI_STRIPED) extern 
void lustre_lma_swab(struct lustre_mdt_attrs *lma); extern void lustre_lma_init(struct lustre_mdt_attrs *lma, @@ -457,6 +458,7 @@ enum fid_seq { FID_SEQ_QUOTA = 0x200000005ULL, FID_SEQ_QUOTA_GLB = 0x200000006ULL, FID_SEQ_ROOT = 0x200000007ULL, /* Located on MDT0 */ + FID_SEQ_LAYOUT_RBTREE = 0x200000008ULL, FID_SEQ_NORMAL = 0x200000400ULL, FID_SEQ_LOV_DEFAULT = 0xffffffffffffffffULL }; @@ -476,75 +478,76 @@ enum special_oid { /** OID for FID_SEQ_DOT_LUSTRE */ enum dot_lustre_oid { - FID_OID_DOT_LUSTRE = 1UL, - FID_OID_DOT_LUSTRE_OBF = 2UL, + FID_OID_DOT_LUSTRE = 1UL, + FID_OID_DOT_LUSTRE_OBF = 2UL, + FID_OID_DOT_LUSTRE_LPF = 3UL, }; -static inline int fid_seq_is_mdt0(obd_seq seq) +static inline bool fid_seq_is_mdt0(obd_seq seq) { - return (seq == FID_SEQ_OST_MDT0); + return seq == FID_SEQ_OST_MDT0; } -static inline int fid_seq_is_mdt(const __u64 seq) +static inline bool fid_seq_is_mdt(__u64 seq) { return seq == FID_SEQ_OST_MDT0 || seq >= FID_SEQ_NORMAL; }; -static inline int fid_seq_is_echo(obd_seq seq) +static inline bool fid_seq_is_echo(obd_seq seq) { - return (seq == FID_SEQ_ECHO); + return seq == FID_SEQ_ECHO; } -static inline int fid_is_echo(const struct lu_fid *fid) +static inline bool fid_is_echo(const struct lu_fid *fid) { return fid_seq_is_echo(fid_seq(fid)); } -static inline int fid_seq_is_llog(obd_seq seq) +static inline bool fid_seq_is_llog(obd_seq seq) { - return (seq == FID_SEQ_LLOG); + return seq == FID_SEQ_LLOG; } -static inline int fid_is_llog(const struct lu_fid *fid) +static inline bool fid_is_llog(const struct lu_fid *fid) { /* file with OID == 0 is not llog but contains last oid */ return fid_seq_is_llog(fid_seq(fid)) && fid_oid(fid) > 0; } -static inline int fid_seq_is_rsvd(const __u64 seq) +static inline bool fid_seq_is_rsvd(__u64 seq) { - return (seq > FID_SEQ_OST_MDT0 && seq <= FID_SEQ_RSVD); + return seq > FID_SEQ_OST_MDT0 && seq <= FID_SEQ_RSVD; }; -static inline int fid_seq_is_special(const __u64 seq) +static inline bool fid_seq_is_special(__u64 seq) { return seq == FID_SEQ_SPECIAL; }; -static inline int fid_seq_is_local_file(const __u64 seq) +static inline bool fid_seq_is_local_file(__u64 seq) { return seq == FID_SEQ_LOCAL_FILE || seq == FID_SEQ_LOCAL_NAME; }; -static inline int fid_seq_is_root(const __u64 seq) +static inline bool fid_seq_is_root(__u64 seq) { return seq == FID_SEQ_ROOT; } -static inline int fid_seq_is_dot(const __u64 seq) +static inline bool fid_seq_is_dot(__u64 seq) { return seq == FID_SEQ_DOT_LUSTRE; } -static inline int fid_seq_is_default(const __u64 seq) +static inline bool fid_seq_is_default(__u64 seq) { return seq == FID_SEQ_LOV_DEFAULT; } -static inline int fid_is_mdt0(const struct lu_fid *fid) +static inline bool fid_is_mdt0(const struct lu_fid *fid) { - return fid_seq_is_mdt0(fid_seq(fid)); + return fid_seq_is_mdt0(fid_seq(fid)); } static inline void lu_root_fid(struct lu_fid *fid) @@ -559,14 +562,14 @@ static inline void lu_root_fid(struct lu_fid *fid) * \param fid the fid to be tested. * \return true if the fid is a igif; otherwise false. 
*/ -static inline int fid_seq_is_igif(const __u64 seq) +static inline bool fid_seq_is_igif(__u64 seq) { - return seq >= FID_SEQ_IGIF && seq <= FID_SEQ_IGIF_MAX; + return seq >= FID_SEQ_IGIF && seq <= FID_SEQ_IGIF_MAX; } -static inline int fid_is_igif(const struct lu_fid *fid) +static inline bool fid_is_igif(const struct lu_fid *fid) { - return fid_seq_is_igif(fid_seq(fid)); + return fid_seq_is_igif(fid_seq(fid)); } /** @@ -574,41 +577,46 @@ static inline int fid_is_igif(const struct lu_fid *fid) * \param fid the fid to be tested. * \return true if the fid is a idif; otherwise false. */ -static inline int fid_seq_is_idif(const __u64 seq) +static inline bool fid_seq_is_idif(__u64 seq) { - return seq >= FID_SEQ_IDIF && seq <= FID_SEQ_IDIF_MAX; + return seq >= FID_SEQ_IDIF && seq <= FID_SEQ_IDIF_MAX; } -static inline int fid_is_idif(const struct lu_fid *fid) +static inline bool fid_is_idif(const struct lu_fid *fid) { - return fid_seq_is_idif(fid_seq(fid)); + return fid_seq_is_idif(fid_seq(fid)); } -static inline int fid_is_local_file(const struct lu_fid *fid) +static inline bool fid_is_local_file(const struct lu_fid *fid) { return fid_seq_is_local_file(fid_seq(fid)); } -static inline int fid_seq_is_norm(const __u64 seq) +static inline bool fid_seq_is_norm(__u64 seq) { - return (seq >= FID_SEQ_NORMAL); + return (seq >= FID_SEQ_NORMAL); } -static inline int fid_is_norm(const struct lu_fid *fid) +static inline bool fid_is_norm(const struct lu_fid *fid) { - return fid_seq_is_norm(fid_seq(fid)); + return fid_seq_is_norm(fid_seq(fid)); +} + +static inline int fid_is_layout_rbtree(const struct lu_fid *fid) +{ + return fid_seq(fid) == FID_SEQ_LAYOUT_RBTREE; } /* convert an OST objid into an IDIF FID SEQ number */ static inline obd_seq fid_idif_seq(obd_id id, __u32 ost_idx) { - return FID_SEQ_IDIF | (ost_idx << 16) | ((id >> 32) & 0xffff); + return FID_SEQ_IDIF | (ost_idx << 16) | ((id >> 32) & 0xffff); } /* convert a packed IDIF FID into an OST objid */ static inline obd_id fid_idif_id(obd_seq seq, __u32 oid, __u32 ver) { - return ((__u64)ver << 48) | ((seq & 0xffff) << 32) | oid; + return ((__u64)ver << 48) | ((seq & 0xffff) << 32) | oid; } static inline __u32 idif_ost_idx(obd_seq seq) @@ -628,7 +636,7 @@ static inline obd_seq ostid_seq(const struct ost_id *ostid) if (fid_seq_is_mdt0(ostid->oi.oi_seq)) return FID_SEQ_OST_MDT0; - if (fid_seq_is_default(ostid->oi.oi_seq)) + if (unlikely(fid_seq_is_default(ostid->oi.oi_seq))) return FID_SEQ_LOV_DEFAULT; if (fid_is_idif(&ostid->oi_fid)) @@ -640,9 +648,12 @@ static inline obd_seq ostid_seq(const struct ost_id *ostid) /* extract OST objid from a wire ost_id (id/seq) pair */ static inline obd_id ostid_id(const struct ost_id *ostid) { - if (fid_seq_is_mdt0(ostid_seq(ostid))) + if (fid_seq_is_mdt0(ostid->oi.oi_seq)) return ostid->oi.oi_id & IDIF_OID_MASK; + if (unlikely(fid_seq_is_default(ostid->oi.oi_seq))) + return ostid->oi.oi_id; + if (fid_is_idif(&ostid->oi_fid)) return fid_idif_id(fid_seq(&ostid->oi_fid), fid_oid(&ostid->oi_fid), 0); @@ -685,13 +696,23 @@ static inline void ostid_set_seq_llog(struct ost_id *oi) */ static inline void ostid_set_id(struct ost_id *oi, __u64 oid) { - if (fid_seq_is_mdt0(ostid_seq(oi))) { + if (fid_seq_is_mdt0(oi->oi.oi_seq)) { if (oid >= IDIF_MAX_OID) { CERROR("Bad "LPU64" to set "DOSTID"\n", oid, POSTID(oi)); return; } oi->oi.oi_id = oid; + } else if (fid_is_idif(&oi->oi_fid)) { + if (oid >= IDIF_MAX_OID) { + CERROR("Bad "LPU64" to set "DOSTID"\n", + oid, POSTID(oi)); + return; + } + oi->oi_fid.f_seq = fid_idif_seq(oid, + 
fid_idif_ost_idx(&oi->oi_fid)); + oi->oi_fid.f_oid = oid; + oi->oi_fid.f_ver = oid >> 48; } else { if (oid > OBIF_MAX_OID) { CERROR("Bad "LPU64" to set "DOSTID"\n", @@ -702,25 +723,31 @@ static inline void ostid_set_id(struct ost_id *oi, __u64 oid) } } -static inline void ostid_inc_id(struct ost_id *oi) +static inline int fid_set_id(struct lu_fid *fid, __u64 oid) { - if (fid_seq_is_mdt0(ostid_seq(oi))) { - if (unlikely(ostid_id(oi) + 1 > IDIF_MAX_OID)) { - CERROR("Bad inc "DOSTID"\n", POSTID(oi)); - return; + if (unlikely(fid_seq_is_igif(fid->f_seq))) { + CERROR("bad IGIF, "DFID"\n", PFID(fid)); + return -EBADF; + } + + if (fid_is_idif(fid)) { + if (oid >= IDIF_MAX_OID) { + CERROR("Bad "LPU64" to set "DFID"\n", + oid, PFID(fid)); + return -EBADF; } - oi->oi.oi_id++; + fid->f_seq = fid_idif_seq(oid, fid_idif_ost_idx(fid)); + fid->f_oid = oid; + fid->f_ver = oid >> 48; } else { - oi->oi_fid.f_oid++; + if (oid > OBIF_MAX_OID) { + CERROR("Bad "LPU64" to set "DFID"\n", + oid, PFID(fid)); + return -EBADF; + } + fid->f_oid = oid; } -} - -static inline void ostid_dec_id(struct ost_id *oi) -{ - if (fid_seq_is_mdt0(ostid_seq(oi))) - oi->oi.oi_id--; - else - oi->oi_fid.f_oid--; + return 0; } /** @@ -732,45 +759,50 @@ static inline void ostid_dec_id(struct ost_id *oi) * struct lu_fid fields without loss. For reference see: * http://arch.lustre.org/index.php?title=Interoperability_fids_zfs */ -static inline int ostid_to_fid(struct lu_fid *fid, struct ost_id *ostid, +static inline int ostid_to_fid(struct lu_fid *fid, const struct ost_id *ostid, __u32 ost_idx) { + obd_seq seq = ostid_seq(ostid); + if (ost_idx > 0xffff) { CERROR("bad ost_idx, "DOSTID" ost_idx:%u\n", POSTID(ostid), ost_idx); return -EBADF; } - if (fid_seq_is_mdt0(ostid_seq(ostid))) { + if (fid_seq_is_mdt0(seq)) { + obd_id oid = ostid_id(ostid); + /* This is a "legacy" (old 1.x/2.early) OST object in "group 0" * that we map into the IDIF namespace. It allows up to 2^48 * objects per OST, as this is the object namespace that has * been in production for years. This can handle create rates * of 1M objects/s/OST for 9 years, or combinations thereof. */ - if (ostid_id(ostid) >= IDIF_MAX_OID) { - CERROR("bad MDT0 id, "DOSTID" ost_idx:%u\n", - POSTID(ostid), ost_idx); - return -EBADF; + if (oid >= IDIF_MAX_OID) { + CERROR("bad MDT0 id(1), "DOSTID" ost_idx:%u\n", + POSTID(ostid), ost_idx); + return -EBADF; } - fid->f_seq = fid_idif_seq(ostid_id(ostid), ost_idx); + fid->f_seq = fid_idif_seq(oid, ost_idx); /* truncate to 32 bits by assignment */ - fid->f_oid = ostid_id(ostid); + fid->f_oid = oid; /* in theory, not currently used */ - fid->f_ver = ostid_id(ostid) >> 48; - } else /* if (fid_seq_is_idif(seq) || fid_seq_is_norm(seq)) */ { - /* This is either an IDIF object, which identifies objects across - * all OSTs, or a regular FID. The IDIF namespace maps legacy - * OST objects into the FID namespace. In both cases, we just - * pass the FID through, no conversion needed. */ + fid->f_ver = oid >> 48; + } else if (likely(!fid_seq_is_default(seq))) + /* if (fid_seq_is_idif(seq) || fid_seq_is_norm(seq)) */ { + /* This is either an IDIF object, which identifies objects across + * all OSTs, or a regular FID. The IDIF namespace maps legacy + * OST objects into the FID namespace. In both cases, we just + * pass the FID through, no conversion needed. 
*/ if (ostid->oi_fid.f_ver != 0) { - CERROR("bad MDT0 id, "DOSTID" ost_idx:%u\n", + CERROR("bad MDT0 id(2), "DOSTID" ost_idx:%u\n", POSTID(ostid), ost_idx); return -EBADF; } *fid = ostid->oi_fid; } - return 0; + return 0; } /* pack any OST FID into an ostid (id/seq) for the wire/disk */ @@ -793,9 +825,9 @@ static inline int fid_to_ostid(const struct lu_fid *fid, struct ost_id *ostid) } /* Check whether the fid is for LAST_ID */ -static inline int fid_is_last_id(const struct lu_fid *fid) +static inline bool fid_is_last_id(const struct lu_fid *fid) { - return (fid_oid(fid) == 0); + return fid_oid(fid) == 0; } /** @@ -862,7 +894,7 @@ static inline void fid_be_to_cpu(struct lu_fid *dst, const struct lu_fid *src) dst->f_ver = be32_to_cpu(fid_ver(src)); } -static inline int fid_is_sane(const struct lu_fid *fid) +static inline bool fid_is_sane(const struct lu_fid *fid) { return fid != NULL && ((fid_seq(fid) >= FID_SEQ_START && fid_ver(fid) == 0) || @@ -870,15 +902,10 @@ static inline int fid_is_sane(const struct lu_fid *fid) fid_seq_is_rsvd(fid_seq(fid))); } -static inline int fid_is_zero(const struct lu_fid *fid) -{ - return fid_seq(fid) == 0 && fid_oid(fid) == 0; -} - extern void lustre_swab_lu_fid(struct lu_fid *fid); extern void lustre_swab_lu_seq_range(struct lu_seq_range *range); -static inline int lu_fid_eq(const struct lu_fid *f0, const struct lu_fid *f1) +static inline bool lu_fid_eq(const struct lu_fid *f0, const struct lu_fid *f1) { return memcmp(f0, f1, sizeof *f0) == 0; } @@ -903,7 +930,7 @@ static inline int lu_fid_cmp(const struct lu_fid *f0, static inline void ostid_cpu_to_le(const struct ost_id *src_oi, struct ost_id *dst_oi) { - if (fid_seq_is_mdt0(ostid_seq(src_oi))) { + if (fid_seq_is_mdt0(src_oi->oi.oi_seq)) { dst_oi->oi.oi_id = cpu_to_le64(src_oi->oi.oi_id); dst_oi->oi.oi_seq = cpu_to_le64(src_oi->oi.oi_seq); } else { @@ -914,7 +941,7 @@ static inline void ostid_cpu_to_le(const struct ost_id *src_oi, static inline void ostid_le_to_cpu(const struct ost_id *src_oi, struct ost_id *dst_oi) { - if (fid_seq_is_mdt0(ostid_seq(src_oi))) { + if (fid_seq_is_mdt0(src_oi->oi.oi_seq)) { dst_oi->oi.oi_id = le64_to_cpu(src_oi->oi.oi_id); dst_oi->oi.oi_seq = le64_to_cpu(src_oi->oi.oi_seq); } else { @@ -922,6 +949,20 @@ static inline void ostid_le_to_cpu(const struct ost_id *src_oi, } } +struct lu_orphan_rec { + /* The MDT-object's FID referenced by the orphan OST-object */ + struct lu_fid lor_fid; + __u32 lor_uid; + __u32 lor_gid; +}; + +struct lu_orphan_ent { + /* The orphan OST-object's FID */ + struct lu_fid loe_key; + struct lu_orphan_rec loe_rec; +}; +void lustre_swab_orphan_ent(struct lu_orphan_ent *ent); + /** @} lu_fid */ /** \defgroup lu_dir lu_dir @@ -1045,12 +1086,12 @@ static inline struct lu_dirent *lu_dirent_next(struct lu_dirent *ent) return next; } -static inline int lu_dirent_calc_size(int namelen, __u16 attr) +static inline size_t lu_dirent_calc_size(size_t namelen, __u16 attr) { - int size; + size_t size; - if (attr & LUDA_TYPE) { - const unsigned align = sizeof(struct luda_type) - 1; + if (attr & LUDA_TYPE) { + const size_t align = sizeof(struct luda_type) - 1; size = (sizeof(struct lu_dirent) + namelen + align) & ~align; size += sizeof(struct luda_type); } else @@ -1059,15 +1100,6 @@ static inline int lu_dirent_calc_size(int namelen, __u16 attr) return (size + 7) & ~7; } -static inline int lu_dirent_size(struct lu_dirent *ent) -{ - if (le16_to_cpu(ent->lde_reclen) == 0) { - return lu_dirent_calc_size(le16_to_cpu(ent->lde_namelen), - le32_to_cpu(ent->lde_attrs)); - } - 
return le16_to_cpu(ent->lde_reclen); -} - #define MDS_DIR_END_OFF 0xfffffffffffffffeULL /** @@ -1092,21 +1124,21 @@ struct lustre_handle { }; #define DEAD_HANDLE_MAGIC 0xdeadbeefcafebabeULL -static inline int lustre_handle_is_used(struct lustre_handle *lh) +static inline bool lustre_handle_is_used(const struct lustre_handle *lh) { - return lh->cookie != 0ull; + return lh->cookie != 0; } -static inline int lustre_handle_equal(const struct lustre_handle *lh1, - const struct lustre_handle *lh2) +static inline bool lustre_handle_equal(const struct lustre_handle *lh1, + const struct lustre_handle *lh2) { - return lh1->cookie == lh2->cookie; + return lh1->cookie == lh2->cookie; } static inline void lustre_handle_copy(struct lustre_handle *tgt, - struct lustre_handle *src) + const struct lustre_handle *src) { - tgt->cookie = src->cookie; + tgt->cookie = src->cookie; } /* flags for lm_flags */ @@ -1130,7 +1162,6 @@ struct lustre_msg_v2 { /* without gss, ptlrpc_body is put at the first buffer. */ #define PTLRPC_NUM_VERSIONS 4 -#define JOBSTATS_JOBID_SIZE 32 /* 32 bytes string */ struct ptlrpc_body_v3 { struct lustre_handle pb_handle; __u32 pb_type; @@ -1152,7 +1183,7 @@ struct ptlrpc_body_v3 { __u64 pb_pre_versions[PTLRPC_NUM_VERSIONS]; /* padding for future needs */ __u64 pb_padding[4]; - char pb_jobid[JOBSTATS_JOBID_SIZE]; + char pb_jobid[LUSTRE_JOBID_SIZE]; }; #define ptlrpc_body ptlrpc_body_v3 @@ -1301,6 +1332,9 @@ extern void lustre_swab_ptlrpc_body(struct ptlrpc_body *pb); #define OBD_CONNECT_DISP_STRIPE 0x10000000000000ULL/* create stripe disposition*/ #define OBD_CONNECT_OPEN_BY_FID 0x20000000000000ULL /* open by fid won't pack name in request */ +#define OBD_CONNECT_LFSCK 0x40000000000000ULL/* support online LFSCK */ +#define OBD_CONNECT_UNLINK_CLOSE 0x100000000000000ULL/* close file in unlink */ +#define OBD_CONNECT_DIR_STRIPE 0x400000000000000ULL /* striped DNE dir */ /* XXX README XXX: * Please DO NOT add flag values here before first ensuring that this same @@ -1345,7 +1379,9 @@ extern void lustre_swab_ptlrpc_body(struct ptlrpc_body *pb); OBD_CONNECT_LVB_TYPE | OBD_CONNECT_LAYOUTLOCK |\ OBD_CONNECT_PINGLESS | OBD_CONNECT_MAX_EASIZE |\ OBD_CONNECT_FLOCK_DEAD | \ - OBD_CONNECT_DISP_STRIPE) + OBD_CONNECT_DISP_STRIPE | OBD_CONNECT_LFSCK | \ + OBD_CONNECT_OPEN_BY_FID | \ + OBD_CONNECT_DIR_STRIPE) #define OST_CONNECT_SUPPORTED (OBD_CONNECT_SRVLOCK | OBD_CONNECT_GRANT | \ OBD_CONNECT_REQPORTAL | OBD_CONNECT_VERSION | \ @@ -1363,7 +1399,7 @@ extern void lustre_swab_ptlrpc_body(struct ptlrpc_body *pb); OBD_CONNECT_JOBSTATS | \ OBD_CONNECT_LIGHTWEIGHT | OBD_CONNECT_LVB_TYPE|\ OBD_CONNECT_LAYOUTLOCK | OBD_CONNECT_FID | \ - OBD_CONNECT_PINGLESS) + OBD_CONNECT_PINGLESS | OBD_CONNECT_LFSCK) #define ECHO_CONNECT_SUPPORTED (0) #define MGS_CONNECT_SUPPORTED (OBD_CONNECT_VERSION | OBD_CONNECT_AT | \ OBD_CONNECT_FULL20 | OBD_CONNECT_IMP_RECOV | \ @@ -1373,13 +1409,6 @@ extern void lustre_swab_ptlrpc_body(struct ptlrpc_body *pb); #define CLIENT_CONNECT_MDT_REQD (OBD_CONNECT_IBITS | OBD_CONNECT_FID | \ OBD_CONNECT_FULL20) -#define OBD_OCD_VERSION(major,minor,patch,fix) (((major)<<24) + ((minor)<<16) +\ - ((patch)<<8) + (fix)) -#define OBD_OCD_VERSION_MAJOR(version) ((int)((version)>>24)&255) -#define OBD_OCD_VERSION_MINOR(version) ((int)((version)>>16)&255) -#define OBD_OCD_VERSION_PATCH(version) ((int)((version)>>8)&255) -#define OBD_OCD_VERSION_FIX(version) ((int)(version)&255) - /* This structure is used for both request and reply. 
* * If we eventually have separate connect data for different types, which we @@ -1516,6 +1545,7 @@ enum obdo_flags { OBD_FL_RECOV_RESEND = 0x00080000, /* recoverable resent */ OBD_FL_NOSPC_BLK = 0x00100000, /* no more block space on OST */ OBD_FL_FLUSH = 0x00200000, /* flush pages on the OST */ + OBD_FL_SHORT_IO = 0x00400000, /* short io request */ /* Note that while these checksum values are currently separate bits, * in 2.x we can actually allow all values from 1-31 if we wanted. */ @@ -1526,10 +1556,23 @@ enum obdo_flags { OBD_FL_LOCAL_MASK = 0xF0000000, }; -#define LOV_MAGIC_V1 0x0BD10BD0 -#define LOV_MAGIC LOV_MAGIC_V1 -#define LOV_MAGIC_JOIN_V1 0x0BD20BD0 -#define LOV_MAGIC_V3 0x0BD30BD0 +/* + * All LOV EA magics should have the same postfix, if some new version + * Lustre instroduces new LOV EA magic, then when down-grade to an old + * Lustre, even though the old version system does not recognizes such + * new magic, it still can distinguish the corrupted cases by checking + * the magic's postfix. + */ +#define LOV_MAGIC_MAGIC 0x0BD0 +#define LOV_MAGIC_MASK 0xFFFF + +#define LOV_MAGIC_V1 (0x0BD10000 | LOV_MAGIC_MAGIC) +#define LOV_MAGIC_JOIN_V1 (0x0BD20000 | LOV_MAGIC_MAGIC) +#define LOV_MAGIC_V3 (0x0BD30000 | LOV_MAGIC_MAGIC) +#define LOV_MAGIC_MIGRATE (0x0BD40000 | LOV_MAGIC_MAGIC) +/* reserved for specifying OSTs */ +#define LOV_MAGIC_SPECIFIC (0x0BD50000 | LOV_MAGIC_MAGIC) +#define LOV_MAGIC LOV_MAGIC_V1 /* * magic for fully defined striping @@ -1546,14 +1589,6 @@ enum obdo_flags { #define LOV_MAGIC_V1_DEF 0x0CD10BD0 #define LOV_MAGIC_V3_DEF 0x0CD30BD0 -#define LOV_PATTERN_RAID0 0x001 /* stripes are used round-robin */ -#define LOV_PATTERN_RAID1 0x002 /* stripes are mirrors of each other */ -#define LOV_PATTERN_FIRST 0x100 /* first stripe is not in round-robin */ -#define LOV_PATTERN_CMOBD 0x200 - -#define LOV_PATTERN_F_MASK 0xffff0000 -#define LOV_PATTERN_F_RELEASED 0x80000000 /* HSM released file */ - #define lov_pattern(pattern) (pattern & ~LOV_PATTERN_F_MASK) #define lov_pattern_flags(pattern) (pattern & LOV_PATTERN_F_MASK) @@ -1612,25 +1647,30 @@ static inline void lmm_oi_set_seq(struct ost_id *oi, __u64 seq) oi->oi.oi_seq = seq; } -static inline __u64 lmm_oi_id(struct ost_id *oi) +static inline void lmm_oi_set_id(struct ost_id *oi, __u64 oid) +{ + oi->oi.oi_id = oid; +} + +static inline __u64 lmm_oi_id(const struct ost_id *oi) { return oi->oi.oi_id; } -static inline __u64 lmm_oi_seq(struct ost_id *oi) +static inline __u64 lmm_oi_seq(const struct ost_id *oi) { return oi->oi.oi_seq; } static inline void lmm_oi_le_to_cpu(struct ost_id *dst_oi, - struct ost_id *src_oi) + const struct ost_id *src_oi) { dst_oi->oi.oi_id = le64_to_cpu(src_oi->oi.oi_id); dst_oi->oi.oi_seq = le64_to_cpu(src_oi->oi.oi_seq); } static inline void lmm_oi_cpu_to_le(struct ost_id *dst_oi, - struct ost_id *src_oi) + const struct ost_id *src_oi) { dst_oi->oi.oi_id = cpu_to_le64(src_oi->oi.oi_id); dst_oi->oi.oi_seq = cpu_to_le64(src_oi->oi.oi_seq); @@ -1641,22 +1681,35 @@ static inline void lmm_oi_cpu_to_le(struct ost_id *dst_oi, #define MAX_MD_SIZE (sizeof(struct lov_mds_md) + 4 * sizeof(struct lov_ost_data)) #define MIN_MD_SIZE (sizeof(struct lov_mds_md) + 1 * sizeof(struct lov_ost_data)) +/* This is the default MDT reply size allocated, should the striping be bigger, + * it will be reallocated in mdt_fix_reply. 
+ * 100 stripes is a bit less than 2.5k of data */ +#define DEF_REP_MD_SIZE (sizeof(struct lov_mds_md) + \ + 100 * sizeof(struct lov_ost_data)) + #define XATTR_NAME_ACL_ACCESS "system.posix_acl_access" #define XATTR_NAME_ACL_DEFAULT "system.posix_acl_default" #define XATTR_USER_PREFIX "user." #define XATTR_TRUSTED_PREFIX "trusted." #define XATTR_SECURITY_PREFIX "security." -#define XATTR_LUSTRE_PREFIX "lustre." #define XATTR_NAME_LOV "trusted.lov" #define XATTR_NAME_LMA "trusted.lma" #define XATTR_NAME_LMV "trusted.lmv" +#define XATTR_NAME_DEFAULT_LMV "trusted.dmv" #define XATTR_NAME_LINK "trusted.link" #define XATTR_NAME_FID "trusted.fid" #define XATTR_NAME_VERSION "trusted.version" #define XATTR_NAME_SOM "trusted.som" #define XATTR_NAME_HSM "trusted.hsm" -#define XATTR_NAME_LFSCK_NAMESPACE "trusted.lfsck_namespace" +#define XATTR_NAME_LFSCK_BITMAP "trusted.lfsck_bitmap" + +#if LUSTRE_VERSION_CODE < OBD_OCD_VERSION(2, 8, 53, 0) +# define XATTR_NAME_LFSCK_NAMESPACE_OLD "trusted.lfsck_namespace" +#endif + +#define XATTR_NAME_LFSCK_NAMESPACE "trusted.lfsck_ns" +#define XATTR_NAME_MAX_LEN 32 /* increase this, if there is longer name. */ struct lov_mds_md_v3 { /* LOV EA mds/wire data (little-endian) */ __u32 lmm_magic; /* magic number = LOV_MAGIC_V3 */ @@ -1666,7 +1719,7 @@ struct lov_mds_md_v3 { /* LOV EA mds/wire data (little-endian) */ /* lmm_stripe_count used to be __u32 */ __u16 lmm_stripe_count; /* num stripes in use for this object */ __u16 lmm_layout_gen; /* layout generation number */ - char lmm_pool_name[LOV_MAXPOOLNAME]; /* must be 32bit aligned */ + char lmm_pool_name[LOV_MAXPOOLNAME + 1]; /* must be 32bit aligned */ struct lov_ost_data_v1 lmm_objects[0]; /* per-stripe data */ }; @@ -1680,6 +1733,30 @@ static inline __u32 lov_mds_md_size(__u16 stripes, __u32 lmm_magic) stripes * sizeof(struct lov_ost_data_v1); } +static inline __u32 +lov_mds_md_max_stripe_count(size_t buf_size, __u32 lmm_magic) +{ + switch (lmm_magic) { + case LOV_MAGIC_V1: { + struct lov_mds_md_v1 lmm; + + if (buf_size < sizeof(lmm)) + return 0; + + return (buf_size - sizeof(lmm)) / sizeof(lmm.lmm_objects[0]); + } + case LOV_MAGIC_V3: { + struct lov_mds_md_v3 lmm; + + if (buf_size < sizeof(lmm)) + return 0; + + return (buf_size - sizeof(lmm)) / sizeof(lmm.lmm_objects[0]); + } + default: + return 0; + } +} #define OBD_MD_FLID (0x00000001ULL) /* object ID */ #define OBD_MD_FLATIME (0x00000002ULL) /* access time */ @@ -1742,6 +1819,8 @@ static inline __u32 lov_mds_md_size(__u16 stripes, __u32 lmm_magic) #define OBD_MD_FLDATAVERSION (0x0010000000000000ULL) /* iversion sum */ #define OBD_MD_FLRELEASED (0x0020000000000000ULL) /* file released */ +#define OBD_MD_DEFAULT_MEA (0x0040000000000000ULL) /* default MEA */ + #define OBD_MD_FLGETATTR (OBD_MD_FLID | OBD_MD_FLATIME | OBD_MD_FLMTIME | \ OBD_MD_FLCTIME | OBD_MD_FLSIZE | OBD_MD_FLBLKSZ | \ OBD_MD_FLMODE | OBD_MD_FLTYPE | OBD_MD_FLUID | \ @@ -1794,7 +1873,7 @@ extern void lustre_swab_obd_statfs (struct obd_statfs *os); * space for unstable pages; asking * it to sync quickly */ -#define OBD_OBJECT_EOF 0xffffffffffffffffULL +#define OBD_OBJECT_EOF LUSTRE_EOF #define OST_MIN_PRECREATE 32 #define OST_MAX_PRECREATE 20000 @@ -1817,12 +1896,12 @@ extern void lustre_swab_obd_ioobj (struct obd_ioobj *ioo); /* multiple of 8 bytes => can array */ struct niobuf_remote { - __u64 offset; - __u32 len; - __u32 flags; + __u64 rnb_offset; + __u32 rnb_len; + __u32 rnb_flags; }; -extern void lustre_swab_niobuf_remote (struct niobuf_remote *nbr); +void lustre_swab_niobuf_remote(struct 
niobuf_remote *nbr); /* lock value block communicated between the filter and llite */ @@ -1865,15 +1944,15 @@ extern void lustre_swab_ost_lvb(struct ost_lvb *lvb); */ #ifndef QUOTABLOCK_BITS -#define QUOTABLOCK_BITS 10 +# define QUOTABLOCK_BITS LUSTRE_QUOTABLOCK_BITS #endif #ifndef QUOTABLOCK_SIZE -#define QUOTABLOCK_SIZE (1 << QUOTABLOCK_BITS) +# define QUOTABLOCK_SIZE LUSTRE_QUOTABLOCK_SIZE #endif #ifndef toqb -#define toqb(x) (((x) + QUOTABLOCK_SIZE - 1) >> QUOTABLOCK_BITS) +# define toqb lustre_stoqb #endif /* The lquota_id structure is an union of all the possible identifier types that @@ -1899,12 +1978,6 @@ struct obd_quotactl { extern void lustre_swab_obd_quotactl(struct obd_quotactl *q); -#define Q_QUOTACHECK 0x800100 /* deprecated as of 2.4 */ -#define Q_INITQUOTA 0x800101 /* deprecated as of 2.4 */ -#define Q_GETOINFO 0x800102 /* get obd quota info */ -#define Q_GETOQUOTA 0x800103 /* get obd quotas */ -#define Q_FINVALIDATE 0x800104 /* deprecated as of 2.4 */ - #define Q_COPY(out, in, member) (out)->member = (in)->member #define QCTL_COPY(out, in) \ @@ -2054,8 +2127,8 @@ typedef enum { MDS_DISCONNECT = 39, MDS_GETSTATUS = 40, MDS_STATFS = 41, - MDS_PIN = 42, - MDS_UNPIN = 43, + MDS_PIN = 42, /* obsolete, never used in a release */ + MDS_UNPIN = 43, /* obsolete, never used in a release */ MDS_SYNC = 44, MDS_DONE_WRITING = 45, MDS_SET_INFO = 46, @@ -2064,7 +2137,7 @@ typedef enum { MDS_GETXATTR = 49, MDS_SETXATTR = 50, /* obsolete, now it's MDS_REINT op */ MDS_WRITEPAGE = 51, - MDS_IS_SUBDIR = 52, + MDS_IS_SUBDIR = 52, /* obsolete, never used in a release */ MDS_GET_INFO = 53, MDS_HSM_STATE_GET = 54, MDS_HSM_STATE_SET = 55, @@ -2082,11 +2155,11 @@ typedef enum { /* opcodes for object update */ typedef enum { - UPDATE_OBJ = 1000, - UPDATE_LAST_OPC + OUT_UPDATE = 1000, + OUT_UPDATE_LAST_OPC } update_cmd_t; -#define UPDATE_FIRST_OPC UPDATE_OBJ +#define OUT_UPDATE_FIRST_OPC OUT_UPDATE /* * Do not exceed 63 @@ -2101,7 +2174,7 @@ typedef enum { REINT_OPEN = 6, REINT_SETXATTR = 7, REINT_RMENTRY = 8, -// REINT_WRITE = 9, + REINT_MIGRATE = 9, REINT_MAX } mds_reint_t, mdt_reint_t; @@ -2114,7 +2187,7 @@ extern void lustre_swab_generic_32s (__u32 *val); #define DISP_LOOKUP_POS 0x00000008 #define DISP_OPEN_CREATE 0x00000010 #define DISP_OPEN_OPEN 0x00000020 -#define DISP_ENQ_COMPLETE 0x00400000 +#define DISP_ENQ_COMPLETE 0x00400000 /* obsolete and unused */ #define DISP_ENQ_OPEN_REF 0x00800000 #define DISP_ENQ_CREATE_REF 0x01000000 #define DISP_OPEN_LOCK 0x02000000 @@ -2122,12 +2195,24 @@ extern void lustre_swab_generic_32s (__u32 *val); #define DISP_OPEN_STRIPE 0x08000000 /* INODE LOCK PARTS */ -#define MDS_INODELOCK_LOOKUP 0x000001 /* dentry, mode, owner, group */ -#define MDS_INODELOCK_UPDATE 0x000002 /* size, links, timestamps */ -#define MDS_INODELOCK_OPEN 0x000004 /* For opened files */ -#define MDS_INODELOCK_LAYOUT 0x000008 /* for layout */ -#define MDS_INODELOCK_PERM 0x000010 /* for permission */ -#define MDS_INODELOCK_XATTR 0x000020 /* extended attributes */ +#define MDS_INODELOCK_LOOKUP 0x000001 /* For namespace, dentry etc, and also + * was used to protect permission (mode, + * owner, group etc) before 2.4. */ +#define MDS_INODELOCK_UPDATE 0x000002 /* size, links, timestamps */ +#define MDS_INODELOCK_OPEN 0x000004 /* For opened files */ +#define MDS_INODELOCK_LAYOUT 0x000008 /* for layout */ + +/* The PERM bit is added int 2.4, and it is used to protect permission(mode, + * owner, group, acl etc), so to separate the permission from LOOKUP lock. 
+ * Because for remote directories(in DNE), these locks will be granted by + * different MDTs(different ldlm namespace). + * + * For local directory, MDT will always grant UPDATE_LOCK|PERM_LOCK together. + * For Remote directory, the master MDT, where the remote directory is, will + * grant UPDATE_LOCK|PERM_LOCK, and the remote MDT, where the name entry is, + * will grant LOOKUP_LOCK. */ +#define MDS_INODELOCK_PERM 0x000010 +#define MDS_INODELOCK_XATTR 0x000020 /* extended attributes */ #define MDS_INODELOCK_MAXSHIFT 5 /* This FULL lock is useful to take on unlink sort of operations */ @@ -2216,42 +2301,42 @@ enum md_transient_state { }; struct mdt_body { - struct lu_fid fid1; - struct lu_fid fid2; - struct lustre_handle handle; - __u64 valid; - __u64 size; /* Offset, in the case of MDS_READPAGE */ - obd_time mtime; - obd_time atime; - obd_time ctime; - __u64 blocks; /* XID, in the case of MDS_READPAGE */ - __u64 ioepoch; - __u64 t_state; /* transient file state defined in - * enum md_transient_state - * was "ino" until 2.4.0 */ - __u32 fsuid; - __u32 fsgid; - __u32 capability; - __u32 mode; - __u32 uid; - __u32 gid; - __u32 flags; /* from vfs for pin/unpin, LUSTRE_BFLAG close */ - __u32 rdev; - __u32 nlink; /* #bytes to read in the case of MDS_READPAGE */ - __u32 unused2; /* was "generation" until 2.4.0 */ - __u32 suppgid; - __u32 eadatasize; - __u32 aclsize; - __u32 max_mdsize; - __u32 max_cookiesize; - __u32 uid_h; /* high 32-bits of uid, for FUID */ - __u32 gid_h; /* high 32-bits of gid, for FUID */ - __u32 padding_5; /* also fix lustre_swab_mdt_body */ - __u64 padding_6; - __u64 padding_7; - __u64 padding_8; - __u64 padding_9; - __u64 padding_10; + struct lu_fid mbo_fid1; + struct lu_fid mbo_fid2; + struct lustre_handle mbo_handle; + __u64 mbo_valid; + __u64 mbo_size; /* Offset, in the case of MDS_READPAGE */ + obd_time mbo_mtime; + obd_time mbo_atime; + obd_time mbo_ctime; + __u64 mbo_blocks; /* XID, in the case of MDS_READPAGE */ + __u64 mbo_ioepoch; + __u64 mbo_t_state; /* transient file state defined in + * enum md_transient_state + * was "ino" until 2.4.0 */ + __u32 mbo_fsuid; + __u32 mbo_fsgid; + __u32 mbo_capability; + __u32 mbo_mode; + __u32 mbo_uid; + __u32 mbo_gid; + __u32 mbo_flags; + __u32 mbo_rdev; + __u32 mbo_nlink; /* #bytes to read in the case of MDS_READPAGE */ + __u32 mbo_unused2; /* was "generation" until 2.4.0 */ + __u32 mbo_suppgid; + __u32 mbo_eadatasize; + __u32 mbo_aclsize; + __u32 mbo_max_mdsize; + __u32 mbo_max_cookiesize; + __u32 mbo_uid_h; /* high 32-bits of uid, for FUID */ + __u32 mbo_gid_h; /* high 32-bits of gid, for FUID */ + __u32 mbo_padding_5; /* also fix lustre_swab_mdt_body */ + __u64 mbo_padding_6; + __u64 mbo_padding_7; + __u64 mbo_padding_8; + __u64 mbo_padding_9; + __u64 mbo_padding_10; }; /* 216 */ extern void lustre_swab_mdt_body (struct mdt_body *b); @@ -2389,6 +2474,12 @@ extern void lustre_swab_mdt_rec_setattr (struct mdt_rec_setattr *sa); */ #define MDS_OPEN_RELEASE 02000000000000ULL /* Open the file for HSM release */ +/* lustre internal open flags, which should not be set from user space */ +#define MDS_OPEN_FL_INTERNAL (MDS_OPEN_HAS_EA | MDS_OPEN_HAS_OBJS | \ + MDS_OPEN_OWNEROVERRIDE | MDS_OPEN_LOCK | \ + MDS_OPEN_BY_FID | MDS_OPEN_LEASE | \ + MDS_OPEN_RELEASE) + /* permission for create non-directory file */ #define MAY_CREATE (1 << 7) /* permission for create directory file */ @@ -2420,6 +2511,7 @@ enum mds_op_bias { MDS_CREATE_VOLATILE = 1 << 10, MDS_OWNEROVERRIDE = 1 << 11, MDS_HSM_RELEASE = 1 << 12, + MDS_RENAME_MIGRATE = 1 << 
13, }; /* instance of mdt_reint_rec */ @@ -2609,11 +2701,12 @@ struct mdt_rec_reint { extern void lustre_swab_mdt_rec_reint(struct mdt_rec_reint *rr); +/* lmv structures */ struct lmv_desc { __u32 ld_tgt_count; /* how many MDS's */ __u32 ld_active_tgt_count; /* how many active */ __u32 ld_default_stripe_count; /* how many objects are used */ - __u32 ld_pattern; /* default MEA_MAGIC_* */ + __u32 ld_pattern; /* default hash pattern */ __u64 ld_default_hash_size; __u64 ld_padding_1; /* also fix lustre_swab_lmv_desc */ __u32 ld_padding_2; /* also fix lustre_swab_lmv_desc */ @@ -2625,31 +2718,131 @@ struct lmv_desc { extern void lustre_swab_lmv_desc (struct lmv_desc *ld); -/* TODO: lmv_stripe_md should contain mds capabilities for all slave fids */ -struct lmv_stripe_md { - __u32 mea_magic; - __u32 mea_count; - __u32 mea_master; - __u32 mea_padding; - char mea_pool_name[LOV_MAXPOOLNAME]; - struct lu_fid mea_ids[0]; +/* LMV layout EA, and it will be stored both in master and slave object */ +struct lmv_mds_md_v1 { + __u32 lmv_magic; + __u32 lmv_stripe_count; + __u32 lmv_master_mdt_index; /* On master object, it is master + * MDT index, on slave object, it + * is stripe index of the slave obj */ + __u32 lmv_hash_type; /* dir stripe policy, i.e. indicate + * which hash function to be used, + * Note: only lower 16 bits is being + * used for now. Higher 16 bits will + * be used to mark the object status, + * for example migrating or dead. */ + __u32 lmv_layout_version; /* Used for directory restriping */ + __u32 lmv_padding1; + __u64 lmv_padding2; + __u64 lmv_padding3; + char lmv_pool_name[LOV_MAXPOOLNAME + 1]; /* pool name */ + struct lu_fid lmv_stripe_fids[0]; /* FIDs for each stripe */ }; -extern void lustre_swab_lmv_stripe_md(struct lmv_stripe_md *mea); +#define LMV_MAGIC_V1 0x0CD20CD0 /* normal stripe lmv magic */ +#define LMV_MAGIC LMV_MAGIC_V1 -/* lmv structures */ -#define MEA_MAGIC_LAST_CHAR 0xb2221ca1 -#define MEA_MAGIC_ALL_CHARS 0xb222a11c -#define MEA_MAGIC_HASH_SEGMENT 0xb222a11b +/* #define LMV_USER_MAGIC 0x0CD30CD0 */ +#define LMV_MAGIC_STRIPE 0x0CD40CD0 /* magic for dir sub_stripe */ + +/* Right now only the lower part(0-16bits) of lmv_hash_type is being used, + * and the higher part will be the flag to indicate the status of object, + * for example the object is being migrated. And the hash function + * might be interpreted differently with different flags. */ +#define LMV_HASH_TYPE_MASK 0x0000ffff + +#define LMV_HASH_FLAG_MIGRATION 0x80000000 +#define LMV_HASH_FLAG_DEAD 0x40000000 +#define LMV_HASH_FLAG_BAD_TYPE 0x20000000 + +/* The striped directory has ever lost its master LMV EA, then LFSCK + * re-generated it. This flag is used to indicate such case. It is an + * on-disk flag. 
*/ +#define LMV_HASH_FLAG_LOST_LMV 0x10000000 + +/** + * The FNV-1a hash algorithm is as follows: + * hash = FNV_offset_basis + * for each octet_of_data to be hashed + * hash = hash XOR octet_of_data + * hash = hash × FNV_prime + * return hash + * http://en.wikipedia.org/wiki/Fowler–Noll–Vo_hash_function#FNV-1a_hash + * + * http://www.isthe.com/chongo/tech/comp/fnv/index.html#FNV-reference-source + * FNV_prime is 2^40 + 2^8 + 0xb3 = 0x100000001b3ULL + **/ +#define LUSTRE_FNV_1A_64_PRIME 0x100000001b3ULL +#define LUSTRE_FNV_1A_64_OFFSET_BIAS 0xcbf29ce484222325ULL +static inline __u64 lustre_hash_fnv_1a_64(const void *buf, size_t size) +{ + __u64 hash = LUSTRE_FNV_1A_64_OFFSET_BIAS; + const unsigned char *p = buf; + size_t i; + + for (i = 0; i < size; i++) { + hash ^= p[i]; + hash *= LUSTRE_FNV_1A_64_PRIME; + } + + return hash; +} + +union lmv_mds_md { + __u32 lmv_magic; + struct lmv_mds_md_v1 lmv_md_v1; + struct lmv_user_md lmv_user_md; +}; -#define MAX_HASH_SIZE_32 0x7fffffffUL -#define MAX_HASH_SIZE 0x7fffffffffffffffULL -#define MAX_HASH_HIGHEST_BIT 0x1000000000000000ULL +extern void lustre_swab_lmv_mds_md(union lmv_mds_md *lmm); + +static inline int lmv_mds_md_size(int stripe_count, unsigned int lmm_magic) +{ + switch (lmm_magic) { + case LMV_MAGIC_V1:{ + struct lmv_mds_md_v1 *lmm1; + + return sizeof(*lmm1) + stripe_count * + sizeof(lmm1->lmv_stripe_fids[0]); + } + default: + return -EINVAL; + } +} + +static inline int lmv_mds_md_stripe_count_get(const union lmv_mds_md *lmm) +{ + switch (le32_to_cpu(lmm->lmv_magic)) { + case LMV_MAGIC_V1: + return le32_to_cpu(lmm->lmv_md_v1.lmv_stripe_count); + case LMV_USER_MAGIC: + return le32_to_cpu(lmm->lmv_user_md.lum_stripe_count); + default: + return -EINVAL; + } +} + +static inline int lmv_mds_md_stripe_count_set(union lmv_mds_md *lmm, + unsigned int stripe_count) +{ + switch (le32_to_cpu(lmm->lmv_magic)) { + case LMV_MAGIC_V1: + lmm->lmv_md_v1.lmv_stripe_count = cpu_to_le32(stripe_count); + break; + case LMV_USER_MAGIC: + lmm->lmv_user_md.lum_stripe_count = cpu_to_le32(stripe_count); + break; + default: + return -EINVAL; + } + return 0; +} enum fld_rpc_opc { - FLD_QUERY = 900, - FLD_LAST_OPC, - FLD_FIRST_OPC = FLD_QUERY + FLD_QUERY = 900, + FLD_READ = 901, + FLD_LAST_OPC, + FLD_FIRST_OPC = FLD_QUERY }; enum seq_rpc_opc { @@ -2663,6 +2856,20 @@ enum seq_op { SEQ_ALLOC_META = 1 }; +enum fld_op { + FLD_CREATE = 0, + FLD_DELETE = 1, + FLD_LOOKUP = 2, +}; + +/* LFSCK opcodes */ +typedef enum { + LFSCK_NOTIFY = 1101, + LFSCK_QUERY = 1102, + LFSCK_LAST_OPC, + LFSCK_FIRST_OPC = LFSCK_NOTIFY +} lfsck_cmd_t; + /* * LOV data structures */ @@ -2673,6 +2880,8 @@ enum seq_op { * protocol, this will limit the max number of OSTs per LOV */ #define LOV_DESC_MAGIC 0xB0CCDE5C +#define LOV_DESC_QOS_MAXAGE_DEFAULT 5 /* Seconds */ +#define LOV_DESC_STRIPE_SIZE_DEFAULT (1 << LNET_MTU_BITS) /* LOV settings descriptor (should only contain static info) */ struct lov_desc { @@ -2720,10 +2929,10 @@ struct ldlm_res_id { extern void lustre_swab_ldlm_res_id (struct ldlm_res_id *id); -static inline int ldlm_res_eq(const struct ldlm_res_id *res0, - const struct ldlm_res_id *res1) +static inline bool ldlm_res_eq(const struct ldlm_res_id *res0, + const struct ldlm_res_id *res1) { - return !memcmp(res0, res1, sizeof(*res0)); + return memcmp(res0, res1, sizeof(*res0)) == 0; } /* lock types */ @@ -2758,17 +2967,19 @@ struct ldlm_extent { __u64 gid; }; -static inline int ldlm_extent_overlap(struct ldlm_extent *ex1, - struct ldlm_extent *ex2) +#define LDLM_GID_ANY ((__u64) -1) + 
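/*
 * Illustrative sketch (not part of this patch): one way an entry name could
 * be mapped to a stripe index of a striped directory using the FNV-1a helper
 * lustre_hash_fnv_1a_64() defined above.  The real LMV name-hash path also
 * consults lmv_hash_type and the migration/dead/bad-type flags, so treat this
 * as a simplified assumption; the helper name below is hypothetical.
 */
static inline __u32 lmv_name_to_stripe_fnv1a(const char *name, size_t namelen,
					     __u32 stripe_count)
{
	__u64 hash;

	if (stripe_count <= 1)
		return 0;

	hash = lustre_hash_fnv_1a_64(name, namelen);

	return (__u32)(hash % stripe_count);
}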
+static inline int ldlm_extent_overlap(const struct ldlm_extent *ex1, + const struct ldlm_extent *ex2) { - return (ex1->start <= ex2->end) && (ex2->start <= ex1->end); + return ex1->start <= ex2->end && ex2->start <= ex1->end; } /* check if @ex1 contains @ex2 */ -static inline int ldlm_extent_contain(struct ldlm_extent *ex1, - struct ldlm_extent *ex2) +static inline int ldlm_extent_contain(const struct ldlm_extent *ex1, + const struct ldlm_extent *ex2) { - return (ex1->start <= ex2->start) && (ex1->end >= ex2->end); + return ex1->start <= ex2->start && ex1->end >= ex2->end; } struct ldlm_inodebits { @@ -2970,7 +3181,29 @@ typedef enum { } obd_cmd_t; #define OBD_FIRST_OPC OBD_PING -/* catalog of log objects */ +/** + * llog contexts indices. + * + * There is compatibility problem with indexes below, they are not + * continuous and must keep their numbers for compatibility needs. + * See LU-5218 for details. + */ +enum llog_ctxt_id { + LLOG_CONFIG_ORIG_CTXT = 0, + LLOG_CONFIG_REPL_CTXT = 1, + LLOG_MDS_OST_ORIG_CTXT = 2, + LLOG_MDS_OST_REPL_CTXT = 3, /* kept just to avoid re-assignment */ + LLOG_SIZE_ORIG_CTXT = 4, + LLOG_SIZE_REPL_CTXT = 5, + LLOG_TEST_ORIG_CTXT = 8, + LLOG_TEST_REPL_CTXT = 9, /* kept just to avoid re-assignment */ + LLOG_CHANGELOG_ORIG_CTXT = 12, /**< changelog generation on mdd */ + LLOG_CHANGELOG_REPL_CTXT = 13, /**< changelog access on clients */ + /* for multiple changelog consumers */ + LLOG_CHANGELOG_USER_ORIG_CTXT = 14, + LLOG_AGENT_ORIG_CTXT = 15, /**< agent requests generation on cdt */ + LLOG_MAX_CTXTS +}; /** Identifier for a single log object */ struct llog_logid { @@ -3042,6 +3275,12 @@ struct llog_rec_tail { (rec->lrh_len - sizeof(struct llog_rec_hdr) - \ sizeof(struct llog_rec_tail)) +static inline void *rec_tail(struct llog_rec_hdr *rec) +{ + return (void *)((char *)rec + rec->lrh_len - + sizeof(struct llog_rec_tail)); +} + struct llog_logid_rec { struct llog_rec_hdr lid_hdr; struct llog_logid lid_id; @@ -3076,7 +3315,7 @@ struct llog_setattr64_rec { __u32 lsr_uid_h; __u32 lsr_gid; __u32 lsr_gid_h; - __u64 lsr_padding; + __u64 lsr_valid; struct llog_rec_tail lsr_tail; } __attribute__((packed)); @@ -3109,15 +3348,9 @@ struct changelog_setinfo { /** changelog record */ struct llog_changelog_rec { - struct llog_rec_hdr cr_hdr; - struct changelog_rec cr; - struct llog_rec_tail cr_tail; /**< for_sizezof_only */ -} __attribute__((packed)); - -struct llog_changelog_ext_rec { - struct llog_rec_hdr cr_hdr; - struct changelog_ext_rec cr; - struct llog_rec_tail cr_tail; /**< for_sizezof_only */ + struct llog_rec_hdr cr_hdr; + struct changelog_rec cr; /**< Variable length field */ + struct llog_rec_tail cr_do_not_use; /**< for_sizeof_only */ } __attribute__((packed)); #define CHANGELOG_USER_PREFIX "cl" @@ -3138,7 +3371,7 @@ enum agent_req_status { ARS_SUCCEED, }; -static inline char *agent_req_status2name(enum agent_req_status ars) +static inline const char *agent_req_status2name(enum agent_req_status ars) { switch (ars) { case ARS_WAITING: @@ -3203,6 +3436,9 @@ enum llog_flag { LLOG_F_ZAP_WHEN_EMPTY = 0x1, LLOG_F_IS_CAT = 0x2, LLOG_F_IS_PLAIN = 0x4, + LLOG_F_EXT_JOBID = 0x8, + + LLOG_F_EXT_MASK = LLOG_F_EXT_JOBID, }; struct llog_log_hdr { @@ -3310,7 +3546,73 @@ struct obdo { #define o_cksum o_nlink #define o_grant_used o_data_version -static inline void lustre_set_wire_obdo(struct obd_connect_data *ocd, +struct lfsck_request { + __u32 lr_event; + __u32 lr_index; + __u32 lr_flags; + __u32 lr_valid; + union { + __u32 lr_speed; + __u32 lr_status; + __u32 lr_type; + }; + 
__u16 lr_version; + __u16 lr_active; + __u16 lr_param; + __u16 lr_async_windows; + union { + __u32 lr_flags2; + __u32 lr_layout_version; + }; + struct lu_fid lr_fid; + struct lu_fid lr_fid2; + union { + struct lu_fid lr_fid3; + char lr_pool_name[LOV_MAXPOOLNAME + 1]; + }; + __u32 lr_stripe_count; + __u32 lr_hash_type; + __u64 lr_padding_3; +}; + +void lustre_swab_lfsck_request(struct lfsck_request *lr); + +struct lfsck_reply { + __u32 lr_status; + __u32 lr_padding_1; + __u64 lr_padding_2; +}; + +void lustre_swab_lfsck_reply(struct lfsck_reply *lr); + +enum lfsck_events { + LE_LASTID_REBUILDING = 1, + LE_LASTID_REBUILT = 2, + LE_PHASE1_DONE = 3, + LE_PHASE2_DONE = 4, + LE_START = 5, + LE_STOP = 6, + LE_QUERY = 7, + LE_FID_ACCESSED = 8, + LE_PEER_EXIT = 9, + LE_CONDITIONAL_DESTROY = 10, + LE_PAIRS_VERIFY = 11, + LE_CREATE_ORPHAN = 12, + LE_SKIP_NLINK_DECLARE = 13, + LE_SKIP_NLINK = 14, + LE_SET_LMV_MASTER = 15, + LE_SET_LMV_SLAVE = 16, +}; + +enum lfsck_event_flags { + LEF_TO_OST = 0x00000001, + LEF_FROM_OST = 0x00000002, + LEF_SET_LMV_HASH = 0x00000004, + LEF_SET_LMV_ALL = 0x00000008, + LEF_RECHECK_NAME_HASH = 0x00000010, +}; + +static inline void lustre_set_wire_obdo(const struct obd_connect_data *ocd, struct obdo *wobdo, const struct obdo *lobdo) { @@ -3328,7 +3630,7 @@ static inline void lustre_set_wire_obdo(struct obd_connect_data *ocd, } } -static inline void lustre_get_wire_obdo(struct obd_connect_data *ocd, +static inline void lustre_get_wire_obdo(const struct obd_connect_data *ocd, struct obdo *lobdo, const struct obdo *wobdo) { @@ -3378,6 +3680,8 @@ extern void lustre_swab_lov_user_md_v3(struct lov_user_md_v3 *lum); extern void lustre_swab_lov_user_md_objects(struct lov_user_ost_data *lod, int stripe_count); extern void lustre_swab_lov_mds_md(struct lov_mds_md *lmm); +void lustre_print_user_md(unsigned int level, struct lov_user_md *lum, + const char *msg); /* llog_swab.c */ extern void lustre_swab_llogd_body (struct llogd_body *d); @@ -3449,6 +3753,7 @@ enum idx_info_flags { II_FL_VARKEY = 1 << 1, /* keys can be of variable size */ II_FL_VARREC = 1 << 2, /* records can be of variable size */ II_FL_NONUNQ = 1 << 3, /* index supports non-unique keys */ + II_FL_NOKEY = 1 << 4, /* client doesn't care about key */ }; #define LIP_MAGIC 0x8A6D6B6C @@ -3553,6 +3858,16 @@ static inline int capa_for_oss(struct lustre_capa *c) return (c->lc_opc & CAPA_OPC_INDEX_LOOKUP) == 0; } +static inline bool lovea_slot_is_dummy(const struct lov_ost_data_v1 *obj) +{ + /* zero area does not care about the bytes-order. */ + if (obj->l_ost_oi.oi.oi_id == 0 && obj->l_ost_oi.oi.oi_seq == 0 && + obj->l_ost_idx == 0 && obj->l_ost_gen == 0) + return true; + + return false; +} + /* lustre_capa::lc_hmac_alg */ enum { CAPA_HMAC_ALG_SHA1 = 1, /**< sha1 algorithm */ @@ -3603,6 +3918,14 @@ struct getinfo_fid2path { void lustre_swab_fid2path (struct getinfo_fid2path *gf); +/** path2parent request/reply structures */ +struct getparent { + struct lu_fid gp_fid; /**< parent FID */ + __u32 gp_linkno; /**< hardlink number */ + __u32 gp_name_size; /**< size of the name field */ + char gp_name[0]; /**< zero-terminated link name */ +} __attribute__((packed)); + enum { LAYOUT_INTENT_ACCESS = 0, LAYOUT_INTENT_READ = 1, @@ -3649,90 +3972,188 @@ extern void lustre_swab_hsm_user_item(struct hsm_user_item *hui); extern void lustre_swab_hsm_request(struct hsm_request *hr); /** - * These are object update opcode under UPDATE_OBJ, which is currently - * being used by cross-ref operations between MDT. 
+ * OUT_UPDATE RPC Format * * During the cross-ref operation, the Master MDT, which the client send the * request to, will disassembly the operation into object updates, then OSP * will send these updates to the remote MDT to be executed. * - * Update request format - * magic: UPDATE_BUFFER_MAGIC_V1 - * Count: How many updates in the req. - * bufs[0] : following are packets of object. - * update[0]: - * type: object_update_op, the op code of update - * fid: The object fid of the update. - * lens/bufs: other parameters of the update. - * update[1]: - * type: object_update_op, the op code of update - * fid: The object fid of the update. - * lens/bufs: other parameters of the update. - * .......... - * update[7]: type: object_update_op, the op code of update - * fid: The object fid of the update. - * lens/bufs: other parameters of the update. - * Current 8 maxim updates per object update request. + * An UPDATE_OBJ RPC does a list of updates. Each update belongs to an + * operation and does a type of modification to an object. + * + * Request Format * - ******************************************************************* - * update reply format: + * update_buf + * update (1st) + * update (2nd) + * ... + * update (ub_count-th) * - * ur_version: UPDATE_REPLY_V1 - * ur_count: The count of the reply, which is usually equal - * to the number of updates in the request. - * ur_lens: The reply lengths of each object update. + * ub_count must be less than or equal to UPDATE_PER_RPC_MAX. * - * replies: 1st update reply [4bytes_ret: other body] - * 2nd update reply [4bytes_ret: other body] - * ..... - * nth update reply [4bytes_ret: other body] + * Reply Format * - * For each reply of the update, the format would be - * result(4 bytes):Other stuff + * update_reply + * rc [+ buffers] (1st) + * rc [+ buffers] (2st) + * ... + * rc [+ buffers] (nr_count-th) + * + * ur_count must be less than or equal to UPDATE_PER_RPC_MAX and should usually + * be equal to ub_count. 
+ */ + +/** + * Type of each update */ +enum update_type { + OUT_CREATE = 1, + OUT_DESTROY = 2, + OUT_REF_ADD = 3, + OUT_REF_DEL = 4, + OUT_ATTR_SET = 5, + OUT_ATTR_GET = 6, + OUT_XATTR_SET = 7, + OUT_XATTR_GET = 8, + OUT_INDEX_LOOKUP = 9, + OUT_INDEX_INSERT = 10, + OUT_INDEX_DELETE = 11, + OUT_WRITE = 12, + OUT_XATTR_DEL = 13, + OUT_LAST +}; + +enum update_flag { + UPDATE_FL_OST = 0x00000001, /* op from OST (not MDT) */ + UPDATE_FL_SYNC = 0x00000002, /* commit before replying */ + UPDATE_FL_COMMITTED = 0x00000004, /* op committed globally */ + UPDATE_FL_NOLOG = 0x00000008 /* for idempotent updates */ +}; + +struct object_update_param { + __u16 oup_len; /* length of this parameter */ + __u16 oup_padding; + __u32 oup_padding2; + char oup_buf[0]; +}; + +static inline size_t +object_update_param_size(const struct object_update_param *param) +{ + return cfs_size_round(sizeof(*param) + param->oup_len); +} + +/* object update */ +struct object_update { + __u16 ou_type; /* enum update_type */ + __u16 ou_params_count; /* update parameters count */ + __u32 ou_master_index; /* master MDT/OST index */ + __u32 ou_flags; /* enum update_flag */ + __u32 ou_padding1; /* padding 1 */ + __u64 ou_batchid; /* op transno on master */ + struct lu_fid ou_fid; /* object to be updated */ + struct object_update_param ou_params[0]; /* update params */ +}; + +#define UPDATE_REQUEST_MAGIC_V1 0xBDDE0001 +#define UPDATE_REQUEST_MAGIC_V2 0xBDDE0002 +#define UPDATE_REQUEST_MAGIC UPDATE_REQUEST_MAGIC_V2 +/* Hold object_updates sending to the remote OUT in single RPC */ +struct object_update_request { + __u32 ourq_magic; + __u16 ourq_count; /* number of ourq_updates[] */ + __u16 ourq_padding; + struct object_update ourq_updates[0]; +}; + +void lustre_swab_object_update(struct object_update *ou); +void lustre_swab_object_update_request(struct object_update_request *our); + +static inline size_t +object_update_size(const struct object_update *update) +{ + const struct object_update_param *param; + size_t size; + unsigned int i; + + size = offsetof(struct object_update, ou_params[0]); + for (i = 0; i < update->ou_params_count; i++) { + param = (struct object_update_param *)((char *)update + size); + size += object_update_param_size(param); + } -#define UPDATE_MAX_OPS 10 -#define UPDATE_BUFFER_MAGIC_V1 0xBDDE0001 -#define UPDATE_BUFFER_MAGIC UPDATE_BUFFER_MAGIC_V1 -#define UPDATE_BUF_COUNT 8 -enum object_update_op { - OBJ_CREATE = 1, - OBJ_DESTROY = 2, - OBJ_REF_ADD = 3, - OBJ_REF_DEL = 4, - OBJ_ATTR_SET = 5, - OBJ_ATTR_GET = 6, - OBJ_XATTR_SET = 7, - OBJ_XATTR_GET = 8, - OBJ_INDEX_LOOKUP = 9, - OBJ_INDEX_INSERT = 10, - OBJ_INDEX_DELETE = 11, - OBJ_LAST -}; - -struct update { - __u32 u_type; - __u32 u_batchid; - struct lu_fid u_fid; - __u32 u_lens[UPDATE_BUF_COUNT]; - __u32 u_bufs[0]; -}; - -struct update_buf { - __u32 ub_magic; - __u32 ub_count; - __u32 ub_bufs[0]; -}; - -#define UPDATE_REPLY_V1 0x00BD0001 -struct update_reply { - __u32 ur_version; - __u32 ur_count; - __u32 ur_lens[0]; -}; - -void lustre_swab_update_buf(struct update_buf *ub); -void lustre_swab_update_reply_buf(struct update_reply *ur); + return size; +} + +static inline struct object_update * +object_update_request_get(const struct object_update_request *our, + unsigned int index, size_t *size) +{ + void *ptr; + unsigned int i; + + if (index >= our->ourq_count) + return NULL; + + ptr = (void *)&our->ourq_updates[0]; + for (i = 0; i < index; i++) + ptr += object_update_size(ptr); + + if (size != NULL) + *size = object_update_size(ptr); + + return ptr; +} + + 
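/*
 * Usage sketch (illustrative only, not part of this patch): walk every update
 * packed into an object_update_request, e.g. on the receiving OUT service
 * side, using only the helpers defined above.  The caller is assumed to have
 * already length-checked and byte-swapped the buffer
 * (lustre_swab_object_update_request); the walker name is hypothetical.
 */
static inline int
object_update_request_walk(const struct object_update_request *our)
{
	unsigned int i;

	if (our->ourq_magic != UPDATE_REQUEST_MAGIC)
		return -EINVAL;

	for (i = 0; i < our->ourq_count; i++) {
		struct object_update *update;
		size_t size;

		update = object_update_request_get(our, i, &size);
		if (update == NULL)
			return -EPROTO;

		/* update->ou_type is an enum update_type value such as
		 * OUT_CREATE or OUT_ATTR_SET; ou_fid names the target object
		 * and ou_params[] carries ou_params_count parameters. */
	}

	return 0;
}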
+/* the result of object update */
+struct object_update_result {
+	__u32	our_rc;
+	__u16	our_datalen;
+	__u16	our_padding;
+	__u32	our_data[0];
+};
+
+#define UPDATE_REPLY_MAGIC_V1	0x00BD0001
+#define UPDATE_REPLY_MAGIC_V2	0x00BD0002
+#define UPDATE_REPLY_MAGIC	UPDATE_REPLY_MAGIC_V2
+/* Hold object_update_results being replied from the remote OUT. */
+struct object_update_reply {
+	__u32	ourp_magic;
+	__u16	ourp_count;
+	__u16	ourp_padding;
+	__u16	ourp_lens[0];
+};
+
+void lustre_swab_object_update_result(struct object_update_result *our);
+void lustre_swab_object_update_reply(struct object_update_reply *our);
+
+static inline struct object_update_result *
+object_update_result_get(const struct object_update_reply *reply,
+			 unsigned int index, size_t *size)
+{
+	__u16 count = reply->ourp_count;
+	unsigned int i;
+	void *ptr;
+
+	if (index >= count)
+		return NULL;
+
+	ptr = (char *)reply +
+	      cfs_size_round(offsetof(struct object_update_reply,
+				      ourp_lens[count]));
+	for (i = 0; i < index; i++) {
+		if (reply->ourp_lens[i] == 0)
+			return NULL;
+
+		ptr += cfs_size_round(reply->ourp_lens[i]);
+	}
+
+	if (size != NULL)
+		*size = reply->ourp_lens[index];
+
+	return ptr;
+}
 
 /** layout swap request structure
  * fid1 and fid2 are in mdt_body