X-Git-Url: https://git.whamcloud.com/?p=fs%2Flustre-release.git;a=blobdiff_plain;f=lustre%2Finclude%2Flustre%2Flustre_idl.h;h=3bdebf1f7af364906ed65583522b37101bbf7ebb;hp=eb9cdca92ccd3ee646d6174f22cedf6db57c1446;hb=624fd0ca0832e92dd2f5486984b6c7f7397619ba;hpb=a4921671d04d8a8658bbb1aff8ba022b99e6e1ba diff --git a/lustre/include/lustre/lustre_idl.h b/lustre/include/lustre/lustre_idl.h index eb9cdca..3bdebf1 100644 --- a/lustre/include/lustre/lustre_idl.h +++ b/lustre/include/lustre/lustre_idl.h @@ -26,7 +26,7 @@ * GPL HEADER END */ /* - * Copyright 2008 Sun Microsystems, Inc. All rights reserved + * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved. * Use is subject to license terms. */ /* @@ -42,10 +42,6 @@ * * Lustre wire protocol definitions. * - * We assume all nodes are either little-endian or big-endian, and we - * always send messages in the sender's native format. The receiver - * detects the message format by checking the 'magic' field of the message - * (see lustre_msg_swabbed() below). * ALL structs passing over the wire should be declared here. Structs * that are used in interfaces with userspace should go in lustre_user.h. * @@ -72,6 +68,11 @@ * in the code to ensure that new/old clients that see this larger struct * do not fail, otherwise you need to implement protocol compatibility). * + * We assume all nodes are either little-endian or big-endian, and we + * always send messages in the sender's native format. The receiver + * detects the message format by checking the 'magic' field of the message + * (see lustre_msg_swabbed() below). + * * Each wire type has corresponding 'lustre_swab_xxxtypexxx()' routines, * implemented either here, inline (trivial implementations) or in * ptlrpc/pack_generic.c. These 'swabbers' convert the type from "other" @@ -90,11 +91,10 @@ #ifndef _LUSTRE_IDL_H_ #define _LUSTRE_IDL_H_ -#include +#include /* for LASSERT, LPUX64, etc */ /* Defn's shared with user-space. */ #include -#include /* * GENERAL STUFF @@ -138,12 +138,7 @@ #define SEQ_DATA_PORTAL 31 #define SEQ_CONTROLLER_PORTAL 32 -#define SVC_KILLED 1 -#define SVC_EVENT 2 -#define SVC_SIGNAL 4 -#define SVC_RUNNING 8 -#define SVC_STOPPING 16 -#define SVC_STOPPED 32 +/* Portal 63 is reserved for the Cray Inc DVS - nic@cray.com, roe@cray.com, n8851@cray.com */ /* packet types */ #define PTL_RPC_MSG_REQUEST 4711 @@ -170,19 +165,35 @@ typedef __u32 mdsno_t; typedef __u64 seqno_t; +typedef __u64 obd_id; +typedef __u64 obd_seq; +typedef __s64 obd_time; +typedef __u64 obd_size; +typedef __u64 obd_off; +typedef __u64 obd_blocks; +typedef __u64 obd_valid; +typedef __u32 obd_blksize; +typedef __u32 obd_mode; +typedef __u32 obd_uid; +typedef __u32 obd_gid; +typedef __u32 obd_flag; +typedef __u32 obd_count; /** * Describes a range of sequence, lsr_start is included but lsr_end is * not in the range. - * Same structure is used in fld module where lsr_mdt field holds mdt id + * Same structure is used in fld module where lsr_index field holds mdt id * of the home mdt. 
*/ +#define LU_SEQ_RANGE_MDT 0x0 +#define LU_SEQ_RANGE_OST 0x1 + struct lu_seq_range { __u64 lsr_start; __u64 lsr_end; - __u32 lsr_mdt; - __u32 lsr_padding; + __u32 lsr_index; + __u32 lsr_flags; }; /** @@ -200,7 +211,7 @@ static inline __u64 range_space(const struct lu_seq_range *range) static inline void range_init(struct lu_seq_range *range) { - range->lsr_start = range->lsr_end = range->lsr_mdt = 0; + range->lsr_start = range->lsr_end = range->lsr_index = 0; } /** @@ -213,18 +224,6 @@ static inline int range_within(const struct lu_seq_range *range, return s >= range->lsr_start && s < range->lsr_end; } -/** - * allocate \a w units of sequence from range \a from. - */ -static inline void range_alloc(struct lu_seq_range *to, - struct lu_seq_range *from, - __u64 width) -{ - to->lsr_start = from->lsr_start; - to->lsr_end = from->lsr_start + width; - from->lsr_start += width; -} - static inline int range_is_sane(const struct lu_seq_range *range) { return (range->lsr_end >= range->lsr_start); @@ -241,57 +240,122 @@ static inline int range_is_exhausted(const struct lu_seq_range *range) return range_space(range) == 0; } -#define DRANGE "[%#16.16"LPF64"x-%#16.16"LPF64"x):%x" +/* return 0 if two range have the same location */ +static inline int range_compare_loc(const struct lu_seq_range *r1, + const struct lu_seq_range *r2) +{ + return r1->lsr_index != r2->lsr_index || + r1->lsr_flags != r2->lsr_flags; +} + +#define DRANGE "[%#16.16"LPF64"x-%#16.16"LPF64"x):%x:%x" #define PRANGE(range) \ (range)->lsr_start, \ (range)->lsr_end, \ - (range)->lsr_mdt + (range)->lsr_index, \ + (range)->lsr_flags /** \defgroup lu_fid lu_fid * @{ */ /** - * File identifier. - * - * Fid is a cluster-wide unique identifier of a file or an object - * (stripe). Fids are never reused. Fids are transmitted across network (in - * the sender byte-ordering), and stored on disk in a packed form (struct - * lu_fid_pack) in a big-endian order. + * Flags for lustre_mdt_attrs::lma_compat and lustre_mdt_attrs::lma_incompat. */ -struct lu_fid { - /** - * fid sequence. Sequence is a unit of migration: all files (objects) - * with fids from a given sequence are stored on the same - * server. - * - * Lustre should support 2 ^ 64 objects, thus even if one - * sequence has one object we will never reach this value. - */ - __u64 f_seq; - /** fid number within sequence. */ - __u32 f_oid; - /** - * fid version, used to distinguish different versions (in the sense - * of snapshots, etc.) of the same file system object. Not currently - * used. - */ - __u32 f_ver; +enum lma_compat { + LMAC_HSM = 0x00000001, + LMAC_SOM = 0x00000002, }; /** + * Masks for all features that should be supported by a Lustre version to + * access a specific file. + * This information is stored in lustre_mdt_attrs::lma_incompat. + * + * NOTE: No incompat feature should be added before bug #17670 is landed. + */ +#define LMA_INCOMPAT_SUPP 0x0 + +/** * Following struct for MDT attributes, that will be kept inode's EA. * Introduced in 2.0 release (please see b15993, for details) */ struct lustre_mdt_attrs { + /** + * Bitfield for supported data in this structure. From enum lma_compat. + * lma_self_fid and lma_flags are always available. + */ + __u32 lma_compat; + /** + * Per-file incompat feature list. Lustre version should support all + * flags set in this field. The supported feature mask is available in + * LMA_INCOMPAT_SUPP. 
+ */ + __u32 lma_incompat; /** FID of this inode */ struct lu_fid lma_self_fid; - /** SOM state, mdt/ost type, others */ + /** mdt/ost type, others */ __u64 lma_flags; - /** total sectors in objects */ - __u64 lma_som_sectors; + /* IO Epoch SOM attributes belongs to */ + __u64 lma_ioepoch; + /** total file size in objects */ + __u64 lma_som_size; + /** total fs blocks in objects */ + __u64 lma_som_blocks; + /** mds mount id the size is valid for */ + __u64 lma_som_mountid; }; +/** + * Fill \a lma with its first content. + * Only fid is stored. + */ +static inline void lustre_lma_init(struct lustre_mdt_attrs *lma, + const struct lu_fid *fid) +{ + lma->lma_compat = 0; + lma->lma_incompat = 0; + memcpy(&lma->lma_self_fid, fid, sizeof(*fid)); + lma->lma_flags = 0; + lma->lma_ioepoch = 0; + lma->lma_som_size = 0; + lma->lma_som_blocks = 0; + lma->lma_som_mountid = 0; + + /* If a field is added in struct lustre_mdt_attrs, zero it explicitly + * and change the test below. */ + LASSERT(sizeof(*lma) == + (offsetof(struct lustre_mdt_attrs, lma_som_mountid) + + sizeof(lma->lma_som_mountid))); +}; + +extern void lustre_swab_lu_fid(struct lu_fid *fid); + +/** + * Swab, if needed, lustre_mdt_attr struct to on-disk format. + * Otherwise, do not touch it. + */ +static inline void lustre_lma_swab(struct lustre_mdt_attrs *lma) +{ + /* Use LUSTRE_MSG_MAGIC to detect local endianess. */ + if (LUSTRE_MSG_MAGIC != cpu_to_le32(LUSTRE_MSG_MAGIC)) { + __swab32s(&lma->lma_compat); + __swab32s(&lma->lma_incompat); + lustre_swab_lu_fid(&lma->lma_self_fid); + __swab64s(&lma->lma_flags); + __swab64s(&lma->lma_ioepoch); + __swab64s(&lma->lma_som_size); + __swab64s(&lma->lma_som_blocks); + __swab64s(&lma->lma_som_mountid); + } +}; + +/* This is the maximum number of MDTs allowed in CMD testing until such + * a time that FID-on-OST is implemented. This is due to the limitations + * of packing non-0-MDT numbers into the FID SEQ namespace. Once FID-on-OST + * is implemented this limit will be virtually unlimited. */ +#define MAX_MDT_COUNT 8 + /** * fid constants @@ -324,20 +388,85 @@ static inline void fid_zero(struct lu_fid *fid) memset(fid, 0, sizeof(*fid)); } -/* Normal FID sequence starts from this value, i.e. 1<<33 */ -#define FID_SEQ_START 0x200000000ULL +static inline obd_id fid_ver_oid(const struct lu_fid *fid) +{ + return ((__u64)fid_ver(fid) << 32 | fid_oid(fid)); +} -/* IDIF sequence starts from this value, i.e. 1<<32 */ -#define IDIF_SEQ_START 0x100000000ULL +/** + * Note that reserved SEQ numbers below 12 will conflict with ldiskfs + * inodes in the IGIF namespace, so these reserved SEQ numbers can be + * used for other purposes and not risk collisions with existing inodes. + * + * Different FID Format + * http://arch.lustre.org/index.php?title=Interoperability_fids_zfs#NEW.0 + */ +enum fid_seq { + FID_SEQ_OST_MDT0 = 0, + FID_SEQ_LLOG = 1, + FID_SEQ_ECHO = 2, + FID_SEQ_OST_MDT1 = 3, + FID_SEQ_OST_MAX = 9, /* Max MDT count before OST_on_FID */ + FID_SEQ_RSVD = 11, + FID_SEQ_IGIF = 12, + FID_SEQ_IGIF_MAX = 0x0ffffffffULL, + FID_SEQ_IDIF = 0x100000000ULL, + FID_SEQ_IDIF_MAX = 0x1ffffffffULL, + /* Normal FID sequence starts from this value, i.e. 
1<<33 */ + FID_SEQ_START = 0x200000000ULL, + FID_SEQ_LOCAL_FILE = 0x200000001ULL, + FID_SEQ_DOT_LUSTRE = 0x200000002ULL, + FID_SEQ_NORMAL = 0x200000400ULL, + FID_SEQ_LOV_DEFAULT= 0xffffffffffffffffULL +}; + +#define OBIF_OID_MAX_BITS 32 +#define OBIF_MAX_OID (1ULL << OBIF_OID_MAX_BITS) +#define OBIF_OID_MASK ((1ULL << OBIF_OID_MAX_BITS) - 1) +#define IDIF_OID_MAX_BITS 48 +#define IDIF_MAX_OID (1ULL << IDIF_OID_MAX_BITS) +#define IDIF_OID_MASK ((1ULL << IDIF_OID_MAX_BITS) - 1) + + +static inline int fid_seq_is_mdt0(obd_seq seq) +{ + return (seq == FID_SEQ_OST_MDT0); +} + +static inline int fid_seq_is_cmd(const __u64 seq) +{ + return (seq >= FID_SEQ_OST_MDT1 && seq <= FID_SEQ_OST_MAX); +}; + +static inline int fid_seq_is_mdt(const __u64 seq) +{ + return seq == FID_SEQ_OST_MDT0 || + (seq >= FID_SEQ_OST_MDT1 && seq <= FID_SEQ_OST_MAX); +}; + +static inline int fid_seq_is_rsvd(const __u64 seq) +{ + return seq <= FID_SEQ_RSVD; +}; + +static inline int fid_is_mdt0(const struct lu_fid *fid) +{ + return fid_seq_is_mdt0(fid_seq(fid)); +} /** * Check if a fid is igif or not. * \param fid the fid to be tested. * \return true if the fid is a igif; otherwise false. */ +static inline int fid_seq_is_igif(const __u64 seq) +{ + return seq >= FID_SEQ_IGIF && seq <= FID_SEQ_IGIF_MAX; +} + static inline int fid_is_igif(const struct lu_fid *fid) { - return fid_seq(fid) > 0 && fid_seq(fid) < IDIF_SEQ_START; + return fid_seq_is_igif(fid_seq(fid)); } /** @@ -345,9 +474,177 @@ static inline int fid_is_igif(const struct lu_fid *fid) * \param fid the fid to be tested. * \return true if the fid is a idif; otherwise false. */ +static inline int fid_seq_is_idif(const __u64 seq) +{ + return seq >= FID_SEQ_IDIF && seq <= FID_SEQ_IDIF_MAX; +} + static inline int fid_is_idif(const struct lu_fid *fid) { - return fid_seq(fid) >= IDIF_SEQ_START && fid_seq(fid) < FID_SEQ_START; + return fid_seq_is_idif(fid_seq(fid)); +} + +struct ost_id { + obd_id oi_id; + obd_seq oi_seq; +}; + +static inline int fid_seq_is_norm(const __u64 seq) +{ + return (seq >= FID_SEQ_NORMAL); +} + +static inline int fid_is_norm(const struct lu_fid *fid) +{ + return fid_seq_is_norm(fid_seq(fid)); +} + +/* convert an OST objid into an IDIF FID SEQ number */ +static inline obd_seq fid_idif_seq(obd_id id, __u32 ost_idx) +{ + return FID_SEQ_IDIF | (ost_idx << 16) | ((id >> 32) & 0xffff); +} + +/* convert a packed IDIF FID into an OST objid */ +static inline obd_id fid_idif_id(obd_seq seq, __u32 oid, __u32 ver) +{ + return ((__u64)ver << 48) | ((seq & 0xffff) << 32) | oid; +} + +/* unpack an ostid (id/seq) from a wire/disk structure into an IDIF FID */ +static inline void ostid_idif_unpack(struct ost_id *ostid, + struct lu_fid *fid, __u32 ost_idx) +{ + fid->f_seq = fid_idif_seq(ostid->oi_id, ost_idx); + fid->f_oid = ostid->oi_id; /* truncate to 32 bits by assignment */ + fid->f_ver = ostid->oi_id >> 48; /* in theory, not currently used */ +} + +/* unpack an ostid (id/seq) from a wire/disk structure into a non-IDIF FID */ +static inline void ostid_fid_unpack(struct ost_id *ostid, struct lu_fid *fid) +{ + fid->f_seq = ostid->oi_seq; + fid->f_oid = ostid->oi_id; /* truncate to 32 bits by assignment */ + fid->f_ver = ostid->oi_id >> 32; /* in theory, not currently used */ +} + +/* Unpack an OST object id/seq (group) into a FID. This is needed for + * converting all obdo, lmm, lsm, etc. 64-bit id/seq pairs into proper + * FIDs. Note that if an id/seq is already in FID/IDIF format it will + * be passed through unchanged. 
Only legacy OST objects in "group 0" + * will be mapped into the IDIF namespace so that they can fit into the + * struct lu_fid fields without loss. For reference see: + * http://arch.lustre.org/index.php?title=Interoperability_fids_zfs + */ +static inline int fid_ostid_unpack(struct lu_fid *fid, struct ost_id *ostid, + __u32 ost_idx) +{ + if (ost_idx > 0xffff) { + CERROR("bad ost_idx, seq:"LPU64" id:"LPU64" ost_idx:%u\n", + ostid->oi_seq, ostid->oi_id, ost_idx); + return -EBADF; + } + + if (fid_seq_is_mdt0(ostid->oi_seq)) { + /* This is a "legacy" (old 1.x/2.early) OST object in "group 0" + * that we map into the IDIF namespace. It allows up to 2^48 + * objects per OST, as this is the object namespace that has + * been in production for years. This can handle create rates + * of 1M objects/s/OST for 9 years, or combinations thereof. */ + if (ostid->oi_id >= IDIF_MAX_OID) { + CERROR("bad MDT0 id, seq:"LPU64" id:"LPU64" ost_idx:%u\n", + ostid->oi_seq, ostid->oi_id, ost_idx); + return -EBADF; + } + ostid_idif_unpack(ostid, fid, ost_idx); + + } else if (fid_seq_is_rsvd(ostid->oi_seq)) { + /* These are legacy OST objects for LLOG/ECHO and CMD testing. + * We only support 2^32 objects in these groups, and cannot + * uniquely identify them in the system (i.e. they are the + * duplicated on all OSTs), but this is not strictly required + * for the old object protocol, which has a separate ost_idx. */ + if (ostid->oi_id >= 0xffffffffULL) { + CERROR("bad RSVD id, seq:"LPU64" id:"LPU64" ost_idx:%u\n", + ostid->oi_seq, ostid->oi_id, ost_idx); + return -EBADF; + } + ostid_fid_unpack(ostid, fid); + + } else if (unlikely(fid_seq_is_igif(ostid->oi_seq))) { + /* This is an MDT inode number, which should never collide with + * proper OST object IDs, and is probably a broken filesystem */ + CERROR("bad IGIF, seq:"LPU64" id:"LPU64" ost_idx:%u\n", + ostid->oi_seq, ostid->oi_id, ost_idx); + return -EBADF; + + } else /* if (fid_seq_is_idif(seq) || fid_seq_is_norm(seq)) */ { + /* This is either an IDIF object, which identifies objects across + * all OSTs, or a regular FID. The IDIF namespace maps legacy + * OST objects into the FID namespace. In both cases, we just + * pass the FID through, no conversion needed. 
*/ + ostid_fid_unpack(ostid, fid); + } + + return 0; +} + +/* pack an IDIF FID into an ostid (id/seq) for the wire/disk */ +static inline void ostid_idif_pack(struct lu_fid *fid, struct ost_id *ostid) +{ + ostid->oi_seq = FID_SEQ_OST_MDT0; + ostid->oi_id = fid_idif_id(fid->f_seq, fid->f_oid, fid->f_ver); +} + +/* pack a non-IDIF FID into an ostid (id/seq) for the wire/disk */ +static inline void ostid_fid_pack(struct lu_fid *fid, struct ost_id *ostid) +{ + ostid->oi_seq = fid_seq(fid); + ostid->oi_id = fid_ver_oid(fid); +} + +/* pack any OST FID into an ostid (id/seq) for the wire/disk */ +static inline int fid_ostid_pack(struct lu_fid *fid, struct ost_id *ostid) +{ + if (unlikely(fid_seq_is_igif(fid->f_seq))) { + CERROR("bad IGIF, "DFID"\n", PFID(fid)); + return -EBADF; + } + + if (fid_is_idif(fid)) + ostid_idif_pack(fid, ostid); + else + ostid_fid_pack(fid, ostid); + + return 0; +} + +/* extract OST sequence (group) from a wire ost_id (id/seq) pair */ +static inline obd_seq ostid_seq(struct ost_id *ostid) +{ + if (unlikely(fid_seq_is_igif(ostid->oi_seq))) + CWARN("bad IGIF, oi_seq: "LPU64" oi_id: "LPX64"\n", + ostid->oi_seq, ostid->oi_id); + + if (unlikely(fid_seq_is_idif(ostid->oi_seq))) + return FID_SEQ_OST_MDT0; + + return ostid->oi_seq; +} + +/* extract OST objid from a wire ost_id (id/seq) pair */ +static inline obd_id ostid_id(struct ost_id *ostid) +{ + if (ostid->oi_seq == FID_SEQ_OST_MDT0) + return ostid->oi_id & IDIF_OID_MASK; + + if (fid_seq_is_rsvd(ostid->oi_seq)) + return ostid->oi_id & OBIF_OID_MASK; + + if (fid_seq_is_idif(ostid->oi_seq)) + return fid_idif_id(ostid->oi_seq, ostid->oi_id, 0); + + return ostid->oi_id; } /** @@ -361,6 +658,21 @@ static inline ino_t lu_igif_ino(const struct lu_fid *fid) } /** + * Build igif from the inode number/generation. + */ +#define LU_IGIF_BUILD(fid, ino, gen) \ +do { \ + fid->f_seq = ino; \ + fid->f_oid = gen; \ + fid->f_ver = 0; \ +} while(0) +static inline void lu_igif_build(struct lu_fid *fid, __u32 ino, __u32 gen) +{ + LU_IGIF_BUILD(fid, ino, gen); + LASSERT(fid_is_igif(fid)); +} + +/** * Get inode generation from a igif. * \param fid a igif to get inode generation from. * \return inode generation for the igif. @@ -370,13 +682,10 @@ static inline __u32 lu_igif_gen(const struct lu_fid *fid) return fid_oid(fid); } -#define DFID "["LPX64":0x%x:0x%x]" - -#define PFID(fid) \ - fid_seq(fid), \ - fid_oid(fid), \ - fid_ver(fid) - +/* + * Fids are transmitted across network (in the sender byte-ordering), + * and stored on disk in big-endian order. + */ static inline void fid_cpu_to_le(struct lu_fid *dst, const struct lu_fid *src) { /* check that all fields are converted */ @@ -425,20 +734,6 @@ static inline void fid_be_to_cpu(struct lu_fid *dst, const struct lu_fid *src) LASSERTF(fid_is_igif(dst) || fid_ver(dst) == 0, DFID"\n", PFID(dst)); } -/** - * Storage representation for fids. - * - * Variable size, first byte contains the length of the whole record. - */ -struct lu_fid_pack { - char fp_len; - char fp_area[sizeof(struct lu_fid)]; -}; - -void fid_pack(struct lu_fid_pack *pack, const struct lu_fid *fid, - struct lu_fid *befider); -int fid_unpack(const struct lu_fid_pack *pack, struct lu_fid *fid); - static inline int fid_is_sane(const struct lu_fid *fid) { return @@ -488,18 +783,70 @@ static inline int lu_fid_cmp(const struct lu_fid *f0, /** \defgroup lu_dir lu_dir * @{ */ + +/** + * Enumeration of possible directory entry attributes. + * + * Attributes follow directory entry header in the order they appear in this + * enumeration. 
+ */ +enum lu_dirent_attrs { + LUDA_FID = 0x0001, + LUDA_TYPE = 0x0002, + LUDA_64BITHASH = 0x0004, +}; + /** * Layout of readdir pages, as transmitted on wire. */ struct lu_dirent { + /** valid if LUDA_FID is set. */ struct lu_fid lde_fid; + /** a unique entry identifier: a hash or an offset. */ __u64 lde_hash; + /** total record length, including all attributes. */ __u16 lde_reclen; + /** name length */ __u16 lde_namelen; - __u32 lde_pad0; + /** optional variable size attributes following this entry. + * taken from enum lu_dirent_attrs. + */ + __u32 lde_attrs; + /** name is followed by the attributes indicated in ->ldp_attrs, in + * their natural order. After the last attribute, padding bytes are + * added to make ->lde_reclen a multiple of 8. + */ char lde_name[0]; }; +/* + * Definitions of optional directory entry attributes formats. + * + * Individual attributes do not have their length encoded in a generic way. It + * is assumed that consumer of an attribute knows its format. This means that + * it is impossible to skip over an unknown attribute, except by skipping over all + * remaining attributes (by using ->lde_reclen), which is not too + * constraining, because new server versions will append new attributes at + * the end of an entry. + */ + +/** + * Fid directory attribute: a fid of an object referenced by the entry. This + * will be almost always requested by the client and supplied by the server. + * + * Aligned to 8 bytes. + */ +/* To have compatibility with 1.8, lets have fid in lu_dirent struct. */ + +/** + * File type. + * + * Aligned to 2 bytes. + */ +struct luda_type { + __u16 lt_type; +}; + struct lu_dirpage { __u64 ldp_hash_start; __u64 ldp_hash_end; @@ -532,16 +879,30 @@ static inline struct lu_dirent *lu_dirent_next(struct lu_dirent *ent) return next; } +static inline int lu_dirent_calc_size(int namelen, __u16 attr) +{ + int size; + + if (attr & LUDA_TYPE) { + const unsigned align = sizeof(struct luda_type) - 1; + size = (sizeof(struct lu_dirent) + namelen + align) & ~align; + size += sizeof(struct luda_type); + } else + size = sizeof(struct lu_dirent) + namelen; + + return (size + 7) & ~7; +} + static inline int lu_dirent_size(struct lu_dirent *ent) { if (le16_to_cpu(ent->lde_reclen) == 0) { - return (sizeof(*ent) + - le16_to_cpu(ent->lde_namelen) + 7) & ~7; + return lu_dirent_calc_size(le16_to_cpu(ent->lde_namelen), + le32_to_cpu(ent->lde_attrs)); } return le16_to_cpu(ent->lde_reclen); } -#define DIR_END_OFF 0xfffffffffffffffeULL +#define MDS_DIR_END_OFF 0xfffffffffffffffeULL /** @} lu_dir */ @@ -555,8 +916,8 @@ static inline int lustre_handle_is_used(struct lustre_handle *lh) return lh->cookie != 0ull; } -static inline int lustre_handle_equal(struct lustre_handle *lh1, - struct lustre_handle *lh2) +static inline int lustre_handle_equal(const struct lustre_handle *lh1, + const struct lustre_handle *lh2) { return lh1->cookie == lh2->cookie; } @@ -569,6 +930,7 @@ static inline void lustre_handle_copy(struct lustre_handle *tgt, /* flags for lm_flags */ #define MSGHDR_AT_SUPPORT 0x1 +#define MSGHDR_CKSUM_INCOMPAT18 0x2 #define lustre_msg lustre_msg_v2 /* we depend on this structure to be 8-byte aligned */ @@ -632,6 +994,9 @@ extern void lustre_swab_ptlrpc_body(struct ptlrpc_body *pb); #define DLM_LOCKREPLY_OFF 1 /* lockrep offset */ #define DLM_REPLY_REC_OFF 2 /* reply record offset */ +/** only use in req->rq_{req,rep}_swab_mask */ +#define MSG_PTLRPC_HEADER_OFF 31 + /* Flags that are operation-specific go in the top 16 bits. 
*/ #define MSG_OP_FLAG_MASK 0xffff0000 #define MSG_OP_FLAG_SHIFT 16 @@ -654,47 +1019,61 @@ extern void lustre_swab_ptlrpc_body(struct ptlrpc_body *pb); * Flags for all connect opcodes (MDS_CONNECT, OST_CONNECT) */ -#define MSG_CONNECT_RECOVERING 0x1 -#define MSG_CONNECT_RECONNECT 0x2 -#define MSG_CONNECT_REPLAYABLE 0x4 +#define MSG_CONNECT_RECOVERING 0x00000001 +#define MSG_CONNECT_RECONNECT 0x00000002 +#define MSG_CONNECT_REPLAYABLE 0x00000004 //#define MSG_CONNECT_PEER 0x8 -#define MSG_CONNECT_LIBCLIENT 0x10 -#define MSG_CONNECT_INITIAL 0x20 -#define MSG_CONNECT_ASYNC 0x40 -#define MSG_CONNECT_NEXT_VER 0x80 /* use next version of lustre_msg */ -#define MSG_CONNECT_TRANSNO 0x100 /* report transno */ +#define MSG_CONNECT_LIBCLIENT 0x00000010 +#define MSG_CONNECT_INITIAL 0x00000020 +#define MSG_CONNECT_ASYNC 0x00000040 +#define MSG_CONNECT_NEXT_VER 0x00000080 /* use next version of lustre_msg */ +#define MSG_CONNECT_TRANSNO 0x00000100 /* report transno */ /* Connect flags */ -#define OBD_CONNECT_RDONLY 0x00000001ULL /* client allowed read-only access */ -#define OBD_CONNECT_INDEX 0x00000002ULL /* connect to specific LOV idx */ -#define OBD_CONNECT_GRANT 0x00000008ULL /* OSC acquires grant at connect */ -#define OBD_CONNECT_SRVLOCK 0x00000010ULL /* server takes locks for client */ -#define OBD_CONNECT_VERSION 0x00000020ULL /* Server supports versions in ocd */ -#define OBD_CONNECT_REQPORTAL 0x00000040ULL /* Separate portal for non-IO reqs */ -#define OBD_CONNECT_ACL 0x00000080ULL /* client uses access control lists */ -#define OBD_CONNECT_XATTR 0x00000100ULL /* client using extended attributes*/ -#define OBD_CONNECT_TRUNCLOCK 0x00000400ULL /* locks on server for punch b=9528 */ -#define OBD_CONNECT_IBITS 0x00001000ULL /* support for inodebits locks */ -#define OBD_CONNECT_JOIN 0x00002000ULL /* files can be concatenated */ -#define OBD_CONNECT_ATTRFID 0x00004000ULL /* Server supports GetAttr By Fid */ -#define OBD_CONNECT_NODEVOH 0x00008000ULL /* No open handle for special nodes */ -#define OBD_CONNECT_RMT_CLIENT 0x00010000ULL /* Remote client */ -#define OBD_CONNECT_RMT_CLIENT_FORCE 0x00020000ULL /* Remote client by force */ -#define OBD_CONNECT_BRW_SIZE 0x00040000ULL /* Max bytes per rpc */ -#define OBD_CONNECT_QUOTA64 0x00080000ULL /* 64bit qunit_data.qd_count b=10707*/ -#define OBD_CONNECT_MDS_CAPA 0x00100000ULL /* MDS capability */ -#define OBD_CONNECT_OSS_CAPA 0x00200000ULL /* OSS capability */ -#define OBD_CONNECT_CANCELSET 0x00400000ULL /* Early batched cancels. */ -#define OBD_CONNECT_SOM 0x00800000ULL /* SOM feature */ -#define OBD_CONNECT_AT 0x01000000ULL /* client uses adaptive timeouts */ -#define OBD_CONNECT_LRU_RESIZE 0x02000000ULL /* Lru resize feature. 
*/ -#define OBD_CONNECT_MDS_MDS 0x04000000ULL /* MDS-MDS connection*/ -#define OBD_CONNECT_REAL 0x08000000ULL /* real connection */ -#define OBD_CONNECT_CHANGE_QS 0x10000000ULL /* shrink/enlarge qunit b=10600 */ -#define OBD_CONNECT_CKSUM 0x20000000ULL /* support several cksum algos */ -#define OBD_CONNECT_FID 0x40000000ULL /* FID is supported by server */ -#define OBD_CONNECT_LOV_V3 0x100000000ULL /* client supports lov v3 ea */ - +#define OBD_CONNECT_RDONLY 0x1ULL /*client has read-only access*/ +#define OBD_CONNECT_INDEX 0x2ULL /*connect specific LOV idx */ +#define OBD_CONNECT_MDS 0x4ULL /*connect from MDT to OST */ +#define OBD_CONNECT_GRANT 0x8ULL /*OSC gets grant at connect */ +#define OBD_CONNECT_SRVLOCK 0x10ULL /*server takes locks for cli */ +#define OBD_CONNECT_VERSION 0x20ULL /*Lustre versions in ocd */ +#define OBD_CONNECT_REQPORTAL 0x40ULL /*Separate non-IO req portal */ +#define OBD_CONNECT_ACL 0x80ULL /*access control lists */ +#define OBD_CONNECT_XATTR 0x100ULL /*client use extended attr */ +#define OBD_CONNECT_CROW 0x200ULL /*MDS+OST create obj on write*/ +#define OBD_CONNECT_TRUNCLOCK 0x400ULL /*locks on server for punch */ +#define OBD_CONNECT_TRANSNO 0x800ULL /*replay sends init transno */ +#define OBD_CONNECT_IBITS 0x1000ULL /*support for inodebits locks*/ +#define OBD_CONNECT_JOIN 0x2000ULL /*files can be concatenated. + *We do not support JOIN FILE + *anymore, reserve this flags + *just for preventing such bit + *to be reused.*/ +#define OBD_CONNECT_ATTRFID 0x4000ULL /*Server can GetAttr By Fid*/ +#define OBD_CONNECT_NODEVOH 0x8000ULL /*No open hndl on specl nodes*/ +#define OBD_CONNECT_RMT_CLIENT 0x10000ULL /*Remote client */ +#define OBD_CONNECT_RMT_CLIENT_FORCE 0x20000ULL /*Remote client by force */ +#define OBD_CONNECT_BRW_SIZE 0x40000ULL /*Max bytes per rpc */ +#define OBD_CONNECT_QUOTA64 0x80000ULL /*64bit qunit_data.qd_count */ +#define OBD_CONNECT_MDS_CAPA 0x100000ULL /*MDS capability */ +#define OBD_CONNECT_OSS_CAPA 0x200000ULL /*OSS capability */ +#define OBD_CONNECT_CANCELSET 0x400000ULL /*Early batched cancels. */ +#define OBD_CONNECT_SOM 0x800000ULL /*Size on MDS */ +#define OBD_CONNECT_AT 0x1000000ULL /*client uses AT */ +#define OBD_CONNECT_LRU_RESIZE 0x2000000ULL /*LRU resize feature. 
*/ +#define OBD_CONNECT_MDS_MDS 0x4000000ULL /*MDS-MDS connection */ +#define OBD_CONNECT_REAL 0x8000000ULL /*real connection */ +#define OBD_CONNECT_CHANGE_QS 0x10000000ULL /*shrink/enlarge qunit */ +#define OBD_CONNECT_CKSUM 0x20000000ULL /*support several cksum algos*/ +#define OBD_CONNECT_FID 0x40000000ULL /*FID is supported by server */ +#define OBD_CONNECT_VBR 0x80000000ULL /*version based recovery */ +#define OBD_CONNECT_LOV_V3 0x100000000ULL /*client supports LOV v3 EA */ +#define OBD_CONNECT_GRANT_SHRINK 0x200000000ULL /* support grant shrink */ +#define OBD_CONNECT_SKIP_ORPHAN 0x400000000ULL /* don't reuse orphan objids */ +#define OBD_CONNECT_MAX_EASIZE 0x800000000ULL /* preserved for large EA */ +#define OBD_CONNECT_FULL20 0x1000000000ULL /* it is 2.0 client */ +#define OBD_CONNECT_LAYOUTLOCK 0x2000000000ULL /* client supports layout lock */ +#define OBD_CONNECT_64BITHASH 0x4000000000ULL /* client supports 64-bits + * directory hash */ /* also update obd_connect_names[] for lprocfs_rd_connect_flags() * and lustre/utils/wirecheck.c */ @@ -707,27 +1086,29 @@ extern void lustre_swab_ptlrpc_body(struct ptlrpc_body *pb); #define MDT_CONNECT_SUPPORTED (OBD_CONNECT_RDONLY | OBD_CONNECT_VERSION | \ OBD_CONNECT_ACL | OBD_CONNECT_XATTR | \ OBD_CONNECT_IBITS | OBD_CONNECT_JOIN | \ - OBD_CONNECT_NODEVOH |/* OBD_CONNECT_ATTRFID |*/\ + OBD_CONNECT_NODEVOH | OBD_CONNECT_ATTRFID | \ + OBD_CONNECT_CANCELSET | OBD_CONNECT_AT | \ OBD_CONNECT_RMT_CLIENT | \ OBD_CONNECT_RMT_CLIENT_FORCE | \ OBD_CONNECT_MDS_CAPA | OBD_CONNECT_OSS_CAPA | \ - OBD_CONNECT_MDS_MDS | OBD_CONNECT_CANCELSET | \ - OBD_CONNECT_FID | \ - LRU_RESIZE_CONNECT_FLAG | OBD_CONNECT_AT | \ - OBD_CONNECT_LOV_V3) + OBD_CONNECT_MDS_MDS | OBD_CONNECT_FID | \ + LRU_RESIZE_CONNECT_FLAG | OBD_CONNECT_VBR | \ + OBD_CONNECT_LOV_V3 | OBD_CONNECT_SOM | \ + OBD_CONNECT_FULL20 | OBD_CONNECT_64BITHASH) #define OST_CONNECT_SUPPORTED (OBD_CONNECT_SRVLOCK | OBD_CONNECT_GRANT | \ OBD_CONNECT_REQPORTAL | OBD_CONNECT_VERSION | \ OBD_CONNECT_TRUNCLOCK | OBD_CONNECT_INDEX | \ OBD_CONNECT_BRW_SIZE | OBD_CONNECT_QUOTA64 | \ - OBD_CONNECT_OSS_CAPA | OBD_CONNECT_CANCELSET | \ - OBD_CONNECT_CKSUM | LRU_RESIZE_CONNECT_FLAG | \ - OBD_CONNECT_AT | OBD_CONNECT_CHANGE_QS | \ - OBD_CONNECT_RMT_CLIENT | \ - OBD_CONNECT_RMT_CLIENT_FORCE) + OBD_CONNECT_CANCELSET | OBD_CONNECT_AT | \ + LRU_RESIZE_CONNECT_FLAG | OBD_CONNECT_CKSUM | \ + OBD_CONNECT_CHANGE_QS | \ + OBD_CONNECT_OSS_CAPA | OBD_CONNECT_RMT_CLIENT | \ + OBD_CONNECT_RMT_CLIENT_FORCE | OBD_CONNECT_VBR | \ + OBD_CONNECT_MDS | OBD_CONNECT_SKIP_ORPHAN | \ + OBD_CONNECT_GRANT_SHRINK | OBD_CONNECT_FULL20) #define ECHO_CONNECT_SUPPORTED (0) -#define MGS_CONNECT_SUPPORTED (OBD_CONNECT_VERSION | OBD_CONNECT_AT) - -#define MAX_QUOTA_COUNT32 (0xffffffffULL) +#define MGS_CONNECT_SUPPORTED (OBD_CONNECT_VERSION | OBD_CONNECT_AT | \ + OBD_CONNECT_FULL20) #define OBD_OCD_VERSION(major,minor,patch,fix) (((major)<<24) + ((minor)<<16) +\ ((patch)<<8) + (fix)) @@ -798,47 +1179,36 @@ typedef enum { } ost_cmd_t; #define OST_FIRST_OPC OST_REPLY -typedef __u64 obd_id; -typedef __u64 obd_gr; -typedef __u64 obd_time; -typedef __u64 obd_size; -typedef __u64 obd_off; -typedef __u64 obd_blocks; -typedef __u32 obd_blksize; -typedef __u32 obd_mode; -typedef __u32 obd_uid; -typedef __u32 obd_gid; -typedef __u32 obd_flag; -typedef __u64 obd_valid; -typedef __u32 obd_count; - -#define OBD_FL_INLINEDATA (0x00000001) -#define OBD_FL_OBDMDEXISTS (0x00000002) -#define OBD_FL_DELORPHAN (0x00000004) /* if set in o_flags delete orphans */ -#define 
OBD_FL_NORPC (0x00000008) /* set in o_flags do in OSC not OST */ -#define OBD_FL_IDONLY (0x00000010) /* set in o_flags only adjust obj id*/ -#define OBD_FL_RECREATE_OBJS (0x00000020) /* recreate missing obj */ -#define OBD_FL_DEBUG_CHECK (0x00000040) /* echo client/server debug check */ -#define OBD_FL_NO_USRQUOTA (0x00000100) /* the object's owner is over quota */ -#define OBD_FL_NO_GRPQUOTA (0x00000200) /* the object's group is over quota */ - -/** - * Set this to delegate DLM locking during obd_punch() to the OSTs. Only OSTs - * that declared OBD_CONNECT_TRUNCLOCK in their connect flags support this - * functionality. - */ -#define OBD_FL_TRUNCLOCK (0x00000800) - -/* - * Checksum types - */ -#define OBD_FL_CKSUM_CRC32 (0x00001000) -#define OBD_FL_CKSUM_ADLER (0x00002000) -#define OBD_FL_CKSUM_ALL (OBD_FL_CKSUM_CRC32 | OBD_FL_CKSUM_ADLER) +enum obdo_flags { + OBD_FL_INLINEDATA = 0x00000001, + OBD_FL_OBDMDEXISTS = 0x00000002, + OBD_FL_DELORPHAN = 0x00000004, /* if set in o_flags delete orphans */ + OBD_FL_NORPC = 0x00000008, /* set in o_flags do in OSC not OST */ + OBD_FL_IDONLY = 0x00000010, /* set in o_flags only adjust obj id*/ + OBD_FL_RECREATE_OBJS= 0x00000020, /* recreate missing obj */ + OBD_FL_DEBUG_CHECK = 0x00000040, /* echo client/server debug check */ + OBD_FL_NO_USRQUOTA = 0x00000100, /* the object's owner is over quota */ + OBD_FL_NO_GRPQUOTA = 0x00000200, /* the object's group is over quota */ + OBD_FL_CREATE_CROW = 0x00000400, /* object should be create on write */ + OBD_FL_SRVLOCK = 0x00000800, /* delegate DLM locking to server */ + OBD_FL_CKSUM_CRC32 = 0x00001000, /* CRC32 checksum type */ + OBD_FL_CKSUM_ADLER = 0x00002000, /* ADLER checksum type */ + OBD_FL_CKSUM_RSVD1 = 0x00004000, /* for future cksum types */ + OBD_FL_CKSUM_RSVD2 = 0x00008000, /* for future cksum types */ + OBD_FL_CKSUM_RSVD3 = 0x00010000, /* for future cksum types */ + OBD_FL_SHRINK_GRANT = 0x00020000, /* object shrink the grant */ + OBD_FL_MMAP = 0x00040000, /* object is mmapped on the client */ + OBD_FL_RECOV_RESEND = 0x00080000, /* recoverable resent */ + + OBD_FL_CKSUM_ALL = OBD_FL_CKSUM_CRC32 | OBD_FL_CKSUM_ADLER, + + /* mask for local-only flag, which won't be sent over network */ + OBD_FL_LOCAL_MASK = 0xF0000000, +}; #define LOV_MAGIC_V1 0x0BD10BD0 #define LOV_MAGIC LOV_MAGIC_V1 -#define LOV_MAGIC_JOIN 0x0BD20BD0 +#define LOV_MAGIC_JOIN_V1 0x0BD20BD0 #define LOV_MAGIC_V3 0x0BD30BD0 #define LOV_PATTERN_RAID0 0x001 /* stripes are used round-robin */ @@ -846,13 +1216,10 @@ typedef __u32 obd_count; #define LOV_PATTERN_FIRST 0x100 /* first stripe is not in round-robin */ #define LOV_PATTERN_CMOBD 0x200 -#define LOV_OBJECT_GROUP_DEFAULT ~0ULL -#define LOV_OBJECT_GROUP_CLEAR 0ULL - #define lov_ost_data lov_ost_data_v1 struct lov_ost_data_v1 { /* per-stripe data structure (little-endian)*/ __u64 l_object_id; /* OST object ID */ - __u64 l_object_gr; /* OST object group (creating MDS number) */ + __u64 l_object_seq; /* OST object seq number */ __u32 l_ost_gen; /* generation of this l_ost_idx */ __u32 l_ost_idx; /* OST index in LOV (lov_tgt_desc->tgts) */ }; @@ -862,7 +1229,7 @@ struct lov_mds_md_v1 { /* LOV EA mds/wire data (little-endian) */ __u32 lmm_magic; /* magic number = LOV_MAGIC_V1 */ __u32 lmm_pattern; /* LOV_PATTERN_RAID0, LOV_PATTERN_RAID1 */ __u64 lmm_object_id; /* LOV object ID */ - __u64 lmm_object_gr; /* LOV object group */ + __u64 lmm_object_seq; /* LOV object seq number */ __u32 lmm_stripe_size; /* size of stripe in bytes */ __u32 lmm_stripe_count; /* num stripes in use for this object 
*/ struct lov_ost_data_v1 lmm_objects[0]; /* per-stripe data */ @@ -890,7 +1257,7 @@ struct lov_mds_md_v3 { /* LOV EA mds/wire data (little-endian) */ __u32 lmm_magic; /* magic number = LOV_MAGIC_V3 */ __u32 lmm_pattern; /* LOV_PATTERN_RAID0, LOV_PATTERN_RAID1 */ __u64 lmm_object_id; /* LOV object ID */ - __u64 lmm_object_gr; /* LOV object group */ + __u64 lmm_object_seq; /* LOV object seq number */ __u32 lmm_stripe_size; /* size of stripe in bytes */ __u32 lmm_stripe_count; /* num stripes in use for this object */ char lmm_pool_name[LOV_MAXPOOLNAME]; /* must be 32bit aligned */ @@ -919,11 +1286,11 @@ struct lov_mds_md_v3 { /* LOV EA mds/wire data (little-endian) */ #define OBD_MD_FLHANDLE (0x00080000ULL) /* file/lock handle */ #define OBD_MD_FLCKSUM (0x00100000ULL) /* bulk data checksum */ #define OBD_MD_FLQOS (0x00200000ULL) /* quality of service stats */ -#define OBD_MD_FLOSCOPQ (0x00400000ULL) /* osc opaque data */ +/*#define OBD_MD_FLOSCOPQ (0x00400000ULL) osc opaque data, never used */ #define OBD_MD_FLCOOKIE (0x00800000ULL) /* log cancellation cookie */ #define OBD_MD_FLGROUP (0x01000000ULL) /* group */ #define OBD_MD_FLFID (0x02000000ULL) /* ->ost write inline fid */ -#define OBD_MD_FLEPOCH (0x04000000ULL) /* ->ost write easize is epoch */ +#define OBD_MD_FLEPOCH (0x04000000ULL) /* ->ost write with ioepoch */ /* ->mds if epoch opens or closes */ #define OBD_MD_FLGRANT (0x08000000ULL) /* ost preallocation space grant */ #define OBD_MD_FLDIREA (0x10000000ULL) /* dir's extended attribute data */ @@ -934,6 +1301,7 @@ struct lov_mds_md_v3 { /* LOV EA mds/wire data (little-endian) */ #define OBD_MD_MDS (0x0000000100000000ULL) /* where an inode lives on */ #define OBD_MD_REINT (0x0000000200000000ULL) /* reintegrate oa */ #define OBD_MD_MEA (0x0000000400000000ULL) /* CMD split EA */ +#define OBD_MD_MDTIDX (0x0000000800000000ULL) /* Get MDT index */ #define OBD_MD_FLXATTR (0x0000001000000000ULL) /* xattr */ #define OBD_MD_FLXATTRLS (0x0000002000000000ULL) /* xattr list */ @@ -944,7 +1312,8 @@ struct lov_mds_md_v3 { /* LOV EA mds/wire data (little-endian) */ #define OBD_MD_FLOSSCAPA (0x0000040000000000ULL) /* OSS capability */ #define OBD_MD_FLCKSPLIT (0x0000080000000000ULL) /* Check split on server */ #define OBD_MD_FLCROSSREF (0x0000100000000000ULL) /* Cross-ref case */ - +#define OBD_MD_FLGETATTRLOCK (0x0000200000000000ULL) /* Get IOEpoch attributes + * under lock */ #define OBD_FL_TRUNC (0x0000200000000000ULL) /* for filter_truncate */ #define OBD_MD_FLRMTLSETFACL (0x0001000000000000ULL) /* lfs lsetfacl case */ @@ -961,48 +1330,35 @@ struct lov_mds_md_v3 { /* LOV EA mds/wire data (little-endian) */ /* don't forget obdo_fid which is way down at the bottom so it can * come after the definition of llog_cookie */ -struct obd_statfs { - __u64 os_type; - __u64 os_blocks; - __u64 os_bfree; - __u64 os_bavail; - __u64 os_files; - __u64 os_ffree; - __u8 os_fsid[40]; - __u32 os_bsize; - __u32 os_namelen; - __u64 os_maxbytes; - __u32 os_state; /* positive error code on server */ - __u32 os_spare1; - __u32 os_spare2; - __u32 os_spare3; - __u32 os_spare4; - __u32 os_spare5; - __u32 os_spare6; - __u32 os_spare7; - __u32 os_spare8; - __u32 os_spare9; -}; extern void lustre_swab_obd_statfs (struct obd_statfs *os); #define OBD_STATFS_NODELAY 0x0001 /* requests should be send without delay * and resends for avoid deadlocks */ - #define OBD_STATFS_FROM_CACHE 0x0002 /* the statfs callback should not update * obd_osfs_age */ +#define OBD_STATFS_PTLRPCD 0x0004 /* requests will be sent via ptlrpcd + * instead 
of a specific set. This + * means that we cannot rely on the set + * interpret routine to be called. + * lov_statfs_fini() must thus be called + * by the request interpret routine */ /* ost_body.data values for OST_BRW */ #define OBD_BRW_READ 0x01 #define OBD_BRW_WRITE 0x02 #define OBD_BRW_RWMASK (OBD_BRW_READ | OBD_BRW_WRITE) -#define OBD_BRW_SYNC 0x08 +#define OBD_BRW_SYNC 0x08 /* this page is a part of synchronous + * transfer and is not accounted in + * the grant. */ #define OBD_BRW_CHECK 0x10 #define OBD_BRW_FROM_GRANT 0x20 /* the osc manages this under llite */ #define OBD_BRW_GRANTED 0x40 /* the ost manages this */ #define OBD_BRW_NOCACHE 0x80 /* this page is a part of non-cached IO */ #define OBD_BRW_NOQUOTA 0x100 #define OBD_BRW_SRVLOCK 0x200 /* Client holds no lock over this page */ +#define OBD_BRW_ASYNC 0x400 /* Server may delay commit to disk */ +#define OBD_BRW_MEMALLOC 0x800 /* Client runs in the "kswapd" context */ #define OBD_OBJECT_EOF 0xffffffffffffffffULL @@ -1011,7 +1367,7 @@ extern void lustre_swab_obd_statfs (struct obd_statfs *os); struct obd_ioobj { obd_id ioo_id; - obd_gr ioo_gr; + obd_seq ioo_seq; __u32 ioo_type; __u32 ioo_bufcnt; }; @@ -1040,11 +1396,11 @@ extern void lustre_swab_niobuf_remote (struct niobuf_remote *nbr); #define OST_LVB_GET_ERR(blocks) (int)(blocks - OST_LVB_ERR_INIT) struct ost_lvb { - __u64 lvb_size; - __u64 lvb_mtime; - __u64 lvb_atime; - __u64 lvb_ctime; - __u64 lvb_blocks; + __u64 lvb_size; + obd_time lvb_mtime; + obd_time lvb_atime; + obd_time lvb_ctime; + __u64 lvb_blocks; }; extern void lustre_swab_ost_lvb(struct ost_lvb *); @@ -1075,6 +1431,7 @@ typedef enum { MDS_SETXATTR = 50, /* obsolete, now it's MDS_REINT op */ MDS_WRITEPAGE = 51, MDS_IS_SUBDIR = 52, + MDS_GET_INFO = 53, MDS_LAST_OPC } mds_cmd_t; @@ -1121,14 +1478,6 @@ extern void lustre_swab_generic_32s (__u32 *val); /* This FULL lock is useful to take on unlink sort of operations */ #define MDS_INODELOCK_FULL ((1<<(MDS_INODELOCK_MAXSHIFT+1))-1) -struct ll_fid { - __u64 id; /* holds object id */ - __u32 generation; /* holds object generation */ - - __u32 f_type; /* holds object type or stripe idx when passing it to - * OST for saving into EA. */ -}; - extern void lustre_swab_ll_fid (struct ll_fid *fid); #define MDS_STATUS_CONN 1 @@ -1152,13 +1501,15 @@ enum md_op_flags { MF_MDC_CANCEL_FID2 = (1 << 4), MF_MDC_CANCEL_FID3 = (1 << 5), MF_MDC_CANCEL_FID4 = (1 << 6), + /* There is a pending attribute update. */ + MF_SOM_AU = (1 << 7), + /* Cancel OST locks while getattr OST attributes. */ + MF_GETATTR_LOCK = (1 << 8), }; -#define MF_SOM_LOCAL_FLAGS (MF_MDC_CANCEL_FID1 | MF_MDC_CANCEL_FID2 | \ - MF_MDC_CANCEL_FID3 | MF_MDC_CANCEL_FID4) +#define MF_SOM_LOCAL_FLAGS (MF_SOM_CHANGE | MF_EPOCH_OPEN | MF_EPOCH_CLOSE) #define MDS_BFLAG_UNCOMMITTED_WRITES 0x1 -#define MDS_BFLAG_EXT_FLAGS 0x80000000 /* == EXT3_RESERVED_FL */ /* these should be identical to their EXT3_*_FL counterparts, and are * redefined here only to avoid dragging in ext3_fs.h */ @@ -1169,29 +1520,26 @@ enum md_op_flags { #define MDS_DIRSYNC_FL 0x00010000 /* dirsync behaviour (dir only) */ #ifdef __KERNEL__ -/* If MDS_BFLAG_IOC_FLAGS is set it means we requested EXT3_*_FL inode flags - * and we need to decode these into local S_* flags in the inode. Otherwise - * we pass flags straight through (see bug 9486). */ +/* Convert wire MDS_*_FL to corresponding client local VFS S_* values + * for the client inode i_flags. 
The MDS_*_FL are the Lustre wire + * protocol equivalents of LDISKFS_*_FL values stored on disk, while + * the S_* flags are kernel-internal values that change between kernel + * versions. These flags are set/cleared via FSFILT_IOC_{GET,SET}_FLAGS. + * See b=16526 for a full history. */ static inline int ll_ext_to_inode_flags(int flags) { - return (flags & MDS_BFLAG_EXT_FLAGS) ? - (((flags & MDS_SYNC_FL) ? S_SYNC : 0) | + return (((flags & MDS_SYNC_FL) ? S_SYNC : 0) | ((flags & MDS_NOATIME_FL) ? S_NOATIME : 0) | ((flags & MDS_APPEND_FL) ? S_APPEND : 0) | #if defined(S_DIRSYNC) ((flags & MDS_DIRSYNC_FL) ? S_DIRSYNC : 0) | #endif - ((flags & MDS_IMMUTABLE_FL) ? S_IMMUTABLE : 0)) : - (flags & ~MDS_BFLAG_EXT_FLAGS); + ((flags & MDS_IMMUTABLE_FL) ? S_IMMUTABLE : 0)); } -/* If MDS_BFLAG_EXT_FLAGS is set it means we requested EXT3_*_FL inode flags - * and we pass these straight through. Otherwise we need to convert from - * S_* flags to their EXT3_*_FL equivalents (see bug 9486). */ -static inline int ll_inode_to_ext_flags(int oflags, int iflags) +static inline int ll_inode_to_ext_flags(int iflags) { - return (oflags & MDS_BFLAG_EXT_FLAGS) ? (oflags & ~MDS_BFLAG_EXT_FLAGS): - (((iflags & S_SYNC) ? MDS_SYNC_FL : 0) | + return (((iflags & S_SYNC) ? MDS_SYNC_FL : 0) | ((iflags & S_NOATIME) ? MDS_NOATIME_FL : 0) | ((iflags & S_APPEND) ? MDS_APPEND_FL : 0) | #if defined(S_DIRSYNC) @@ -1201,18 +1549,28 @@ static inline int ll_inode_to_ext_flags(int oflags, int iflags) } #endif -struct mdt_body { - struct lu_fid fid1; - struct lu_fid fid2; +/* + * while mds_body is to interact with 1.6, mdt_body is to interact with 2.0. + * both of them should have the same fields layout, because at client side + * one could be dynamically cast to the other. + * + * mdt_body has large size than mds_body, with unused padding (48 bytes) + * at the end. client always use size of mdt_body to prepare request/reply + * buffers, and actual data could be interepeted as mdt_body or mds_body + * accordingly. 
+ */ +struct mds_body { + struct ll_fid fid1; + struct ll_fid fid2; struct lustre_handle handle; __u64 valid; __u64 size; /* Offset, in the case of MDS_READPAGE */ - __u64 mtime; - __u64 atime; - __u64 ctime; + obd_time mtime; + obd_time atime; + obd_time ctime; __u64 blocks; /* XID, in the case of MDS_READPAGE */ - __u64 ioepoch; - __u64 ino; /* for 1.6 compatibility */ + __u64 io_epoch; + __u64 ino; __u32 fsuid; __u32 fsgid; __u32 capability; @@ -1222,33 +1580,29 @@ struct mdt_body { __u32 flags; /* from vfs for pin/unpin, MDS_BFLAG for close */ __u32 rdev; __u32 nlink; /* #bytes to read in the case of MDS_READPAGE */ - __u32 generation; /* for 1.6 compatibility */ + __u32 generation; __u32 suppgid; __u32 eadatasize; __u32 aclsize; __u32 max_mdsize; __u32 max_cookiesize; - __u32 padding_4; /* also fix lustre_swab_mdt_body */ - __u64 padding_5; - __u64 padding_6; - __u64 padding_7; - __u64 padding_8; - __u64 padding_9; - __u64 padding_10; + __u32 padding_4; /* also fix lustre_swab_mds_body */ }; -struct mds_body { - struct ll_fid fid1; - struct ll_fid fid2; +extern void lustre_swab_mds_body (struct mds_body *b); + +struct mdt_body { + struct lu_fid fid1; + struct lu_fid fid2; struct lustre_handle handle; __u64 valid; __u64 size; /* Offset, in the case of MDS_READPAGE */ - __u64 mtime; - __u64 atime; - __u64 ctime; + obd_time mtime; + obd_time atime; + obd_time ctime; __u64 blocks; /* XID, in the case of MDS_READPAGE */ - __u64 io_epoch; - __u64 ino; + __u64 ioepoch; + __u64 ino; /* for 1.6 compatibility */ __u32 fsuid; __u32 fsgid; __u32 capability; @@ -1258,26 +1612,32 @@ struct mds_body { __u32 flags; /* from vfs for pin/unpin, MDS_BFLAG for close */ __u32 rdev; __u32 nlink; /* #bytes to read in the case of MDS_READPAGE */ - __u32 generation; + __u32 generation; /* for 1.6 compatibility */ __u32 suppgid; __u32 eadatasize; __u32 aclsize; __u32 max_mdsize; __u32 max_cookiesize; - __u32 padding_4; /* also fix lustre_swab_mds_body */ -}; + __u32 uid_h; /* high 32-bits of uid, for FUID */ + __u32 gid_h; /* high 32-bits of gid, for FUID */ + __u32 padding_5; /* also fix lustre_swab_mdt_body */ + __u64 padding_6; + __u64 padding_7; + __u64 padding_8; + __u64 padding_9; + __u64 padding_10; +}; /* 216 */ -extern void lustre_swab_mds_body (struct mds_body *b); extern void lustre_swab_mdt_body (struct mdt_body *b); -struct mdt_epoch { +struct mdt_ioepoch { struct lustre_handle handle; __u64 ioepoch; __u32 flags; __u32 padding; }; -extern void lustre_swab_mdt_epoch (struct mdt_epoch *b); +extern void lustre_swab_mdt_ioepoch (struct mdt_ioepoch *b); #define Q_QUOTACHECK 0x800100 #define Q_INITQUOTA 0x800101 /* init slave limits */ @@ -1323,24 +1683,31 @@ struct quota_adjust_qunit { }; extern void lustre_swab_quota_adjust_qunit(struct quota_adjust_qunit *q); -/* flags in qunit_data and quota_adjust_qunit will use macroes below */ +/* flags is shared among quota structures */ #define LQUOTA_FLAGS_GRP 1UL /* 0 is user, 1 is group */ #define LQUOTA_FLAGS_BLK 2UL /* 0 is inode, 1 is block */ #define LQUOTA_FLAGS_ADJBLK 4UL /* adjust the block qunit size */ #define LQUOTA_FLAGS_ADJINO 8UL /* adjust the inode qunit size */ #define LQUOTA_FLAGS_CHG_QS 16UL /* indicate whether it has capability of * OBD_CONNECT_CHANGE_QS */ +#define LQUOTA_FLAGS_RECOVERY 32UL /* recovery is going on a uid/gid */ +#define LQUOTA_FLAGS_SETQUOTA 64UL /* being setquota on a uid/gid */ + +/* flags is specific for quota_adjust_qunit */ +#define LQUOTA_QAQ_CREATE_LQS (1 << 31) /* when it is set, need create lqs */ -/* the status of 
lqsk_flags in struct lustre_qunit_size_key */ +/* the status of lqs_flags in struct lustre_qunit_size */ #define LQUOTA_QUNIT_FLAGS (LQUOTA_FLAGS_GRP | LQUOTA_FLAGS_BLK) #define QAQ_IS_GRP(qaq) ((qaq)->qaq_flags & LQUOTA_FLAGS_GRP) #define QAQ_IS_ADJBLK(qaq) ((qaq)->qaq_flags & LQUOTA_FLAGS_ADJBLK) #define QAQ_IS_ADJINO(qaq) ((qaq)->qaq_flags & LQUOTA_FLAGS_ADJINO) +#define QAQ_IS_CREATE_LQS(qaq) ((qaq)->qaq_flags & LQUOTA_QAQ_CREATE_LQS) #define QAQ_SET_GRP(qaq) ((qaq)->qaq_flags |= LQUOTA_FLAGS_GRP) #define QAQ_SET_ADJBLK(qaq) ((qaq)->qaq_flags |= LQUOTA_FLAGS_ADJBLK) #define QAQ_SET_ADJINO(qaq) ((qaq)->qaq_flags |= LQUOTA_FLAGS_ADJINO) +#define QAQ_SET_CREATE_LQS(qaq) ((qaq)->qaq_flags |= LQUOTA_QAQ_CREATE_LQS) /* inode access permission for remote user, the inode info are omitted, * for client knows them. */ @@ -1367,33 +1734,14 @@ struct mdt_remote_perm { __u32 rp_uid; __u32 rp_gid; __u32 rp_fsuid; + __u32 rp_fsuid_h; __u32 rp_fsgid; + __u32 rp_fsgid_h; __u32 rp_access_perm; /* MAY_READ/WRITE/EXEC */ }; extern void lustre_swab_mdt_remote_perm(struct mdt_remote_perm *p); -struct mds_rec_setattr { - __u32 sa_opcode; - __u32 sa_fsuid; - __u32 sa_fsgid; - __u32 sa_cap; - __u32 sa_suppgid; - __u32 sa_mode; - struct ll_fid sa_fid; - __u64 sa_valid; /* MDS_ATTR_* attributes */ - __u64 sa_size; - __u64 sa_mtime; - __u64 sa_atime; - __u64 sa_ctime; - __u32 sa_uid; - __u32 sa_gid; - __u32 sa_attr_flags; - __u32 sa_padding; /* also fix lustre_swab_mds_rec_setattr */ -}; - -extern void lustre_swab_mds_rec_setattr (struct mds_rec_setattr *sa); - struct mdt_rec_setattr { __u32 sa_opcode; __u32 sa_cap; @@ -1411,9 +1759,9 @@ struct mdt_rec_setattr { __u32 sa_gid; __u64 sa_size; __u64 sa_blocks; - __u64 sa_mtime; - __u64 sa_atime; - __u64 sa_ctime; + obd_time sa_mtime; + obd_time sa_atime; + obd_time sa_ctime; __u32 sa_attr_flags; __u32 sa_mode; __u32 sa_padding_2; @@ -1425,7 +1773,7 @@ struct mdt_rec_setattr { extern void lustre_swab_mdt_rec_setattr (struct mdt_rec_setattr *sa); /* - * Attribute flags used in mds_rec_setattr::sa_valid. + * Attribute flags used in mdt_rec_setattr::sa_valid. * The kernel's #defines for ATTR_* should not be used over the network * since the client and MDS may run different kernels (see bug 13828) * Therefore, we should only use MDS_ATTR_* attributes for sa_valid. @@ -1452,8 +1800,11 @@ extern void lustre_swab_mdt_rec_setattr (struct mdt_rec_setattr *sa); #define FMODE_WRITE 00000002 #endif +/* IO Epoch is opened on a closed file. */ #define FMODE_EPOCH 01000000 -#define FMODE_EPOCHLCK 02000000 +/* IO Epoch is opened on a file truncate. */ +#define FMODE_TRUNC 02000000 +/* Size-on-MDS Attribute Update is pending. */ #define FMODE_SOM 04000000 #define FMODE_CLOSED 0 @@ -1470,7 +1821,11 @@ extern void lustre_swab_mdt_rec_setattr (struct mdt_rec_setattr *sa); #define MDS_OPEN_DELAY_CREATE 0100000000 /* delay initial object create */ #define MDS_OPEN_OWNEROVERRIDE 0200000000 /* NFSD rw-reopen ro file for owner */ -#define MDS_OPEN_JOIN_FILE 0400000000 /* open for join file*/ +#define MDS_OPEN_JOIN_FILE 0400000000 /* open for join file. + * We do not support JOIN FILE + * anymore, reserve this flags + * just for preventing such bit + * to be reused. 
*/ #define MDS_CREATE_RMT_ACL 01000000000 /* indicate create on remote server * with default ACL */ #define MDS_CREATE_SLAVE_OBJ 02000000000 /* indicate create slave object @@ -1479,6 +1834,9 @@ extern void lustre_swab_mdt_rec_setattr (struct mdt_rec_setattr *sa); #define MDS_OPEN_LOCK 04000000000 /* This open requires open lock */ #define MDS_OPEN_HAS_EA 010000000000 /* specify object create pattern */ #define MDS_OPEN_HAS_OBJS 020000000000 /* Just set the EA the obj exist */ +#define MDS_OPEN_NORESTORE 0100000000000ULL /* Do not restore file at open */ +#define MDS_OPEN_NEWSTRIPE 0200000000000ULL /* New stripe needed (restripe or + * hsm restore) */ /* permission for create non-directory file */ #define MAY_CREATE (1 << 7) @@ -1498,49 +1856,17 @@ extern void lustre_swab_mdt_rec_setattr (struct mdt_rec_setattr *sa); #define MAY_RGETFACL (1 << 14) enum { - MDS_CHECK_SPLIT = 1 << 0, - MDS_CROSS_REF = 1 << 1, - MDS_VTX_BYPASS = 1 << 2, - MDS_PERM_BYPASS = 1 << 3, - MDS_SOM = 1 << 4, - MDS_QUOTA_IGNORE = 1 << 5 -}; - -struct mds_rec_join { - struct ll_fid jr_fid; - __u64 jr_headsize; -}; - -extern void lustre_swab_mds_rec_join (struct mds_rec_join *jr); - -struct mdt_rec_join { - struct lu_fid jr_fid; - __u64 jr_headsize; -}; - -extern void lustre_swab_mdt_rec_join (struct mdt_rec_join *jr); - -struct mds_rec_create { - __u32 cr_opcode; - __u32 cr_fsuid; - __u32 cr_fsgid; - __u32 cr_cap; - __u32 cr_flags; /* for use with open */ - __u32 cr_mode; - struct ll_fid cr_fid; - struct ll_fid cr_replayfid; - __u64 cr_time; - __u64 cr_rdev; - __u32 cr_suppgid; - __u32 cr_padding_1; /* also fix lustre_swab_mds_rec_create */ - __u32 cr_padding_2; /* also fix lustre_swab_mds_rec_create */ - __u32 cr_padding_3; /* also fix lustre_swab_mds_rec_create */ - __u32 cr_padding_4; /* also fix lustre_swab_mds_rec_create */ - __u32 cr_padding_5; /* also fix lustre_swab_mds_rec_create */ + MDS_CHECK_SPLIT = 1 << 0, + MDS_CROSS_REF = 1 << 1, + MDS_VTX_BYPASS = 1 << 2, + MDS_PERM_BYPASS = 1 << 3, + MDS_SOM = 1 << 4, + MDS_QUOTA_IGNORE = 1 << 5, + MDS_CLOSE_CLEANUP = 1 << 6, + MDS_KEEP_ORPHAN = 1 << 7 }; -extern void lustre_swab_mds_rec_create (struct mds_rec_create *cr); - +/* instance of mdt_reint_rec */ struct mdt_rec_create { __u32 cr_opcode; __u32 cr_cap; @@ -1554,39 +1880,34 @@ struct mdt_rec_create { __u32 cr_suppgid2_h; struct lu_fid cr_fid1; struct lu_fid cr_fid2; - struct lustre_handle cr_old_handle; /* u64 handle in case of open replay */ - __u64 cr_time; + struct lustre_handle cr_old_handle; /* handle in case of open replay */ + obd_time cr_time; __u64 cr_rdev; __u64 cr_ioepoch; - __u64 cr_padding_1; /* pad for 64 bits*/ + __u64 cr_padding_1; /* rr_blocks */ __u32 cr_mode; __u32 cr_bias; - __u32 cr_flags; /* for use with open */ - __u32 cr_padding_2; - __u32 cr_padding_3; - __u32 cr_padding_4; + /* use of helpers set/get_mrc_cr_flags() is needed to access + * 64 bits cr_flags [cr_flags_l, cr_flags_h], this is done to + * extend cr_flags size without breaking 1.8 compat */ + __u32 cr_flags_l; /* for use with open, low 32 bits */ + __u32 cr_flags_h; /* for use with open, high 32 bits */ + __u32 cr_padding_3; /* rr_padding_3 */ + __u32 cr_padding_4; /* rr_padding_4 */ }; -extern void lustre_swab_mdt_rec_create (struct mdt_rec_create *cr); - -struct mds_rec_link { - __u32 lk_opcode; - __u32 lk_fsuid; - __u32 lk_fsgid; - __u32 lk_cap; - __u32 lk_suppgid1; - __u32 lk_suppgid2; - struct ll_fid lk_fid1; - struct ll_fid lk_fid2; - __u64 lk_time; - __u32 lk_padding_1; /* also fix lustre_swab_mds_rec_link */ - __u32 
lk_padding_2; /* also fix lustre_swab_mds_rec_link */ - __u32 lk_padding_3; /* also fix lustre_swab_mds_rec_link */ - __u32 lk_padding_4; /* also fix lustre_swab_mds_rec_link */ -}; +static inline void set_mrc_cr_flags(struct mdt_rec_create *mrc, __u64 flags) +{ + mrc->cr_flags_l = (__u32)(flags & 0xFFFFFFFFUll); + mrc->cr_flags_h = (__u32)(flags >> 32); +} -extern void lustre_swab_mds_rec_link (struct mds_rec_link *lk); +static inline __u64 get_mrc_cr_flags(struct mdt_rec_create *mrc) +{ + return ((__u64)(mrc->cr_flags_l) | ((__u64)mrc->cr_flags_h << 32)); +} +/* instance of mdt_reint_rec */ struct mdt_rec_link { __u32 lk_opcode; __u32 lk_cap; @@ -1600,37 +1921,20 @@ struct mdt_rec_link { __u32 lk_suppgid2_h; struct lu_fid lk_fid1; struct lu_fid lk_fid2; - __u64 lk_time; - __u64 lk_padding_1; - __u64 lk_padding_2; - __u64 lk_padding_3; - __u64 lk_padding_4; + obd_time lk_time; + __u64 lk_padding_1; /* rr_atime */ + __u64 lk_padding_2; /* rr_ctime */ + __u64 lk_padding_3; /* rr_size */ + __u64 lk_padding_4; /* rr_blocks */ __u32 lk_bias; - __u32 lk_padding_5; - __u32 lk_padding_6; - __u32 lk_padding_7; - __u32 lk_padding_8; - __u32 lk_padding_9; + __u32 lk_padding_5; /* rr_mode */ + __u32 lk_padding_6; /* rr_flags */ + __u32 lk_padding_7; /* rr_padding_2 */ + __u32 lk_padding_8; /* rr_padding_3 */ + __u32 lk_padding_9; /* rr_padding_4 */ }; -struct mds_rec_unlink { - __u32 ul_opcode; - __u32 ul_fsuid; - __u32 ul_fsgid; - __u32 ul_cap; - __u32 ul_suppgid; - __u32 ul_mode; - struct ll_fid ul_fid1; - struct ll_fid ul_fid2; - __u64 ul_time; - __u32 ul_padding_1; /* also fix lustre_swab_mds_rec_unlink */ - __u32 ul_padding_2; /* also fix lustre_swab_mds_rec_unlink */ - __u32 ul_padding_3; /* also fix lustre_swab_mds_rec_unlink */ - __u32 ul_padding_4; /* also fix lustre_swab_mds_rec_unlink */ -}; - -extern void lustre_swab_mds_rec_unlink (struct mds_rec_unlink *ul); - +/* instance of mdt_reint_rec */ struct mdt_rec_unlink { __u32 ul_opcode; __u32 ul_cap; @@ -1644,37 +1948,20 @@ struct mdt_rec_unlink { __u32 ul_suppgid2_h; struct lu_fid ul_fid1; struct lu_fid ul_fid2; - __u64 ul_time; - __u64 ul_padding_2; - __u64 ul_padding_3; - __u64 ul_padding_4; - __u64 ul_padding_5; + obd_time ul_time; + __u64 ul_padding_2; /* rr_atime */ + __u64 ul_padding_3; /* rr_ctime */ + __u64 ul_padding_4; /* rr_size */ + __u64 ul_padding_5; /* rr_blocks */ __u32 ul_bias; __u32 ul_mode; - __u32 ul_padding_6; - __u32 ul_padding_7; - __u32 ul_padding_8; - __u32 ul_padding_9; + __u32 ul_padding_6; /* rr_flags */ + __u32 ul_padding_7; /* rr_padding_2 */ + __u32 ul_padding_8; /* rr_padding_3 */ + __u32 ul_padding_9; /* rr_padding_4 */ }; -struct mds_rec_rename { - __u32 rn_opcode; - __u32 rn_fsuid; - __u32 rn_fsgid; - __u32 rn_cap; - __u32 rn_suppgid1; - __u32 rn_suppgid2; - struct ll_fid rn_fid1; - struct ll_fid rn_fid2; - __u64 rn_time; - __u32 rn_padding_1; /* also fix lustre_swab_mds_rec_rename */ - __u32 rn_padding_2; /* also fix lustre_swab_mds_rec_rename */ - __u32 rn_padding_3; /* also fix lustre_swab_mds_rec_rename */ - __u32 rn_padding_4; /* also fix lustre_swab_mds_rec_rename */ -}; - -extern void lustre_swab_mds_rec_rename (struct mds_rec_rename *rn); - +/* instance of mdt_reint_rec */ struct mdt_rec_rename { __u32 rn_opcode; __u32 rn_cap; @@ -1688,19 +1975,20 @@ struct mdt_rec_rename { __u32 rn_suppgid2_h; struct lu_fid rn_fid1; struct lu_fid rn_fid2; - __u64 rn_time; - __u64 rn_padding_1; - __u64 rn_padding_2; - __u64 rn_padding_3; - __u64 rn_padding_4; - __u32 rn_bias; /* some operation flags */ - __u32 
-        __u32           rn_mode;      /* cross-ref rename has mode */
-        __u32           rn_padding_5;
-        __u32           rn_padding_6;
-        __u32           rn_padding_7;
-        __u32           rn_padding_8;
-};
-
+        obd_time        rn_time;
+        __u64           rn_padding_1;   /* rr_atime */
+        __u64           rn_padding_2;   /* rr_ctime */
+        __u64           rn_padding_3;   /* rr_size */
+        __u64           rn_padding_4;   /* rr_blocks */
+        __u32           rn_bias;        /* some operation flags */
+        __u32           rn_mode;        /* cross-ref rename has mode */
+        __u32           rn_padding_5;   /* rr_flags */
+        __u32           rn_padding_6;   /* rr_padding_2 */
+        __u32           rn_padding_7;   /* rr_padding_3 */
+        __u32           rn_padding_8;   /* rr_padding_4 */
+};
+
+/* instance of mdt_reint_rec */
 struct mdt_rec_setxattr {
         __u32           sx_opcode;
         __u32           sx_cap;
@@ -1713,22 +2001,30 @@ struct mdt_rec_setxattr {
         __u32           sx_suppgid2;
         __u32           sx_suppgid2_h;
         struct lu_fid   sx_fid;
-        __u64           sx_padding_1; /* These three members are lu_fid size */
+        __u64           sx_padding_1;   /* These three are rr_fid2 */
         __u32           sx_padding_2;
         __u32           sx_padding_3;
         __u64           sx_valid;
-        __u64           sx_time;
-        __u64           sx_padding_5;
-        __u64           sx_padding_6;
-        __u64           sx_padding_7;
+        obd_time        sx_time;
+        __u64           sx_padding_5;   /* rr_ctime */
+        __u64           sx_padding_6;   /* rr_size */
+        __u64           sx_padding_7;   /* rr_blocks */
         __u32           sx_size;
         __u32           sx_flags;
-        __u32           sx_padding_8;
-        __u32           sx_padding_9;
-        __u32           sx_padding_10;
-        __u32           sx_padding_11;
+        __u32           sx_padding_8;   /* rr_flags */
+        __u32           sx_padding_9;   /* rr_padding_2 */
+        __u32           sx_padding_10;  /* rr_padding_3 */
+        __u32           sx_padding_11;  /* rr_padding_4 */
 };

+/*
+ * mdt_rec_reint is the template for all mdt_reint_xxx structures.
+ * Do NOT change the size of various members, otherwise the value
+ * will be broken in lustre_swab_mdt_rec_reint().
+ *
+ * If you add new members in other mdt_reint_xxx structures and need to use
+ * the rr_padding_x fields, then update lustre_swab_mdt_rec_reint() also.
+ */
 struct mdt_rec_reint {
         __u32           rr_opcode;
         __u32           rr_cap;
@@ -1742,14 +2038,14 @@ struct mdt_rec_reint {
         __u32           rr_suppgid2_h;
         struct lu_fid   rr_fid1;
         struct lu_fid   rr_fid2;
-        __u64           rr_mtime;
-        __u64           rr_atime;
-        __u64           rr_ctime;
+        obd_time        rr_mtime;
+        obd_time        rr_atime;
+        obd_time        rr_ctime;
         __u64           rr_size;
         __u64           rr_blocks;
         __u32           rr_bias;
         __u32           rr_mode;
-        __u32           rr_padding_1; /* also fix lustre_swab_mdt_rec_reint */
+        __u32           rr_flags;
         __u32           rr_padding_2; /* also fix lustre_swab_mdt_rec_reint */
         __u32           rr_padding_3; /* also fix lustre_swab_mdt_rec_reint */
         __u32           rr_padding_4; /* also fix lustre_swab_mdt_rec_reint */
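Each record above is marked "instance of mdt_reint_rec", and the rr_xxx comments on the padding fields name the template slot each one occupies; the single swabber lustre_swab_mdt_rec_reint() therefore only works if every variant keeps exactly the template's size. A build-time guard along the following lines would catch accidental layout drift; the macro and function names are hypothetical, not part of the header:

/* Editorial sketch, not part of the patch: compile-time layout guard for the
 * reint record overlays declared above.  sizeof() of a negative-sized array
 * type forces a compile error whenever a size diverges from the template. */
#define IDL_SIZE_EQ(a, b) ((void)sizeof(char[1 - 2 * !(sizeof(a) == sizeof(b))]))

static inline void mdt_rec_reint_size_checks(void)
{
        IDL_SIZE_EQ(struct mdt_rec_create,   struct mdt_rec_reint);
        IDL_SIZE_EQ(struct mdt_rec_link,     struct mdt_rec_reint);
        IDL_SIZE_EQ(struct mdt_rec_unlink,   struct mdt_rec_reint);
        IDL_SIZE_EQ(struct mdt_rec_rename,   struct mdt_rec_reint);
        IDL_SIZE_EQ(struct mdt_rec_setxattr, struct mdt_rec_reint);
}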
@@ -1760,13 +2056,49 @@ extern void lustre_swab_mdt_rec_reint(struct mdt_rec_reint *rr);
 struct lmv_desc {
         __u32 ld_tgt_count;                /* how many MDS's */
         __u32 ld_active_tgt_count;         /* how many active */
+        __u32 ld_default_stripe_count;     /* how many objects are used */
+        __u32 ld_pattern;                  /* default MEA_MAGIC_* */
+        __u64 ld_default_hash_size;
+        __u64 ld_padding_1;                /* also fix lustre_swab_lmv_desc */
+        __u32 ld_padding_2;                /* also fix lustre_swab_lmv_desc */
+        __u32 ld_qos_maxage;               /* in second */
+        __u32 ld_padding_3;                /* also fix lustre_swab_lmv_desc */
+        __u32 ld_padding_4;                /* also fix lustre_swab_lmv_desc */
         struct obd_uuid ld_uuid;
 };

 extern void lustre_swab_lmv_desc (struct lmv_desc *ld);

+/* TODO: lmv_stripe_md should contain mds capabilities for all slave fids */
+struct lmv_stripe_md {
+        __u32         mea_magic;
+        __u32         mea_count;
+        __u32         mea_master;
+        __u32         mea_padding;
+        char          mea_pool_name[LOV_MAXPOOLNAME];
+        struct lu_fid mea_ids[0];
+};
+
+extern void lustre_swab_lmv_stripe_md(struct lmv_stripe_md *mea);
+
+/* lmv structures */
+#define MEA_MAGIC_LAST_CHAR      0xb2221ca1
+#define MEA_MAGIC_ALL_CHARS      0xb222a11c
+#define MEA_MAGIC_HASH_SEGMENT   0xb222a11b
+
+#define MAX_HASH_SIZE_32         0x7fffffffUL
+#define MAX_HASH_SIZE            0x7fffffffffffffffULL
+#define MAX_HASH_HIGHEST_BIT     0x1000000000000000ULL
+
+struct md_fld {
+        seqno_t mf_seq;
+        mdsno_t mf_mds;
+};
+
+extern void lustre_swab_md_fld (struct md_fld *mf);
+
 enum fld_rpc_opc {
-        FLD_QUERY                = 600,
+        FLD_QUERY                = 900,
         FLD_LAST_OPC,
         FLD_FIRST_OPC            = FLD_QUERY
 };
@@ -1786,7 +2118,8 @@ enum seq_op {
  * LOV data structures
  */

-#define LOV_MIN_STRIPE_SIZE 65536   /* maximum PAGE_SIZE (ia64), power of 2 */
+#define LOV_MIN_STRIPE_BITS 16      /* maximum PAGE_SIZE (ia64), power of 2 */
+#define LOV_MIN_STRIPE_SIZE (1<<LOV_MIN_STRIPE_BITS)

+static inline void lustre_set_wire_obdo(struct obdo *wobdo, struct obdo *lobdo)
+{
+        memcpy(wobdo, lobdo, sizeof(*lobdo));
+        wobdo->o_flags &= ~OBD_FL_LOCAL_MASK;
+}
+
+static inline void lustre_get_wire_obdo(struct obdo *lobdo, struct obdo *wobdo)
+{
+        obd_flag local_flags = 0;
+
+        if (lobdo->o_valid & OBD_MD_FLFLAGS)
+                local_flags = lobdo->o_flags & OBD_FL_LOCAL_MASK;
+
+        LASSERT(!(wobdo->o_flags & OBD_FL_LOCAL_MASK));
+
+        memcpy(lobdo, wobdo, sizeof(*lobdo));
+        if (local_flags != 0) {
+                lobdo->o_valid |= OBD_MD_FLFLAGS;
+                lobdo->o_flags &= ~OBD_FL_LOCAL_MASK;
+                lobdo->o_flags |= local_flags;
+        }
+}
+
 extern void lustre_swab_obdo (struct obdo *o);

 /* request structure for OST's */
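A hedged sketch of how the two obdo helpers above pair up around an RPC; only the helper names and the OBD_FL_LOCAL_MASK behaviour come from the code above, the wrapper itself is illustrative:

/* Editorial sketch, not part of the patch.  Flags under OBD_FL_LOCAL_MASK
 * have meaning only on the local node: the sender strips them from the copy
 * that goes on the wire, and the receiver re-applies its own local bits when
 * the wire obdo is copied back in. */
static inline void obdo_wire_roundtrip_sketch(struct obdo *local,
                                              struct obdo *wire)
{
        lustre_set_wire_obdo(wire, local);  /* sender: wire copy carries no local bits */
        lustre_get_wire_obdo(local, wire);  /* receiver: local-only bits are preserved */
}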
@@ -2414,7 +2701,7 @@ extern void lustre_swab_lov_user_md_v1(struct lov_user_md_v1 *lum);
 extern void lustre_swab_lov_user_md_v3(struct lov_user_md_v3 *lum);
 extern void lustre_swab_lov_user_md_objects(struct lov_user_ost_data *lod,
                                             int stripe_count);
-extern void lustre_swab_lov_user_md_join(struct lov_user_md_join *lumj);
+extern void lustre_swab_lov_mds_md(struct lov_mds_md *lmm);

 /* llog_swab.c */
 extern void lustre_swab_llogd_body (struct llogd_body *d);
@@ -2426,6 +2713,13 @@ extern void lustre_swab_llog_rec(struct llog_rec_hdr *rec,
 struct lustre_cfg;
 extern void lustre_swab_lustre_cfg(struct lustre_cfg *lcfg);

+/* Functions for dumping PTLRPC fields */
+void dump_rniobuf(struct niobuf_remote *rnb);
+void dump_ioo(struct obd_ioobj *nb);
+void dump_obdo(struct obdo *oa);
+void dump_ost_body(struct ost_body *ob);
+void dump_rcs(__u32 *rc);
+
 /* this will be used when OBD_CONNECT_CHANGE_QS is set */
 struct qunit_data {
         /**
@@ -2464,20 +2758,17 @@ struct qunit_data {
 #define QDATA_CLR_CHANGE_QS(qdata) ((qdata)->qd_flags &= ~LQUOTA_FLAGS_CHG_QS)

 extern void lustre_swab_qdata(struct qunit_data *d);
-extern int quota_get_qdata(void*req, struct qunit_data *qdata,
-                           int is_req, int is_exp);
+extern struct qunit_data *quota_get_qdata(void *req, int is_req, int is_exp);
 extern int quota_copy_qdata(void *request, struct qunit_data *qdata,
                             int is_req, int is_exp);

 typedef enum {
-        QUOTA_DQACQ     = 901,
-        QUOTA_DQREL     = 902,
+        QUOTA_DQACQ     = 601,
+        QUOTA_DQREL     = 602,
         QUOTA_LAST_OPC
 } quota_cmd_t;
 #define QUOTA_FIRST_OPC QUOTA_DQACQ

-#define JOIN_FILE_ALIGN 4096
-
 #define QUOTA_REQUEST   1
 #define QUOTA_REPLY     0
 #define QUOTA_EXPORT    1
@@ -2488,7 +2779,7 @@ typedef enum {
 #define QUOTA_RET_NOQUOTA  1 /**< not support quota */
 #define QUOTA_RET_NOLIMIT  2 /**< quota limit isn't set */
 #define QUOTA_RET_ACQUOTA  4 /**< need to acquire extra quota */
-#define QUOTA_RET_INC_PENDING  8 /**< pending value is increased */
+

 /* security opcodes */
 typedef enum {
@@ -2571,7 +2862,7 @@ enum {
 #define CAPA_HMAC_ALG_MASK      0xff000000

 struct lustre_capa_key {
-        __u64   lk_mdsid;     /**< mds# */
+        __u64   lk_seq;       /**< mds# */
         __u32   lk_keyid;     /**< key# */
         __u32   lk_padding;
         __u8    lk_key[CAPA_HMAC_KEY_MAX_LEN];    /**< key */
@@ -2580,7 +2871,7 @@ struct lustre_capa_key {
 extern void lustre_swab_lustre_capa_key(struct lustre_capa_key *k);

 /** The link ea holds 1 \a link_ea_entry for each hardlink */
-#define LINK_EA_MAGIC 0x01EA0000
+#define LINK_EA_MAGIC 0x11EAF1DFUL
 struct link_ea_header {
         __u32 leh_magic;
         __u32 leh_reccount;
@@ -2595,11 +2886,22 @@ struct link_ea_header {
  */
 struct link_ea_entry {
         /** __u16 stored big-endian, unaligned */
-        char               lee_reclen[2];
-        struct lu_fid_pack lee_parent_fid; /**< variable length */
-        /** logically after lee_parent_fid; don't use directly */
+        unsigned char      lee_reclen[2];
+        unsigned char      lee_parent_fid[sizeof(struct lu_fid)];
         char               lee_name[0];
-};
+}__attribute__((packed));
+
+/** fid2path request/reply structure */
+struct getinfo_fid2path {
+        struct lu_fid   gf_fid;
+        __u64           gf_recno;
+        __u32           gf_linkno;
+        __u32           gf_pathlen;
+        char            gf_path[0];
+} __attribute__((packed));
+
+void lustre_swab_fid2path (struct getinfo_fid2path *gf);
+
 #endif
 /** @} lustreidl */
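As a closing illustration of the link EA layout declared above: lee_reclen is a __u16 stored big-endian in two unaligned bytes, so a consumer would reassemble it roughly as below (helper name hypothetical, not part of the header):

/* Editorial sketch, not part of the patch. */
static inline int link_ea_entry_reclen(const struct link_ea_entry *lee)
{
        /* big-endian: most significant byte first */
        return (lee->lee_reclen[0] << 8) | lee->lee_reclen[1];
}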