X-Git-Url: https://git.whamcloud.com/?a=blobdiff_plain;f=lustre%2Finclude%2Flustre%2Flustre_idl.h;h=e648bdaea9ef5a3959fb9f5a5a0f55ffdbb4de87;hb=b8785c155c81701a6e0d0ef397a347365e7de504;hp=4169b19441fe76d87576735c10e1a5b66c5dd711;hpb=b5624b418b645b1ac86e735413c9c7bbb93ef97c;p=fs%2Flustre-release.git diff --git a/lustre/include/lustre/lustre_idl.h b/lustre/include/lustre/lustre_idl.h index 4169b19..e648bda 100644 --- a/lustre/include/lustre/lustre_idl.h +++ b/lustre/include/lustre/lustre_idl.h @@ -42,10 +42,6 @@ * * Lustre wire protocol definitions. * - * We assume all nodes are either little-endian or big-endian, and we - * always send messages in the sender's native format. The receiver - * detects the message format by checking the 'magic' field of the message - * (see lustre_msg_swabbed() below). * ALL structs passing over the wire should be declared here. Structs * that are used in interfaces with userspace should go in lustre_user.h. * @@ -72,6 +68,11 @@ * in the code to ensure that new/old clients that see this larger struct * do not fail, otherwise you need to implement protocol compatibility). * + * We assume all nodes are either little-endian or big-endian, and we + * always send messages in the sender's native format. The receiver + * detects the message format by checking the 'magic' field of the message + * (see lustre_msg_swabbed() below). + * * Each wire type has corresponding 'lustre_swab_xxxtypexxx()' routines, * implemented either here, inline (trivial implementations) or in * ptlrpc/pack_generic.c. These 'swabbers' convert the type from "other" @@ -168,111 +169,135 @@ #define LUSTRE_LOG_VERSION 0x00050000 #define LUSTRE_MGS_VERSION 0x00060000 -typedef __u64 mdsno_t; +typedef __u32 mdsno_t; typedef __u64 seqno_t; -struct lu_range { - __u64 lr_start; - __u64 lr_end; - /** stub for compact fld work. */ - __u64 lr_padding; +/** + * Describes a range of sequence, lsr_start is included but lsr_end is + * not in the range. + * Same structure is used in fld module where lsr_mdt field holds mdt id + * of the home mdt. + */ + +struct lu_seq_range { + __u64 lsr_start; + __u64 lsr_end; + __u32 lsr_mdt; + __u32 lsr_padding; }; /** * returns width of given range \a r */ -static inline __u64 range_space(const struct lu_range *range) +static inline __u64 range_space(const struct lu_seq_range *range) { - return range->lr_end - range->lr_start; + return range->lsr_end - range->lsr_start; } /** * initialize range to zero */ -static inline void range_init(struct lu_range *range) + +static inline void range_init(struct lu_seq_range *range) { - range->lr_start = range->lr_end = 0; + range->lsr_start = range->lsr_end = range->lsr_mdt = 0; } /** * check if given seq id \a s is within given range \a r */ -static inline int range_within(struct lu_range *range, + +static inline int range_within(const struct lu_seq_range *range, __u64 s) { - return s >= range->lr_start && s < range->lr_end; + return s >= range->lsr_start && s < range->lsr_end; } /** * allocate \a w units of sequence from range \a from. 
*/ -static inline void range_alloc(struct lu_range *to, - struct lu_range *from, +static inline void range_alloc(struct lu_seq_range *to, + struct lu_seq_range *from, __u64 width) { - to->lr_start = from->lr_start; - to->lr_end = from->lr_start + width; - from->lr_start += width; + to->lsr_start = from->lsr_start; + to->lsr_end = from->lsr_start + width; + from->lsr_start += width; } -static inline int range_is_sane(const struct lu_range *range) +static inline int range_is_sane(const struct lu_seq_range *range) { - return (range->lr_end >= range->lr_start); + return (range->lsr_end >= range->lsr_start); } -static inline int range_is_zero(const struct lu_range *range) +static inline int range_is_zero(const struct lu_seq_range *range) { - return (range->lr_start == 0 && range->lr_end == 0); + return (range->lsr_start == 0 && range->lsr_end == 0); } -static inline int range_is_exhausted(const struct lu_range *range) +static inline int range_is_exhausted(const struct lu_seq_range *range) + { return range_space(range) == 0; } -#define DRANGE "[%#16.16"LPF64"x-%#16.16"LPF64"x]" +#define DRANGE "[%#16.16"LPF64"x-%#16.16"LPF64"x):%x" #define PRANGE(range) \ - (range)->lr_start, \ - (range)->lr_end + (range)->lsr_start, \ + (range)->lsr_end, \ + (range)->lsr_mdt /** \defgroup lu_fid lu_fid * @{ */ /** - * File identifier. + * Flags for lustre_mdt_attrs::lma_compat and lustre_mdt_attrs::lma_incompat. + */ +enum lma_compat { + LMAC_HSM = 0x00000001, + LMAC_SOM = 0x00000002, +}; + +/** + * Masks for all features that should be supported by a Lustre version to + * access a specific file. + * This information is stored in lustre_mdt_attrs::lma_incompat. * - * Fid is a cluster-wide unique identifier of a file or an object - * (stripe). Fids are never reused. Fids are transmitted across network (in - * the sender byte-ordering), and stored on disk in a packed form (struct - * lu_fid_pack) in a big-endian order. + * NOTE: No incompat feature should be added before bug #17670 is landed. + */ +#define LMA_INCOMPAT_SUPP 0x0 + +/** + * Following struct for MDT attributes, that will be kept inode's EA. + * Introduced in 2.0 release (please see b15993, for details) */ -struct lu_fid { +struct lustre_mdt_attrs { /** - * fid sequence. Sequence is a unit of migration: all files (objects) - * with fids from a given sequence are stored on the same - * server. - * - * Lustre should support 2 ^ 64 objects, thus even if one - * sequence has one object we will never reach this value. + * Bitfield for supported data in this structure. From enum lma_compat. + * lma_self_fid and lma_flags are always available. */ - __u64 f_seq; - /** fid number within sequence. */ - __u32 f_oid; - /** - * fid version, used to distinguish different versions (in the sense - * of snapshots, etc.) of the same file system object. Not currently - * used. + __u32 lma_compat; + /** + * Per-file incompat feature list. Lustre version should support all + * flags set in this field. The supported feature mask is available in + * LMA_INCOMPAT_SUPP. 
*/ - __u32 f_ver; + __u32 lma_incompat; + /** FID of this inode */ + struct lu_fid lma_self_fid; + /** SOM state, mdt/ost type, others */ + __u64 lma_flags; + /** total sectors in objects */ + __u64 lma_som_sectors; }; /** * fid constants */ enum { - /* initial fid id value */ + /** initial fid id value */ LUSTRE_FID_INIT_OID = 1UL }; @@ -345,13 +370,10 @@ static inline __u32 lu_igif_gen(const struct lu_fid *fid) return fid_oid(fid); } -#define DFID "[0x%16.16"LPF64"x/0x%8.8x:0x%8.8x]" - -#define PFID(fid) \ - fid_seq(fid), \ - fid_oid(fid), \ - fid_ver(fid) - +/* + * Fids are transmitted across network (in the sender byte-ordering), + * and stored on disk in big-endian order. + */ static inline void fid_cpu_to_le(struct lu_fid *dst, const struct lu_fid *src) { /* check that all fields are converted */ @@ -429,17 +451,17 @@ static inline int fid_is_zero(const struct lu_fid *fid) } extern void lustre_swab_lu_fid(struct lu_fid *fid); -extern void lustre_swab_lu_range(struct lu_range *range); +extern void lustre_swab_lu_seq_range(struct lu_seq_range *range); static inline int lu_fid_eq(const struct lu_fid *f0, const struct lu_fid *f1) { - /* Check that there is no alignment padding. */ - CLASSERT(sizeof *f0 == + /* Check that there is no alignment padding. */ + CLASSERT(sizeof *f0 == sizeof f0->f_seq + sizeof f0->f_oid + sizeof f0->f_ver); LASSERTF(fid_is_igif(f0) || fid_ver(f0) == 0, DFID, PFID(f0)); LASSERTF(fid_is_igif(f1) || fid_ver(f1) == 0, DFID, PFID(f1)); - return memcmp(f0, f1, sizeof *f0) == 0; + return memcmp(f0, f1, sizeof *f0) == 0; } #define __diff_normalize(val0, val1) \ @@ -463,18 +485,69 @@ static inline int lu_fid_cmp(const struct lu_fid *f0, /** \defgroup lu_dir lu_dir * @{ */ + +/** + * Enumeration of possible directory entry attributes. + * + * Attributes follow directory entry header in the order they appear in this + * enumeration. + */ +enum lu_dirent_attrs { + LUDA_FID = 0x0001, + LUDA_TYPE = 0x0002, +}; + /** * Layout of readdir pages, as transmitted on wire. */ struct lu_dirent { + /** valid if LUDA_FID is set. */ struct lu_fid lde_fid; + /** a unique entry identifier: a hash or an offset. */ __u64 lde_hash; + /** total record length, including all attributes. */ __u16 lde_reclen; + /** name length */ __u16 lde_namelen; - __u32 lde_pad0; + /** optional variable size attributes following this entry. + * taken from enum lu_dirent_attrs. + */ + __u32 lde_attrs; + /** name is followed by the attributes indicated in ->ldp_attrs, in + * their natural order. After the last attribute, padding bytes are + * added to make ->lde_reclen a multiple of 8. + */ char lde_name[0]; }; +/* + * Definitions of optional directory entry attributes formats. + * + * Individual attributes do not have their length encoded in a generic way. It + * is assumed that consumer of an attribute knows its format. This means that + * it is impossible to skip over an unknown attribute, except by skipping over all + * remaining attributes (by using ->lde_reclen), which is not too + * constraining, because new server versions will append new attributes at + * the end of an entry. + */ + +/** + * Fid directory attribute: a fid of an object referenced by the entry. This + * will be almost always requested by the client and supplied by the server. + * + * Aligned to 8 bytes. + */ +/* To have compatibility with 1.8, lets have fid in lu_dirent struct. */ + +/** + * File type. + * + * Aligned to 2 bytes. 
+ */ +struct luda_type { + __u16 lt_type; +}; + struct lu_dirpage { __u64 ldp_hash_start; __u64 ldp_hash_end; @@ -507,11 +580,25 @@ static inline struct lu_dirent *lu_dirent_next(struct lu_dirent *ent) return next; } +static inline int lu_dirent_calc_size(int namelen, __u16 attr) +{ + int size; + + if (attr & LUDA_TYPE) { + const unsigned align = sizeof(struct luda_type) - 1; + size = (sizeof(struct lu_dirent) + namelen + align) & ~align; + size += sizeof(struct luda_type); + } else + size = sizeof(struct lu_dirent) + namelen; + + return (size + 7) & ~7; +} + static inline int lu_dirent_size(struct lu_dirent *ent) { if (le16_to_cpu(ent->lde_reclen) == 0) { - return (sizeof(*ent) + - le16_to_cpu(ent->lde_namelen) + 7) & ~7; + return lu_dirent_calc_size(le16_to_cpu(ent->lde_namelen), + le32_to_cpu(ent->lde_attrs)); } return le16_to_cpu(ent->lde_reclen); } @@ -629,47 +716,52 @@ extern void lustre_swab_ptlrpc_body(struct ptlrpc_body *pb); * Flags for all connect opcodes (MDS_CONNECT, OST_CONNECT) */ -#define MSG_CONNECT_RECOVERING 0x1 -#define MSG_CONNECT_RECONNECT 0x2 -#define MSG_CONNECT_REPLAYABLE 0x4 +#define MSG_CONNECT_RECOVERING 0x00000001 +#define MSG_CONNECT_RECONNECT 0x00000002 +#define MSG_CONNECT_REPLAYABLE 0x00000004 //#define MSG_CONNECT_PEER 0x8 -#define MSG_CONNECT_LIBCLIENT 0x10 -#define MSG_CONNECT_INITIAL 0x20 -#define MSG_CONNECT_ASYNC 0x40 -#define MSG_CONNECT_NEXT_VER 0x80 /* use next version of lustre_msg */ -#define MSG_CONNECT_TRANSNO 0x100 /* report transno */ +#define MSG_CONNECT_LIBCLIENT 0x00000010 +#define MSG_CONNECT_INITIAL 0x00000020 +#define MSG_CONNECT_ASYNC 0x00000040 +#define MSG_CONNECT_NEXT_VER 0x00000080 /* use next version of lustre_msg */ +#define MSG_CONNECT_TRANSNO 0x00000100 /* report transno */ /* Connect flags */ -#define OBD_CONNECT_RDONLY 0x00000001ULL /* client allowed read-only access */ -#define OBD_CONNECT_INDEX 0x00000002ULL /* connect to specific LOV idx */ -#define OBD_CONNECT_GRANT 0x00000008ULL /* OSC acquires grant at connect */ -#define OBD_CONNECT_SRVLOCK 0x00000010ULL /* server takes locks for client */ -#define OBD_CONNECT_VERSION 0x00000020ULL /* Server supports versions in ocd */ -#define OBD_CONNECT_REQPORTAL 0x00000040ULL /* Separate portal for non-IO reqs */ -#define OBD_CONNECT_ACL 0x00000080ULL /* client uses access control lists */ -#define OBD_CONNECT_XATTR 0x00000100ULL /* client using extended attributes*/ -#define OBD_CONNECT_TRUNCLOCK 0x00000400ULL /* locks on server for punch b=9528 */ -#define OBD_CONNECT_IBITS 0x00001000ULL /* support for inodebits locks */ -#define OBD_CONNECT_JOIN 0x00002000ULL /* files can be concatenated */ -#define OBD_CONNECT_ATTRFID 0x00004000ULL /* Server supports GetAttr By Fid */ -#define OBD_CONNECT_NODEVOH 0x00008000ULL /* No open handle for special nodes */ -#define OBD_CONNECT_LCL_CLIENT 0x00010000ULL /* local 1.8 client */ -#define OBD_CONNECT_RMT_CLIENT 0x00020000ULL /* Remote 1.8 client */ -#define OBD_CONNECT_BRW_SIZE 0x00040000ULL /* Max bytes per rpc */ -#define OBD_CONNECT_QUOTA64 0x00080000ULL /* 64bit qunit_data.qd_count b=10707*/ -#define OBD_CONNECT_MDS_CAPA 0x00100000ULL /* MDS capability */ -#define OBD_CONNECT_OSS_CAPA 0x00200000ULL /* OSS capability */ -#define OBD_CONNECT_CANCELSET 0x00400000ULL /* Early batched cancels. */ -#define OBD_CONNECT_SOM 0x00800000ULL /* SOM feature */ -#define OBD_CONNECT_AT 0x01000000ULL /* client uses adaptive timeouts */ -#define OBD_CONNECT_LRU_RESIZE 0x02000000ULL /* Lru resize feature. 
*/ -#define OBD_CONNECT_MDS_MDS 0x04000000ULL /* MDS-MDS connection*/ -#define OBD_CONNECT_REAL 0x08000000ULL /* real connection */ -#define OBD_CONNECT_CHANGE_QS 0x10000000ULL /* shrink/enlarge qunit b=10600 */ -#define OBD_CONNECT_CKSUM 0x20000000ULL /* support several cksum algos */ -#define OBD_CONNECT_FID 0x40000000ULL /* FID is supported by server */ -#define OBD_CONNECT_LOV_V3 0x100000000ULL /* client supports lov v3 ea */ - +#define OBD_CONNECT_RDONLY 0x1ULL /*client has read-only access*/ +#define OBD_CONNECT_INDEX 0x2ULL /*connect specific LOV idx */ +#define OBD_CONNECT_MDS 0x4ULL /*connect from MDT to OST */ +#define OBD_CONNECT_GRANT 0x8ULL /*OSC gets grant at connect */ +#define OBD_CONNECT_SRVLOCK 0x10ULL /*server takes locks for cli */ +#define OBD_CONNECT_VERSION 0x20ULL /*Lustre versions in ocd */ +#define OBD_CONNECT_REQPORTAL 0x40ULL /*Separate non-IO req portal */ +#define OBD_CONNECT_ACL 0x80ULL /*access control lists */ +#define OBD_CONNECT_XATTR 0x100ULL /*client use extended attr */ +#define OBD_CONNECT_CROW 0x200ULL /*MDS+OST create obj on write*/ +#define OBD_CONNECT_TRUNCLOCK 0x400ULL /*locks on server for punch */ +#define OBD_CONNECT_TRANSNO 0x800ULL /*replay sends init transno */ +#define OBD_CONNECT_IBITS 0x1000ULL /*support for inodebits locks*/ +#define OBD_CONNECT_JOIN 0x2000ULL /*files can be concatenated */ +#define OBD_CONNECT_ATTRFID 0x4000ULL /*Server can GetAttr By Fid*/ +#define OBD_CONNECT_NODEVOH 0x8000ULL /*No open hndl on specl nodes*/ +#define OBD_CONNECT_RMT_CLIENT 0x10000ULL /*Remote client */ +#define OBD_CONNECT_RMT_CLIENT_FORCE 0x20000ULL /*Remote client by force */ +#define OBD_CONNECT_BRW_SIZE 0x40000ULL /*Max bytes per rpc */ +#define OBD_CONNECT_QUOTA64 0x80000ULL /*64bit qunit_data.qd_count */ +#define OBD_CONNECT_MDS_CAPA 0x100000ULL /*MDS capability */ +#define OBD_CONNECT_OSS_CAPA 0x200000ULL /*OSS capability */ +#define OBD_CONNECT_CANCELSET 0x400000ULL /*Early batched cancels. */ +#define OBD_CONNECT_SOM 0x800000ULL /*Size on MDS */ +#define OBD_CONNECT_AT 0x1000000ULL /*client uses AT */ +#define OBD_CONNECT_LRU_RESIZE 0x2000000ULL /*LRU resize feature. 
*/ +#define OBD_CONNECT_MDS_MDS 0x4000000ULL /*MDS-MDS connection */ +#define OBD_CONNECT_REAL 0x8000000ULL /*real connection */ +#define OBD_CONNECT_CHANGE_QS 0x10000000ULL /*shrink/enlarge qunit */ +#define OBD_CONNECT_CKSUM 0x20000000ULL /*support several cksum algos*/ +#define OBD_CONNECT_FID 0x40000000ULL /*FID is supported by server */ +#define OBD_CONNECT_VBR 0x80000000ULL /*version based recovery */ +#define OBD_CONNECT_LOV_V3 0x100000000ULL /*client supports LOV v3 EA */ +#define OBD_CONNECT_GRANT_SHRINK 0x200000000ULL /* support grant shrink */ +#define OBD_CONNECT_SKIP_ORPHAN 0x400000000ULL /* don't reuse orphan objids */ /* also update obd_connect_names[] for lprocfs_rd_connect_flags() * and lustre/utils/wirecheck.c */ @@ -683,25 +775,27 @@ extern void lustre_swab_ptlrpc_body(struct ptlrpc_body *pb); OBD_CONNECT_ACL | OBD_CONNECT_XATTR | \ OBD_CONNECT_IBITS | OBD_CONNECT_JOIN | \ OBD_CONNECT_NODEVOH |/* OBD_CONNECT_ATTRFID |*/\ - OBD_CONNECT_LCL_CLIENT | \ + OBD_CONNECT_CANCELSET | OBD_CONNECT_AT | \ OBD_CONNECT_RMT_CLIENT | \ + OBD_CONNECT_RMT_CLIENT_FORCE | \ OBD_CONNECT_MDS_CAPA | OBD_CONNECT_OSS_CAPA | \ - OBD_CONNECT_MDS_MDS | OBD_CONNECT_CANCELSET | \ - OBD_CONNECT_FID | \ - LRU_RESIZE_CONNECT_FLAG | OBD_CONNECT_AT | \ - OBD_CONNECT_LOV_V3) + OBD_CONNECT_MDS_MDS | OBD_CONNECT_FID | \ + LRU_RESIZE_CONNECT_FLAG | OBD_CONNECT_VBR | \ + OBD_CONNECT_LOV_V3 | OBD_CONNECT_SOM) #define OST_CONNECT_SUPPORTED (OBD_CONNECT_SRVLOCK | OBD_CONNECT_GRANT | \ OBD_CONNECT_REQPORTAL | OBD_CONNECT_VERSION | \ OBD_CONNECT_TRUNCLOCK | OBD_CONNECT_INDEX | \ OBD_CONNECT_BRW_SIZE | OBD_CONNECT_QUOTA64 | \ - OBD_CONNECT_OSS_CAPA | OBD_CONNECT_CANCELSET | \ - OBD_CONNECT_CKSUM | LRU_RESIZE_CONNECT_FLAG | \ - OBD_CONNECT_AT) + OBD_CONNECT_CANCELSET | OBD_CONNECT_AT | \ + LRU_RESIZE_CONNECT_FLAG | OBD_CONNECT_CKSUM | \ + OBD_CONNECT_CHANGE_QS | \ + OBD_CONNECT_OSS_CAPA | OBD_CONNECT_RMT_CLIENT | \ + OBD_CONNECT_RMT_CLIENT_FORCE | OBD_CONNECT_VBR | \ + OBD_CONNECT_MDS | OBD_CONNECT_SKIP_ORPHAN | \ + OBD_CONNECT_GRANT_SHRINK) #define ECHO_CONNECT_SUPPORTED (0) #define MGS_CONNECT_SUPPORTED (OBD_CONNECT_VERSION | OBD_CONNECT_AT) -#define MAX_QUOTA_COUNT32 (0xffffffffULL) - #define OBD_OCD_VERSION(major,minor,patch,fix) (((major)<<24) + ((minor)<<16) +\ ((patch)<<8) + (fix)) #define OBD_OCD_VERSION_MAJOR(version) ((int)((version)>>24)&255) @@ -766,6 +860,7 @@ typedef enum { OST_SET_INFO = 17, OST_QUOTACHECK = 18, OST_QUOTACTL = 19, + OST_QUOTA_ADJUST_QUNIT = 20, OST_LAST_OPC } ost_cmd_t; #define OST_FIRST_OPC OST_REPLY @@ -776,37 +871,40 @@ typedef __u64 obd_time; typedef __u64 obd_size; typedef __u64 obd_off; typedef __u64 obd_blocks; +typedef __u64 obd_valid; typedef __u32 obd_blksize; typedef __u32 obd_mode; typedef __u32 obd_uid; typedef __u32 obd_gid; typedef __u32 obd_flag; -typedef __u64 obd_valid; typedef __u32 obd_count; -#define OBD_FL_INLINEDATA (0x00000001) -#define OBD_FL_OBDMDEXISTS (0x00000002) -#define OBD_FL_DELORPHAN (0x00000004) /* if set in o_flags delete orphans */ -#define OBD_FL_NORPC (0x00000008) /* set in o_flags do in OSC not OST */ -#define OBD_FL_IDONLY (0x00000010) /* set in o_flags only adjust obj id*/ -#define OBD_FL_RECREATE_OBJS (0x00000020) /* recreate missing obj */ -#define OBD_FL_DEBUG_CHECK (0x00000040) /* echo client/server debug check */ -#define OBD_FL_NO_USRQUOTA (0x00000100) /* the object's owner is over quota */ -#define OBD_FL_NO_GRPQUOTA (0x00000200) /* the object's group is over quota */ - -/** - * Set this to delegate DLM locking during obd_punch() to the 
OSTs. Only OSTs - * that declared OBD_CONNECT_TRUNCLOCK in their connect flags support this - * functionality. - */ -#define OBD_FL_TRUNCLOCK (0x00000800) - -/* - * Checksum types - */ -#define OBD_FL_CKSUM_CRC32 (0x00001000) -#define OBD_FL_CKSUM_ADLER (0x00002000) -#define OBD_FL_CKSUM_ALL (OBD_FL_CKSUM_CRC32 | OBD_FL_CKSUM_ADLER) +enum obdo_flags { + OBD_FL_INLINEDATA = 0x00000001, + OBD_FL_OBDMDEXISTS = 0x00000002, + OBD_FL_DELORPHAN = 0x00000004, /* if set in o_flags delete orphans */ + OBD_FL_NORPC = 0x00000008, /* set in o_flags do in OSC not OST */ + OBD_FL_IDONLY = 0x00000010, /* set in o_flags only adjust obj id*/ + OBD_FL_RECREATE_OBJS= 0x00000020, /* recreate missing obj */ + OBD_FL_DEBUG_CHECK = 0x00000040, /* echo client/server debug check */ + OBD_FL_NO_USRQUOTA = 0x00000100, /* the object's owner is over quota */ + OBD_FL_NO_GRPQUOTA = 0x00000200, /* the object's group is over quota */ + OBD_FL_CREATE_CROW = 0x00000400, /* object should be create on write */ + OBD_FL_TRUNCLOCK = 0x00000800, /* delegate DLM locking during punch*/ + OBD_FL_CKSUM_CRC32 = 0x00001000, /* CRC32 checksum type */ + OBD_FL_CKSUM_ADLER = 0x00002000, /* ADLER checksum type */ + OBD_FL_CKSUM_RSVD1 = 0x00004000, /* for future cksum types */ + OBD_FL_CKSUM_RSVD2 = 0x00008000, /* for future cksum types */ + OBD_FL_CKSUM_RSVD3 = 0x00010000, /* for future cksum types */ + OBD_FL_SHRINK_GRANT = 0x00020000, /* object shrink the grant */ + + OBD_FL_CKSUM_ALL = OBD_FL_CKSUM_CRC32 | OBD_FL_CKSUM_ADLER, + + /* mask for local-only flag, which won't be sent over network */ + OBD_FL_LOCAL_MASK = 0xF0000000, + /* temporary OBDO used by osc_brw_async (see bug 18364) */ + OBD_FL_TEMPORARY = 0x10000000, +}; #define LOV_MAGIC_V1 0x0BD10BD0 #define LOV_MAGIC LOV_MAGIC_V1 @@ -847,7 +945,16 @@ struct lov_mds_md_v1 { /* LOV EA mds/wire data (little-endian) */ #define XATTR_NAME_ACL_ACCESS "system.posix_acl_access" #define XATTR_NAME_ACL_DEFAULT "system.posix_acl_default" +#define XATTR_USER_PREFIX "user." +#define XATTR_TRUSTED_PREFIX "trusted." +#define XATTR_SECURITY_PREFIX "security." +#define XATTR_LUSTRE_PREFIX "lustre." 
+ #define XATTR_NAME_LOV "trusted.lov" +#define XATTR_NAME_LMA "trusted.lma" +#define XATTR_NAME_LMV "trusted.lmv" +#define XATTR_NAME_LINK "trusted.link" + struct lov_mds_md_v3 { /* LOV EA mds/wire data (little-endian) */ __u32 lmm_magic; /* magic number = LOV_MAGIC_V3 */ @@ -882,7 +989,7 @@ struct lov_mds_md_v3 { /* LOV EA mds/wire data (little-endian) */ #define OBD_MD_FLHANDLE (0x00080000ULL) /* file/lock handle */ #define OBD_MD_FLCKSUM (0x00100000ULL) /* bulk data checksum */ #define OBD_MD_FLQOS (0x00200000ULL) /* quality of service stats */ -#define OBD_MD_FLOSCOPQ (0x00400000ULL) /* osc opaque data */ +/*#define OBD_MD_FLOSCOPQ (0x00400000ULL) osc opaque data, never used */ #define OBD_MD_FLCOOKIE (0x00800000ULL) /* log cancellation cookie */ #define OBD_MD_FLGROUP (0x01000000ULL) /* group */ #define OBD_MD_FLFID (0x02000000ULL) /* ->ost write inline fid */ @@ -908,6 +1015,8 @@ struct lov_mds_md_v3 { /* LOV EA mds/wire data (little-endian) */ #define OBD_MD_FLCKSPLIT (0x0000080000000000ULL) /* Check split on server */ #define OBD_MD_FLCROSSREF (0x0000100000000000ULL) /* Cross-ref case */ +#define OBD_FL_TRUNC (0x0000200000000000ULL) /* for filter_truncate */ + #define OBD_MD_FLRMTLSETFACL (0x0001000000000000ULL) /* lfs lsetfacl case */ #define OBD_MD_FLRMTLGETFACL (0x0002000000000000ULL) /* lfs lgetfacl case */ #define OBD_MD_FLRMTRSETFACL (0x0004000000000000ULL) /* lfs rsetfacl case */ @@ -922,6 +1031,14 @@ struct lov_mds_md_v3 { /* LOV EA mds/wire data (little-endian) */ /* don't forget obdo_fid which is way down at the bottom so it can * come after the definition of llog_cookie */ +enum obd_statfs_state { + OS_STATE_DEGRADED = 0x00000001, /**< RAID degraded/rebuilding */ + OS_STATE_READONLY = 0x00000002, /**< filesystem is read-only */ + OS_STATE_RDONLY_1 = 0x00000004, /**< obsolete 1.6, was EROFS=30 */ + OS_STATE_RDONLY_2 = 0x00000008, /**< obsolete 1.6, was EROFS=30 */ + OS_STATE_RDONLY_3 = 0x00000010, /**< obsolete 1.6, was EROFS=30 */ +}; + struct obd_statfs { __u64 os_type; __u64 os_blocks; @@ -933,7 +1050,7 @@ struct obd_statfs { __u32 os_bsize; __u32 os_namelen; __u64 os_maxbytes; - __u32 os_state; /* positive error code on server */ + __u32 os_state; /**< obd_statfs_state OS_STATE_* flag */ __u32 os_spare1; __u32 os_spare2; __u32 os_spare3; @@ -948,16 +1065,23 @@ struct obd_statfs { extern void lustre_swab_obd_statfs (struct obd_statfs *os); #define OBD_STATFS_NODELAY 0x0001 /* requests should be send without delay * and resends for avoid deadlocks */ - #define OBD_STATFS_FROM_CACHE 0x0002 /* the statfs callback should not update * obd_osfs_age */ +#define OBD_STATFS_PTLRPCD 0x0004 /* requests will be sent via ptlrpcd + * instead of a specific set. This + * means that we cannot rely on the set + * interpret routine to be called. + * lov_statfs_fini() must thus be called + * by the request interpret routine */ /* ost_body.data values for OST_BRW */ #define OBD_BRW_READ 0x01 #define OBD_BRW_WRITE 0x02 #define OBD_BRW_RWMASK (OBD_BRW_READ | OBD_BRW_WRITE) -#define OBD_BRW_SYNC 0x08 +#define OBD_BRW_SYNC 0x08 /* this page is a part of synchronous + * transfer and is not accounted in + * the grant. 
*/ #define OBD_BRW_CHECK 0x10 #define OBD_BRW_FROM_GRANT 0x20 /* the osc manages this under llite */ #define OBD_BRW_GRANTED 0x40 /* the ost manages this */ @@ -1036,6 +1160,7 @@ typedef enum { MDS_SETXATTR = 50, /* obsolete, now it's MDS_REINT op */ MDS_WRITEPAGE = 51, MDS_IS_SUBDIR = 52, + MDS_GET_INFO = 53, MDS_LAST_OPC } mds_cmd_t; @@ -1162,9 +1287,19 @@ static inline int ll_inode_to_ext_flags(int oflags, int iflags) } #endif -struct mdt_body { - struct lu_fid fid1; - struct lu_fid fid2; +/* + * while mds_body is to interact with 1.6, mdt_body is to interact with 2.0. + * both of them should have the same fields layout, because at client side + * one could be dynamically cast to the other. + * + * mdt_body has large size than mds_body, with unused padding (48 bytes) + * at the end. client always use size of mdt_body to prepare request/reply + * buffers, and actual data could be interepeted as mdt_body or mds_body + * accordingly. + */ +struct mds_body { + struct ll_fid fid1; + struct ll_fid fid2; struct lustre_handle handle; __u64 valid; __u64 size; /* Offset, in the case of MDS_READPAGE */ @@ -1172,8 +1307,8 @@ struct mdt_body { __u64 atime; __u64 ctime; __u64 blocks; /* XID, in the case of MDS_READPAGE */ - __u64 ioepoch; - __u64 ino; /* for 1.6 compatibility */ + __u64 io_epoch; + __u64 ino; __u32 fsuid; __u32 fsgid; __u32 capability; @@ -1183,18 +1318,20 @@ struct mdt_body { __u32 flags; /* from vfs for pin/unpin, MDS_BFLAG for close */ __u32 rdev; __u32 nlink; /* #bytes to read in the case of MDS_READPAGE */ - __u32 generation; /* for 1.6 compatibility */ + __u32 generation; __u32 suppgid; __u32 eadatasize; __u32 aclsize; __u32 max_mdsize; __u32 max_cookiesize; - __u32 padding_4; /* also fix lustre_swab_mdt_body */ + __u32 padding_4; /* also fix lustre_swab_mds_body */ }; -struct mds_body { - struct ll_fid fid1; - struct ll_fid fid2; +extern void lustre_swab_mds_body (struct mds_body *b); + +struct mdt_body { + struct lu_fid fid1; + struct lu_fid fid2; struct lustre_handle handle; __u64 valid; __u64 size; /* Offset, in the case of MDS_READPAGE */ @@ -1202,8 +1339,8 @@ struct mds_body { __u64 atime; __u64 ctime; __u64 blocks; /* XID, in the case of MDS_READPAGE */ - __u64 io_epoch; - __u64 ino; + __u64 ioepoch; + __u64 ino; /* for 1.6 compatibility */ __u32 fsuid; __u32 fsgid; __u32 capability; @@ -1213,16 +1350,21 @@ struct mds_body { __u32 flags; /* from vfs for pin/unpin, MDS_BFLAG for close */ __u32 rdev; __u32 nlink; /* #bytes to read in the case of MDS_READPAGE */ - __u32 generation; + __u32 generation; /* for 1.6 compatibility */ __u32 suppgid; __u32 eadatasize; __u32 aclsize; __u32 max_mdsize; __u32 max_cookiesize; - __u32 padding_4; /* also fix lustre_swab_mds_body */ -}; + __u32 padding_4; /* also fix lustre_swab_mdt_body */ + __u64 padding_5; + __u64 padding_6; + __u64 padding_7; + __u64 padding_8; + __u64 padding_9; + __u64 padding_10; +}; /* 216 */ -extern void lustre_swab_mds_body (struct mds_body *b); extern void lustre_swab_mdt_body (struct mdt_body *b); struct mdt_epoch { @@ -1238,13 +1380,26 @@ extern void lustre_swab_mdt_epoch (struct mdt_epoch *b); #define Q_INITQUOTA 0x800101 /* init slave limits */ #define Q_GETOINFO 0x800102 /* get obd quota info */ #define Q_GETOQUOTA 0x800103 /* get obd quotas */ +#define Q_FINVALIDATE 0x800104 /* invalidate operational quotas */ + +#define Q_TYPEMATCH(id, type) \ + ((id) == (type) || (id) == UGQUOTA) -#define Q_TYPESET(oqc, type) \ - ((oqc)->qc_type == type || (oqc)->qc_type == UGQUOTA) +#define Q_TYPESET(oqc, type) 
Q_TYPEMATCH((oqc)->qc_type, type) #define Q_GETOCMD(oqc) \ ((oqc)->qc_cmd == Q_GETOINFO || (oqc)->qc_cmd == Q_GETOQUOTA) +#define QCTL_COPY(out, in) \ +do { \ + Q_COPY(out, in, qc_cmd); \ + Q_COPY(out, in, qc_type); \ + Q_COPY(out, in, qc_id); \ + Q_COPY(out, in, qc_stat); \ + Q_COPY(out, in, qc_dqinfo); \ + Q_COPY(out, in, qc_dqblk); \ +} while (0) + struct obd_quotactl { __u32 qc_cmd; __u32 qc_type; @@ -1256,6 +1411,34 @@ struct obd_quotactl { extern void lustre_swab_obd_quotactl(struct obd_quotactl *q); +struct quota_adjust_qunit { + __u32 qaq_flags; + __u32 qaq_id; + __u64 qaq_bunit_sz; + __u64 qaq_iunit_sz; + __u64 padding1; +}; +extern void lustre_swab_quota_adjust_qunit(struct quota_adjust_qunit *q); + +/* flags in qunit_data and quota_adjust_qunit will use macroes below */ +#define LQUOTA_FLAGS_GRP 1UL /* 0 is user, 1 is group */ +#define LQUOTA_FLAGS_BLK 2UL /* 0 is inode, 1 is block */ +#define LQUOTA_FLAGS_ADJBLK 4UL /* adjust the block qunit size */ +#define LQUOTA_FLAGS_ADJINO 8UL /* adjust the inode qunit size */ +#define LQUOTA_FLAGS_CHG_QS 16UL /* indicate whether it has capability of + * OBD_CONNECT_CHANGE_QS */ + +/* the status of lqsk_flags in struct lustre_qunit_size_key */ +#define LQUOTA_QUNIT_FLAGS (LQUOTA_FLAGS_GRP | LQUOTA_FLAGS_BLK) + +#define QAQ_IS_GRP(qaq) ((qaq)->qaq_flags & LQUOTA_FLAGS_GRP) +#define QAQ_IS_ADJBLK(qaq) ((qaq)->qaq_flags & LQUOTA_FLAGS_ADJBLK) +#define QAQ_IS_ADJINO(qaq) ((qaq)->qaq_flags & LQUOTA_FLAGS_ADJINO) + +#define QAQ_SET_GRP(qaq) ((qaq)->qaq_flags |= LQUOTA_FLAGS_GRP) +#define QAQ_SET_ADJBLK(qaq) ((qaq)->qaq_flags |= LQUOTA_FLAGS_ADJBLK) +#define QAQ_SET_ADJINO(qaq) ((qaq)->qaq_flags |= LQUOTA_FLAGS_ADJINO) + /* inode access permission for remote user, the inode info are omitted, * for client knows them. 
*/ struct mds_remote_perm { @@ -1271,7 +1454,8 @@ enum { CFS_SETUID_PERM = 0x01, CFS_SETGID_PERM = 0x02, CFS_SETGRP_PERM = 0x04, - CFS_RMTACL_PERM = 0x08 + CFS_RMTACL_PERM = 0x08, + CFS_RMTOWN_PERM = 0x10 }; extern void lustre_swab_mds_remote_perm(struct mds_remote_perm *p); @@ -1309,11 +1493,15 @@ extern void lustre_swab_mds_rec_setattr (struct mds_rec_setattr *sa); struct mdt_rec_setattr { __u32 sa_opcode; + __u32 sa_cap; __u32 sa_fsuid; + __u32 sa_fsuid_h; __u32 sa_fsgid; - __u32 sa_cap; + __u32 sa_fsgid_h; __u32 sa_suppgid; + __u32 sa_suppgid_h; __u32 sa_padding_1; + __u32 sa_padding_1_h; struct lu_fid sa_fid; __u64 sa_valid; __u32 sa_uid; @@ -1411,23 +1599,10 @@ enum { MDS_CROSS_REF = 1 << 1, MDS_VTX_BYPASS = 1 << 2, MDS_PERM_BYPASS = 1 << 3, - MDS_SOM = 1 << 4 -}; - -struct mds_rec_join { - struct ll_fid jr_fid; - __u64 jr_headsize; -}; - -extern void lustre_swab_mds_rec_join (struct mds_rec_join *jr); - -struct mdt_rec_join { - struct lu_fid jr_fid; - __u64 jr_headsize; + MDS_SOM = 1 << 4, + MDS_QUOTA_IGNORE = 1 << 5 }; -extern void lustre_swab_mdt_rec_join (struct mdt_rec_join *jr); - struct mds_rec_create { __u32 cr_opcode; __u32 cr_fsuid; @@ -1451,14 +1626,18 @@ extern void lustre_swab_mds_rec_create (struct mds_rec_create *cr); struct mdt_rec_create { __u32 cr_opcode; + __u32 cr_cap; __u32 cr_fsuid; + __u32 cr_fsuid_h; __u32 cr_fsgid; - __u32 cr_cap; + __u32 cr_fsgid_h; __u32 cr_suppgid1; + __u32 cr_suppgid1_h; __u32 cr_suppgid2; + __u32 cr_suppgid2_h; struct lu_fid cr_fid1; struct lu_fid cr_fid2; - struct lustre_handle cr_old_handle; /* u64 handle in case of open replay */ + struct lustre_handle cr_old_handle; /* handle in case of open replay */ __u64 cr_time; __u64 cr_rdev; __u64 cr_ioepoch; @@ -1473,6 +1652,20 @@ struct mdt_rec_create { extern void lustre_swab_mdt_rec_create (struct mdt_rec_create *cr); +struct mds_rec_join { + struct ll_fid jr_fid; + __u64 jr_headsize; +}; + +extern void lustre_swab_mds_rec_join (struct mds_rec_join *jr); + +struct mdt_rec_join { + struct lu_fid jr_fid; + __u64 jr_headsize; +}; + +extern void lustre_swab_mdt_rec_join (struct mdt_rec_join *jr); + struct mds_rec_link { __u32 lk_opcode; __u32 lk_fsuid; @@ -1493,11 +1686,15 @@ extern void lustre_swab_mds_rec_link (struct mds_rec_link *lk); struct mdt_rec_link { __u32 lk_opcode; + __u32 lk_cap; __u32 lk_fsuid; + __u32 lk_fsuid_h; __u32 lk_fsgid; - __u32 lk_cap; + __u32 lk_fsgid_h; __u32 lk_suppgid1; + __u32 lk_suppgid1_h; __u32 lk_suppgid2; + __u32 lk_suppgid2_h; struct lu_fid lk_fid1; struct lu_fid lk_fid2; __u64 lk_time; @@ -1533,11 +1730,15 @@ extern void lustre_swab_mds_rec_unlink (struct mds_rec_unlink *ul); struct mdt_rec_unlink { __u32 ul_opcode; + __u32 ul_cap; __u32 ul_fsuid; + __u32 ul_fsuid_h; __u32 ul_fsgid; - __u32 ul_cap; + __u32 ul_fsgid_h; __u32 ul_suppgid1; + __u32 ul_suppgid1_h; __u32 ul_suppgid2; + __u32 ul_suppgid2_h; struct lu_fid ul_fid1; struct lu_fid ul_fid2; __u64 ul_time; @@ -1573,11 +1774,15 @@ extern void lustre_swab_mds_rec_rename (struct mds_rec_rename *rn); struct mdt_rec_rename { __u32 rn_opcode; + __u32 rn_cap; __u32 rn_fsuid; + __u32 rn_fsuid_h; __u32 rn_fsgid; - __u32 rn_cap; + __u32 rn_fsgid_h; __u32 rn_suppgid1; + __u32 rn_suppgid1_h; __u32 rn_suppgid2; + __u32 rn_suppgid2_h; struct lu_fid rn_fid1; struct lu_fid rn_fid2; __u64 rn_time; @@ -1595,11 +1800,15 @@ struct mdt_rec_rename { struct mdt_rec_setxattr { __u32 sx_opcode; + __u32 sx_cap; __u32 sx_fsuid; + __u32 sx_fsuid_h; __u32 sx_fsgid; - __u32 sx_cap; + __u32 sx_fsgid_h; __u32 sx_suppgid1; + __u32 
sx_suppgid1_h; __u32 sx_suppgid2; + __u32 sx_suppgid2_h; struct lu_fid sx_fid; __u64 sx_padding_1; /* These three members are lu_fid size */ __u32 sx_padding_2; @@ -1619,11 +1828,15 @@ struct mdt_rec_setxattr { struct mdt_rec_reint { __u32 rr_opcode; + __u32 rr_cap; __u32 rr_fsuid; + __u32 rr_fsuid_h; __u32 rr_fsgid; - __u32 rr_cap; + __u32 rr_fsgid_h; __u32 rr_suppgid1; + __u32 rr_suppgid1_h; __u32 rr_suppgid2; + __u32 rr_suppgid2_h; struct lu_fid rr_fid1; struct lu_fid rr_fid2; __u64 rr_mtime; @@ -1644,11 +1857,40 @@ extern void lustre_swab_mdt_rec_reint(struct mdt_rec_reint *rr); struct lmv_desc { __u32 ld_tgt_count; /* how many MDS's */ __u32 ld_active_tgt_count; /* how many active */ + __u32 ld_default_stripe_count; /* how many objects are used */ + __u32 ld_pattern; /* default MEA_MAGIC_* */ + __u64 ld_default_hash_size; + __u64 ld_padding_1; /* also fix lustre_swab_lmv_desc */ + __u32 ld_padding_2; /* also fix lustre_swab_lmv_desc */ + __u32 ld_qos_maxage; /* in second */ + __u32 ld_padding_3; /* also fix lustre_swab_lmv_desc */ + __u32 ld_padding_4; /* also fix lustre_swab_lmv_desc */ struct obd_uuid ld_uuid; }; extern void lustre_swab_lmv_desc (struct lmv_desc *ld); +/* TODO: lmv_stripe_md should contain mds capabilities for all slave fids */ +struct lmv_stripe_md { + __u32 mea_magic; + __u32 mea_count; + __u32 mea_master; + __u32 mea_padding; + char mea_pool_name[LOV_MAXPOOLNAME]; + struct lu_fid mea_ids[0]; +}; + +extern void lustre_swab_lmv_stripe_md(struct lmv_stripe_md *mea); + +/* lmv structures */ +#define MEA_MAGIC_LAST_CHAR 0xb2221ca1 +#define MEA_MAGIC_ALL_CHARS 0xb222a11c +#define MEA_MAGIC_HASH_SEGMENT 0xb222a11b + +#define MAX_HASH_SIZE_32 0x7fffffffUL +#define MAX_HASH_SIZE 0x7fffffffffffffffULL +#define MAX_HASH_HIGHEST_BIT 0x1000000000000000ULL + struct md_fld { seqno_t mf_seq; mdsno_t mf_mds; @@ -1657,7 +1899,7 @@ struct md_fld { extern void lustre_swab_md_fld (struct md_fld *mf); enum fld_rpc_opc { - FLD_QUERY = 600, + FLD_QUERY = 900, FLD_LAST_OPC, FLD_FIRST_OPC = FLD_QUERY }; @@ -1677,7 +1919,8 @@ enum seq_op { * LOV data structures */ -#define LOV_MIN_STRIPE_SIZE 65536 /* maximum PAGE_SIZE (ia64), power of 2 */ +#define LOV_MIN_STRIPE_BITS 16 /* maximum PAGE_SIZE (ia64), power of 2 */ +#define LOV_MIN_STRIPE_SIZE (1<o_flags &= ~OBD_FL_LOCAL_MASK; +} + +static inline void lustre_get_wire_obdo(struct obdo *lobdo, struct obdo *wobdo) +{ + obd_flag local_flags = lobdo->o_flags & OBD_FL_LOCAL_MASK; + + LASSERT(!(wobdo->o_flags & OBD_FL_LOCAL_MASK)); + + memcpy(lobdo, wobdo, sizeof(*lobdo)); + lobdo->o_flags &= ~OBD_FL_LOCAL_MASK; + lobdo->o_flags |= local_flags; +} + extern void lustre_swab_obdo (struct obdo *o); /* request structure for OST's */ - struct ost_body { struct obdo oa; }; @@ -2235,6 +2590,7 @@ extern void lustre_swab_lov_user_md_v3(struct lov_user_md_v3 *lum); extern void lustre_swab_lov_user_md_objects(struct lov_user_ost_data *lod, int stripe_count); extern void lustre_swab_lov_user_md_join(struct lov_user_md_join *lumj); +extern void lustre_swab_lov_mds_md(struct lov_mds_md *lmm); /* llog_swab.c */ extern void lustre_swab_llogd_body (struct llogd_body *d); @@ -2246,37 +2602,70 @@ extern void lustre_swab_llog_rec(struct llog_rec_hdr *rec, struct lustre_cfg; extern void lustre_swab_lustre_cfg(struct lustre_cfg *lcfg); -/* quota. fixed by tianzy for bug10707 */ -#define QUOTA_IS_GRP 0X1UL /* 0 is user, 1 is group. Used by qd_flags*/ -#define QUOTA_IS_BLOCK 0x2UL /* 0 is inode, 1 is block. 
Used by qd_flags*/ - +/* this will be used when OBD_CONNECT_CHANGE_QS is set */ struct qunit_data { - __u32 qd_id; /* ID appiles to (uid, gid) */ - __u32 qd_flags; /* Quota type (USRQUOTA, GRPQUOTA) occupy one bit; - * Block quota or file quota occupy one bit */ - __u64 qd_count; /* acquire/release count (bytes for block quota) */ + /** + * ID appiles to (uid, gid) + */ + __u32 qd_id; + /** + * LQUOTA_FLAGS_* affect the responding bits + */ + __u32 qd_flags; + /** + * acquire/release count (bytes for block quota) + */ + __u64 qd_count; + /** + * when a master returns the reply to a slave, it will + * contain the current corresponding qunit size + */ + __u64 qd_qunit; + __u64 padding; }; -struct qunit_data_old { - __u32 qd_id; /* ID appiles to (uid, gid) */ - __u32 qd_type; /* Quota type (USRQUOTA, GRPQUOTA) */ - __u32 qd_count; /* acquire/release count (bytes for block quota) */ - __u32 qd_isblk; /* Block quota or file quota */ -}; +#define QDATA_IS_GRP(qdata) ((qdata)->qd_flags & LQUOTA_FLAGS_GRP) +#define QDATA_IS_BLK(qdata) ((qdata)->qd_flags & LQUOTA_FLAGS_BLK) +#define QDATA_IS_ADJBLK(qdata) ((qdata)->qd_flags & LQUOTA_FLAGS_ADJBLK) +#define QDATA_IS_ADJINO(qdata) ((qdata)->qd_flags & LQUOTA_FLAGS_ADJINO) +#define QDATA_IS_CHANGE_QS(qdata) ((qdata)->qd_flags & LQUOTA_FLAGS_CHG_QS) + +#define QDATA_SET_GRP(qdata) ((qdata)->qd_flags |= LQUOTA_FLAGS_GRP) +#define QDATA_SET_BLK(qdata) ((qdata)->qd_flags |= LQUOTA_FLAGS_BLK) +#define QDATA_SET_ADJBLK(qdata) ((qdata)->qd_flags |= LQUOTA_FLAGS_ADJBLK) +#define QDATA_SET_ADJINO(qdata) ((qdata)->qd_flags |= LQUOTA_FLAGS_ADJINO) +#define QDATA_SET_CHANGE_QS(qdata) ((qdata)->qd_flags |= LQUOTA_FLAGS_CHG_QS) + +#define QDATA_CLR_GRP(qdata) ((qdata)->qd_flags &= ~LQUOTA_FLAGS_GRP) +#define QDATA_CLR_CHANGE_QS(qdata) ((qdata)->qd_flags &= ~LQUOTA_FLAGS_CHG_QS) extern void lustre_swab_qdata(struct qunit_data *d); -extern void lustre_swab_qdata_old(struct qunit_data_old *d); -extern struct qunit_data *lustre_quota_old_to_new(struct qunit_data_old *d); -extern struct qunit_data_old *lustre_quota_new_to_old(struct qunit_data *d); +extern struct qunit_data *quota_get_qdata(void*req, int is_req, int is_exp); +extern int quota_copy_qdata(void *request, struct qunit_data *qdata, + int is_req, int is_exp); typedef enum { QUOTA_DQACQ = 601, QUOTA_DQREL = 602, + QUOTA_LAST_OPC } quota_cmd_t; +#define QUOTA_FIRST_OPC QUOTA_DQACQ #define JOIN_FILE_ALIGN 4096 -/** security opcodes */ +#define QUOTA_REQUEST 1 +#define QUOTA_REPLY 0 +#define QUOTA_EXPORT 1 +#define QUOTA_IMPORT 0 + +/* quota check function */ +#define QUOTA_RET_OK 0 /**< return successfully */ +#define QUOTA_RET_NOQUOTA 1 /**< not support quota */ +#define QUOTA_RET_NOLIMIT 2 /**< quota limit isn't set */ +#define QUOTA_RET_ACQUOTA 4 /**< need to acquire extra quota */ + + +/* security opcodes */ typedef enum { SEC_CTX_INIT = 801, SEC_CTX_INIT_CONT = 802, @@ -2294,15 +2683,15 @@ typedef enum { /* NB take care when changing the sequence of elements this struct, * because the offset info is used in find_capa() */ struct lustre_capa { - struct lu_fid lc_fid; /* fid */ - __u64 lc_opc; /* operations allowed */ - __u32 lc_uid; /* uid, it is obsolete, but maybe used in - * future, reserve it for 64-bits aligned.*/ - __u32 lc_flags; /* HMAC algorithm & flags */ - __u32 lc_keyid; /* key used for the capability */ - __u32 lc_timeout; /* capa timeout value (sec) */ - __u64 lc_expiry; /* expiry time (sec) */ - __u8 lc_hmac[CAPA_HMAC_MAX_LEN]; /* HMAC */ + struct lu_fid lc_fid; /** fid */ + __u64 lc_opc; /** 
operations allowed */ + __u64 lc_uid; /** file owner */ + __u64 lc_gid; /** file group */ + __u32 lc_flags; /** HMAC algorithm & flags */ + __u32 lc_keyid; /** key# used for the capability */ + __u32 lc_timeout; /** capa timeout value (sec) */ + __u32 lc_expiry; /** expiry time (sec) */ + __u8 lc_hmac[CAPA_HMAC_MAX_LEN]; /** HMAC */ } __attribute__((packed)); extern void lustre_swab_lustre_capa(struct lustre_capa *c); @@ -2317,9 +2706,9 @@ enum { CAPA_OPC_OSS_WRITE = 1<<5, /**< write oss object data */ CAPA_OPC_OSS_READ = 1<<6, /**< read oss object data */ CAPA_OPC_OSS_TRUNC = 1<<7, /**< truncate oss object */ - CAPA_OPC_META_WRITE = 1<<8, /**< write object meta data */ - CAPA_OPC_META_READ = 1<<9, /**< read object meta data */ - + CAPA_OPC_OSS_DESTROY = 1<<8, /**< destroy oss object */ + CAPA_OPC_META_WRITE = 1<<9, /**< write object meta data */ + CAPA_OPC_META_READ = 1<<10, /**< read object meta data */ }; #define CAPA_OPC_OSS_RW (CAPA_OPC_OSS_READ | CAPA_OPC_OSS_WRITE) @@ -2327,7 +2716,8 @@ enum { (CAPA_OPC_BODY_WRITE | CAPA_OPC_BODY_READ | CAPA_OPC_INDEX_LOOKUP | \ CAPA_OPC_INDEX_INSERT | CAPA_OPC_INDEX_DELETE) #define CAPA_OPC_OSS_ONLY \ - (CAPA_OPC_OSS_WRITE | CAPA_OPC_OSS_READ | CAPA_OPC_OSS_TRUNC) + (CAPA_OPC_OSS_WRITE | CAPA_OPC_OSS_READ | CAPA_OPC_OSS_TRUNC | \ + CAPA_OPC_OSS_DESTROY) #define CAPA_OPC_MDS_DEFAULT ~CAPA_OPC_OSS_ONLY #define CAPA_OPC_OSS_DEFAULT ~(CAPA_OPC_MDS_ONLY | CAPA_OPC_OSS_ONLY) @@ -2364,11 +2754,39 @@ struct lustre_capa_key { extern void lustre_swab_lustre_capa_key(struct lustre_capa_key *k); -/* quota check function */ -#define QUOTA_RET_OK 0 /**< return successfully */ -#define QUOTA_RET_NOQUOTA 1 /**< not support quota */ -#define QUOTA_RET_NOLIMIT 2 /**< quota limit isn't set */ -#define QUOTA_RET_ACQUOTA 3 /**< need to acquire extra quota */ -#endif +/** The link ea holds 1 \a link_ea_entry for each hardlink */ +#define LINK_EA_MAGIC 0x11EAF1DFUL +struct link_ea_header { + __u32 leh_magic; + __u32 leh_reccount; + __u64 leh_len; /* total size */ + /* future use */ + __u32 padding1; + __u32 padding2; +}; +/** Hardlink data is name and parent fid. + * Stored in this crazy struct for maximum packing and endian-neutrality + */ +struct link_ea_entry { + /** __u16 stored big-endian, unaligned */ + char lee_reclen[2]; + struct lu_fid_pack lee_parent_fid; /**< variable length */ + /** logically after lee_parent_fid; don't use directly */ + char lee_name[0]; +}; + +/** fid2path request/reply structure */ +struct getinfo_fid2path { + struct lu_fid gf_fid; + __u64 gf_recno; + __u32 gf_linkno; + __u32 gf_pathlen; + char gf_path[0]; +} __attribute__((packed)); + +void lustre_swab_fid2path (struct getinfo_fid2path *gf); + + +#endif /** @} lustreidl */
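
The lu_range to lu_seq_range conversion above makes the range half-open (lsr_start is included, lsr_end is not) and adds the home-MDT field used by the fld module. Below is a minimal standalone sketch of how the new helpers compose; the struct and inline functions are copied from the hunk, while the uintN_t types and main() are illustrative additions, not part of the header.

#include <stdio.h>
#include <stdint.h>

struct lu_seq_range {
        uint64_t lsr_start;     /* first sequence in the range (inclusive) */
        uint64_t lsr_end;       /* one past the last sequence (exclusive) */
        uint32_t lsr_mdt;       /* home MDT index (used by fld) */
        uint32_t lsr_padding;
};

static inline uint64_t range_space(const struct lu_seq_range *range)
{
        return range->lsr_end - range->lsr_start;
}

static inline void range_alloc(struct lu_seq_range *to,
                               struct lu_seq_range *from,
                               uint64_t width)
{
        /* carve "width" sequences off the front of the parent range */
        to->lsr_start = from->lsr_start;
        to->lsr_end   = from->lsr_start + width;
        from->lsr_start += width;
}

static inline int range_is_exhausted(const struct lu_seq_range *range)
{
        return range_space(range) == 0;
}

int main(void)
{
        struct lu_seq_range parent = { .lsr_start = 0x400, .lsr_end = 0x800 };
        struct lu_seq_range child;

        range_alloc(&child, &parent, 0x100);
        printf("child  [%#llx, %#llx)\n",
               (unsigned long long)child.lsr_start,
               (unsigned long long)child.lsr_end);
        printf("parent [%#llx, %#llx), exhausted=%d\n",
               (unsigned long long)parent.lsr_start,
               (unsigned long long)parent.lsr_end,
               range_is_exhausted(&parent));
        return 0;
}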
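
The readdir changes replace lde_pad0 with lde_attrs and introduce optional per-entry attributes (LUDA_FID inside the header itself, LUDA_TYPE appended after the name), so record sizes now depend on which attributes are requested. The following standalone sketch walks through the size math of lu_dirent_calc_size(), assuming the usual layout (16-byte lu_fid, hence a 32-byte lu_dirent header); the reduced struct definitions and main() are for illustration only.

#include <stdio.h>
#include <stdint.h>

struct lu_fid { uint64_t f_seq; uint32_t f_oid; uint32_t f_ver; };

struct lu_dirent {
        struct lu_fid lde_fid;          /* valid if LUDA_FID is set */
        uint64_t      lde_hash;         /* hash or offset of the entry */
        uint16_t      lde_reclen;       /* total record length */
        uint16_t      lde_namelen;      /* name length */
        uint32_t      lde_attrs;        /* mask of LUDA_* attributes that follow */
        char          lde_name[0];
};

struct luda_type { uint16_t lt_type; };

enum { LUDA_FID = 0x0001, LUDA_TYPE = 0x0002 };

static int lu_dirent_calc_size(int namelen, uint16_t attr)
{
        int size;

        if (attr & LUDA_TYPE) {
                /* pad the name out to luda_type alignment, then append
                 * the 2-byte type attribute */
                const unsigned align = sizeof(struct luda_type) - 1;
                size = (sizeof(struct lu_dirent) + namelen + align) & ~align;
                size += sizeof(struct luda_type);
        } else {
                size = sizeof(struct lu_dirent) + namelen;
        }
        /* every record is rounded up to a multiple of 8 */
        return (size + 7) & ~7;
}

int main(void)
{
        printf("\"foo\", no attrs : %d bytes\n", lu_dirent_calc_size(3, 0));
        printf("\"foo\", LUDA_TYPE: %d bytes\n", lu_dirent_calc_size(3, LUDA_TYPE));
        return 0;
}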
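
The new lustre_set_wire_obdo()/lustre_get_wire_obdo() helpers exist so that flags in OBD_FL_LOCAL_MASK (for example OBD_FL_TEMPORARY) never cross the network: they are stripped when an obdo is packed for the wire and re-applied on the local copy when the reply is unpacked. The sketch below shows that round trip under stated simplifications: the two-field struct obdo is a stand-in for the real structure, assert() stands in for LASSERT(), and struct assignment replaces memcpy().

#include <assert.h>
#include <stdint.h>

#define OBD_FL_SHRINK_GRANT  0x00020000u   /* sample flag that may go on the wire */
#define OBD_FL_LOCAL_MASK    0xF0000000u   /* local-only flags, never sent */
#define OBD_FL_TEMPORARY     0x10000000u   /* temporary OBDO marker (bug 18364) */

struct obdo {                              /* stand-in with just the fields needed */
        uint64_t o_id;
        uint32_t o_flags;
};

static void lustre_set_wire_obdo(struct obdo *wobdo, const struct obdo *lobdo)
{
        *wobdo = *lobdo;
        wobdo->o_flags &= ~OBD_FL_LOCAL_MASK;          /* strip local-only bits */
}

static void lustre_get_wire_obdo(struct obdo *lobdo, const struct obdo *wobdo)
{
        uint32_t local_flags = lobdo->o_flags & OBD_FL_LOCAL_MASK;

        assert(!(wobdo->o_flags & OBD_FL_LOCAL_MASK)); /* peer must not set them */
        *lobdo = *wobdo;
        lobdo->o_flags &= ~OBD_FL_LOCAL_MASK;
        lobdo->o_flags |= local_flags;                 /* restore local-only bits */
}

int main(void)
{
        struct obdo local = { .o_id = 7,
                              .o_flags = OBD_FL_TEMPORARY | OBD_FL_SHRINK_GRANT };
        struct obdo wire, back = local;

        lustre_set_wire_obdo(&wire, &local);
        assert(!(wire.o_flags & OBD_FL_LOCAL_MASK));

        lustre_get_wire_obdo(&back, &wire);
        assert(back.o_flags & OBD_FL_TEMPORARY);       /* survived the round trip */
        assert(back.o_flags & OBD_FL_SHRINK_GRANT);
        return 0;
}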
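
The old qd_type/qd_isblk pair in qunit_data collapses into a single qd_flags word driven by the LQUOTA_FLAGS_* bits, which are shared with quota_adjust_qunit and the OBD_CONNECT_CHANGE_QS path. A small sketch of that encoding, using the flag values and accessor macros from the hunks above; the struct mirrors the new qunit_data and main() is illustrative.

#include <stdio.h>
#include <stdint.h>

#define LQUOTA_FLAGS_GRP     1UL   /* 0 is user, 1 is group */
#define LQUOTA_FLAGS_BLK     2UL   /* 0 is inode, 1 is block */
#define LQUOTA_FLAGS_ADJBLK  4UL   /* adjust the block qunit size */
#define LQUOTA_FLAGS_ADJINO  8UL   /* adjust the inode qunit size */

struct qunit_data {
        uint32_t qd_id;      /* uid or gid the request applies to */
        uint32_t qd_flags;   /* LQUOTA_FLAGS_* bits */
        uint64_t qd_count;   /* bytes for block quota, inodes otherwise */
        uint64_t qd_qunit;   /* current qunit size returned by the master */
        uint64_t padding;
};

#define QDATA_IS_GRP(q)   ((q)->qd_flags & LQUOTA_FLAGS_GRP)
#define QDATA_IS_BLK(q)   ((q)->qd_flags & LQUOTA_FLAGS_BLK)
#define QDATA_SET_GRP(q)  ((q)->qd_flags |= LQUOTA_FLAGS_GRP)
#define QDATA_SET_BLK(q)  ((q)->qd_flags |= LQUOTA_FLAGS_BLK)

int main(void)
{
        struct qunit_data qd = { .qd_id = 500, .qd_count = 1 << 20 };

        QDATA_SET_GRP(&qd);           /* group quota ... */
        QDATA_SET_BLK(&qd);           /* ... accounted in blocks */
        printf("id=%u grp=%d blk=%d count=%llu\n", qd.qd_id,
               !!QDATA_IS_GRP(&qd), !!QDATA_IS_BLK(&qd),
               (unsigned long long)qd.qd_count);
        return 0;
}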