X-Git-Url: https://git.whamcloud.com/?p=fs%2Flustre-release.git;a=blobdiff_plain;f=lustre%2Finclude%2Flustre%2Flustre_idl.h;h=c80dd5e0f3d2c678df9b25f61e915e65a8959b81;hp=bf58102775e70e3181bce52bc73d6672c38250bb;hb=e9dc8feb37943024452dcc5acb3f96b95c6a518d;hpb=7bbc01d82edb529c0d46efedfd5e8bf2c903c195 diff --git a/lustre/include/lustre/lustre_idl.h b/lustre/include/lustre/lustre_idl.h index bf581027..c80dd5e 100644 --- a/lustre/include/lustre/lustre_idl.h +++ b/lustre/include/lustre/lustre_idl.h @@ -16,8 +16,8 @@ * in the LICENSE file that accompanied this code). * * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see [sun.com URL with a - * copy of GPLv2]. + * version 2 along with this program; If not, see + * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf * * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, * CA 95054 USA or visit www.sun.com if you need additional information or @@ -36,7 +36,11 @@ * lustre/include/lustre/lustre_idl.h * * Lustre wire protocol definitions. + */ + +/** \defgroup lustreidl lustreidl * + * Lustre wire protocol definitions. * * We assume all nodes are either little-endian or big-endian, and we * always send messages in the sender's native format. The receiver @@ -79,6 +83,8 @@ * For variable length types, a second 'lustre_swab_v_xxxtypexxx()' routine * may be defined that swabs just the variable part, after the caller has * verified that the message buffer is large enough. + * + * @{ */ #ifndef _LUSTRE_IDL_H_ @@ -86,10 +92,9 @@ #include -#include - /* Defn's shared with user-space. */ #include +#include /* * GENERAL STUFF @@ -163,89 +168,152 @@ #define LUSTRE_LOG_VERSION 0x00050000 #define LUSTRE_MGS_VERSION 0x00060000 -typedef __u64 mdsno_t; +typedef __u32 mdsno_t; typedef __u64 seqno_t; -struct lu_range { - __u64 lr_start; - __u64 lr_end; +/** + * Describes a range of sequence, lsr_start is included but lsr_end is + * not in the range. + * Same structure is used in fld module where lsr_mdt field holds mdt id + * of the home mdt. + */ + +struct lu_seq_range { + __u64 lsr_start; + __u64 lsr_end; + __u32 lsr_mdt; + __u32 lsr_padding; }; -static inline __u64 range_space(struct lu_range *r) +/** + * returns width of given range \a r + */ + +static inline __u64 range_space(const struct lu_seq_range *range) { - return r->lr_end - r->lr_start; + return range->lsr_end - range->lsr_start; } -static inline void range_zero(struct lu_range *r) +/** + * initialize range to zero + */ + +static inline void range_init(struct lu_seq_range *range) { - r->lr_start = r->lr_end = 0; + range->lsr_start = range->lsr_end = range->lsr_mdt = 0; } -static inline int range_within(struct lu_range *r, +/** + * check if given seq id \a s is within given range \a r + */ + +static inline int range_within(const struct lu_seq_range *range, __u64 s) { - return s >= r->lr_start && s < r->lr_end; + return s >= range->lsr_start && s < range->lsr_end; } -static inline void range_alloc(struct lu_range *r, - struct lu_range *s, - __u64 w) +/** + * allocate \a w units of sequence from range \a from. 
+ */ +static inline void range_alloc(struct lu_seq_range *to, + struct lu_seq_range *from, + __u64 width) { - r->lr_start = s->lr_start; - r->lr_end = s->lr_start + w; - s->lr_start += w; + to->lsr_start = from->lsr_start; + to->lsr_end = from->lsr_start + width; + from->lsr_start += width; } -static inline int range_is_sane(struct lu_range *r) +static inline int range_is_sane(const struct lu_seq_range *range) { - return (r->lr_end >= r->lr_start); + return (range->lsr_end >= range->lsr_start); } -static inline int range_is_zero(struct lu_range *r) +static inline int range_is_zero(const struct lu_seq_range *range) { - return (r->lr_start == 0 && r->lr_end == 0); + return (range->lsr_start == 0 && range->lsr_end == 0); } -static inline int range_is_exhausted(struct lu_range *r) +static inline int range_is_exhausted(const struct lu_seq_range *range) + { - return range_space(r) == 0; + return range_space(range) == 0; } -#define DRANGE "[%#16.16"LPF64"x-%#16.16"LPF64"x]" +#define DRANGE "[%#16.16"LPF64"x-%#16.16"LPF64"x):%x" #define PRANGE(range) \ - (range)->lr_start, \ - (range)->lr_end + (range)->lsr_start, \ + (range)->lsr_end, \ + (range)->lsr_mdt + +/** \defgroup lu_fid lu_fid + * @{ */ +/** + * File identifier. + * + * Fid is a cluster-wide unique identifier of a file or an object + * (stripe). Fids are never reused. Fids are transmitted across network (in + * the sender byte-ordering), and stored on disk in a packed form (struct + * lu_fid_pack) in a big-endian order. + */ struct lu_fid { - __u64 f_seq; /* holds fid sequence. Lustre should support 2 ^ 64 - * objects, thus even if one sequence has one object we - * will never reach this value. */ - __u32 f_oid; /* fid number within its sequence. */ - __u32 f_ver; /* holds fid version. */ + /** + * fid sequence. Sequence is a unit of migration: all files (objects) + * with fids from a given sequence are stored on the same + * server. + * + * Lustre should support 2 ^ 64 objects, thus even if one + * sequence has one object we will never reach this value. + */ + __u64 f_seq; + /** fid number within sequence. */ + __u32 f_oid; + /** + * fid version, used to distinguish different versions (in the sense + * of snapshots, etc.) of the same file system object. Not currently + * used. + */ + __u32 f_ver; }; -/* +/** + * Following struct for MDT attributes, that will be kept inode's EA. + * Introduced in 2.0 release (please see b15993, for details) + */ +struct lustre_mdt_attrs { + /** FID of this inode */ + struct lu_fid lma_self_fid; + /** SOM state, mdt/ost type, others */ + __u64 lma_flags; + /** total sectors in objects */ + __u64 lma_som_sectors; +}; + + +/** * fid constants */ enum { - /* initial fid id value */ + /** initial fid id value */ LUSTRE_FID_INIT_OID = 1UL }; -/* get object sequence */ +/** returns fid object sequence */ static inline __u64 fid_seq(const struct lu_fid *fid) { return fid->f_seq; } -/* get object id */ +/** returns fid object id */ static inline __u32 fid_oid(const struct lu_fid *fid) { return fid->f_oid; } -/* get object version */ +/** returns fid object version */ static inline __u32 fid_ver(const struct lu_fid *fid) { return fid->f_ver; @@ -265,7 +333,7 @@ static inline void fid_zero(struct lu_fid *fid) /** * Check if a fid is igif or not. * \param fid the fid to be tested. - * \return true if the fid is a igif; otherwise false. + * \return true if the fid is a igif; otherwise false. 
*/ static inline int fid_is_igif(const struct lu_fid *fid) { @@ -275,7 +343,7 @@ static inline int fid_is_igif(const struct lu_fid *fid) /** * Check if a fid is idif or not. * \param fid the fid to be tested. - * \return true if the fid is a idif; otherwise false. + * \return true if the fid is a idif; otherwise false. */ static inline int fid_is_idif(const struct lu_fid *fid) { @@ -296,13 +364,13 @@ static inline ino_t lu_igif_ino(const struct lu_fid *fid) * Get inode generation from a igif. * \param fid a igif to get inode generation from. * \return inode generation for the igif. - */ + */ static inline __u32 lu_igif_gen(const struct lu_fid *fid) { return fid_oid(fid); } -#define DFID "[0x%16.16"LPF64"x/0x%8.8x:0x%8.8x]" +#define DFID "["LPX64":0x%x:0x%x]" #define PFID(fid) \ fid_seq(fid), \ @@ -357,13 +425,11 @@ static inline void fid_be_to_cpu(struct lu_fid *dst, const struct lu_fid *src) LASSERTF(fid_is_igif(dst) || fid_ver(dst) == 0, DFID"\n", PFID(dst)); } -#ifdef __KERNEL__ -/* +/** * Storage representation for fids. * * Variable size, first byte contains the length of the whole record. */ - struct lu_fid_pack { char fp_len; char fp_area[sizeof(struct lu_fid)]; @@ -373,9 +439,6 @@ void fid_pack(struct lu_fid_pack *pack, const struct lu_fid *fid, struct lu_fid *befider); int fid_unpack(const struct lu_fid_pack *pack, struct lu_fid *fid); -/* __KERNEL__ */ -#endif - static inline int fid_is_sane(const struct lu_fid *fid) { return @@ -391,20 +454,41 @@ static inline int fid_is_zero(const struct lu_fid *fid) } extern void lustre_swab_lu_fid(struct lu_fid *fid); -extern void lustre_swab_lu_range(struct lu_range *range); +extern void lustre_swab_lu_seq_range(struct lu_seq_range *range); static inline int lu_fid_eq(const struct lu_fid *f0, const struct lu_fid *f1) { - /* Check that there is no alignment padding. */ - CLASSERT(sizeof *f0 == + /* Check that there is no alignment padding. */ + CLASSERT(sizeof *f0 == sizeof f0->f_seq + sizeof f0->f_oid + sizeof f0->f_ver); LASSERTF(fid_is_igif(f0) || fid_ver(f0) == 0, DFID, PFID(f0)); LASSERTF(fid_is_igif(f1) || fid_ver(f1) == 0, DFID, PFID(f1)); - return memcmp(f0, f1, sizeof *f0) == 0; + return memcmp(f0, f1, sizeof *f0) == 0; } -/* +#define __diff_normalize(val0, val1) \ +({ \ + typeof(val0) __val0 = (val0); \ + typeof(val1) __val1 = (val1); \ + \ + (__val0 == __val1 ? 0 : __val0 > __val1 ? +1 : -1); \ +}) + +static inline int lu_fid_cmp(const struct lu_fid *f0, + const struct lu_fid *f1) +{ + return + __diff_normalize(fid_seq(f0), fid_seq(f1)) ?: + __diff_normalize(fid_oid(f0), fid_oid(f1)) ?: + __diff_normalize(fid_ver(f0), fid_ver(f1)); +} + +/** @} lu_fid */ + +/** \defgroup lu_dir lu_dir + * @{ */ +/** * Layout of readdir pages, as transmitted on wire. */ struct lu_dirent { @@ -459,6 +543,8 @@ static inline int lu_dirent_size(struct lu_dirent *ent) #define DIR_END_OFF 0xfffffffffffffffeULL +/** @} lu_dir */ + struct lustre_handle { __u64 cookie; }; @@ -500,6 +586,7 @@ struct lustre_msg_v2 { }; /* without gss, ptlrpc_body is put at the first buffer. 
*/ +#define PTLRPC_NUM_VERSIONS 4 struct ptlrpc_body { struct lustre_handle pb_handle; __u32 pb_type; @@ -517,6 +604,10 @@ struct ptlrpc_body { __u32 pb_service_time; /* for rep, actual service time */ __u32 pb_limit; __u64 pb_slv; + /* VBR: pre-versions */ + __u64 pb_pre_versions[PTLRPC_NUM_VERSIONS]; + /* padding for future needs */ + __u64 pb_padding[4]; }; extern void lustre_swab_ptlrpc_body(struct ptlrpc_body *pb); @@ -554,8 +645,10 @@ extern void lustre_swab_ptlrpc_body(struct ptlrpc_body *pb); * This was used in early prototypes of adaptive timeouts, and while there * shouldn't be any users of that code there also isn't a need for using this * bits. Defer usage until at least 1.10 to avoid potential conflict. */ -#define MSG_REQ_REPLAY_DONE 0x0010 -#define MSG_LOCK_REPLAY_DONE 0x0020 +#define MSG_DELAY_REPLAY 0x0010 +#define MSG_VERSION_REPLAY 0x0020 +#define MSG_REQ_REPLAY_DONE 0x0040 +#define MSG_LOCK_REPLAY_DONE 0x0080 /* * Flags for all connect opcodes (MDS_CONNECT, OST_CONNECT) @@ -574,6 +667,7 @@ extern void lustre_swab_ptlrpc_body(struct ptlrpc_body *pb); /* Connect flags */ #define OBD_CONNECT_RDONLY 0x00000001ULL /* client allowed read-only access */ #define OBD_CONNECT_INDEX 0x00000002ULL /* connect to specific LOV idx */ +#define OBD_CONNECT_MDS 0x00000004ULL /* connect from MDT to OST */ #define OBD_CONNECT_GRANT 0x00000008ULL /* OSC acquires grant at connect */ #define OBD_CONNECT_SRVLOCK 0x00000010ULL /* server takes locks for client */ #define OBD_CONNECT_VERSION 0x00000020ULL /* Server supports versions in ocd */ @@ -585,8 +679,8 @@ extern void lustre_swab_ptlrpc_body(struct ptlrpc_body *pb); #define OBD_CONNECT_JOIN 0x00002000ULL /* files can be concatenated */ #define OBD_CONNECT_ATTRFID 0x00004000ULL /* Server supports GetAttr By Fid */ #define OBD_CONNECT_NODEVOH 0x00008000ULL /* No open handle for special nodes */ -#define OBD_CONNECT_LCL_CLIENT 0x00010000ULL /* local 1.8 client */ -#define OBD_CONNECT_RMT_CLIENT 0x00020000ULL /* Remote 1.8 client */ +#define OBD_CONNECT_RMT_CLIENT 0x00010000ULL /* Remote client */ +#define OBD_CONNECT_RMT_CLIENT_FORCE 0x00020000ULL /* Remote client by force */ #define OBD_CONNECT_BRW_SIZE 0x00040000ULL /* Max bytes per rpc */ #define OBD_CONNECT_QUOTA64 0x00080000ULL /* 64bit qunit_data.qd_count b=10707*/ #define OBD_CONNECT_MDS_CAPA 0x00100000ULL /* MDS capability */ @@ -597,10 +691,10 @@ extern void lustre_swab_ptlrpc_body(struct ptlrpc_body *pb); #define OBD_CONNECT_LRU_RESIZE 0x02000000ULL /* Lru resize feature. 
*/ #define OBD_CONNECT_MDS_MDS 0x04000000ULL /* MDS-MDS connection*/ #define OBD_CONNECT_REAL 0x08000000ULL /* real connection */ -#define OBD_CONNECT_CHANGE_QS 0x10000000ULL /*shrink/enlarge qunit size - *b=10600 */ +#define OBD_CONNECT_CHANGE_QS 0x10000000ULL /* shrink/enlarge qunit b=10600 */ #define OBD_CONNECT_CKSUM 0x20000000ULL /* support several cksum algos */ #define OBD_CONNECT_FID 0x40000000ULL /* FID is supported by server */ +#define OBD_CONNECT_LOV_V3 0x100000000ULL /* client supports lov v3 ea */ /* also update obd_connect_names[] for lprocfs_rd_connect_flags() * and lustre/utils/wirecheck.c */ @@ -615,19 +709,22 @@ extern void lustre_swab_ptlrpc_body(struct ptlrpc_body *pb); OBD_CONNECT_ACL | OBD_CONNECT_XATTR | \ OBD_CONNECT_IBITS | OBD_CONNECT_JOIN | \ OBD_CONNECT_NODEVOH |/* OBD_CONNECT_ATTRFID |*/\ - OBD_CONNECT_LCL_CLIENT | \ OBD_CONNECT_RMT_CLIENT | \ + OBD_CONNECT_RMT_CLIENT_FORCE | \ OBD_CONNECT_MDS_CAPA | OBD_CONNECT_OSS_CAPA | \ OBD_CONNECT_MDS_MDS | OBD_CONNECT_CANCELSET | \ OBD_CONNECT_FID | \ - LRU_RESIZE_CONNECT_FLAG | OBD_CONNECT_AT) + LRU_RESIZE_CONNECT_FLAG | OBD_CONNECT_AT | \ + OBD_CONNECT_LOV_V3) #define OST_CONNECT_SUPPORTED (OBD_CONNECT_SRVLOCK | OBD_CONNECT_GRANT | \ OBD_CONNECT_REQPORTAL | OBD_CONNECT_VERSION | \ OBD_CONNECT_TRUNCLOCK | OBD_CONNECT_INDEX | \ OBD_CONNECT_BRW_SIZE | OBD_CONNECT_QUOTA64 | \ OBD_CONNECT_OSS_CAPA | OBD_CONNECT_CANCELSET | \ OBD_CONNECT_CKSUM | LRU_RESIZE_CONNECT_FLAG | \ - OBD_CONNECT_AT) + OBD_CONNECT_AT | OBD_CONNECT_CHANGE_QS | \ + OBD_CONNECT_RMT_CLIENT | \ + OBD_CONNECT_RMT_CLIENT_FORCE | OBD_CONNECT_MDS) #define ECHO_CONNECT_SUPPORTED (0) #define MGS_CONNECT_SUPPORTED (OBD_CONNECT_VERSION | OBD_CONNECT_AT) @@ -697,6 +794,7 @@ typedef enum { OST_SET_INFO = 17, OST_QUOTACHECK = 18, OST_QUOTACTL = 19, + OST_QUOTA_ADJUST_QUNIT = 20, OST_LAST_OPC } ost_cmd_t; #define OST_FIRST_OPC OST_REPLY @@ -725,7 +823,7 @@ typedef __u32 obd_count; #define OBD_FL_NO_USRQUOTA (0x00000100) /* the object's owner is over quota */ #define OBD_FL_NO_GRPQUOTA (0x00000200) /* the object's group is over quota */ -/* +/** * Set this to delegate DLM locking during obd_punch() to the OSTs. Only OSTs * that declared OBD_CONNECT_TRUNCLOCK in their connect flags support this * functionality. @@ -739,54 +837,10 @@ typedef __u32 obd_count; #define OBD_FL_CKSUM_ADLER (0x00002000) #define OBD_FL_CKSUM_ALL (OBD_FL_CKSUM_CRC32 | OBD_FL_CKSUM_ADLER) -/* - * This should not be smaller than sizeof(struct lustre_handle) + sizeof(struct - * llog_cookie) + sizeof(struct ll_fid). Nevertheless struct ll_fid is not - * longer stored in o_inline, we keep this just for case. 
- */ -#define OBD_INLINESZ 80 - -/* Note: 64-bit types are 64-bit aligned in structure */ -struct obdo { - obd_valid o_valid; /* hot fields in this obdo */ - obd_id o_id; - obd_gr o_gr; - obd_id o_fid; - obd_size o_size; /* o_size-o_blocks == ost_lvb */ - obd_time o_mtime; - obd_time o_atime; - obd_time o_ctime; - obd_blocks o_blocks; /* brw: cli sent cached bytes */ - obd_size o_grant; - - /* 32-bit fields start here: keep an even number of them via padding */ - obd_blksize o_blksize; /* optimal IO blocksize */ - obd_mode o_mode; /* brw: cli sent cache remain */ - obd_uid o_uid; - obd_gid o_gid; - obd_flag o_flags; - obd_count o_nlink; /* brw: checksum */ - obd_count o_generation; - obd_count o_misc; /* brw: o_dropped */ - __u32 o_easize; /* epoch in ost writes */ - __u32 o_mds; - __u32 o_stripe_idx; /* holds stripe idx */ - __u32 o_padding_1; - char o_inline[OBD_INLINESZ]; - /* lustre_handle + llog_cookie */ -}; - -#define o_dirty o_blocks -#define o_undirty o_mode -#define o_dropped o_misc -#define o_cksum o_nlink - -extern void lustre_swab_obdo (struct obdo *o); - - #define LOV_MAGIC_V1 0x0BD10BD0 #define LOV_MAGIC LOV_MAGIC_V1 #define LOV_MAGIC_JOIN 0x0BD20BD0 +#define LOV_MAGIC_V3 0x0BD30BD0 #define LOV_PATTERN_RAID0 0x001 /* stripes are used round-robin */ #define LOV_PATTERN_RAID1 0x002 /* stripes are mirrors of each other */ @@ -815,14 +869,35 @@ struct lov_mds_md_v1 { /* LOV EA mds/wire data (little-endian) */ struct lov_ost_data_v1 lmm_objects[0]; /* per-stripe data */ }; -extern void lustre_swab_lov_mds_md(struct lov_mds_md *llm); +/* extern void lustre_swab_lov_mds_md(struct lov_mds_md *llm); */ #define MAX_MD_SIZE (sizeof(struct lov_mds_md) + 4 * sizeof(struct lov_ost_data)) #define MIN_MD_SIZE (sizeof(struct lov_mds_md) + 1 * sizeof(struct lov_ost_data)) #define XATTR_NAME_ACL_ACCESS "system.posix_acl_access" #define XATTR_NAME_ACL_DEFAULT "system.posix_acl_default" +#define XATTR_USER_PREFIX "user." +#define XATTR_TRUSTED_PREFIX "trusted." +#define XATTR_SECURITY_PREFIX "security." +#define XATTR_LUSTRE_PREFIX "lustre." + #define XATTR_NAME_LOV "trusted.lov" +#define XATTR_NAME_LMA "trusted.lma" +#define XATTR_NAME_LMV "trusted.lmv" +#define XATTR_NAME_LINK "trusted.link" + + +struct lov_mds_md_v3 { /* LOV EA mds/wire data (little-endian) */ + __u32 lmm_magic; /* magic number = LOV_MAGIC_V3 */ + __u32 lmm_pattern; /* LOV_PATTERN_RAID0, LOV_PATTERN_RAID1 */ + __u64 lmm_object_id; /* LOV object ID */ + __u64 lmm_object_gr; /* LOV object group */ + __u32 lmm_stripe_size; /* size of stripe in bytes */ + __u32 lmm_stripe_count; /* num stripes in use for this object */ + char lmm_pool_name[LOV_MAXPOOLNAME]; /* must be 32bit aligned */ + struct lov_ost_data_v1 lmm_objects[0]; /* per-stripe data */ +}; + #define OBD_MD_FLID (0x00000001ULL) /* object ID */ #define OBD_MD_FLATIME (0x00000002ULL) /* access time */ @@ -838,11 +913,11 @@ extern void lustre_swab_lov_mds_md(struct lov_mds_md *llm); #define OBD_MD_FLFLAGS (0x00000800ULL) /* flags word */ #define OBD_MD_FLNLINK (0x00002000ULL) /* link count */ #define OBD_MD_FLGENER (0x00004000ULL) /* generation number */ -#define OBD_MD_FLINLINE (0x00008000ULL) /* inline data */ +/*#define OBD_MD_FLINLINE (0x00008000ULL) inline data. 
used until 1.6.5 */ #define OBD_MD_FLRDEV (0x00010000ULL) /* device number */ #define OBD_MD_FLEASIZE (0x00020000ULL) /* extended attribute data */ #define OBD_MD_LINKNAME (0x00040000ULL) /* symbolic link target */ -#define OBD_MD_FLHANDLE (0x00080000ULL) /* file handle */ +#define OBD_MD_FLHANDLE (0x00080000ULL) /* file/lock handle */ #define OBD_MD_FLCKSUM (0x00100000ULL) /* bulk data checksum */ #define OBD_MD_FLQOS (0x00200000ULL) /* quality of service stats */ #define OBD_MD_FLOSCOPQ (0x00400000ULL) /* osc opaque data */ @@ -871,6 +946,8 @@ extern void lustre_swab_lov_mds_md(struct lov_mds_md *llm); #define OBD_MD_FLCKSPLIT (0x0000080000000000ULL) /* Check split on server */ #define OBD_MD_FLCROSSREF (0x0000100000000000ULL) /* Cross-ref case */ +#define OBD_FL_TRUNC (0x0000200000000000ULL) /* for filter_truncate */ + #define OBD_MD_FLRMTLSETFACL (0x0001000000000000ULL) /* lfs lsetfacl case */ #define OBD_MD_FLRMTLGETFACL (0x0002000000000000ULL) /* lfs lgetfacl case */ #define OBD_MD_FLRMTRSETFACL (0x0004000000000000ULL) /* lfs rsetfacl case */ @@ -882,16 +959,6 @@ extern void lustre_swab_lov_mds_md(struct lov_mds_md *llm); OBD_MD_FLGID | OBD_MD_FLFLAGS | OBD_MD_FLNLINK | \ OBD_MD_FLGENER | OBD_MD_FLRDEV | OBD_MD_FLGROUP) -static inline struct lustre_handle *obdo_handle(struct obdo *oa) -{ - return (struct lustre_handle *)oa->o_inline; -} - -static inline struct llog_cookie *obdo_logcookie(struct obdo *oa) -{ - return (struct llog_cookie *)(oa->o_inline + - sizeof(struct lustre_handle)); -} /* don't forget obdo_fid which is way down at the bottom so it can * come after the definition of llog_cookie */ @@ -922,6 +989,9 @@ extern void lustre_swab_obd_statfs (struct obd_statfs *os); #define OBD_STATFS_NODELAY 0x0001 /* requests should be send without delay * and resends for avoid deadlocks */ +#define OBD_STATFS_FROM_CACHE 0x0002 /* the statfs callback should not update + * obd_osfs_age */ + /* ost_body.data values for OST_BRW */ #define OBD_BRW_READ 0x01 @@ -931,7 +1001,7 @@ extern void lustre_swab_obd_statfs (struct obd_statfs *os); #define OBD_BRW_CHECK 0x10 #define OBD_BRW_FROM_GRANT 0x20 /* the osc manages this under llite */ #define OBD_BRW_GRANTED 0x40 /* the ost manages this */ -#define OBD_BRW_DROP 0x80 /* drop the page after IO */ +#define OBD_BRW_NOCACHE 0x80 /* this page is a part of non-cached IO */ #define OBD_BRW_NOQUOTA 0x100 #define OBD_BRW_SRVLOCK 0x200 /* Client holds no lock over this page */ @@ -958,18 +1028,9 @@ struct niobuf_remote { extern void lustre_swab_niobuf_remote (struct niobuf_remote *nbr); -/* request structure for OST's */ - -struct ost_body { - struct obdo oa; -}; - -extern void lustre_swab_ost_body (struct ost_body *b); -extern void lustre_swab_ost_last_id(obd_id *id); - /* lock value block communicated between the filter and llite */ -/* OST_LVB_ERR_INIT is needed because the return code in rc is +/* OST_LVB_ERR_INIT is needed because the return code in rc is * negative, i.e. because ((MASK + rc) & MASK) != MASK. 
*/ #define OST_LVB_ERR_INIT 0xffbadbad80000000ULL #define OST_LVB_ERR_MASK 0xffbadbad00000000ULL @@ -1169,6 +1230,12 @@ struct mdt_body { __u32 max_mdsize; __u32 max_cookiesize; __u32 padding_4; /* also fix lustre_swab_mdt_body */ + __u64 padding_5; + __u64 padding_6; + __u64 padding_7; + __u64 padding_8; + __u64 padding_9; + __u64 padding_10; }; struct mds_body { @@ -1217,13 +1284,26 @@ extern void lustre_swab_mdt_epoch (struct mdt_epoch *b); #define Q_INITQUOTA 0x800101 /* init slave limits */ #define Q_GETOINFO 0x800102 /* get obd quota info */ #define Q_GETOQUOTA 0x800103 /* get obd quotas */ +#define Q_FINVALIDATE 0x800104 /* invalidate operational quotas */ + +#define Q_TYPEMATCH(id, type) \ + ((id) == (type) || (id) == UGQUOTA) -#define Q_TYPESET(oqc, type) \ - ((oqc)->qc_type == type || (oqc)->qc_type == UGQUOTA) +#define Q_TYPESET(oqc, type) Q_TYPEMATCH((oqc)->qc_type, type) #define Q_GETOCMD(oqc) \ ((oqc)->qc_cmd == Q_GETOINFO || (oqc)->qc_cmd == Q_GETOQUOTA) +#define QCTL_COPY(out, in) \ +do { \ + Q_COPY(out, in, qc_cmd); \ + Q_COPY(out, in, qc_type); \ + Q_COPY(out, in, qc_id); \ + Q_COPY(out, in, qc_stat); \ + Q_COPY(out, in, qc_dqinfo); \ + Q_COPY(out, in, qc_dqblk); \ +} while (0) + struct obd_quotactl { __u32 qc_cmd; __u32 qc_type; @@ -1235,6 +1315,34 @@ struct obd_quotactl { extern void lustre_swab_obd_quotactl(struct obd_quotactl *q); +struct quota_adjust_qunit { + __u32 qaq_flags; + __u32 qaq_id; + __u64 qaq_bunit_sz; + __u64 qaq_iunit_sz; + __u64 padding1; +}; +extern void lustre_swab_quota_adjust_qunit(struct quota_adjust_qunit *q); + +/* flags in qunit_data and quota_adjust_qunit will use macroes below */ +#define LQUOTA_FLAGS_GRP 1UL /* 0 is user, 1 is group */ +#define LQUOTA_FLAGS_BLK 2UL /* 0 is inode, 1 is block */ +#define LQUOTA_FLAGS_ADJBLK 4UL /* adjust the block qunit size */ +#define LQUOTA_FLAGS_ADJINO 8UL /* adjust the inode qunit size */ +#define LQUOTA_FLAGS_CHG_QS 16UL /* indicate whether it has capability of + * OBD_CONNECT_CHANGE_QS */ + +/* the status of lqsk_flags in struct lustre_qunit_size_key */ +#define LQUOTA_QUNIT_FLAGS (LQUOTA_FLAGS_GRP | LQUOTA_FLAGS_BLK) + +#define QAQ_IS_GRP(qaq) ((qaq)->qaq_flags & LQUOTA_FLAGS_GRP) +#define QAQ_IS_ADJBLK(qaq) ((qaq)->qaq_flags & LQUOTA_FLAGS_ADJBLK) +#define QAQ_IS_ADJINO(qaq) ((qaq)->qaq_flags & LQUOTA_FLAGS_ADJINO) + +#define QAQ_SET_GRP(qaq) ((qaq)->qaq_flags |= LQUOTA_FLAGS_GRP) +#define QAQ_SET_ADJBLK(qaq) ((qaq)->qaq_flags |= LQUOTA_FLAGS_ADJBLK) +#define QAQ_SET_ADJINO(qaq) ((qaq)->qaq_flags |= LQUOTA_FLAGS_ADJINO) + /* inode access permission for remote user, the inode info are omitted, * for client knows them. 
*/ struct mds_remote_perm { @@ -1250,7 +1358,8 @@ enum { CFS_SETUID_PERM = 0x01, CFS_SETGID_PERM = 0x02, CFS_SETGRP_PERM = 0x04, - CFS_RMTACL_PERM = 0x08 + CFS_RMTACL_PERM = 0x08, + CFS_RMTOWN_PERM = 0x10 }; extern void lustre_swab_mds_remote_perm(struct mds_remote_perm *p); @@ -1288,11 +1397,15 @@ extern void lustre_swab_mds_rec_setattr (struct mds_rec_setattr *sa); struct mdt_rec_setattr { __u32 sa_opcode; + __u32 sa_cap; __u32 sa_fsuid; + __u32 sa_fsuid_h; __u32 sa_fsgid; - __u32 sa_cap; + __u32 sa_fsgid_h; __u32 sa_suppgid; + __u32 sa_suppgid_h; __u32 sa_padding_1; + __u32 sa_padding_1_h; struct lu_fid sa_fid; __u64 sa_valid; __u32 sa_uid; @@ -1390,7 +1503,8 @@ enum { MDS_CROSS_REF = 1 << 1, MDS_VTX_BYPASS = 1 << 2, MDS_PERM_BYPASS = 1 << 3, - MDS_SOM = 1 << 4 + MDS_SOM = 1 << 4, + MDS_QUOTA_IGNORE = 1 << 5 }; struct mds_rec_join { @@ -1430,11 +1544,15 @@ extern void lustre_swab_mds_rec_create (struct mds_rec_create *cr); struct mdt_rec_create { __u32 cr_opcode; + __u32 cr_cap; __u32 cr_fsuid; + __u32 cr_fsuid_h; __u32 cr_fsgid; - __u32 cr_cap; + __u32 cr_fsgid_h; __u32 cr_suppgid1; + __u32 cr_suppgid1_h; __u32 cr_suppgid2; + __u32 cr_suppgid2_h; struct lu_fid cr_fid1; struct lu_fid cr_fid2; struct lustre_handle cr_old_handle; /* u64 handle in case of open replay */ @@ -1472,11 +1590,15 @@ extern void lustre_swab_mds_rec_link (struct mds_rec_link *lk); struct mdt_rec_link { __u32 lk_opcode; + __u32 lk_cap; __u32 lk_fsuid; + __u32 lk_fsuid_h; __u32 lk_fsgid; - __u32 lk_cap; + __u32 lk_fsgid_h; __u32 lk_suppgid1; + __u32 lk_suppgid1_h; __u32 lk_suppgid2; + __u32 lk_suppgid2_h; struct lu_fid lk_fid1; struct lu_fid lk_fid2; __u64 lk_time; @@ -1512,11 +1634,15 @@ extern void lustre_swab_mds_rec_unlink (struct mds_rec_unlink *ul); struct mdt_rec_unlink { __u32 ul_opcode; + __u32 ul_cap; __u32 ul_fsuid; + __u32 ul_fsuid_h; __u32 ul_fsgid; - __u32 ul_cap; + __u32 ul_fsgid_h; __u32 ul_suppgid1; + __u32 ul_suppgid1_h; __u32 ul_suppgid2; + __u32 ul_suppgid2_h; struct lu_fid ul_fid1; struct lu_fid ul_fid2; __u64 ul_time; @@ -1552,11 +1678,15 @@ extern void lustre_swab_mds_rec_rename (struct mds_rec_rename *rn); struct mdt_rec_rename { __u32 rn_opcode; + __u32 rn_cap; __u32 rn_fsuid; + __u32 rn_fsuid_h; __u32 rn_fsgid; - __u32 rn_cap; + __u32 rn_fsgid_h; __u32 rn_suppgid1; + __u32 rn_suppgid1_h; __u32 rn_suppgid2; + __u32 rn_suppgid2_h; struct lu_fid rn_fid1; struct lu_fid rn_fid2; __u64 rn_time; @@ -1574,11 +1704,15 @@ struct mdt_rec_rename { struct mdt_rec_setxattr { __u32 sx_opcode; + __u32 sx_cap; __u32 sx_fsuid; + __u32 sx_fsuid_h; __u32 sx_fsgid; - __u32 sx_cap; + __u32 sx_fsgid_h; __u32 sx_suppgid1; + __u32 sx_suppgid1_h; __u32 sx_suppgid2; + __u32 sx_suppgid2_h; struct lu_fid sx_fid; __u64 sx_padding_1; /* These three members are lu_fid size */ __u32 sx_padding_2; @@ -1598,11 +1732,15 @@ struct mdt_rec_setxattr { struct mdt_rec_reint { __u32 rr_opcode; + __u32 rr_cap; __u32 rr_fsuid; + __u32 rr_fsuid_h; __u32 rr_fsgid; - __u32 rr_cap; + __u32 rr_fsgid_h; __u32 rr_suppgid1; + __u32 rr_suppgid1_h; __u32 rr_suppgid2; + __u32 rr_suppgid2_h; struct lu_fid rr_fid1; struct lu_fid rr_fid2; __u64 rr_mtime; @@ -1628,13 +1766,6 @@ struct lmv_desc { extern void lustre_swab_lmv_desc (struct lmv_desc *ld); -struct md_fld { - seqno_t mf_seq; - mdsno_t mf_mds; -}; - -extern void lustre_swab_md_fld (struct md_fld *mf); - enum fld_rpc_opc { FLD_QUERY = 600, FLD_LAST_OPC, @@ -1718,10 +1849,11 @@ typedef enum { LCK_CR = 16, LCK_NL = 32, LCK_GROUP = 64, + LCK_COS = 128, LCK_MAXMODE } ldlm_mode_t; -#define 
LCK_MODE_NUM 7 +#define LCK_MODE_NUM 8 typedef enum { LDLM_PLAIN = 10, @@ -1882,12 +2014,16 @@ struct cfg_marker { __u32 cm_flags; __u32 cm_vers; /* lustre release version number */ __u32 padding; /* 64 bit align */ - time_t cm_createtime; /*when this record was first created */ - time_t cm_canceltime; /*when this record is no longer valid*/ + __u64 cm_createtime; /*when this record was first created */ + __u64 cm_canceltime; /*when this record is no longer valid*/ char cm_tgtname[MTI_NAME_MAXLEN]; char cm_comment[MTI_NAME_MAXLEN]; }; +extern void lustre_swab_cfg_marker(struct cfg_marker *marker, + int swab, int size); + + /* * Opcodes for multiple servers. */ @@ -1902,14 +2038,14 @@ typedef enum { /* catalog of log objects */ -/* Identifier for a single log object */ +/** Identifier for a single log object */ struct llog_logid { __u64 lgl_oid; __u64 lgl_ogr; __u32 lgl_ogen; } __attribute__((packed)); -/* Records written to the CATALOGS list */ +/** Records written to the CATALOGS list */ #define CATLIST "CATALOGS" struct llog_catid { struct llog_logid lci_logid; @@ -1918,7 +2054,7 @@ struct llog_catid { __u32 lci_padding3; } __attribute__((packed)); -/*join file lov mds md*/ +/** join file lov mds md*/ struct lov_mds_md_join { struct lov_mds_md lmmj_md; /*join private info*/ @@ -1938,10 +2074,13 @@ typedef enum { OST_RAID1_REC = LLOG_OP_MAGIC | 0x01000, MDS_UNLINK_REC = LLOG_OP_MAGIC | 0x10000 | (MDS_REINT << 8) | REINT_UNLINK, MDS_SETATTR_REC = LLOG_OP_MAGIC | 0x10000 | (MDS_REINT << 8) | REINT_SETATTR, + MDS_SETATTR64_REC= LLOG_OP_MAGIC | 0x90000 | (MDS_REINT << 8) | REINT_SETATTR, OBD_CFG_REC = LLOG_OP_MAGIC | 0x20000, PTL_CFG_REC = LLOG_OP_MAGIC | 0x30000, /* obsolete */ LLOG_GEN_REC = LLOG_OP_MAGIC | 0x40000, LLOG_JOIN_REC = LLOG_OP_MAGIC | 0x50000, + /** changelog record type */ + CHANGELOG_REC = LLOG_OP_MAGIC | 0x60000, LLOG_HDR_MAGIC = LLOG_OP_MAGIC | 0x45539, LLOG_LOGID_MAGIC = LLOG_OP_MAGIC | 0x4553b, } llog_op_type; @@ -1955,7 +2094,7 @@ typedef enum { __swab32(LLOG_OP_MAGIC) || \ (((r)->lrh_type == 0) && ((r)->lrh_len > LLOG_CHUNK_SIZE))) -/* Log record header - stored in little endian order. +/** Log record header - stored in little endian order. * Each record must start with this struct, end with a llog_rec_tail, * and be a multiple of 256 bits in size. */ @@ -1982,7 +2121,7 @@ struct llog_logid_rec { struct llog_rec_tail lid_tail; } __attribute__((packed)); -/* MDS extent description +/** MDS extent description * It is for joined file extent info, each extent info for joined file * just like (start, end, lmm). 
*/ @@ -1991,7 +2130,8 @@ struct mds_extent_desc { __u64 med_len; /* extent length */ struct lov_mds_md med_lmm; /* extent's lmm */ }; -/*Joined file array extent log record*/ + +/** Joined file array extent log record*/ struct llog_array_rec { struct llog_rec_hdr lmr_hdr; struct mds_extent_desc lmr_med; @@ -2033,6 +2173,18 @@ struct llog_setattr_rec { struct llog_rec_tail lsr_tail; } __attribute__((packed)); +struct llog_setattr64_rec { + struct llog_rec_hdr lsr_hdr; + obd_id lsr_oid; + obd_count lsr_ogen; + __u32 padding; + __u32 lsr_uid; + __u32 lsr_uid_h; + __u32 lsr_gid; + __u32 lsr_gid_h; + struct llog_rec_tail lsr_tail; +} __attribute__((packed)); + struct llog_size_change_rec { struct llog_rec_hdr lsc_hdr; struct ll_fid lsc_fid; @@ -2041,6 +2193,62 @@ struct llog_size_change_rec { struct llog_rec_tail lsc_tail; } __attribute__((packed)); +#define CHANGELOG_MAGIC 0xca103000 +/** Changelog record types + * When adding record types, update mdd_lproc.c's changelog_str + */ +enum changelog_rec_type { + CL_MARK = 0, + CL_CREATE = 1, /* namespace */ + CL_MKDIR = 2, /* namespace */ + CL_HARDLINK = 3, /* namespace */ + CL_SOFTLINK = 4, /* namespace */ + CL_MKNOD = 5, /* namespace */ + CL_UNLINK = 6, /* namespace */ + CL_RMDIR = 7, /* namespace */ + CL_RENAME = 8, /* namespace */ + CL_EXT = 9, /* namespace extended record (2nd half of rename) */ + CL_OPEN = 10, /* not currently used */ + CL_CLOSE = 11, /* may be written to log only with mtime change */ + CL_IOCTL = 12, + CL_TRUNC = 13, + CL_SETATTR = 14, + CL_XATTR = 15, + CL_LAST +}; + +/** \a changelog_rec_type's that can't be masked */ +#define CL_MINMASK (1 << CL_MARK) +/** bits covering all \a changelog_rec_type's */ +#define CL_ALLMASK 0XFFFF +/** default \a changelog_rec_type mask */ +#define CL_DEFMASK CL_ALLMASK + +/* per-record flags */ +#define CLF_VERSION 0x1000 +#define CLF_FLAGMASK 0x0FFF +#define CLF_HSM 0x0001 + +/** changelog record */ +struct llog_changelog_rec { + struct llog_rec_hdr cr_hdr; + __u16 cr_flags; /**< (flags&CLF_FLAGMASK)|CLF_VERSION */ + __u16 cr_namelen; + __u32 cr_type; /**< \a changelog_rec_type */ + __u64 cr_index; + __u64 cr_prev; /**< last index for this target fid */ + __u64 cr_time; + union { + struct lu_fid cr_tfid; /**< target fid */ + __u32 cr_markerflags; /**< CL_MARK flags */ + }; + struct lu_fid cr_pfid; /**< parent fid */ + union { + char cr_name[0]; /**< last element */ + struct llog_rec_tail cr_tail; /**< for_sizezof_only */ + }; +} __attribute__((packed)); + struct llog_gen { __u64 mnt_cnt; __u64 conn_cnt; @@ -2082,7 +2290,7 @@ struct llog_log_hdr { llh->llh_bitmap_offset - \ sizeof(llh->llh_tail)) * 8) -/* log cookies are used to reference a specific log file and a record therein */ +/** log cookies are used to reference a specific log file and a record therein */ struct llog_cookie { struct llog_logid lgc_lgl; __u32 lgc_subsys; @@ -2090,7 +2298,7 @@ struct llog_cookie { __u32 lgc_padding; } __attribute__((packed)); -/* llog protocol */ +/** llog protocol */ enum llogd_rpc_ops { LLOG_ORIGIN_HANDLE_CREATE = 501, LLOG_ORIGIN_HANDLE_NEXT_BLOCK = 502, @@ -2144,8 +2352,69 @@ struct lov_user_md_join { /* LOV EA user data (host-endian) */ struct lov_user_ost_data_join lmm_objects[0]; /* per-stripe data */ } __attribute__((packed)); -extern void lustre_swab_lov_user_md(struct lov_user_md *lum); -extern void lustre_swab_lov_user_md_objects(struct lov_user_md *lum); +/* Note: 64-bit types are 64-bit aligned in structure */ +struct obdo { + obd_valid o_valid; /* hot fields in this obdo */ + obd_id 
o_id; + obd_gr o_gr; + obd_id o_fid; + obd_size o_size; /* o_size-o_blocks == ost_lvb */ + obd_time o_mtime; + obd_time o_atime; + obd_time o_ctime; + obd_blocks o_blocks; /* brw: cli sent cached bytes */ + obd_size o_grant; + + /* 32-bit fields start here: keep an even number of them via padding */ + obd_blksize o_blksize; /* optimal IO blocksize */ + obd_mode o_mode; /* brw: cli sent cache remain */ + obd_uid o_uid; + obd_gid o_gid; + obd_flag o_flags; + obd_count o_nlink; /* brw: checksum */ + obd_count o_generation; + obd_count o_misc; /* brw: o_dropped */ + __u32 o_easize; /* epoch in ost writes */ + __u32 o_mds; + __u32 o_stripe_idx; /* holds stripe idx */ + __u32 o_padding_1; + struct lustre_handle o_handle; /* brw: lock handle to prolong locks */ + struct llog_cookie o_lcookie; /* destroy: unlink cookie from MDS */ + + __u64 o_padding_2; + __u64 o_padding_3; + __u64 o_padding_4; + __u64 o_padding_5; + __u64 o_padding_6; +}; + +#define o_dirty o_blocks +#define o_undirty o_mode +#define o_dropped o_misc +#define o_cksum o_nlink + +extern void lustre_swab_obdo (struct obdo *o); + +/* request structure for OST's */ +struct ost_body { + struct obdo oa; +}; + +/* Key for FIEMAP to be used in get_info calls */ +struct ll_fiemap_info_key { + char name[8]; + struct obdo oa; + struct ll_user_fiemap fiemap; +}; + +extern void lustre_swab_ost_body (struct ost_body *b); +extern void lustre_swab_ost_last_id(obd_id *id); +extern void lustre_swab_fiemap(struct ll_user_fiemap *fiemap); + +extern void lustre_swab_lov_user_md_v1(struct lov_user_md_v1 *lum); +extern void lustre_swab_lov_user_md_v3(struct lov_user_md_v3 *lum); +extern void lustre_swab_lov_user_md_objects(struct lov_user_ost_data *lod, + int stripe_count); extern void lustre_swab_lov_user_md_join(struct lov_user_md_join *lumj); /* llog_swab.c */ @@ -2158,36 +2427,69 @@ extern void lustre_swab_llog_rec(struct llog_rec_hdr *rec, struct lustre_cfg; extern void lustre_swab_lustre_cfg(struct lustre_cfg *lcfg); -/* quota. fixed by tianzy for bug10707 */ -#define QUOTA_IS_GRP 0X1UL /* 0 is user, 1 is group. Used by qd_flags*/ -#define QUOTA_IS_BLOCK 0x2UL /* 0 is inode, 1 is block. 
Used by qd_flags*/ - +/* this will be used when OBD_CONNECT_CHANGE_QS is set */ struct qunit_data { - __u32 qd_id; /* ID appiles to (uid, gid) */ - __u32 qd_flags; /* Quota type (USRQUOTA, GRPQUOTA) occupy one bit; - * Block quota or file quota occupy one bit */ - __u64 qd_count; /* acquire/release count (bytes for block quota) */ -}; - -struct qunit_data_old { - __u32 qd_id; /* ID appiles to (uid, gid) */ - __u32 qd_type; /* Quota type (USRQUOTA, GRPQUOTA) */ - __u32 qd_count; /* acquire/release count (bytes for block quota) */ - __u32 qd_isblk; /* Block quota or file quota */ -}; + /** + * ID appiles to (uid, gid) + */ + __u32 qd_id; + /** + * LQUOTA_FLAGS_* affect the responding bits + */ + __u32 qd_flags; + /** + * acquire/release count (bytes for block quota) + */ + __u64 qd_count; + /** + * when a master returns the reply to a slave, it will + * contain the current corresponding qunit size + */ + __u64 qd_qunit; + __u64 padding; +}; + +#define QDATA_IS_GRP(qdata) ((qdata)->qd_flags & LQUOTA_FLAGS_GRP) +#define QDATA_IS_BLK(qdata) ((qdata)->qd_flags & LQUOTA_FLAGS_BLK) +#define QDATA_IS_ADJBLK(qdata) ((qdata)->qd_flags & LQUOTA_FLAGS_ADJBLK) +#define QDATA_IS_ADJINO(qdata) ((qdata)->qd_flags & LQUOTA_FLAGS_ADJINO) +#define QDATA_IS_CHANGE_QS(qdata) ((qdata)->qd_flags & LQUOTA_FLAGS_CHG_QS) + +#define QDATA_SET_GRP(qdata) ((qdata)->qd_flags |= LQUOTA_FLAGS_GRP) +#define QDATA_SET_BLK(qdata) ((qdata)->qd_flags |= LQUOTA_FLAGS_BLK) +#define QDATA_SET_ADJBLK(qdata) ((qdata)->qd_flags |= LQUOTA_FLAGS_ADJBLK) +#define QDATA_SET_ADJINO(qdata) ((qdata)->qd_flags |= LQUOTA_FLAGS_ADJINO) +#define QDATA_SET_CHANGE_QS(qdata) ((qdata)->qd_flags |= LQUOTA_FLAGS_CHG_QS) + +#define QDATA_CLR_GRP(qdata) ((qdata)->qd_flags &= ~LQUOTA_FLAGS_GRP) +#define QDATA_CLR_CHANGE_QS(qdata) ((qdata)->qd_flags &= ~LQUOTA_FLAGS_CHG_QS) extern void lustre_swab_qdata(struct qunit_data *d); -extern void lustre_swab_qdata_old(struct qunit_data_old *d); -extern struct qunit_data *lustre_quota_old_to_new(struct qunit_data_old *d); -extern struct qunit_data_old *lustre_quota_new_to_old(struct qunit_data *d); +extern int quota_get_qdata(void*req, struct qunit_data *qdata, + int is_req, int is_exp); +extern int quota_copy_qdata(void *request, struct qunit_data *qdata, + int is_req, int is_exp); typedef enum { - QUOTA_DQACQ = 601, - QUOTA_DQREL = 602, + QUOTA_DQACQ = 901, + QUOTA_DQREL = 902, + QUOTA_LAST_OPC } quota_cmd_t; +#define QUOTA_FIRST_OPC QUOTA_DQACQ #define JOIN_FILE_ALIGN 4096 +#define QUOTA_REQUEST 1 +#define QUOTA_REPLY 0 +#define QUOTA_EXPORT 1 +#define QUOTA_IMPORT 0 + +/* quota check function */ +#define QUOTA_RET_OK 0 /**< return successfully */ +#define QUOTA_RET_NOQUOTA 1 /**< not support quota */ +#define QUOTA_RET_NOLIMIT 2 /**< quota limit isn't set */ +#define QUOTA_RET_ACQUOTA 4 /**< need to acquire extra quota */ + /* security opcodes */ typedef enum { SEC_CTX_INIT = 801, @@ -2206,32 +2508,32 @@ typedef enum { /* NB take care when changing the sequence of elements this struct, * because the offset info is used in find_capa() */ struct lustre_capa { - struct lu_fid lc_fid; /* fid */ - __u64 lc_opc; /* operations allowed */ - __u32 lc_uid; /* uid, it is obsolete, but maybe used in - * future, reserve it for 64-bits aligned.*/ - __u32 lc_flags; /* HMAC algorithm & flags */ - __u32 lc_keyid; /* key used for the capability */ - __u32 lc_timeout; /* capa timeout value (sec) */ - __u64 lc_expiry; /* expiry time (sec) */ - __u8 lc_hmac[CAPA_HMAC_MAX_LEN]; /* HMAC */ + struct lu_fid lc_fid; /** fid */ + 
__u64 lc_opc; /** operations allowed */ + __u64 lc_uid; /** file owner */ + __u64 lc_gid; /** file group */ + __u32 lc_flags; /** HMAC algorithm & flags */ + __u32 lc_keyid; /** key# used for the capability */ + __u32 lc_timeout; /** capa timeout value (sec) */ + __u32 lc_expiry; /** expiry time (sec) */ + __u8 lc_hmac[CAPA_HMAC_MAX_LEN]; /** HMAC */ } __attribute__((packed)); extern void lustre_swab_lustre_capa(struct lustre_capa *c); -/* lustre_capa.lc_opc */ +/** lustre_capa::lc_opc */ enum { - CAPA_OPC_BODY_WRITE = 1<<0, /* write object data */ - CAPA_OPC_BODY_READ = 1<<1, /* read object data */ - CAPA_OPC_INDEX_LOOKUP = 1<<2, /* lookup object fid */ - CAPA_OPC_INDEX_INSERT = 1<<3, /* insert object fid */ - CAPA_OPC_INDEX_DELETE = 1<<4, /* delete object fid */ - CAPA_OPC_OSS_WRITE = 1<<5, /* write oss object data */ - CAPA_OPC_OSS_READ = 1<<6, /* read oss object data */ - CAPA_OPC_OSS_TRUNC = 1<<7, /* truncate oss object */ - CAPA_OPC_META_WRITE = 1<<8, /* write object meta data */ - CAPA_OPC_META_READ = 1<<9, /* read object meta data */ - + CAPA_OPC_BODY_WRITE = 1<<0, /**< write object data */ + CAPA_OPC_BODY_READ = 1<<1, /**< read object data */ + CAPA_OPC_INDEX_LOOKUP = 1<<2, /**< lookup object fid */ + CAPA_OPC_INDEX_INSERT = 1<<3, /**< insert object fid */ + CAPA_OPC_INDEX_DELETE = 1<<4, /**< delete object fid */ + CAPA_OPC_OSS_WRITE = 1<<5, /**< write oss object data */ + CAPA_OPC_OSS_READ = 1<<6, /**< read oss object data */ + CAPA_OPC_OSS_TRUNC = 1<<7, /**< truncate oss object */ + CAPA_OPC_OSS_DESTROY = 1<<8, /**< destroy oss object */ + CAPA_OPC_META_WRITE = 1<<9, /**< write object meta data */ + CAPA_OPC_META_READ = 1<<10, /**< read object meta data */ }; #define CAPA_OPC_OSS_RW (CAPA_OPC_OSS_READ | CAPA_OPC_OSS_WRITE) @@ -2239,7 +2541,8 @@ enum { (CAPA_OPC_BODY_WRITE | CAPA_OPC_BODY_READ | CAPA_OPC_INDEX_LOOKUP | \ CAPA_OPC_INDEX_INSERT | CAPA_OPC_INDEX_DELETE) #define CAPA_OPC_OSS_ONLY \ - (CAPA_OPC_OSS_WRITE | CAPA_OPC_OSS_READ | CAPA_OPC_OSS_TRUNC) + (CAPA_OPC_OSS_WRITE | CAPA_OPC_OSS_READ | CAPA_OPC_OSS_TRUNC | \ + CAPA_OPC_OSS_DESTROY) #define CAPA_OPC_MDS_DEFAULT ~CAPA_OPC_OSS_ONLY #define CAPA_OPC_OSS_DEFAULT ~(CAPA_OPC_MDS_ONLY | CAPA_OPC_OSS_ONLY) @@ -2258,9 +2561,9 @@ static inline int capa_for_oss(struct lustre_capa *c) return (c->lc_opc & CAPA_OPC_INDEX_LOOKUP) == 0; } -/* lustre_capa.lc_hmac_alg */ +/* lustre_capa::lc_hmac_alg */ enum { - CAPA_HMAC_ALG_SHA1 = 1, /* sha1 algorithm */ + CAPA_HMAC_ALG_SHA1 = 1, /**< sha1 algorithm */ CAPA_HMAC_ALG_MAX, }; @@ -2268,17 +2571,35 @@ enum { #define CAPA_HMAC_ALG_MASK 0xff000000 struct lustre_capa_key { - __u64 lk_mdsid; /* mds# */ - __u32 lk_keyid; /* key# */ + __u64 lk_mdsid; /**< mds# */ + __u32 lk_keyid; /**< key# */ __u32 lk_padding; - __u8 lk_key[CAPA_HMAC_KEY_MAX_LEN]; /* key */ + __u8 lk_key[CAPA_HMAC_KEY_MAX_LEN]; /**< key */ } __attribute__((packed)); extern void lustre_swab_lustre_capa_key(struct lustre_capa_key *k); -/* quota check function */ -#define QUOTA_RET_OK 0 /* return successfully */ -#define QUOTA_RET_NOQUOTA 1 /* not support quota */ -#define QUOTA_RET_NOLIMIT 2 /* quota limit isn't set */ -#define QUOTA_RET_ACQUOTA 3 /* need to acquire extra quota */ +/** The link ea holds 1 \a link_ea_entry for each hardlink */ +#define LINK_EA_MAGIC 0x01EA0000 +struct link_ea_header { + __u32 leh_magic; + __u32 leh_reccount; + __u64 leh_len; /* total size */ + /* future use */ + __u32 padding1; + __u32 padding2; +}; + +/** Hardlink data is name and parent fid. 
+ * Stored in this crazy struct for maximum packing and endian-neutrality + */ +struct link_ea_entry { + /** __u16 stored big-endian, unaligned */ + char lee_reclen[2]; + struct lu_fid_pack lee_parent_fid; /**< variable length */ + /** logically after lee_parent_fid; don't use directly */ + char lee_name[0]; +}; + #endif +/** @} lustreidl */
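
For reference, a few standalone usage sketches of structures introduced or reworked by this change follow. They are illustrative only: they re-declare trimmed-down copies of the wire structures with stdint.h types rather than including lustre_idl.h, and any helper names, example values, and the main() drivers are the editor's assumptions, not part of the header.

The first sketch exercises the new lu_seq_range helpers: range_alloc() carves `width` sequence numbers off the front of a parent range into a child range. Only the body of range_alloc() mirrors the header; lsr_padding is omitted here.

/* Standalone sketch of the lu_seq_range allocation logic added above;
 * uses uint64_t/uint32_t in place of the kernel __u64/__u32 typedefs. */
#include <stdint.h>
#include <stdio.h>

struct lu_seq_range {
        uint64_t lsr_start;   /* first sequence in the range (inclusive) */
        uint64_t lsr_end;     /* one past the last sequence (exclusive) */
        uint32_t lsr_mdt;     /* home MDT index, used by the fld module */
};

/* Mirror of range_alloc(): take 'width' sequences from 'from' into 'to'. */
static void range_alloc(struct lu_seq_range *to,
                        struct lu_seq_range *from, uint64_t width)
{
        to->lsr_start = from->lsr_start;
        to->lsr_end = from->lsr_start + width;
        from->lsr_start += width;
}

int main(void)
{
        struct lu_seq_range parent = { .lsr_start = 0x400, .lsr_end = 0x800 };
        struct lu_seq_range child;

        range_alloc(&child, &parent, 0x100);
        /* child covers [0x400, 0x500); parent shrinks to [0x500, 0x800) */
        printf("child  [%#llx-%#llx)\n",
               (unsigned long long)child.lsr_start,
               (unsigned long long)child.lsr_end);
        printf("parent [%#llx-%#llx)\n",
               (unsigned long long)parent.lsr_start,
               (unsigned long long)parent.lsr_end);
        return 0;
}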
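
The second sketch shows the new DFID/PFID layout, "["LPX64":0x%x:0x%x]", which replaces the old fixed-width FID format. It assumes LPX64 expands to "%#llx"; the simplified PFID here reads the fields directly instead of going through fid_seq()/fid_oid()/fid_ver().

/* Sketch: printing a lu_fid in the new DFID layout [seq:oid:ver].
 * Standalone; LPX64 is assumed to be "%#llx" for this illustration. */
#include <stdint.h>
#include <stdio.h>

struct lu_fid {
        uint64_t f_seq;  /* fid sequence */
        uint32_t f_oid;  /* fid number within the sequence */
        uint32_t f_ver;  /* fid version, currently unused */
};

#define DFID "[%#llx:0x%x:0x%x]"
#define PFID(fid) (unsigned long long)(fid)->f_seq, (fid)->f_oid, (fid)->f_ver

int main(void)
{
        struct lu_fid fid = { .f_seq = 0x200000400ULL, .f_oid = 3, .f_ver = 0 };

        printf(DFID"\n", PFID(&fid));   /* e.g. [0x200000400:0x3:0x0] */
        return 0;
}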
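
The last sketch concerns the new link EA: each link_ea_entry stores its record length as an unaligned, big-endian __u16 in lee_reclen[2]. The lee_reclen_get()/lee_reclen_set() helpers below are hypothetical names invented for this illustration (the header shown here does not define accessors); only the big-endian byte order comes from the source comment.

/* Sketch of big-endian, unaligned 16-bit length handling for
 * link_ea_entry::lee_reclen; illustration only, not Lustre's helpers. */
#include <stdint.h>
#include <stdio.h>

static uint16_t lee_reclen_get(const unsigned char reclen[2])
{
        /* byte 0 is the high byte, byte 1 the low byte */
        return (uint16_t)((reclen[0] << 8) | reclen[1]);
}

static void lee_reclen_set(unsigned char reclen[2], uint16_t len)
{
        reclen[0] = (unsigned char)(len >> 8);
        reclen[1] = (unsigned char)(len & 0xff);
}

int main(void)
{
        unsigned char reclen[2];

        lee_reclen_set(reclen, 300);
        printf("round-trip length: %u\n", lee_reclen_get(reclen)); /* 300 */
        return 0;
}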