#define LU_SEQ_RANGE_MASK 0x3
-/** \defgroup lu_fid lu_fid
- * @{ */
-
extern void lustre_lma_swab(struct lustre_mdt_attrs *lma);
extern void lustre_lma_init(struct lustre_mdt_attrs *lma,
const struct lu_fid *fid,
enum {
/** LASTID file has zero OID */
LUSTRE_FID_LASTID_OID = 0UL,
- /** initial fid id value */
- LUSTRE_FID_INIT_OID = 1UL
+ /** initial fid id value */
+ LUSTRE_FID_INIT_OID = 1UL,
};
/**
/* sequence for local pre-defined FIDs listed in local_oid */
FID_SEQ_LOCAL_FILE = 0x200000001ULL,
FID_SEQ_DOT_LUSTRE = 0x200000002ULL,
- /* sequence is used for local named objects FIDs generated
- * by local_object_storage library */
+ /* named FIDs generated by local_object_storage library */
FID_SEQ_LOCAL_NAME = 0x200000003ULL,
- /* Because current FLD will only cache the fid sequence, instead
- * of oid on the client side, if the FID needs to be exposed to
- * clients sides, it needs to make sure all of fids under one
- * sequence will be located in one MDT. */
+ /* current FLD will only cache the fid seq, instead of oid on the client
+ * side, if the FID needs to be exposed to clients, it needs to make
+	 * sure all FIDs under one seq will be located in one MDT.
+ */
FID_SEQ_SPECIAL = 0x200000004ULL,
FID_SEQ_QUOTA = 0x200000005ULL,
FID_SEQ_QUOTA_GLB = 0x200000006ULL,
FID_SEQ_LAYOUT_RBTREE = 0x200000008ULL,
/* sequence is used for update logs of cross-MDT operation */
FID_SEQ_UPDATE_LOG = 0x200000009ULL,
- /* Sequence is used for the directory under which update logs
- * are created. */
+ /* DNE recovery logs in update_logs_dir */
FID_SEQ_UPDATE_LOG_DIR = 0x20000000aULL,
FID_SEQ_NORMAL = 0x200000400ULL,
FID_SEQ_LOV_DEFAULT = 0xffffffffffffffffULL
/** OID for FID_SEQ_SPECIAL */
enum special_oid {
- /* Big Filesystem Lock to serialize rename operations */
- FID_OID_SPECIAL_BFL = 1UL,
+ /* Big Filesystem Lock to serialize rename operations */
+ FID_OID_SPECIAL_BFL = 1UL,
};
/** OID for FID_SEQ_DOT_LUSTRE */
/** @} lu_fid */
/** \defgroup lu_dir lu_dir
- * @{ */
+ * @{
+ */
/**
* Enumeration of possible directory entry attributes.
LUDA_TYPE = 0x0002,
LUDA_64BITHASH = 0x0004,
- /* The following attrs are used for MDT internal only,
- * not visible to client */
+ /* for MDT internal use only, not visible to client */
/* Something in the record is unknown, to be verified in further. */
LUDA_UNKNOWN = 0x0400,
* Layout of readdir pages, as transmitted on wire.
*/
struct lu_dirent {
- /** valid if LUDA_FID is set. */
- struct lu_fid lde_fid;
- /** a unique entry identifier: a hash or an offset. */
- __u64 lde_hash;
- /** total record length, including all attributes. */
- __u16 lde_reclen;
- /** name length */
- __u16 lde_namelen;
- /** optional variable size attributes following this entry.
- * taken from enum lu_dirent_attrs.
- */
- __u32 lde_attrs;
- /** name is followed by the attributes indicated in ->ldp_attrs, in
- * their natural order. After the last attribute, padding bytes are
- * added to make ->lde_reclen a multiple of 8.
- */
- char lde_name[0];
+ /** valid if LUDA_FID is set. */
+ struct lu_fid lde_fid;
+ /** a unique entry identifier: a hash or an offset. */
+ __u64 lde_hash;
+ /** total record length, including all attributes. */
+ __u16 lde_reclen;
+ /** name length */
+ __u16 lde_namelen;
+ /** optional variable size attributes following this entry.
+ * taken from enum lu_dirent_attrs.
+ */
+ __u32 lde_attrs;
+ /** name is followed by the attributes indicated in ->ldp_attrs, in
+ * their natural order. After the last attribute, padding bytes are
+ * added to make ->lde_reclen a multiple of 8.
+ */
+ char lde_name[0];
};
/*
*
* Individual attributes do not have their length encoded in a generic way. It
* is assumed that consumer of an attribute knows its format. This means that
- * it is impossible to skip over an unknown attribute, except by skipping over all
- * remaining attributes (by using ->lde_reclen), which is not too
+ * it is impossible to skip over an unknown attribute, except by skipping over
+ * all remaining attributes (by using ->lde_reclen), which is not too
* constraining, because new server versions will append new attributes at
* the end of an entry.
*/
* Aligned to 2 bytes.
*/
struct luda_type {
- __u16 lt_type;
+ __u16 lt_type;
};
struct lu_dirpage {
- __u64 ldp_hash_start;
- __u64 ldp_hash_end;
- __u32 ldp_flags;
- __u32 ldp_pad0;
- struct lu_dirent ldp_entries[0];
+ __u64 ldp_hash_start;
+ __u64 ldp_hash_end;
+ __u32 ldp_flags;
+ __u32 ldp_pad0;
+ struct lu_dirent ldp_entries[0];
};
enum lu_dirpage_flags {
- /**
- * dirpage contains no entry.
- */
- LDF_EMPTY = 1 << 0,
- /**
- * last entry's lde_hash equals ldp_hash_end.
- */
- LDF_COLLIDE = 1 << 1
+ LDF_EMPTY = 1 << 0, /* dirpage contains no entry. */
+ LDF_COLLIDE = 1 << 1, /* last entry lde_hash == ldp_hash_end */
};
static inline struct lu_dirent *lu_dirent_start(struct lu_dirpage *dp)
/** @} lu_dir */
struct lustre_handle {
- __u64 cookie;
+ __u64 cookie;
};
#define DEAD_HANDLE_MAGIC 0xdeadbeefcafebabeULL
/* flags for lm_flags */
enum lustre_msghdr {
- MSGHDR_AT_SUPPORT = 0x1, /* adaptive timeouts, lm_cksum valid
- * in early reply messages */
- MSGHDR_CKSUM_INCOMPAT18 = 0x2, /* compat for 1.8, needs to be set well
- * beyond 2.8.0 for compatibility */
+ MSGHDR_AT_SUPPORT = 0x1, /* lm_cksum valid early reply msg */
+ MSGHDR_CKSUM_INCOMPAT18 = 0x2, /* compat for 1.8, need beyond 2.8.0 */
};
#define lustre_msg lustre_msg_v2
__u32 lm_flags; /* enum lustre_msghdr MSGHDR_* flags */
__u32 lm_opc; /* SUB request opcode in a batch request */
__u32 lm_padding_3; /* unused */
- __u32 lm_buflens[0]; /* length of additional buffers in bytes,
- * padded to a multiple of 8 bytes. */
- /*
+ /* length of additional buffers in bytes
* message buffers are packed after padded lm_buflens[] array,
* padded to a multiple of 8 bytes each to align contents.
*/
+ __u32 lm_buflens[0];
};
/* The returned result of the SUB request in a batch request */
#define ptlrpc_body ptlrpc_body_v3
struct ptlrpc_body_v2 {
- struct lustre_handle pb_handle;
- __u32 pb_type;
- __u32 pb_version;
- __u32 pb_opc;
- __u32 pb_status;
+ struct lustre_handle pb_handle;
+ __u32 pb_type;
+ __u32 pb_version;
+ __u32 pb_opc;
+ __u32 pb_status;
__u64 pb_last_xid; /* highest replied XID without lower unreplied XID */
__u16 pb_tag; /* virtual slot idx for multiple modifying RPCs */
__u16 pb_padding0;
__u32 pb_padding1;
- __u64 pb_last_committed;
- __u64 pb_transno;
- __u32 pb_flags;
- __u32 pb_op_flags;
- __u32 pb_conn_cnt;
- __u32 pb_timeout; /* for req, the deadline, for rep, the service est */
- __u32 pb_service_time; /* for rep, actual service time, also used for
- net_latency of req */
- __u32 pb_limit;
- __u64 pb_slv;
- /* VBR: pre-versions */
- __u64 pb_pre_versions[PTLRPC_NUM_VERSIONS];
+ __u64 pb_last_committed;
+ __u64 pb_transno;
+ __u32 pb_flags;
+ __u32 pb_op_flags;
+ __u32 pb_conn_cnt;
+ __u32 pb_timeout; /* for req, the deadline, for rep, the service est */
+ __u32 pb_service_time; /* rep: actual service time, req: net_latency */
+ __u32 pb_limit;
+ __u64 pb_slv;
+ /* VBR: pre-versions */
+ __u64 pb_pre_versions[PTLRPC_NUM_VERSIONS];
__u64 pb_mbits; /**< unused in V2 */
- /* padding for future needs */
+ /* padding for future needs */
__u64 pb_padding64_0;
__u64 pb_padding64_1;
__u32 pb_uid; /* req: process uid, use by tbf rules */
#define OBD_CONNECT_IMP_RECOV 0x10000000000ULL /* imp recov support */
#define OBD_CONNECT_JOBSTATS 0x20000000000ULL /* ptlrpc_body jobid */
#define OBD_CONNECT_UMASK 0x40000000000ULL /* create client umask */
-#define OBD_CONNECT_EINPROGRESS 0x80000000000ULL /* client -EINPROGRESS
- * RPC error handling */
-#define OBD_CONNECT_GRANT_PARAM 0x100000000000ULL /* extra grant params for
- * space reservation */
+/* client -EINPROGRESS RPC error handling */
+#define OBD_CONNECT_EINPROGRESS 0x80000000000ULL
+/* extra grant params for space reservation */
+#define OBD_CONNECT_GRANT_PARAM 0x100000000000ULL
#define OBD_CONNECT_FLOCK_OWNER 0x200000000000ULL /* unused since 2.0 */
#define OBD_CONNECT_LVB_TYPE 0x400000000000ULL /* variable LVB type */
#define OBD_CONNECT_NANOSEC_TIME 0x800000000000ULL /* nanosec timestamp */
#define OBD_CONNECT_FLOCK_DEAD 0x8000000000000ULL /* flk deadlock detect */
#define OBD_CONNECT_DISP_STRIPE 0x10000000000000ULL /* create stripe disp */
#define OBD_CONNECT_OPEN_BY_FID 0x20000000000000ULL /* open by FID won't pack
- * name in request */
+ * name in request
+ */
#define OBD_CONNECT_LFSCK 0x40000000000000ULL /* allow online LFSCK */
#define OBD_CONNECT_UNLINK_CLOSE 0x100000000000000ULL /* unlink closes file */
#define OBD_CONNECT_MULTIMODRPCS 0x200000000000000ULL /* allow multiple change
- * RPCs in parallel */
+ * RPCs in parallel
+ */
#define OBD_CONNECT_DIR_STRIPE 0x400000000000000ULL /* striped DNE dir */
#define OBD_CONNECT_SUBTREE 0x800000000000000ULL /* fileset mount */
/* was OBD_CONNECT_LOCKAHEAD_OLD 0x1000000000000000ULL old lockahead 2.12-2.13*/
#define OBD_CONNECT2_LOCK_CONTENTION 0x2000000ULL /* contention detect */
#define OBD_CONNECT2_ATOMIC_OPEN_LOCK 0x4000000ULL /* lock on first open */
#define OBD_CONNECT2_ENCRYPT_NAME 0x8000000ULL /* name encrypt */
-#define OBD_CONNECT2_DMV_IMP_INHERIT 0x20000000ULL /* client handle DMV inheritance */
+/* client handle DMV inheritance */
+#define OBD_CONNECT2_DMV_IMP_INHERIT 0x20000000ULL
#define OBD_CONNECT2_ENCRYPT_FID2PATH 0x40000000ULL /* fid2path enc file */
/* For MDS+OSS rolling upgrade interop with 2.16+older, ignored after 2.20.53 */
#define OBD_CONNECT2_REPLAY_CREATE 0x80000000ULL /* replay OST_CREATE */
-#define OBD_CONNECT2_LARGE_NID 0x100000000ULL /* understands large/IPv6 NIDs */
+/* understands large/IPv6 NIDs */
+#define OBD_CONNECT2_LARGE_NID 0x100000000ULL
#define OBD_CONNECT2_COMPRESS 0x200000000ULL /* compressed file */
/* only ZFS servers require a change to support unaligned DIO, so this flag is
- * ignored for ldiskfs servers */
+ * ignored for ldiskfs servers
+ */
#define OBD_CONNECT2_UNALIGNED_DIO 0x400000000ULL /* unaligned DIO */
/* XXX README XXX README XXX README XXX README XXX README XXX README XXX
* Please DO NOT add OBD_CONNECT flags before first ensuring that this value
/* This structure is used for both request and reply.
*
* If we eventually have separate connect data for different types, which we
- * almost certainly will, then perhaps we stick a union in here. */
+ * almost certainly will, then perhaps we stick a union in here.
+ */
struct obd_connect_data {
__u64 ocd_connect_flags; /* OBD_CONNECT_* per above */
__u32 ocd_version; /* lustre release version number */
/* Fields after ocd_maxbytes are only accessible by the receiver
* if the corresponding flag in ocd_connect_flags is set. Accessing
* any field after ocd_maxbytes on the receiver without a valid flag
- * may result in out-of-bound memory access and kernel oops. */
+ * may result in out-of-bound memory access and kernel oops.
+ */
__u16 ocd_maxmodrpcs; /* Maximum modify RPCs in parallel */
__u16 padding0; /* READ BELOW! also fix lustre_swab_connect */
__u32 padding1; /* READ BELOW! also fix lustre_swab_connect */
/* opcodes */
enum ost_cmd {
- OST_REPLY = 0, /* reply ? */
- OST_GETATTR = 1,
- OST_SETATTR = 2,
- OST_READ = 3,
- OST_WRITE = 4,
- OST_CREATE = 5,
- OST_DESTROY = 6,
- OST_GET_INFO = 7,
- OST_CONNECT = 8,
- OST_DISCONNECT = 9,
- OST_PUNCH = 10,
- OST_OPEN = 11,
- OST_CLOSE = 12,
- OST_STATFS = 13,
- OST_SYNC = 16,
- OST_SET_INFO = 17,
+ OST_REPLY = 0, /* reply ? */
+ OST_GETATTR = 1,
+ OST_SETATTR = 2,
+ OST_READ = 3,
+ OST_WRITE = 4,
+ OST_CREATE = 5,
+ OST_DESTROY = 6,
+ OST_GET_INFO = 7,
+ OST_CONNECT = 8,
+ OST_DISCONNECT = 9,
+ OST_PUNCH = 10,
+ OST_OPEN = 11,
+ OST_CLOSE = 12,
+ OST_STATFS = 13,
+ OST_SYNC = 16,
+ OST_SET_INFO = 17,
OST_QUOTACHECK = 18, /* not used since 2.4 */
OST_QUOTACTL = 19,
OST_QUOTA_ADJUST_QUNIT = 20, /* not used since 2.4 */
#define OST_FIRST_OPC OST_REPLY
enum obdo_flags {
- OBD_FL_INLINEDATA = 0x00000001,
- OBD_FL_OBDMDEXISTS = 0x00000002,
- OBD_FL_DELORPHAN = 0x00000004, /* if set in o_flags delete orphans */
- OBD_FL_NORPC = 0x00000008, /* set in o_flags do in OSC not OST */
- OBD_FL_IDONLY = 0x00000010, /* set in o_flags only adjust obj id*/
- OBD_FL_RECREATE_OBJS= 0x00000020, /* recreate missing obj */
- OBD_FL_DEBUG_CHECK = 0x00000040, /* echo client/server debug check */
+ OBD_FL_INLINEDATA = 0x00000001,
+ OBD_FL_OBDMDEXISTS = 0x00000002,
+ OBD_FL_DELORPHAN = 0x00000004, /* if set in o_flags delete orphans */
+ OBD_FL_NORPC = 0x00000008, /* set in o_flags do in OSC not OST */
+ OBD_FL_IDONLY = 0x00000010, /* set in o_flags only adjust obj id*/
+ OBD_FL_RECREATE_OBJS = 0x00000020, /* recreate missing obj */
+ OBD_FL_DEBUG_CHECK = 0x00000040, /* echo client/server debug check */
OBD_FL_NO_PRJQUOTA = 0x00000080, /* the object's project is over
- * quota */
- OBD_FL_NO_USRQUOTA = 0x00000100, /* the object's owner is over quota */
- OBD_FL_NO_GRPQUOTA = 0x00000200, /* the object's group is over quota */
- OBD_FL_CREATE_CROW = 0x00000400, /* object should be create on write */
- OBD_FL_SRVLOCK = 0x00000800, /* delegate DLM locking to server */
+ * quota
+ */
+ OBD_FL_NO_USRQUOTA = 0x00000100, /* the object's owner is over quota */
+ OBD_FL_NO_GRPQUOTA = 0x00000200, /* the object's group is over quota */
+	OBD_FL_CREATE_CROW = 0x00000400, /* object should be created on write */
+ OBD_FL_SRVLOCK = 0x00000800, /* delegate DLM locking to server */
OBD_FL_CKSUM_CRC32 = 0x00001000, /* CRC32 checksum type */
OBD_FL_CKSUM_ADLER = 0x00002000, /* ADLER checksum type */
OBD_FL_CKSUM_CRC32C = 0x00004000, /* CRC32C checksum type */
OBD_FL_CKSUM_RSVD3 = 0x00010000, /* for future cksum types */
OBD_FL_SHRINK_GRANT = 0x00020000, /* object shrink the grant */
OBD_FL_MMAP = 0x00040000, /* object is mmapped on the client.
- * XXX: obsoleted - reserved for old
- * clients prior than 2.2 */
- OBD_FL_RECOV_RESEND = 0x00080000, /* recoverable resent */
- OBD_FL_NOSPC_BLK = 0x00100000, /* no more block space on OST */
+ * XXX: obsoleted - reserved for old
+				   * clients prior to 2.2
+ */
+ OBD_FL_RECOV_RESEND = 0x00080000, /* recoverable resent */
+ OBD_FL_NOSPC_BLK = 0x00100000, /* no more block space on OST */
OBD_FL_FLUSH = 0x00200000, /* flush pages on the OST */
OBD_FL_SHORT_IO = 0x00400000, /* short io request */
OBD_FL_ROOT_SQUASH = 0x00800000, /* root squash */
#define MAX_MD_SIZE (sizeof(struct lov_comp_md_v1) + \
4 * (sizeof(struct lov_comp_md_entry_v1) + \
MAX_MD_SIZE_OLD))
-#define MIN_MD_SIZE (sizeof(struct lov_mds_md) + 1 * sizeof(struct lov_ost_data))
+#define MIN_MD_SIZE (sizeof(struct lov_mds_md) + 1 * \
+ sizeof(struct lov_ost_data))
-/* This is the default MDT reply size allocated, should the striping be bigger,
- * it will be reallocated in mdt_fix_reply.
- * 100 stripes is a bit less than 2.5k of data */
+/* default MDT reply size allocated, should the striping be bigger, it will be
+ * reallocated in mdt_fix_reply. 100 stripes ~= 2.5KB of data.
+ */
#define DEF_REP_MD_SIZE (sizeof(struct lov_mds_md) + \
100 * sizeof(struct lov_ost_data))
#define OBD_MD_FLGROUP (0x01000000ULL) /* group */
#define OBD_MD_FLFID (0x02000000ULL) /* ->ost write inline fid */
/* OBD_MD_FLEPOCH (0x04000000ULL) obsolete 2.7.50 */
- /* ->mds if epoch opens or closes */
#define OBD_MD_FLGRANT (0x08000000ULL) /* ost preallocation space grant */
#define OBD_MD_FLDIREA (0x10000000ULL) /* dir's extended attribute data */
#define OBD_MD_FLUSRQUOTA (0x20000000ULL) /* over quota flags sent from ost */
/* OBD_MD_FLCKSPLIT (0x0000080000000000ULL) obsolete 2.3.58*/
#define OBD_MD_FLCROSSREF (0x0000100000000000ULL) /* Cross-ref case */
#define OBD_MD_FLGETATTRLOCK (0x0000200000000000ULL) /* Get IOEpoch attributes
- * under lock; for xattr
- * requests means the
- * client holds the lock */
+ * under lock; for xattr
+ * requests means the
+ * client holds the lock
+ */
#define OBD_MD_FLOBJCOUNT (0x0000400000000000ULL) /* for multiple destroy */
#define OBD_MD_FLDATAVERSION (0x0010000000000000ULL) /* iversion sum */
-#define OBD_MD_CLOSE_INTENT_EXECED (0x0020000000000000ULL) /* close intent
- executed */
+#define OBD_MD_CLOSE_INTENT_EXECED (0x0020000000000000ULL) /* intent executed */
#define OBD_MD_DEFAULT_MEA (0x0040000000000000ULL) /* default MEA */
#define OBD_MD_FLOSTLAYOUT (0x0080000000000000ULL) /* contain ost_layout */
#define OBD_MD_FLXATTRALL (OBD_MD_FLXATTR | OBD_MD_FLXATTRLS)
/* don't forget obdo_fid which is way down at the bottom so it can
- * come after the definition of llog_cookie */
+ * come after the definition of llog_cookie
+ */
enum hss_valid {
HSS_SETMASK = 0x01,
#define OBD_BRW_RWMASK (OBD_BRW_READ | OBD_BRW_WRITE)
#define OBD_BRW_NDELAY 0x04 /* Non-delay RPC should be issued for
* this page. Non-delay RPCs have bit
- * rq_no_delay set. */
-#define OBD_BRW_SYNC 0x08 /* this page is a part of synchronous
- * transfer and is not accounted in
- * the grant. */
+ * rq_no_delay set.
+ */
+#define OBD_BRW_SYNC 0x08 /* part of sync transfer, not in grant */
#define OBD_BRW_CHECK 0x10
#define OBD_BRW_FROM_GRANT 0x20 /* the osc manages this under llite */
#define OBD_BRW_GRANTED 0x40 /* the ost manages this */
#define OBD_BRW_MEMALLOC 0x800 /* Client runs in the "kswapd" context */
#define OBD_BRW_OVER_USRQUOTA 0x1000 /* Running out of user quota */
#define OBD_BRW_OVER_GRPQUOTA 0x2000 /* Running out of group quota */
-#define OBD_BRW_SOFT_SYNC 0x4000 /* This flag notifies the server
- * that the client is running low on
- * space for unstable pages; asking
- * it to sync quickly */
+#define OBD_BRW_SOFT_SYNC 0x4000 /* flag notifies server that client is
+ * running low on space for unstable
+ * pages; asking it to sync quickly
+ */
#define OBD_BRW_OVER_PRJQUOTA 0x8000 /* Running out of project quota */
#define OBD_BRW_ROOT_PRJQUOTA 0x10000 /* check project quota for root */
#define OBD_BRW_RDMA_ONLY 0x20000 /* RPC contains RDMA-only pages*/
struct ost_id ioo_oid; /* object ID, if multi-obj BRW */
__u32 ioo_max_brw; /* low 16 bits were o_mode before 2.4,
* now (PTLRPC_BULK_OPS_COUNT - 1) in
- * high 16 bits in 2.4 and later */
+ * high 16 bits in 2.4 and later
+ */
__u32 ioo_bufcnt; /* number of niobufs for this object */
};
/* NOTE: IOOBJ_MAX_BRW_BITS defines the _offset_ of the max_brw field in
* ioo_max_brw, NOT the maximum number of bits in PTLRPC_BULK_OPS_BITS.
- * That said, ioo_max_brw is a 32-bit field so the limit is also 16 bits. */
+ * That said, ioo_max_brw is a 32-bit field so the limit is also 16 bits.
+ */
#define IOOBJ_MAX_BRW_BITS 16
#define ioobj_max_brw_get(ioo) (((ioo)->ioo_max_brw >> IOOBJ_MAX_BRW_BITS) + 1)
#define ioobj_max_brw_set(ioo, num) \
/* lock value block communicated between the filter and llite */
/* OST_LVB_ERR_INIT is needed because the return code in rc is
- * negative, i.e. because ((MASK + rc) & MASK) != MASK. */
+ * negative, i.e. because ((MASK + rc) & MASK) != MASK.
+ */
#define OST_LVB_ERR_INIT 0xffbadbad80000000ULL
#define OST_LVB_ERR_MASK 0xffbadbad00000000ULL
-#define OST_LVB_IS_ERR(blocks) \
- ((blocks & OST_LVB_ERR_MASK) == OST_LVB_ERR_MASK)
-#define OST_LVB_SET_ERR(blocks, rc) \
- do { blocks = OST_LVB_ERR_INIT + rc; } while (0)
+#define OST_LVB_IS_ERR(blocks) \
+ ((blocks & OST_LVB_ERR_MASK) == OST_LVB_ERR_MASK)
+#define OST_LVB_SET_ERR(blocks, rc) blocks = OST_LVB_ERR_INIT + rc
#define OST_LVB_GET_ERR(blocks) (int)(blocks - OST_LVB_ERR_INIT)
struct ost_lvb_v1 {
* can be used with quota, this includes:
* - 64-bit user ID
* - 64-bit group ID
- * - a FID which can be used for per-directory quota in the future */
+ * - a FID which can be used for per-directory quota in the future
+ */
union lquota_id {
struct lu_fid qid_fid; /* FID for per-directory quota */
__u64 qid_uid; /* user identifier */
} while (0)
/* Body of quota request used for quota acquire/release RPCs between quota
- * master (aka QMT) and slaves (ak QSD). */
+ * master (aka QMT) and slaves (aka QSD).
+ */
struct quota_body {
struct lu_fid qb_fid; /* FID of global index packing the pool ID
- * and type (data or metadata) as well as
- * the quota type (user or group). */
+ * and type (data or metadata) as well as
+ * the quota type (user or group).
+ */
union lquota_id qb_id; /* uid or gid or directory FID */
__u32 qb_flags; /* see below */
__u32 qb_padding;
};
/* When the quota_body is used in the reply of quota global intent
- * lock (IT_QUOTA_CONN) reply, qb_fid contains slave index file FID. */
+ * lock (IT_QUOTA_CONN) reply, qb_fid contains slave index file FID.
+ */
#define qb_slv_fid qb_fid
/* qb_usage is the current qunit (in kbytes/inodes) when quota_body is used in
- * quota reply */
+ * quota reply
+ */
#define qb_qunit qb_usage
#define QUOTA_DQACQ_FL_ACQ 0x1 /* acquire quota */
/* There are 2 different resource types on which a quota limit can be enforced:
* - inodes on the MDTs
- * - blocks on the OSTs */
+ * - blocks on the OSTs
+ */
enum {
LQUOTA_RES_MD = 0x01, /* skip 0 to avoid null oid in FID */
LQUOTA_RES_DT = 0x02,
__u64 qbr_hardlimit; /* quota hard limit, in #inodes or kbytes */
__u64 qbr_softlimit; /* quota soft limit, in #inodes or kbytes */
__u64 qbr_time; /* grace time, in seconds */
- __u64 qbr_granted; /* how much is granted to slaves, in #inodes or
- * kbytes */
+ __u64 qbr_granted; /* amount granted to slaves (inodes or KiB) */
};
/*
*/
struct lquota_slv_rec { /* 8 bytes */
__u64 qsr_granted; /* space granted to the slave for the key=ID,
- * in #inodes or kbytes */
+ * in #inodes or kbytes
+ */
};
/* Data structures associated with the quota locks */
__u64 gl_pad2;
};
#define gl_qunit gl_hardlimit /* current qunit value used when
- * glimpsing per-ID quota locks */
+ * glimpsing per-ID quota locks
+ */
/* quota glimpse flags */
#define LQUOTA_FL_EDQUOT 0x1 /* user/group out of quota space on QMT */
enum mds_ibits_locks {
MDS_INODELOCK_LOOKUP = 0x000001, /* For namespace, dentry etc. Was
* used to protect permission (mode,
- * owner, group, etc) before 2.4. */
+ * owner, group, etc) before 2.4.
+ */
MDS_INODELOCK_UPDATE = 0x000002, /* size, links, timestamps */
MDS_INODELOCK_OPEN = 0x000004, /* For opened files */
MDS_INODELOCK_LAYOUT = 0x000008, /* for layout */
/* NOTE: until Lustre 1.8.7/2.1.1 the fid_ver() was packed into name[2],
* but was moved into name[1] along with the OID to avoid consuming the
- * name[2,3] fields that need to be used for the quota id (also a FID). */
+ * name[2,3] fields that need to be used for the quota id (also a FID).
+ */
enum {
- LUSTRE_RES_ID_SEQ_OFF = 0,
- LUSTRE_RES_ID_VER_OID_OFF = 1,
- LUSTRE_RES_ID_WAS_VER_OFF = 2, /* see note above */
+ LUSTRE_RES_ID_SEQ_OFF = 0,
+ LUSTRE_RES_ID_VER_OID_OFF = 1,
+ LUSTRE_RES_ID_WAS_VER_OFF = 2, /* see note above */
LUSTRE_RES_ID_QUOTA_SEQ_OFF = 2,
LUSTRE_RES_ID_QUOTA_VER_OID_OFF = 3,
- LUSTRE_RES_ID_HSH_OFF = 3
+ LUSTRE_RES_ID_HSH_OFF = 3
};
#define MDS_STATUS_CONN 1
enum {
/* these should be identical to their EXT4_*_FL counterparts, they are
- * redefined here only to avoid dragging in fs/ext4/ext4.h */
+ * redefined here only to avoid dragging in fs/ext4/ext4.h
+ */
LUSTRE_SYNC_FL = 0x00000008, /* Synchronous updates */
LUSTRE_IMMUTABLE_FL = 0x00000010, /* Immutable file */
LUSTRE_APPEND_FL = 0x00000020, /* file writes may only append */
* 1. these conflict flags needs to be removed when the flag is
* wired by la_flags see osd_attr_get().
* 2. If these flags needs to be stored into inode, they will be
- * stored in LMA. see LMAI_XXXX */
+ * stored in LMA. see LMAI_XXXX
+ */
LUSTRE_ORPHAN_FL = 0x00002000,
LUSTRE_SET_SYNC_FL = 0x00040000, /* Synchronous setattr on OSTs */
LUSTRE_ENCRYPT_FL = 0x00800000, /* encrypted file */
__u64 mbo_version; /* was mbo_ioepoch before 2.11 */
__u64 mbo_t_state; /* transient file state defined in
* enum md_transient_state
- * was "ino" until 2.4.0 */
+ * was "ino" until 2.4.0
+ */
__u32 mbo_fsuid;
__u32 mbo_fsgid;
__u32 mbo_capability;
/* permissions for md_perm.mp_perm */
enum {
- CFS_SETUID_PERM = 0x01,
- CFS_SETGID_PERM = 0x02,
- CFS_SETGRP_PERM = 0x04,
+ CFS_SETUID_PERM = 0x01,
+ CFS_SETGID_PERM = 0x02,
+ CFS_SETGRP_PERM = 0x04,
};
struct mdt_rec_setattr {
- __u32 sa_opcode;
- __u32 sa_cap;
- __u32 sa_fsuid;
- __u32 sa_fsuid_h;
- __u32 sa_fsgid;
- __u32 sa_fsgid_h;
- __u32 sa_suppgid;
- __u32 sa_suppgid_h;
- __u32 sa_padding_1;
- __u32 sa_padding_1_h;
- struct lu_fid sa_fid;
- __u64 sa_valid;
- __u32 sa_uid;
- __u32 sa_gid;
- __u64 sa_size;
- __u64 sa_blocks;
+ __u32 sa_opcode;
+ __u32 sa_cap;
+ __u32 sa_fsuid;
+ __u32 sa_fsuid_h;
+ __u32 sa_fsgid;
+ __u32 sa_fsgid_h;
+ __u32 sa_suppgid;
+ __u32 sa_suppgid_h;
+ __u32 sa_padding_1;
+ __u32 sa_padding_1_h;
+ struct lu_fid sa_fid;
+ __u64 sa_valid;
+ __u32 sa_uid;
+ __u32 sa_gid;
+ __u64 sa_size;
+ __u64 sa_blocks;
__s64 sa_mtime;
__s64 sa_atime;
__s64 sa_ctime;
- __u32 sa_attr_flags;
- __u32 sa_mode;
+ __u32 sa_attr_flags;
+ __u32 sa_mode;
__u32 sa_bias; /* some operation flags */
__u32 sa_projid;
- __u32 sa_padding_4;
- __u32 sa_padding_5;
+ __u32 sa_padding_4;
+ __u32 sa_padding_5;
};
/*
__u32 cr_mode;
__u32 cr_bias;
/* use of helpers set/get_mrc_cr_flags() is needed to access
- * 64 bits cr_flags [cr_flags_l, cr_flags_h], this is done to
- * extend cr_flags size without breaking 1.8 compat */
+ * 64 bits cr_flags [cr_flags_l, cr_flags_h]
+ */
__u32 cr_flags_l; /* for use with open, low 32 bits */
__u32 cr_flags_h; /* for use with open, high 32 bits */
__u32 cr_umask; /* umask for create */
/* instance of mdt_reint_rec */
struct mdt_rec_link {
- __u32 lk_opcode;
- __u32 lk_cap;
- __u32 lk_fsuid;
- __u32 lk_fsuid_h;
- __u32 lk_fsgid;
- __u32 lk_fsgid_h;
- __u32 lk_suppgid1;
- __u32 lk_suppgid1_h;
- __u32 lk_suppgid2;
- __u32 lk_suppgid2_h;
- struct lu_fid lk_fid1;
- struct lu_fid lk_fid2;
+ __u32 lk_opcode;
+ __u32 lk_cap;
+ __u32 lk_fsuid;
+ __u32 lk_fsuid_h;
+ __u32 lk_fsgid;
+ __u32 lk_fsgid_h;
+ __u32 lk_suppgid1;
+ __u32 lk_suppgid1_h;
+ __u32 lk_suppgid2;
+ __u32 lk_suppgid2_h;
+ struct lu_fid lk_fid1;
+ struct lu_fid lk_fid2;
__s64 lk_time;
- __u64 lk_padding_1; /* rr_atime */
- __u64 lk_padding_2; /* rr_ctime */
- __u64 lk_padding_3; /* rr_size */
- __u64 lk_padding_4; /* rr_blocks */
- __u32 lk_bias;
- __u32 lk_padding_5; /* rr_mode */
- __u32 lk_padding_6; /* rr_flags */
- __u32 lk_padding_7; /* rr_padding_2 */
- __u32 lk_padding_8; /* rr_padding_3 */
- __u32 lk_padding_9; /* rr_padding_4 */
+ __u64 lk_padding_1; /* rr_atime */
+ __u64 lk_padding_2; /* rr_ctime */
+ __u64 lk_padding_3; /* rr_size */
+ __u64 lk_padding_4; /* rr_blocks */
+ __u32 lk_bias;
+ __u32 lk_padding_5; /* rr_mode */
+ __u32 lk_padding_6; /* rr_flags */
+ __u32 lk_padding_7; /* rr_padding_2 */
+ __u32 lk_padding_8; /* rr_padding_3 */
+ __u32 lk_padding_9; /* rr_padding_4 */
};
/* instance of mdt_reint_rec */
struct mdt_rec_unlink {
- __u32 ul_opcode;
- __u32 ul_cap;
- __u32 ul_fsuid;
- __u32 ul_fsuid_h;
- __u32 ul_fsgid;
- __u32 ul_fsgid_h;
- __u32 ul_suppgid1;
- __u32 ul_suppgid1_h;
- __u32 ul_suppgid2;
- __u32 ul_suppgid2_h;
- struct lu_fid ul_fid1;
- struct lu_fid ul_fid2;
+ __u32 ul_opcode;
+ __u32 ul_cap;
+ __u32 ul_fsuid;
+ __u32 ul_fsuid_h;
+ __u32 ul_fsgid;
+ __u32 ul_fsgid_h;
+ __u32 ul_suppgid1;
+ __u32 ul_suppgid1_h;
+ __u32 ul_suppgid2;
+ __u32 ul_suppgid2_h;
+ struct lu_fid ul_fid1;
+ struct lu_fid ul_fid2;
__s64 ul_time;
- __u64 ul_padding_2; /* rr_atime */
- __u64 ul_padding_3; /* rr_ctime */
- __u64 ul_padding_4; /* rr_size */
- __u64 ul_padding_5; /* rr_blocks */
- __u32 ul_bias;
- __u32 ul_mode;
- __u32 ul_padding_6; /* rr_flags */
- __u32 ul_padding_7; /* rr_padding_2 */
- __u32 ul_padding_8; /* rr_padding_3 */
- __u32 ul_padding_9; /* rr_padding_4 */
+ __u64 ul_padding_2; /* rr_atime */
+ __u64 ul_padding_3; /* rr_ctime */
+ __u64 ul_padding_4; /* rr_size */
+ __u64 ul_padding_5; /* rr_blocks */
+ __u32 ul_bias;
+ __u32 ul_mode;
+ __u32 ul_padding_6; /* rr_flags */
+ __u32 ul_padding_7; /* rr_padding_2 */
+ __u32 ul_padding_8; /* rr_padding_3 */
+ __u32 ul_padding_9; /* rr_padding_4 */
};
/* instance of mdt_reint_rec */
struct mdt_rec_rename {
- __u32 rn_opcode;
- __u32 rn_cap;
- __u32 rn_fsuid;
- __u32 rn_fsuid_h;
- __u32 rn_fsgid;
- __u32 rn_fsgid_h;
- __u32 rn_suppgid1;
- __u32 rn_suppgid1_h;
- __u32 rn_suppgid2;
- __u32 rn_suppgid2_h;
- struct lu_fid rn_fid1;
- struct lu_fid rn_fid2;
+ __u32 rn_opcode;
+ __u32 rn_cap;
+ __u32 rn_fsuid;
+ __u32 rn_fsuid_h;
+ __u32 rn_fsgid;
+ __u32 rn_fsgid_h;
+ __u32 rn_suppgid1;
+ __u32 rn_suppgid1_h;
+ __u32 rn_suppgid2;
+ __u32 rn_suppgid2_h;
+ struct lu_fid rn_fid1;
+ struct lu_fid rn_fid2;
__s64 rn_time;
- __u64 rn_padding_1; /* rr_atime */
- __u64 rn_padding_2; /* rr_ctime */
- __u64 rn_padding_3; /* rr_size */
- __u64 rn_padding_4; /* rr_blocks */
- __u32 rn_bias; /* some operation flags */
- __u32 rn_mode; /* cross-ref rename has mode */
- __u32 rn_padding_5; /* rr_flags */
- __u32 rn_padding_6; /* rr_padding_2 */
- __u32 rn_padding_7; /* rr_padding_3 */
- __u32 rn_padding_8; /* rr_padding_4 */
+ __u64 rn_padding_1; /* rr_atime */
+ __u64 rn_padding_2; /* rr_ctime */
+ __u64 rn_padding_3; /* rr_size */
+ __u64 rn_padding_4; /* rr_blocks */
+ __u32 rn_bias; /* some operation flags */
+ __u32 rn_mode; /* cross-ref rename has mode */
+ __u32 rn_padding_5; /* rr_flags */
+ __u32 rn_padding_6; /* rr_padding_2 */
+ __u32 rn_padding_7; /* rr_padding_3 */
+ __u32 rn_padding_8; /* rr_padding_4 */
};
/* instance of mdt_reint_rec */
struct mdt_rec_setxattr {
- __u32 sx_opcode;
- __u32 sx_cap;
- __u32 sx_fsuid;
- __u32 sx_fsuid_h;
- __u32 sx_fsgid;
- __u32 sx_fsgid_h;
- __u32 sx_suppgid1;
- __u32 sx_suppgid1_h;
- __u32 sx_suppgid2;
- __u32 sx_suppgid2_h;
- struct lu_fid sx_fid;
- __u64 sx_padding_1; /* These three are rr_fid2 */
- __u32 sx_padding_2;
- __u32 sx_padding_3;
- __u64 sx_valid;
+ __u32 sx_opcode;
+ __u32 sx_cap;
+ __u32 sx_fsuid;
+ __u32 sx_fsuid_h;
+ __u32 sx_fsgid;
+ __u32 sx_fsgid_h;
+ __u32 sx_suppgid1;
+ __u32 sx_suppgid1_h;
+ __u32 sx_suppgid2;
+ __u32 sx_suppgid2_h;
+ struct lu_fid sx_fid;
+ __u64 sx_padding_1; /* These three are rr_fid2 */
+ __u32 sx_padding_2;
+ __u32 sx_padding_3;
+ __u64 sx_valid;
__s64 sx_time;
- __u64 sx_padding_5; /* rr_ctime */
- __u64 sx_padding_6; /* rr_size */
- __u64 sx_padding_7; /* rr_blocks */
- __u32 sx_size;
- __u32 sx_flags;
- __u32 sx_padding_8; /* rr_flags */
- __u32 sx_padding_9; /* rr_padding_2 */
- __u32 sx_padding_10; /* rr_padding_3 */
- __u32 sx_padding_11; /* rr_padding_4 */
-};
-
-/* instance of mdt_reint_rec
- * FLR: for file resync MDS_REINT_RESYNC RPC. */
+ __u64 sx_padding_5; /* rr_ctime */
+ __u64 sx_padding_6; /* rr_size */
+ __u64 sx_padding_7; /* rr_blocks */
+ __u32 sx_size;
+ __u32 sx_flags;
+ __u32 sx_padding_8; /* rr_flags */
+ __u32 sx_padding_9; /* rr_padding_2 */
+ __u32 sx_padding_10; /* rr_padding_3 */
+ __u32 sx_padding_11; /* rr_padding_4 */
+};
+
+/* instance of mdt_reint_rec. FLR: for file resync MDS_REINT_RESYNC RPC. */
struct mdt_rec_resync {
__u32 rs_opcode;
__u32 rs_cap;
__u32 lmv_stripe_count;
__u32 lmv_master_mdt_index; /* On master object, it is master
* MDT index, on slave object, it
- * is stripe index of the slave obj */
+ * is stripe index of the slave obj
+ */
__u32 lmv_hash_type; /* dir stripe policy, i.e. indicate
* which hash function to be used,
* Note: only lower 16 bits is being
* used for now. Higher 16 bits will
* be used to mark the object status,
- * for example migrating or dead. */
+ * for example migrating or dead.
+ */
__u32 lmv_layout_version; /* increased each time layout changed,
* by directory migration, restripe
- * and LFSCK. */
+ * and LFSCK.
+ */
__u32 lmv_migrate_offset; /* once this is set, it means this
* directory is been migrated, stripes
* before this offset belong to target,
- * from this to source. */
+ * from this to source.
+ */
__u32 lmv_migrate_hash; /* hash type of source stripes of
- * migrating directory */
+ * migrating directory
+ */
__u32 lmv_padding2;
__u64 lmv_padding3;
char lmv_pool_name[LOV_MAXPOOLNAME + 1]; /* pool name */
};
enum seq_rpc_opc {
- SEQ_QUERY = 700,
- SEQ_LAST_OPC,
- SEQ_FIRST_OPC = SEQ_QUERY
+ SEQ_QUERY = 700,
+ SEQ_LAST_OPC,
+ SEQ_FIRST_OPC = SEQ_QUERY
};
enum seq_op {
- SEQ_ALLOC_SUPER = 0,
- SEQ_ALLOC_META = 1
+ SEQ_ALLOC_SUPER = 0,
+ SEQ_ALLOC_META = 1
};
enum fld_op {
#define LOV_MAX_UUID_BUFFER_SIZE 8192
/* The size of the buffer the lov/mdc reserves for the
* array of UUIDs returned by the MDS. With the current
- * protocol, this will limit the max number of OSTs per LOV */
+ * protocol, this will limit the max number of OSTs per LOV
+ */
#define LOV_DESC_MAGIC 0xB0CCDE5C
#define LOV_DESC_QOS_MAXAGE_DEFAULT 5 /* Seconds */
*/
/* opcodes -- MUST be distinct from OST/MDS opcodes */
enum ldlm_cmd {
- LDLM_ENQUEUE = 101,
- LDLM_CONVERT = 102,
- LDLM_CANCEL = 103,
- LDLM_BL_CALLBACK = 104,
- LDLM_CP_CALLBACK = 105,
- LDLM_GL_CALLBACK = 106,
- LDLM_SET_INFO = 107,
- LDLM_LAST_OPC
+ LDLM_ENQUEUE = 101,
+ LDLM_CONVERT = 102,
+ LDLM_CANCEL = 103,
+ LDLM_BL_CALLBACK = 104,
+ LDLM_CP_CALLBACK = 105,
+ LDLM_GL_CALLBACK = 106,
+ LDLM_SET_INFO = 107,
+ LDLM_LAST_OPC
};
#define LDLM_FIRST_OPC LDLM_ENQUEUE
#define RES_NAME_SIZE 4
struct ldlm_res_id {
- __u64 name[RES_NAME_SIZE];
+ __u64 name[RES_NAME_SIZE];
};
#define DLDLMRES "[%#llx:%#llx:%#llx].%#llx"
#define LDLM_MIN_TYPE LDLM_PLAIN
struct ldlm_extent {
- __u64 start;
- __u64 end;
- __u64 gid;
+ __u64 start;
+ __u64 end;
+ __u64 gid;
};
static inline bool ldlm_extent_equal(const struct ldlm_extent *ex1,
};
struct ldlm_flock_wire {
- __u64 lfw_start;
- __u64 lfw_end;
- __u64 lfw_owner;
- __u32 lfw_padding;
- __u32 lfw_pid;
+ __u64 lfw_start;
+ __u64 lfw_end;
+ __u64 lfw_owner;
+ __u32 lfw_padding;
+ __u32 lfw_pid;
};
/* it's important that the fields of the ldlm_extent structure match
* the first fields of the ldlm_flock structure because there is only
* one ldlm_swab routine to process the ldlm_policy_data_t union. if
* this ever changes we will need to swab the union differently based
- * on the resource type. */
+ * on the resource type.
+ */
union ldlm_wire_policy_data {
struct ldlm_extent l_extent;
};
struct ldlm_lock_desc {
- struct ldlm_resource_desc l_resource;
- enum ldlm_mode l_req_mode;
- enum ldlm_mode l_granted_mode;
- union ldlm_wire_policy_data l_policy_data;
+ struct ldlm_resource_desc l_resource;
+ enum ldlm_mode l_req_mode;
+ enum ldlm_mode l_granted_mode;
+ union ldlm_wire_policy_data l_policy_data;
};
#define LDLM_LOCKREQ_HANDLES 2
#define LDLM_ENQUEUE_CANCEL_OFF 1
struct ldlm_request {
- __u32 lock_flags; /* LDLM_FL_*, see lustre_dlm_flags.h */
- __u32 lock_count; /* number of locks in lock_handle[] */
- struct ldlm_lock_desc lock_desc;/* lock descriptor */
- struct lustre_handle lock_handle[LDLM_LOCKREQ_HANDLES];
+	__u32 lock_flags;		/* LDLM_FL_*, see lustre_dlm_flags.h */
+ __u32 lock_count; /* Num of locks in lock_handle[] */
+ struct ldlm_lock_desc lock_desc; /* lock descriptor */
+ struct lustre_handle lock_handle[LDLM_LOCKREQ_HANDLES];
};
struct ldlm_reply {
- __u32 lock_flags;
- __u32 lock_padding; /* also fix lustre_swab_ldlm_reply */
- struct ldlm_lock_desc lock_desc;
- struct lustre_handle lock_handle;
- __u64 lock_policy_res1;
- __u64 lock_policy_res2;
+ __u32 lock_flags;
+ __u32 lock_padding; /* fix lustre_swab_ldlm_reply */
+ struct ldlm_lock_desc lock_desc;
+ struct lustre_handle lock_handle;
+ __u64 lock_policy_res1;
+ __u64 lock_policy_res2;
};
#define ldlm_flags_to_wire(flags) ((__u32)(flags))
struct mgs_config_res {
__u64 mcr_offset; /* index of last config log */
union {
- __u64 mcr_size; /* size of the log */
- __u64 mcr_nm_cur_pass; /* current nodemap config pass */
+ __u64 mcr_size; /* size of the log */
+ __u64 mcr_nm_cur_pass; /* current nodemap config pass */
};
};
/** Identifier for a single log object */
struct llog_logid {
- struct ost_id lgl_oi;
- __u32 lgl_ogen;
+ struct ost_id lgl_oi;
+ __u32 lgl_ogen;
} __attribute__((packed));
/** Records written to the CATALOGS list */
#define CATLIST "CATALOGS"
struct llog_catid {
- struct llog_logid lci_logid;
- __u32 lci_padding1;
- __u32 lci_padding2;
- __u32 lci_padding3;
+ struct llog_logid lci_logid;
+ __u32 lci_padding1;
+ __u32 lci_padding2;
+ __u32 lci_padding3;
} __attribute__((packed));
/* Log data record types - there is no specific reason that these need to
#define CHANGELOG_CATALOG "changelog_catalog"
struct changelog_setinfo {
- __u64 cs_recno;
- __u32 cs_id;
+ __u64 cs_recno;
+ __u32 cs_id;
} __attribute__((packed));
/** changelog record */
struct llog_changelog_user_rec {
struct llog_rec_hdr cur_hdr;
__u32 cur_id;
- /* only intended to be used in relative time comparisons to
- * detect idle users */
- __u32 cur_time;
+ __u32 cur_time; /* relative time comparisons only */
__u64 cur_endrec;
struct llog_rec_tail cur_tail;
} __attribute__((packed));
struct llog_changelog_user_rec2 {
struct llog_rec_hdr cur_hdr;
__u32 cur_id;
- /* only for use in relative time comparisons to detect idle users */
- __u32 cur_time;
+ __u32 cur_time; /* relative time comparisons only */
__u64 cur_endrec;
__u32 cur_mask;
__u32 cur_padding1;
struct llog_rec_hdr arr_hdr; /**< record header */
__u32 arr_status; /**< status of the request */
/* must match enum
- * agent_req_status */
+ * agent_req_status
+ */
__u32 arr_archive_id; /**< backend archive number */
__u64 arr_flags; /**< req flags */
- __u64 arr_compound_id; /**< compound cookie, ignored */
+	__u64			arr_compound_id; /**< compound cookie, ignored */
__u64 arr_req_create; /**< req. creation time */
__u64 arr_req_change; /**< req. status change time */
struct hsm_action_item arr_hai; /**< req. to the agent */
/* Note: Flags covered by LLOG_F_EXT_MASK will be inherited from
* catlog to plain log, so do not add LLOG_F_IS_FIXSIZE here,
* because the catlog record is usually fixed size, but its plain
- * log record can be variable */
+ * log record can be variable
+ */
LLOG_F_EXT_MASK = LLOG_F_EXT_JOBID | LLOG_F_EXT_EXTRA_FLAGS |
LLOG_F_EXT_X_UIDGID | LLOG_F_EXT_X_NID |
LLOG_F_EXT_X_OMODE | LLOG_F_EXT_X_XATTR,
/* On-disk header structure of each log object, stored in little endian order */
#define LLOG_MIN_CHUNK_SIZE 8192
#define LLOG_HEADER_SIZE (96) /* sizeof (llog_log_hdr) + sizeof(llh_tail)
- * - sizeof(llh_bitmap) */
+ * - sizeof(llh_bitmap)
+ */
#define LLOG_BITMAP_BYTES (LLOG_MIN_CHUNK_SIZE - LLOG_HEADER_SIZE)
#define LLOG_MIN_REC_SIZE (24) /* round(llog_rec_hdr + llog_rec_tail) */
__u32 llh_flags;
/* for a catalog the first/oldest and still in-use plain slot is just
* next to it. It will serve as the upper limit after Catalog has
- * wrapped around */
+ * wrapped around
+ */
__u32 llh_cat_idx;
struct obd_uuid llh_tgtuuid;
__u32 llh_reserved[LLOG_HEADER_SIZE/sizeof(__u32)-23];
* llh_tail should only be refered by LLOG_HDR_TAIL().
* But this structure is also used by client/server llog interface
* (see llog_client.c), it will be kept in its original way to avoid
- * compatiblity issue. */
+	 * compatibility issue.
+ */
__u32 llh_bitmap[LLOG_BITMAP_BYTES / sizeof(__u32)];
struct llog_rec_tail llh_tail;
} __attribute__((packed));
llh->llh_hdr.lrh_len - \
sizeof(llh->llh_tail)))
-/** log cookies are used to reference a specific log file and a record therein,
- and pass record offset from llog_process_thread to llog_write */
+/* log cookies are used to reference a specific log file and a record therein,
+ * and pass record offset from llog_process_thread to llog_write
+ */
struct llog_cookie {
union {
struct llog_logid lgc_lgl;
__u64 lgc_offset;
};
- __u32 lgc_subsys;
- __u32 lgc_index;
- __u32 lgc_padding;
+ __u32 lgc_subsys;
+ __u32 lgc_index;
+ __u32 lgc_padding;
} __attribute__((packed));
/** llog protocol */
};
struct llogd_body {
- struct llog_logid lgd_logid;
- __u32 lgd_ctxt_idx;
- __u32 lgd_llh_flags;
- __u32 lgd_index;
- __u32 lgd_saved_index;
- __u32 lgd_len;
- __u64 lgd_cur_offset;
+ struct llog_logid lgd_logid;
+ __u32 lgd_ctxt_idx;
+ __u32 lgd_llh_flags;
+ __u32 lgd_index;
+ __u32 lgd_saved_index;
+ __u32 lgd_len;
+ __u64 lgd_cur_offset;
} __attribute__((packed));
struct llogd_conn_body {
- struct llog_gen lgdc_gen;
- struct llog_logid lgdc_logid;
- __u32 lgdc_ctxt_idx;
+ struct llog_gen lgdc_gen;
+ struct llog_logid lgdc_logid;
+ __u32 lgdc_ctxt_idx;
} __attribute__((packed));
/* Note: 64-bit types are 64-bit aligned in structure */
__u64 o_ioepoch; /* epoch in ost writes */
__u32 o_stripe_idx; /* holds stripe idx */
__u32 o_parent_ver;
- struct lustre_handle o_handle; /* brw: lock handle to prolong
- * locks */
+ struct lustre_handle o_handle; /* brw: prolong locks on IO */
/* Originally, the field is llog_cookie for destroy with unlink cookie
* from MDS, it is obsolete in 2.8. Then reuse it by client to transfer
* layout and PFL information in IO, setattr RPCs. Since llog_cookie is
* not used on wire any longer, remove it from the obdo, then it can be
* enlarged freely in the further without affect related RPCs.
*
- * sizeof(ost_layout) + sieof(__u32) == sizeof(llog_cookie). */
+	 * sizeof(ost_layout) + sizeof(__u32) == sizeof(llog_cookie).
+ */
struct ost_layout o_layout;
__u32 o_layout_version;
__u32 o_uid_h;
__u64 o_data_version; /* getattr: sum of iversion for
* each stripe.
* brw: grant space consumed on
- * the client for the write */
+ * the client for the write
+ */
__u32 o_projid;
- __u32 o_padding_4; /* also fix
- * lustre_swab_obdo() */
+ __u32 o_padding_4; /* fix lustre_swab_obdo() */
__u64 o_padding_5;
__u64 o_padding_6;
};
LE_PHASE1_DONE = 3,
LE_PHASE2_DONE = 4,
LE_START = 5,
- LE_STOP = 6,
+ LE_STOP = 6,
LE_QUERY = 7,
/* LE_FID_ACCESSED = 8, moved to lfsck_events_local */
LE_PEER_EXIT = 9,
LE_CONDITIONAL_DESTROY = 10,
- LE_PAIRS_VERIFY = 11,
+ LE_PAIRS_VERIFY = 11,
LE_SET_LMV_MASTER = 15,
LE_SET_LMV_SLAVE = 16,
};
#define IDX_INFO_MAGIC 0x3D37CC37
+/* List of flags used in idx_info::ii_flags */
+enum idx_info_flags {
+ II_FL_NOHASH = 1 << 0, /* client doesn't care about hash value */
+ II_FL_VARKEY = 1 << 1, /* keys can be of variable size */
+ II_FL_VARREC = 1 << 2, /* records can be of variable size */
+ II_FL_NONUNQ = 1 << 3, /* index supports non-unique keys */
+ II_FL_NOKEY = 1 << 4, /* client doesn't care about key */
+};
+
/* Index file transfer through the network. The server serializes the index into
- * a byte stream which is sent to the client via a bulk transfer */
+ * a byte stream which is sent to the client via a bulk transfer
+ */
struct idx_info {
__u32 ii_magic;
/* reply: see idx_info_flags below */
- __u32 ii_flags;
+	__u32	ii_flags;	/* II_FL_* flags */
/* request & reply: number of lu_idxpage (to be) transferred */
__u16 ii_count;
/* reply: version of the index file before starting to walk the index.
* Please note that the version can be modified at any time during the
- * transfer */
+ * transfer
+ */
__u64 ii_version;
/* request: hash to start with:
* reply: hash of the first entry of the first lu_idxpage and hash
- * of the entry to read next if any */
+ * of the entry to read next if any
+ */
__u64 ii_hash_start;
__u64 ii_hash_end;
- /* reply: size of keys in lu_idxpages, minimal one if II_FL_VARKEY is
- * set */
+	/* reply: key size in lu_idxpages, minimal one if II_FL_VARKEY set */
__u16 ii_keysize;
- /* reply: size of records in lu_idxpages, minimal one if II_FL_VARREC
- * is set */
+ /* reply: size of records in lu_idxpages, minimal one if II_FL_VARREC */
__u16 ii_recsize;
__u32 ii_pad1;
#define II_END_OFF MDS_DIR_END_OFF /* all entries have been read */
-/* List of flags used in idx_info::ii_flags */
-enum idx_info_flags {
- II_FL_NOHASH = 1 << 0, /* client doesn't care about hash value */
- II_FL_VARKEY = 1 << 1, /* keys can be of variable size */
- II_FL_VARREC = 1 << 2, /* records can be of variable size */
- II_FL_NONUNQ = 1 << 3, /* index supports non-unique keys */
- II_FL_NOKEY = 1 << 4, /* client doesn't care about key */
-};
-
#define LIP_MAGIC 0x8A6D6B6C
/* 4KB (= LU_PAGE_SIZE) container gathering key/record pairs */
* - the key size (II_FL_VARKEY is set)
* - the record size (II_FL_VARREC is set)
*
- * For the time being, we only support fixed-size key & record. */
+ * For the time being, we only support fixed-size key & record.
+ */
char lip_entries[0];
};
/* security opcodes */
enum sec_cmd {
- SEC_CTX_INIT = 801,
- SEC_CTX_INIT_CONT = 802,
- SEC_CTX_FINI = 803,
- SEC_LAST_OPC,
- SEC_FIRST_OPC = SEC_CTX_INIT
+ SEC_CTX_INIT = 801,
+ SEC_CTX_INIT_CONT = 802,
+ SEC_CTX_FINI = 803,
+ SEC_LAST_OPC,
+ SEC_FIRST_OPC = SEC_CTX_INIT,
};
/** The link ea holds 1 \a link_ea_entry for each hardlink */
* Stored in this crazy struct for maximum packing and endian-neutrality
*/
struct link_ea_entry {
- /** __u16 stored big-endian, unaligned */
- unsigned char lee_reclen[2];
- unsigned char lee_parent_fid[sizeof(struct lu_fid)];
- char lee_name[0];
+ unsigned char lee_reclen[2]; /* __u16 big-endian, unaligned */
+ unsigned char lee_parent_fid[sizeof(struct lu_fid)];
+ char lee_name[0];
} __attribute__((packed));
/** fid2path request/reply structure */
__u64 ur_batchid;
__u32 ur_flags;
/* If the operation includes multiple updates, then ur_index
- * means the index of the update inside the whole updates. */
+ * means the index of the update inside the whole updates.
+ */
__u32 ur_index;
__u32 ur_update_count;
__u32 ur_param_count;
struct llog_rec_hdr lur_hdr;
struct update_records lur_update_rec;
/* Note ur_update_rec has a variable size, so comment out
- * the following ur_tail, in case someone use it directly
- *
- * struct llog_rec_tail lur_tail;
- */
+	 * the following ur_tail, in case someone uses it directly
+ *
+ * struct llog_rec_tail lur_tail;
+ */
};
/* sepol string format is:
* kept so other modules (mgs, mdt, etc) can define the type
* of search easily
*/
-
enum nodemap_id_type {
NODEMAP_UID,
NODEMAP_GID,
/* This is the lu_ladvise struct which goes out on the wire.
* Corresponds to the userspace arg llapi_lu_ladvise.
- * value[1-4] are unspecified fields, used differently by different advices */
+ * value[1-4] are unspecified fields, used differently by different advices
+ */
struct lu_ladvise {
__u16 lla_advice; /* advice type */
__u16 lla_value1; /* values for different advice types */
/* This is the ladvise_hdr which goes on the wire, corresponds to the userspace
* arg llapi_ladvise_hdr.
- * value[1-3] are unspecified fields, used differently by different advices */
+ * value[1-3] are unspecified fields, used differently by different advices
+ */
struct ladvise_hdr {
__u32 lah_magic; /* LADVISE_MAGIC */
__u32 lah_count; /* number of advices */