#ifndef _LUSTRE_IDL_H_
#define _LUSTRE_IDL_H_
-#include <libcfs/kp30.h>
-
-#if defined(__linux__)
-#include <linux/lustre_types.h>
-#elif defined(__APPLE__)
-#include <darwin/lustre_types.h>
-#elif defined(__WINNT__)
-#include <winnt/lustre_types.h>
-#else
-#error Unsupported operating system.
-#endif
+#include <libcfs/libcfs.h>
+
+#include <lustre/types.h>
/* Defn's shared with user-space. */
#include <lustre/lustre_user.h>
struct lu_fid {
__u64 f_seq; /* holds fid sequence. Lustre should support 2 ^ 64
* objects, thus even if one sequence has one object we
- * reach this value. */
+ * will never reach this value. */
__u32 f_oid; /* fid number within its sequence. */
__u32 f_ver; /* holds fid version. */
};
* fid constants
*/
enum {
- LUSTRE_ROOT_FID_SEQ = 1ULL, /* XXX: should go into mkfs. */
-
/* initial fid id value */
LUSTRE_FID_INIT_OID = 1UL
};
return fid->f_ver;
}
-static inline int fid_seq_is_sane(__u64 seq)
-{
- return seq != 0;
-}
-
static inline void fid_zero(struct lu_fid *fid)
{
memset(fid, 0, sizeof(*fid));
}
+/* Normal FID sequence starts from this value, i.e. 1<<33 */
+#define FID_SEQ_START 0x200000000ULL
+
+/* IDIF sequence starts from this value, i.e. 1<<32 */
+#define IDIF_SEQ_START 0x100000000ULL
+
+/**
+ * Check if a fid is igif or not.
+ * \param fid the fid to be tested.
+ * \return true if the fid is an igif; otherwise false.
+ */
static inline int fid_is_igif(const struct lu_fid *fid)
{
- return fid_seq(fid) == LUSTRE_ROOT_FID_SEQ;
+ return fid_seq(fid) > 0 && fid_seq(fid) < IDIF_SEQ_START;
+}
+
+/**
+ * Check if a fid is idif or not.
+ * \param fid the fid to be tested.
+ * \return true if the fid is an idif; otherwise false.
+ */
+static inline int fid_is_idif(const struct lu_fid *fid)
+{
+ return fid_seq(fid) >= IDIF_SEQ_START && fid_seq(fid) < FID_SEQ_START;
+}
+
+/**
+ * Get inode number from a igif.
+ * \param fid an igif to get the inode number from.
+ * \return inode number for the igif.
+ */
+static inline ino_t lu_igif_ino(const struct lu_fid *fid)
+{
+ return fid_seq(fid);
+}
+
+/**
+ * Get inode generation from a igif.
+ * \param fid an igif to get the inode generation from.
+ * \return inode generation for the igif.
+ */
+static inline __u32 lu_igif_gen(const struct lu_fid *fid)
+{
+ return fid_oid(fid);
}
#define DFID "[0x%16.16"LPF64"x/0x%8.8x:0x%8.8x]"
{
return
fid != NULL &&
- ((fid_seq_is_sane(fid_seq(fid)) && fid_oid(fid) != 0
+ ((fid_seq(fid) >= FID_SEQ_START && fid_oid(fid) != 0
&& fid_ver(fid) == 0) ||
fid_is_igif(fid));
}
*/
struct lu_dirent {
struct lu_fid lde_fid;
- __u32 lde_hash;
+ __u64 lde_hash;
__u16 lde_reclen;
__u16 lde_namelen;
+ __u32 lde_pad0;
char lde_name[0];
};
struct lu_dirpage {
- __u32 ldp_hash_start;
- __u32 ldp_hash_end;
- __u16 ldp_flags;
+ __u64 ldp_hash_start;
+ __u64 ldp_hash_end;
+ __u32 ldp_flags;
__u32 ldp_pad0;
struct lu_dirent ldp_entries[0];
};
static inline struct lu_dirent *lu_dirent_start(struct lu_dirpage *dp)
{
- if (le16_to_cpu(dp->ldp_flags) & LDF_EMPTY)
+ if (le32_to_cpu(dp->ldp_flags) & LDF_EMPTY)
return NULL;
else
return dp->ldp_entries;
{
if (le16_to_cpu(ent->lde_reclen) == 0) {
return (sizeof(*ent) +
- le16_to_cpu(ent->lde_namelen) + 3) & ~3;
+ le16_to_cpu(ent->lde_namelen) + 7) & ~7;
}
return le16_to_cpu(ent->lde_reclen);
}
-#define DIR_END_OFF 0xfffffffeUL
+#define DIR_END_OFF 0xfffffffffffffffeULL
struct lustre_handle {
__u64 cookie;
tgt->cookie = src->cookie;
}
-/* we depend on this structure to be 8-byte aligned */
-/* this type is only endian-adjusted in lustre_unpack_msg() */
-struct lustre_msg_v1 {
- struct lustre_handle lm_handle;
- __u32 lm_magic;
- __u32 lm_type;
- __u32 lm_version;
- __u32 lm_opc;
- __u64 lm_last_xid;
- __u64 lm_last_committed;
- __u64 lm_transno;
- __u32 lm_status;
- __u32 lm_flags;
- __u32 lm_conn_cnt;
- __u32 lm_bufcount;
- __u32 lm_buflens[0];
-};
+/* flags for lm_flags */
+#define MSGHDR_AT_SUPPORT 0x1
#define lustre_msg lustre_msg_v2
/* we depend on this structure to be 8-byte aligned */
__u32 lm_secflvr;
__u32 lm_magic;
__u32 lm_repsize;
- __u32 lm_timeout;
- __u32 lm_padding_1;
+ __u32 lm_cksum;
+ __u32 lm_flags;
__u32 lm_padding_2;
__u32 lm_padding_3;
__u32 lm_buflens[0];
__u32 pb_flags;
__u32 pb_op_flags;
__u32 pb_conn_cnt;
- __u32 pb_padding_1;
- __u32 pb_padding_2;
+ __u32 pb_timeout; /* for req, the deadline, for rep, the service est */
+ __u32 pb_service_time; /* for rep, actual service time */
__u32 pb_limit;
__u64 pb_slv;
};
#define MSG_OP_FLAG_SHIFT 16
/* Flags that apply to all requests are in the bottom 16 bits */
-#define MSG_GEN_FLAG_MASK 0x0000ffff
-#define MSG_LAST_REPLAY 1
-#define MSG_RESENT 2
-#define MSG_REPLAY 4
-#define MSG_REQ_REPLAY_DONE 8
-#define MSG_LOCK_REPLAY_DONE 16
+#define MSG_GEN_FLAG_MASK 0x0000ffff
+#define MSG_LAST_REPLAY 0x0001
+#define MSG_RESENT 0x0002
+#define MSG_REPLAY 0x0004
+/* #define MSG_AT_SUPPORT 0x0008
+ * This was used in early prototypes of adaptive timeouts, and while there
+ * shouldn't be any users of that code, there also isn't a need to reuse this
+ * bit. Defer reuse until at least 1.10 to avoid potential conflicts. */
+#define MSG_REQ_REPLAY_DONE 0x0010
+#define MSG_LOCK_REPLAY_DONE 0x0020
/*
* Flags for all connect opcodes (MDS_CONNECT, OST_CONNECT)
OBD_CONNECT_MDS_CAPA | OBD_CONNECT_OSS_CAPA | \
OBD_CONNECT_MDS_MDS | OBD_CONNECT_CANCELSET | \
OBD_CONNECT_FID | \
- LRU_RESIZE_CONNECT_FLAG)
+ LRU_RESIZE_CONNECT_FLAG | OBD_CONNECT_AT)
#define OST_CONNECT_SUPPORTED (OBD_CONNECT_SRVLOCK | OBD_CONNECT_GRANT | \
OBD_CONNECT_REQPORTAL | OBD_CONNECT_VERSION | \
OBD_CONNECT_TRUNCLOCK | OBD_CONNECT_INDEX | \
OBD_CONNECT_BRW_SIZE | OBD_CONNECT_QUOTA64 | \
OBD_CONNECT_OSS_CAPA | OBD_CONNECT_CANCELSET | \
- OBD_CONNECT_FID | \
- LRU_RESIZE_CONNECT_FLAG)
+ OBD_CONNECT_CKSUM | LRU_RESIZE_CONNECT_FLAG | \
+ OBD_CONNECT_AT)
#define ECHO_CONNECT_SUPPORTED (0)
-#define MGS_CONNECT_SUPPORTED (OBD_CONNECT_VERSION | OBD_CONNECT_FID)
+#define MGS_CONNECT_SUPPORTED (OBD_CONNECT_VERSION | OBD_CONNECT_AT)
#define MAX_QUOTA_COUNT32 (0xffffffffULL)
__u32 ocd_nllg; /* non-local-lustre-group */
__u64 ocd_transno; /* first transno from client to be replayed */
__u32 ocd_group; /* MDS group on OST */
- __u32 padding1; /* also fix lustre_swab_connect */
+ __u32 ocd_cksum_types; /* supported checksum algorithms */
+ __u64 padding1; /* also fix lustre_swab_connect */
__u64 padding2; /* also fix lustre_swab_connect */
- __u64 padding3; /* also fix lustre_swab_connect */
};
extern void lustre_swab_connect(struct obd_connect_data *ocd);
/*
+ * Supported checksum algorithms. Up to 32 checksum types are supported.
+ * (32-bit mask stored in obd_connect_data::ocd_cksum_types)
+ * Please update DECLARE_CKSUM_NAME/OBD_CKSUM_ALL in obd.h when adding a new
+ * algorithm and also the OBD_FL_CKSUM* flags.
+ */
+typedef enum {
+ OBD_CKSUM_CRC32 = 0x00000001,
+ OBD_CKSUM_ADLER = 0x00000002,
+} cksum_type_t;
+
+/*
* OST requests: OBDO & OBD request records
*/
#define OBD_FL_TRUNCLOCK (0x00000800)
/*
+ * Checksum types
+ */
+#define OBD_FL_CKSUM_CRC32 (0x00001000)
+#define OBD_FL_CKSUM_ADLER (0x00002000)
+#define OBD_FL_CKSUM_ALL (OBD_FL_CKSUM_CRC32 | OBD_FL_CKSUM_ADLER)
+
+/*
* This should not be smaller than sizeof(struct lustre_handle) + sizeof(struct
* llog_cookie) + sizeof(struct ll_fid). Nevertheless struct ll_fid is not
* longer stored in o_inline, we keep this just for case.
};
extern void lustre_swab_obd_statfs (struct obd_statfs *os);
+#define OBD_STATFS_NODELAY 0x0001 /* requests should be sent without delay
+ * and resent to avoid deadlocks */
/* ost_body.data values for OST_BRW */
__u32 eadatasize;
__u32 aclsize;
__u32 max_mdsize;
- __u32 max_cookiesize; /* also fix lustre_swab_mds_body */
+ __u32 max_cookiesize;
__u32 padding_4; /* also fix lustre_swab_mds_body */
};
MDS_CHECK_SPLIT = 1 << 0,
MDS_CROSS_REF = 1 << 1,
MDS_VTX_BYPASS = 1 << 2,
- MDS_PERM_BYPASS = 1 << 3
+ MDS_PERM_BYPASS = 1 << 3,
+ MDS_SOM = 1 << 4
};
struct mds_rec_join {
__u32 sx_padding_2;
__u32 sx_padding_3;
__u64 sx_valid;
- __u64 sx_padding_4;
+ __u64 sx_time;
__u64 sx_padding_5;
__u64 sx_padding_6;
__u64 sx_padding_7;
LCK_MAXMODE
} ldlm_mode_t;
+#define LCK_MODE_NUM 7
+
typedef enum {
LDLM_PLAIN = 10,
LDLM_EXTENT = 11,
__u64 gid;
};
+/**
+ * Check whether two lock extents overlap.
+ * Both ends are inclusive: extents that merely touch at a single offset
+ * (ex1->end == ex2->start) are reported as overlapping.
+ * \param ex1 first extent.
+ * \param ex2 second extent.
+ * \return non-zero if the extents share at least one offset; 0 otherwise.
+ */
+static inline int ldlm_extent_overlap(struct ldlm_extent *ex1,
+ struct ldlm_extent *ex2)
+{
+ return (ex1->start <= ex2->end) && (ex2->start <= ex1->end);
+}
+
struct ldlm_inodebits {
__u64 bits;
};