#ifndef _LUSTRE_IDL_H_
#define _LUSTRE_IDL_H_
-#if !defined(LPU64)
#include <libcfs/libcfs.h> /* for LPUX64, etc */
-#endif
-
-/* Defn's shared with user-space. */
-#include <lustre/lustre_user.h>
-
+#include <lnet/types.h>
+#include <lustre/lustre_user.h> /* Defn's shared with user-space. */
#include <lustre/lustre_errno.h>
#include <lustre_ver.h>
#define LUSTRE_LOG_VERSION 0x00050000
#define LUSTRE_MGS_VERSION 0x00060000
-typedef __u32 mdsno_t;
-typedef __u64 seqno_t;
typedef __u64 obd_id;
typedef __u64 obd_seq;
typedef __s64 obd_time;
* been in production for years. This can handle create rates
* of 1M objects/s/OST for 9 years, or combinations thereof. */
if (oid >= IDIF_MAX_OID) {
- CERROR("bad MDT0 id, "DOSTID" ost_idx:%u\n",
- POSTID(ostid), ost_idx);
- return -EBADF;
+ CERROR("bad MDT0 id(1), "DOSTID" ost_idx:%u\n",
+ POSTID(ostid), ost_idx);
+ return -EBADF;
}
fid->f_seq = fid_idif_seq(oid, ost_idx);
/* truncate to 32 bits by assignment */
* OST objects into the FID namespace. In both cases, we just
* pass the FID through, no conversion needed. */
if (ostid->oi_fid.f_ver != 0) {
- CERROR("bad MDT0 id, "DOSTID" ost_idx:%u\n",
+ CERROR("bad MDT0 id(2), "DOSTID" ost_idx:%u\n",
POSTID(ostid), ost_idx);
return -EBADF;
}
static inline void ostid_cpu_to_le(const struct ost_id *src_oi,
struct ost_id *dst_oi)
{
- if (fid_seq_is_mdt0(ostid_seq(src_oi))) {
+ if (fid_seq_is_mdt0(src_oi->oi.oi_seq)) {
dst_oi->oi.oi_id = cpu_to_le64(src_oi->oi.oi_id);
dst_oi->oi.oi_seq = cpu_to_le64(src_oi->oi.oi_seq);
} else {
static inline void ostid_le_to_cpu(const struct ost_id *src_oi,
struct ost_id *dst_oi)
{
- if (fid_seq_is_mdt0(ostid_seq(src_oi))) {
+ if (fid_seq_is_mdt0(src_oi->oi.oi_seq)) {
dst_oi->oi.oi_id = le64_to_cpu(src_oi->oi.oi_id);
dst_oi->oi.oi_seq = le64_to_cpu(src_oi->oi.oi_seq);
} else {
return next;
}
-static inline int lu_dirent_calc_size(int namelen, __u16 attr)
+static inline size_t lu_dirent_calc_size(size_t namelen, __u16 attr)
{
- int size;
+ size_t size;
- if (attr & LUDA_TYPE) {
- const unsigned align = sizeof(struct luda_type) - 1;
+ if (attr & LUDA_TYPE) {
+ const size_t align = sizeof(struct luda_type) - 1;
size = (sizeof(struct lu_dirent) + namelen + align) & ~align;
size += sizeof(struct luda_type);
} else
return (size + 7) & ~7;
}
-static inline int lu_dirent_size(const struct lu_dirent *ent)
-{
- if (le16_to_cpu(ent->lde_reclen) == 0) {
- return lu_dirent_calc_size(le16_to_cpu(ent->lde_namelen),
- le32_to_cpu(ent->lde_attrs));
- }
- return le16_to_cpu(ent->lde_reclen);
-}
-
#define MDS_DIR_END_OFF 0xfffffffffffffffeULL
/**
/* without gss, ptlrpc_body is put at the first buffer. */
#define PTLRPC_NUM_VERSIONS 4
-#define JOBSTATS_JOBID_SIZE 32 /* 32 bytes string */
struct ptlrpc_body_v3 {
struct lustre_handle pb_handle;
__u32 pb_type;
__u64 pb_pre_versions[PTLRPC_NUM_VERSIONS];
/* padding for future needs */
__u64 pb_padding[4];
- char pb_jobid[JOBSTATS_JOBID_SIZE];
+ char pb_jobid[LUSTRE_JOBID_SIZE];
};
#define ptlrpc_body ptlrpc_body_v3
#define LOV_MAGIC_JOIN_V1 (0x0BD20000 | LOV_MAGIC_MAGIC)
#define LOV_MAGIC_V3 (0x0BD30000 | LOV_MAGIC_MAGIC)
#define LOV_MAGIC_MIGRATE (0x0BD40000 | LOV_MAGIC_MAGIC)
+/* reserved for specifying OSTs */
+#define LOV_MAGIC_SPECIFIC (0x0BD50000 | LOV_MAGIC_MAGIC)
#define LOV_MAGIC LOV_MAGIC_V1
/*
#define XATTR_USER_PREFIX "user."
#define XATTR_TRUSTED_PREFIX "trusted."
#define XATTR_SECURITY_PREFIX "security."
-#define XATTR_LUSTRE_PREFIX "lustre."
#define XATTR_NAME_LOV "trusted.lov"
#define XATTR_NAME_LMA "trusted.lma"
#define XATTR_NAME_VERSION "trusted.version"
#define XATTR_NAME_SOM "trusted.som"
#define XATTR_NAME_HSM "trusted.hsm"
-#define XATTR_NAME_LFSCK_NAMESPACE "trusted.lfsck_namespace"
+#define XATTR_NAME_LFSCK_BITMAP "trusted.lfsck_bitmap"
+
+#if LUSTRE_VERSION_CODE < OBD_OCD_VERSION(2, 8, 53, 0)
+# define XATTR_NAME_LFSCK_NAMESPACE_OLD "trusted.lfsck_namespace"
+#endif
+
+#define XATTR_NAME_LFSCK_NAMESPACE "trusted.lfsck_ns"
#define XATTR_NAME_MAX_LEN 32 /* increase this, if there is longer name. */
struct lov_mds_md_v3 { /* LOV EA mds/wire data (little-endian) */
/* lmm_stripe_count used to be __u32 */
__u16 lmm_stripe_count; /* num stripes in use for this object */
__u16 lmm_layout_gen; /* layout generation number */
- char lmm_pool_name[LOV_MAXPOOLNAME]; /* must be 32bit aligned */
+ char lmm_pool_name[LOV_MAXPOOLNAME + 1]; /* must be 32bit aligned */
struct lov_ost_data_v1 lmm_objects[0]; /* per-stripe data */
};
__u32 lmv_padding1;
__u64 lmv_padding2;
__u64 lmv_padding3;
- char lmv_pool_name[LOV_MAXPOOLNAME]; /* pool name */
+ char lmv_pool_name[LOV_MAXPOOLNAME + 1]; /* pool name */
struct lu_fid lmv_stripe_fids[0]; /* FIDs for each stripe */
};
#define LMV_HASH_FLAG_MIGRATION 0x80000000
#define LMV_HASH_FLAG_DEAD 0x40000000
+#define LMV_HASH_FLAG_BAD_TYPE 0x20000000
+
+/* The striped directory has ever lost its master LMV EA, then LFSCK
+ * re-generated it. This flag is used to indicate such case. It is an
+ * on-disk flag. */
+#define LMV_HASH_FLAG_LOST_LMV 0x10000000
/**
* The FNV-1a hash algorithm is as follows:
__u64 gid;
};
+#define LDLM_GID_ANY ((__u64) -1)
+
static inline int ldlm_extent_overlap(const struct ldlm_extent *ex1,
const struct ldlm_extent *ex2)
{
/** changelog record */
struct llog_changelog_rec {
-	struct llog_rec_hdr  cr_hdr;
-	struct changelog_rec cr;
-	struct llog_rec_tail cr_tail; /**< for_sizezof_only */
-} __attribute__((packed));
-
-struct llog_changelog_ext_rec {
-	struct llog_rec_hdr	cr_hdr;
-	struct changelog_ext_rec cr;
-	struct llog_rec_tail	cr_tail; /**< for_sizezof_only */
+	struct llog_rec_hdr	cr_hdr;
+	struct changelog_rec	cr;		/**< Variable length field */
+	/* NOTE(review): cr is variable length, so cr_do_not_use does not
+	 * sit at a fixed offset; it only reserves tail space for sizeof()
+	 * and must never be accessed directly. */
+	struct llog_rec_tail	cr_do_not_use;	/**< for_sizeof_only */
} __attribute__((packed));
#define CHANGELOG_USER_PREFIX "cl"
LLOG_F_ZAP_WHEN_EMPTY = 0x1,
LLOG_F_IS_CAT = 0x2,
LLOG_F_IS_PLAIN = 0x4,
+ LLOG_F_EXT_JOBID = 0x8,
+
+ LLOG_F_EXT_MASK = LLOG_F_EXT_JOBID,
};
struct llog_log_hdr {
union {
__u32 lr_speed;
__u32 lr_status;
+ __u32 lr_type;
};
__u16 lr_version;
__u16 lr_active;
__u16 lr_param;
__u16 lr_async_windows;
- __u32 lr_padding_1;
+ union {
+ __u32 lr_flags2;
+ __u32 lr_layout_version;
+ };
struct lu_fid lr_fid;
struct lu_fid lr_fid2;
- struct lu_fid lr_fid3;
- __u64 lr_padding_2;
+ union {
+ struct lu_fid lr_fid3;
+ char lr_pool_name[LOV_MAXPOOLNAME + 1];
+ };
+ __u32 lr_stripe_count;
+ __u32 lr_hash_type;
__u64 lr_padding_3;
};
LE_PEER_EXIT = 9,
LE_CONDITIONAL_DESTROY = 10,
LE_PAIRS_VERIFY = 11,
+ LE_CREATE_ORPHAN = 12,
+ LE_SKIP_NLINK_DECLARE = 13,
+ LE_SKIP_NLINK = 14,
+ LE_SET_LMV_MASTER = 15,
+ LE_SET_LMV_SLAVE = 16,
};
enum lfsck_event_flags {
LEF_TO_OST = 0x00000001,
LEF_FROM_OST = 0x00000002,
+ LEF_SET_LMV_HASH = 0x00000004,
+ LEF_SET_LMV_ALL = 0x00000008,
+ LEF_RECHECK_NAME_HASH = 0x00000010,
};
static inline void lustre_set_wire_obdo(const struct obd_connect_data *ocd,
extern void lustre_swab_lov_user_md_objects(struct lov_user_ost_data *lod,
int stripe_count);
extern void lustre_swab_lov_mds_md(struct lov_mds_md *lmm);
+void lustre_print_user_md(unsigned int level, struct lov_user_md *lum,
+ const char *msg);
/* llog_swab.c */
extern void lustre_swab_llogd_body (struct llogd_body *d);
void lustre_swab_fid2path (struct getinfo_fid2path *gf);
+/** path2parent request/reply structures
+ *
+ * Variable-length wire structure: gp_name_size bytes of gp_name follow
+ * the fixed header.  NOTE(review): gp_name[0] is the zero-length-array
+ * idiom for trailing variable-length data (GCC extension).
+ */
+struct getparent {
+	struct lu_fid	gp_fid;		/**< parent FID */
+	__u32		gp_linkno;	/**< hardlink number */
+	__u32		gp_name_size;	/**< size of the name field */
+	char		gp_name[0];	/**< zero-terminated link name */
+} __attribute__((packed));
+
enum {
LAYOUT_INTENT_ACCESS = 0,
LAYOUT_INTENT_READ = 1,
char oup_buf[0];
};
+/**
+ * Total on-wire size of one update parameter: the fixed header plus
+ * oup_len bytes of payload, rounded up with cfs_size_round() so the
+ * following parameter remains properly aligned.
+ */
+static inline size_t
+object_update_param_size(const struct object_update_param *param)
+{
+	return cfs_size_round(sizeof(*param) + param->oup_len);
+}
+
/* object update */
struct object_update {
__u16 ou_type; /* enum update_type */
void lustre_swab_object_update(struct object_update *ou);
void lustre_swab_object_update_request(struct object_update_request *our);
+/**
+ * Byte size of a single object_update, including all of its
+ * variable-length parameters.
+ *
+ * Starts at the offset of ou_params[0] and walks ou_params_count
+ * parameters, advancing by each parameter's rounded size.  Assumes the
+ * buffer really holds ou_params_count well-formed parameters — no
+ * bounds checking is performed here.
+ */
+static inline size_t
+object_update_size(const struct object_update *update)
+{
+	const struct object_update_param *param;
+	size_t size;
+	unsigned int i;
+
+	size = offsetof(struct object_update, ou_params[0]);
+	for (i = 0; i < update->ou_params_count; i++) {
+		/* each parameter immediately follows the previous one */
+		param = (struct object_update_param *)((char *)update + size);
+		size += object_update_param_size(param);
+	}
+
+	return size;
+}
+
+/**
+ * Return a pointer to the index-th update in an update request, or
+ * NULL if \a index is out of range.  If \a size is non-NULL it is set
+ * to the byte size of the returned update.
+ *
+ * NOTE(review): arithmetic on a void * ("ptr += ...") is a GCC
+ * extension; acceptable in this codebase but not portable C.
+ */
+static inline struct object_update *
+object_update_request_get(const struct object_update_request *our,
+			  unsigned int index, size_t *size)
+{
+	void *ptr;
+	unsigned int i;
+
+	if (index >= our->ourq_count)
+		return NULL;
+
+	/* updates are packed back to back; walk forward to entry @index */
+	ptr = (void *)&our->ourq_updates[0];
+	for (i = 0; i < index; i++)
+		ptr += object_update_size(ptr);
+
+	if (size != NULL)
+		*size = object_update_size(ptr);
+
+	return ptr;
+}
+
+
/* the result of object update */
struct object_update_result {
__u32 our_rc;
void lustre_swab_object_update_result(struct object_update_result *our);
void lustre_swab_object_update_reply(struct object_update_reply *our);
+/**
+ * Return a pointer to the index-th result in an update reply, or NULL
+ * if \a index is out of range or any earlier entry has zero length
+ * (a zero ourp_lens[i] makes the offsets of later entries
+ * unrecoverable).  If \a size is non-NULL it receives the unrounded
+ * length of the selected entry.
+ *
+ * Results start after the (rounded) ourp_lens[count] header and each
+ * entry is stored at cfs_size_round() alignment.
+ */
+static inline struct object_update_result *
+object_update_result_get(const struct object_update_reply *reply,
+			 unsigned int index, size_t *size)
+{
+	__u16 count = reply->ourp_count;
+	unsigned int i;
+	void *ptr;
+
+	if (index >= count)
+		return NULL;
+
+	/* skip the variable-length length array at the reply head */
+	ptr = (char *)reply +
+	      cfs_size_round(offsetof(struct object_update_reply,
+				      ourp_lens[count]));
+	for (i = 0; i < index; i++) {
+		if (reply->ourp_lens[i] == 0)
+			return NULL;
+
+		ptr += cfs_size_round(reply->ourp_lens[i]);
+	}
+
+	if (size != NULL)
+		*size = reply->ourp_lens[index];
+
+	return ptr;
+}
+
/** layout swap request structure
* fid1 and fid2 are in mdt_body
*/