#define LU_SEQ_RANGE_MASK 0x3
-static inline unsigned fld_range_type(const struct lu_seq_range *range)
-{
- return range->lsr_flags & LU_SEQ_RANGE_MASK;
-}
-
-static inline bool fld_range_is_ost(const struct lu_seq_range *range)
-{
- return fld_range_type(range) == LU_SEQ_RANGE_OST;
-}
-
-static inline bool fld_range_is_mdt(const struct lu_seq_range *range)
-{
- return fld_range_type(range) == LU_SEQ_RANGE_MDT;
-}
-
-/**
- * This all range is only being used when fld client sends fld query request,
- * but it does not know whether the seq is MDT or OST, so it will send req
- * with ALL type, which means either seq type gotten from lookup can be
- * expected.
- */
-static inline unsigned fld_range_is_any(const struct lu_seq_range *range)
-{
- return fld_range_type(range) == LU_SEQ_RANGE_ANY;
-}
-
-static inline void fld_range_set_type(struct lu_seq_range *range,
- unsigned flags)
-{
- range->lsr_flags |= flags;
-}
-
-static inline void fld_range_set_mdt(struct lu_seq_range *range)
-{
- fld_range_set_type(range, LU_SEQ_RANGE_MDT);
-}
-
-static inline void fld_range_set_ost(struct lu_seq_range *range)
-{
- fld_range_set_type(range, LU_SEQ_RANGE_OST);
-}
-
-static inline void fld_range_set_any(struct lu_seq_range *range)
-{
- fld_range_set_type(range, LU_SEQ_RANGE_ANY);
-}
-
-/**
- * returns width of given range \a r
- */
-
-static inline __u64 range_space(const struct lu_seq_range *range)
-{
- return range->lsr_end - range->lsr_start;
-}
-
-/**
- * initialize range to zero
- */
-
-static inline void range_init(struct lu_seq_range *range)
-{
- memset(range, 0, sizeof(*range));
-}
-
-/**
- * check if given seq id \a s is within given range \a r
- */
-
-static inline bool range_within(const struct lu_seq_range *range,
- __u64 s)
-{
- return s >= range->lsr_start && s < range->lsr_end;
-}
-
-static inline bool range_is_sane(const struct lu_seq_range *range)
-{
- return range->lsr_end >= range->lsr_start;
-}
-
-static inline bool range_is_zero(const struct lu_seq_range *range)
-{
- return range->lsr_start == 0 && range->lsr_end == 0;
-}
-
-static inline bool range_is_exhausted(const struct lu_seq_range *range)
-{
- return range_space(range) == 0;
-}
-
-/* return 0 if two range have the same location */
-static inline int range_compare_loc(const struct lu_seq_range *r1,
- const struct lu_seq_range *r2)
-{
- return r1->lsr_index != r2->lsr_index ||
- r1->lsr_flags != r2->lsr_flags;
-}
-
-#define DRANGE "[%#16.16"LPF64"x-%#16.16"LPF64"x):%x:%s"
-
-#define PRANGE(range) \
- (range)->lsr_start, \
- (range)->lsr_end, \
- (range)->lsr_index, \
- fld_range_is_mdt(range) ? "mdt" : "ost"
-
-
/** \defgroup lu_fid lu_fid
* @{ */
FID_SEQ_OST_MDT0 = 0,
FID_SEQ_LLOG = 1, /* unnamed llogs */
FID_SEQ_ECHO = 2,
- FID_SEQ_OST_MDT1 = 3,
- FID_SEQ_OST_MAX = 9, /* Max MDT count before OST_on_FID */
+ FID_SEQ_UNUSED_START = 3,
+ FID_SEQ_UNUSED_END = 9,
FID_SEQ_LLOG_NAME = 10, /* named llogs */
FID_SEQ_RSVD = 11,
FID_SEQ_IGIF = 12,
FID_SEQ_QUOTA_GLB = 0x200000006ULL,
FID_SEQ_ROOT = 0x200000007ULL, /* Located on MDT0 */
FID_SEQ_LAYOUT_RBTREE = 0x200000008ULL,
+ /* sequence is used for update logs of cross-MDT operation */
+ FID_SEQ_UPDATE_LOG = 0x200000009ULL,
+ /* Sequence is used for the directory under which update logs
+ * are created. */
+ FID_SEQ_UPDATE_LOG_DIR = 0x20000000aULL,
FID_SEQ_NORMAL = 0x200000400ULL,
FID_SEQ_LOV_DEFAULT = 0xffffffffffffffffULL
};
fid->f_ver = 0;
}
+static inline void lu_update_log_fid(struct lu_fid *fid, __u32 index)
+{
+ fid->f_seq = FID_SEQ_UPDATE_LOG;
+ fid->f_oid = index;
+ fid->f_ver = 0;
+}
+
+static inline void lu_update_log_dir_fid(struct lu_fid *fid, __u32 index)
+{
+ fid->f_seq = FID_SEQ_UPDATE_LOG_DIR;
+ fid->f_oid = index;
+ fid->f_ver = 0;
+}
+
/**
* Check if a fid is igif or not.
* \param fid the fid to be tested.
return fid_seq(fid) == FID_SEQ_LAYOUT_RBTREE;
}
+static inline bool fid_seq_is_update_log(__u64 seq)
+{
+ return seq == FID_SEQ_UPDATE_LOG;
+}
+
+static inline bool fid_is_update_log(const struct lu_fid *fid)
+{
+ return fid_seq_is_update_log(fid_seq(fid));
+}
+
+static inline bool fid_seq_is_update_log_dir(__u64 seq)
+{
+ return seq == FID_SEQ_UPDATE_LOG_DIR;
+}
+
+static inline bool fid_is_update_log_dir(const struct lu_fid *fid)
+{
+ return fid_seq_is_update_log_dir(fid_seq(fid));
+}
+
/* convert an OST objid into an IDIF FID SEQ number */
static inline __u64 fid_idif_seq(__u64 id, __u32 ost_idx)
{
{
if (fid_seq_is_mdt0(oi->oi.oi_seq)) {
if (oid >= IDIF_MAX_OID) {
- CERROR("Bad "LPU64" to set "DOSTID"\n",
- oid, POSTID(oi));
+ CERROR("Bad %llu to set "DOSTID"\n",
+ (unsigned long long)oid, POSTID(oi));
return;
}
oi->oi.oi_id = oid;
} else if (fid_is_idif(&oi->oi_fid)) {
if (oid >= IDIF_MAX_OID) {
- CERROR("Bad "LPU64" to set "DOSTID"\n",
- oid, POSTID(oi));
+ CERROR("Bad %llu to set "DOSTID"\n",
+ (unsigned long long)oid, POSTID(oi));
return;
}
oi->oi_fid.f_seq = fid_idif_seq(oid,
oi->oi_fid.f_ver = oid >> 48;
} else {
if (oid > OBIF_MAX_OID) {
- CERROR("Bad "LPU64" to set "DOSTID"\n",
- oid, POSTID(oi));
+ CERROR("Bad %llu to set "DOSTID"\n",
+ (unsigned long long)oid, POSTID(oi));
return;
}
oi->oi_fid.f_oid = oid;
if (fid_is_idif(fid)) {
if (oid >= IDIF_MAX_OID) {
- CERROR("Bad "LPU64" to set "DFID"\n",
- oid, PFID(fid));
+ CERROR("Bad %llu to set "DFID"\n",
+ (unsigned long long)oid, PFID(fid));
return -EBADF;
}
fid->f_seq = fid_idif_seq(oid, fid_idif_ost_idx(fid));
fid->f_ver = oid >> 48;
} else {
if (oid > OBIF_MAX_OID) {
- CERROR("Bad "LPU64" to set "DFID"\n",
- oid, PFID(fid));
+ CERROR("Bad %llu to set "DFID"\n",
+ (unsigned long long)oid, PFID(fid));
return -EBADF;
}
fid->f_oid = oid;
/* Check whether the fid is for LAST_ID */
static inline bool fid_is_last_id(const struct lu_fid *fid)
{
- return fid_oid(fid) == 0;
+ return fid_oid(fid) == 0 && fid_seq(fid) != FID_SEQ_UPDATE_LOG &&
+ fid_seq(fid) != FID_SEQ_UPDATE_LOG_DIR;
}
/**
}
extern void lustre_swab_lu_fid(struct lu_fid *fid);
-extern void lustre_swab_lu_seq_range(struct lu_seq_range *range);
static inline bool lu_fid_eq(const struct lu_fid *f0, const struct lu_fid *f1)
{
LUDA_UPGRADE = 0x1000,
/* Ignore this record, go to next directly. */
LUDA_IGNORE = 0x0800,
+	/* Something in the record is unknown, to be verified further.
+	 * NOTE(review): the original patch used 0x1000, which collides with
+	 * LUDA_UPGRADE above; moved to the free 0x0400 bit and widened
+	 * LU_DIRENT_ATTRS_MASK accordingly — confirm this bit is unused. */
+	LUDA_UNKNOWN = 0x0400,
};
-#define LU_DIRENT_ATTRS_MASK 0xf800
+#define LU_DIRENT_ATTRS_MASK 0xfc00
__u64 pb_slv;
/* VBR: pre-versions */
__u64 pb_pre_versions[PTLRPC_NUM_VERSIONS];
+ __u64 pb_mbits; /**< match bits for bulk request */
/* padding for future needs */
- __u64 pb_padding[4];
+ __u64 pb_padding64_0;
+ __u64 pb_padding64_1;
+ __u64 pb_padding64_2;
char pb_jobid[LUSTRE_JOBID_SIZE];
};
#define ptlrpc_body ptlrpc_body_v3
__u64 pb_slv;
/* VBR: pre-versions */
__u64 pb_pre_versions[PTLRPC_NUM_VERSIONS];
+ __u64 pb_mbits; /**< unused in V2 */
/* padding for future needs */
- __u64 pb_padding[4];
+ __u64 pb_padding64_0;
+ __u64 pb_padding64_1;
+ __u64 pb_padding64_2;
};
extern void lustre_swab_ptlrpc_body(struct ptlrpc_body *pb);
#define OBD_CONNECT_MULTIMODRPCS 0x200000000000000ULL /* support multiple modify
RPCs in parallel */
#define OBD_CONNECT_DIR_STRIPE 0x400000000000000ULL /* striped DNE dir */
-
+/** bulk matchbits is sent within ptlrpc_body */
+#define OBD_CONNECT_BULK_MBITS 0x2000000000000000ULL
/* XXX README XXX:
* Please DO NOT add flag values here before first ensuring that this same
* flag value is not in use on some other branch. Please clear any such
OBD_CONNECT_CANCELSET | OBD_CONNECT_AT | \
OBD_CONNECT_RMT_CLIENT | \
OBD_CONNECT_RMT_CLIENT_FORCE | \
- OBD_CONNECT_BRW_SIZE | OBD_CONNECT_MDS_CAPA | \
- OBD_CONNECT_OSS_CAPA | OBD_CONNECT_MDS_MDS | \
+ OBD_CONNECT_BRW_SIZE | OBD_CONNECT_MDS_MDS | \
OBD_CONNECT_FID | LRU_RESIZE_CONNECT_FLAG | \
OBD_CONNECT_VBR | OBD_CONNECT_LOV_V3 | \
OBD_CONNECT_FULL20 | \
OBD_CONNECT_FLOCK_DEAD | \
OBD_CONNECT_DISP_STRIPE | OBD_CONNECT_LFSCK | \
OBD_CONNECT_OPEN_BY_FID | \
- OBD_CONNECT_DIR_STRIPE)
+ OBD_CONNECT_DIR_STRIPE | \
+ OBD_CONNECT_BULK_MBITS | \
+ OBD_CONNECT_MULTIMODRPCS)
#define OST_CONNECT_SUPPORTED (OBD_CONNECT_SRVLOCK | OBD_CONNECT_GRANT | \
OBD_CONNECT_REQPORTAL | OBD_CONNECT_VERSION | \
OBD_CONNECT_TRUNCLOCK | OBD_CONNECT_INDEX | \
- OBD_CONNECT_BRW_SIZE | OBD_CONNECT_OSS_CAPA | \
+ OBD_CONNECT_BRW_SIZE | \
OBD_CONNECT_CANCELSET | OBD_CONNECT_AT | \
LRU_RESIZE_CONNECT_FLAG | OBD_CONNECT_CKSUM | \
OBD_CONNECT_RMT_CLIENT | \
OBD_CONNECT_JOBSTATS | \
OBD_CONNECT_LIGHTWEIGHT | OBD_CONNECT_LVB_TYPE|\
OBD_CONNECT_LAYOUTLOCK | OBD_CONNECT_FID | \
- OBD_CONNECT_PINGLESS | OBD_CONNECT_LFSCK)
+ OBD_CONNECT_PINGLESS | OBD_CONNECT_LFSCK | \
+ OBD_CONNECT_BULK_MBITS)
#define ECHO_CONNECT_SUPPORTED (0)
#define MGS_CONNECT_SUPPORTED (OBD_CONNECT_VERSION | OBD_CONNECT_AT | \
OBD_CONNECT_FULL20 | OBD_CONNECT_IMP_RECOV | \
- OBD_CONNECT_MNE_SWAB | OBD_CONNECT_PINGLESS)
+ OBD_CONNECT_MNE_SWAB | OBD_CONNECT_PINGLESS |\
+ OBD_CONNECT_BULK_MBITS)
/* Features required for this version of the client to work with server */
#define CLIENT_CONNECT_MDT_REQD (OBD_CONNECT_IBITS | OBD_CONNECT_FID | \
MDS_OPEN_BY_FID | MDS_OPEN_LEASE | \
MDS_OPEN_RELEASE)
-/* permission for create non-directory file */
-#define MAY_CREATE (1 << 7)
-/* permission for create directory file */
-#define MAY_LINK (1 << 8)
-/* permission for delete from the directory */
-#define MAY_UNLINK (1 << 9)
-/* source's permission for rename */
-#define MAY_RENAME_SRC (1 << 10)
-/* target's permission for rename */
-#define MAY_RENAME_TAR (1 << 11)
-/* part (parent's) VTX permission check */
-#define MAY_VTX_PART (1 << 12)
-/* full VTX permission check */
-#define MAY_VTX_FULL (1 << 13)
-/* lfs rgetfacl permission check */
-#define MAY_RGETFACL (1 << 14)
-
enum mds_op_bias {
MDS_CHECK_SPLIT = 1 << 0,
MDS_CROSS_REF = 1 << 1,
__u64 name[RES_NAME_SIZE];
};
-#define DLDLMRES "["LPX64":"LPX64":"LPX64"]."LPX64i
-#define PLDLMRES(res) (res)->lr_name.name[0], (res)->lr_name.name[1], \
- (res)->lr_name.name[2], (res)->lr_name.name[3]
+#define DLDLMRES "[%#llx:%#llx:%#llx].%#llx"
+#define PLDLMRES(res) (unsigned long long)(res)->lr_name.name[0], \
+ (unsigned long long)(res)->lr_name.name[1], \
+ (unsigned long long)(res)->lr_name.name[2], \
+ (unsigned long long)(res)->lr_name.name[3]
extern void lustre_swab_ldlm_res_id (struct ldlm_res_id *id);
}
/* lock types */
-typedef enum {
- LCK_MINMODE = 0,
- LCK_EX = 1,
- LCK_PW = 2,
- LCK_PR = 4,
- LCK_CW = 8,
- LCK_CR = 16,
- LCK_NL = 32,
- LCK_GROUP = 64,
- LCK_COS = 128,
- LCK_MAXMODE
+typedef enum ldlm_mode {
+ LCK_MINMODE = 0,
+ LCK_EX = 1,
+ LCK_PW = 2,
+ LCK_PR = 4,
+ LCK_CW = 8,
+ LCK_CR = 16,
+ LCK_NL = 32,
+ LCK_GROUP = 64,
+ LCK_COS = 128,
+ LCK_MAXMODE
} ldlm_mode_t;
#define LCK_MODE_NUM 8
-typedef enum {
- LDLM_PLAIN = 10,
- LDLM_EXTENT = 11,
- LDLM_FLOCK = 12,
- LDLM_IBITS = 13,
- LDLM_MAX_TYPE
+typedef enum ldlm_type {
+ LDLM_PLAIN = 10,
+ LDLM_EXTENT = 11,
+ LDLM_FLOCK = 12,
+ LDLM_IBITS = 13,
+ LDLM_MAX_TYPE
} ldlm_type_t;
#define LDLM_MIN_TYPE LDLM_PLAIN
* this ever changes we will need to swab the union differently based
* on the resource type. */
-typedef union {
- struct ldlm_extent l_extent;
- struct ldlm_flock_wire l_flock;
- struct ldlm_inodebits l_inodebits;
+typedef union ldlm_wire_policy_data {
+ struct ldlm_extent l_extent;
+ struct ldlm_flock_wire l_flock;
+ struct ldlm_inodebits l_inodebits;
} ldlm_wire_policy_data_t;
-extern void lustre_swab_ldlm_policy_data (ldlm_wire_policy_data_t *d);
+extern void lustre_swab_ldlm_policy_data(union ldlm_wire_policy_data *d);
union ldlm_gl_desc {
struct ldlm_gl_lquota_desc lquota_desc;
extern void lustre_swab_ldlm_intent (struct ldlm_intent *i);
struct ldlm_resource_desc {
- ldlm_type_t lr_type;
- __u32 lr_padding; /* also fix lustre_swab_ldlm_resource_desc */
- struct ldlm_res_id lr_name;
+ enum ldlm_type lr_type;
+ __u32 lr_pad; /* also fix lustre_swab_ldlm_resource_desc */
+ struct ldlm_res_id lr_name;
};
extern void lustre_swab_ldlm_resource_desc (struct ldlm_resource_desc *r);
struct ldlm_lock_desc {
- struct ldlm_resource_desc l_resource;
- ldlm_mode_t l_req_mode;
- ldlm_mode_t l_granted_mode;
- ldlm_wire_policy_data_t l_policy_data;
+ struct ldlm_resource_desc l_resource;
+ enum ldlm_mode l_req_mode;
+ enum ldlm_mode l_granted_mode;
+ union ldlm_wire_policy_data l_policy_data;
};
-extern void lustre_swab_ldlm_lock_desc (struct ldlm_lock_desc *l);
+extern void lustre_swab_ldlm_lock_desc(struct ldlm_lock_desc *l);
#define LDLM_LOCKREQ_HANDLES 2
#define LDLM_ENQUEUE_CANCEL_OFF 1
/* for multiple changelog consumers */
LLOG_CHANGELOG_USER_ORIG_CTXT = 14,
LLOG_AGENT_ORIG_CTXT = 15, /**< agent requests generation on cdt */
+ LLOG_UPDATELOG_ORIG_CTXT = 16, /* update log */
+ LLOG_UPDATELOG_REPL_CTXT = 17, /* update log */
LLOG_MAX_CTXTS
};
struct llog_rec_tail lgr_tail;
};
-/* On-disk header structure of each log object, stored in little endian order */
-#define LLOG_CHUNK_SIZE 8192
-#define LLOG_HEADER_SIZE (96)
-#define LLOG_BITMAP_BYTES (LLOG_CHUNK_SIZE - LLOG_HEADER_SIZE)
-
-#define LLOG_MIN_REC_SIZE (24) /* round(llog_rec_hdr + llog_rec_tail) */
-
/* flags for the logs */
enum llog_flag {
LLOG_F_ZAP_WHEN_EMPTY = 0x1,
LLOG_F_IS_CAT = 0x2,
LLOG_F_IS_PLAIN = 0x4,
LLOG_F_EXT_JOBID = 0x8,
+ LLOG_F_IS_FIXSIZE = 0x10,
+ /* Note: Flags covered by LLOG_F_EXT_MASK will be inherited from
+ * catlog to plain log, so do not add LLOG_F_IS_FIXSIZE here,
+ * because the catlog record is usually fixed size, but its plain
+ * log record can be variable */
LLOG_F_EXT_MASK = LLOG_F_EXT_JOBID,
};
+/* On-disk header structure of each log object, stored in little endian order */
+#define LLOG_MIN_CHUNK_SIZE 8192
+#define LLOG_HEADER_SIZE (96) /* sizeof (llog_log_hdr) + sizeof(llh_tail)
+ * - sizeof(llh_bitmap) */
+#define LLOG_BITMAP_BYTES (LLOG_MIN_CHUNK_SIZE - LLOG_HEADER_SIZE)
+#define LLOG_MIN_REC_SIZE (24) /* round(llog_rec_hdr + llog_rec_tail) */
+
struct llog_log_hdr {
struct llog_rec_hdr llh_hdr;
__s64 llh_timestamp;
- __u32 llh_count;
- __u32 llh_bitmap_offset;
- __u32 llh_size;
- __u32 llh_flags;
- __u32 llh_cat_idx;
- /* for a catalog the first plain slot is next to it */
- struct obd_uuid llh_tgtuuid;
- __u32 llh_reserved[LLOG_HEADER_SIZE/sizeof(__u32) - 23];
- __u32 llh_bitmap[LLOG_BITMAP_BYTES/sizeof(__u32)];
- struct llog_rec_tail llh_tail;
+ __u32 llh_count;
+ __u32 llh_bitmap_offset;
+ __u32 llh_size;
+ __u32 llh_flags;
+ __u32 llh_cat_idx;
+ /* for a catalog the first plain slot is next to it */
+ struct obd_uuid llh_tgtuuid;
+ __u32 llh_reserved[LLOG_HEADER_SIZE/sizeof(__u32)-23];
+	/* These fields must always be at the end of the llog_log_hdr.
+	 * Note: llh_bitmap size is variable because the llog chunk size may
+	 * be bigger than LLOG_MIN_CHUNK_SIZE, i.e. sizeof(llog_log_hdr) >
+	 * 8192 bytes, and the real size is stored in llh_hdr.lrh_len, which
+	 * means llh_tail should only be referred to via LLOG_HDR_TAIL().
+	 * But this structure is also used by the client/server llog
+	 * interface (see llog_client.c); it is kept in its original form to
+	 * avoid compatibility issues. */
+ __u32 llh_bitmap[LLOG_BITMAP_BYTES / sizeof(__u32)];
+ struct llog_rec_tail llh_tail;
} __attribute__((packed));
-
-#define LLOG_BITMAP_SIZE(llh) (__u32)((llh->llh_hdr.lrh_len - \
- llh->llh_bitmap_offset - \
- sizeof(llh->llh_tail)) * 8)
+#undef LLOG_HEADER_SIZE
+#undef LLOG_BITMAP_BYTES
+
+#define LLOG_HDR_BITMAP_SIZE(llh) (__u32)((llh->llh_hdr.lrh_len - \
+ llh->llh_bitmap_offset - \
+ sizeof(llh->llh_tail)) * 8)
+#define LLOG_HDR_BITMAP(llh) (__u32 *)((char *)(llh) + \
+ (llh)->llh_bitmap_offset)
+#define LLOG_HDR_TAIL(llh) ((struct llog_rec_tail *)((char *)llh + \
+ llh->llh_hdr.lrh_len - \
+ sizeof(llh->llh_tail)))
/** log cookies are used to reference a specific log file and a record therein */
struct llog_cookie {
#define CAPA_OPC_MDS_DEFAULT ~CAPA_OPC_OSS_ONLY
#define CAPA_OPC_OSS_DEFAULT ~(CAPA_OPC_MDS_ONLY | CAPA_OPC_OSS_ONLY)
-/* MDS capability covers object capability for operations of body r/w
- * (dir readpage/sendpage), index lookup/insert/delete and meta data r/w,
- * while OSS capability only covers object capability for operations of
- * oss data(file content) r/w/truncate.
- */
-static inline int capa_for_mds(struct lustre_capa *c)
-{
- return (c->lc_opc & CAPA_OPC_INDEX_LOOKUP) != 0;
-}
-
-static inline int capa_for_oss(struct lustre_capa *c)
-{
- return (c->lc_opc & CAPA_OPC_INDEX_LOOKUP) == 0;
-}
-
static inline bool lovea_slot_is_dummy(const struct lov_ost_data_v1 *obj)
{
/* zero area does not care about the bytes-order. */
OUT_INDEX_DELETE = 11,
OUT_WRITE = 12,
OUT_XATTR_DEL = 13,
+ OUT_PUNCH = 14,
+ OUT_READ = 15,
+ OUT_NOOP = 16,
OUT_LAST
};
struct object_update ourq_updates[0];
};
+#define OUT_UPDATE_HEADER_MAGIC 0xBDDF0001
+#define OUT_UPDATE_MAX_INLINE_SIZE 4096
+/* Header for updates request between MDTs */
+struct out_update_header {
+ __u32 ouh_magic;
+ __u32 ouh_count;
+ __u32 ouh_inline_length;
+ __u32 ouh_padding;
+ __u32 ouh_inline_data[0];
+};
+
+struct out_update_buffer {
+ __u32 oub_size;
+ __u32 oub_padding;
+};
+
void lustre_swab_object_update(struct object_update *ou);
void lustre_swab_object_update_request(struct object_update_request *our);
+void lustre_swab_out_update_header(struct out_update_header *ouh);
+void lustre_swab_out_update_buffer(struct out_update_buffer *oub);
static inline size_t
object_update_params_size(const struct object_update *update)
return ptr;
}
+/* read update result */
+struct out_read_reply {
+ __u32 orr_size;
+ __u32 orr_padding;
+ __u64 orr_offset;
+ char orr_data[0];
+};
+
+/* Convert an out_read_reply from CPU to little-endian byte order. */
+static inline void orr_cpu_to_le(struct out_read_reply *orr_dst,
+				 const struct out_read_reply *orr_src)
+{
+	orr_dst->orr_size = cpu_to_le32(orr_src->orr_size);
+	orr_dst->orr_padding = cpu_to_le32(orr_src->orr_padding);
+	/* Fix: convert orr_src->orr_offset, not orr_dst->orr_offset; the
+	 * original read the (possibly uninitialized) destination field. */
+	orr_dst->orr_offset = cpu_to_le64(orr_src->orr_offset);
+}
+
+/* Convert an out_read_reply from little-endian to CPU byte order. */
+static inline void orr_le_to_cpu(struct out_read_reply *orr_dst,
+				 const struct out_read_reply *orr_src)
+{
+	orr_dst->orr_size = le32_to_cpu(orr_src->orr_size);
+	orr_dst->orr_padding = le32_to_cpu(orr_src->orr_padding);
+	/* Fix: convert orr_src->orr_offset, not orr_dst->orr_offset; the
+	 * original read the (possibly uninitialized) destination field. */
+	orr_dst->orr_offset = le64_to_cpu(orr_src->orr_offset);
+}
+
/** layout swap request structure
* fid1 and fid2 are in mdt_body
*/
void lustre_swab_close_data(struct close_data *data);
+struct update_ops;
+void lustre_swab_update_ops(struct update_ops *uops, unsigned int op_count);
+
+/* Update llog format */
+struct update_op {
+ struct lu_fid uop_fid;
+ __u16 uop_type;
+ __u16 uop_param_count;
+ __u16 uop_params_off[0];
+};
+
+struct update_ops {
+ struct update_op uops_op[0];
+};
+
+struct update_params {
+ struct object_update_param up_params[0];
+};
+
+enum update_records_flag {
+	/* Fix: was "1 >> 0" — a typo for "1 << 0". Both happen to equal 1
+	 * for bit 0, but the inverted shift would silently break any flag
+	 * added later following the same pattern. */
+	UPDATE_RECORD_CONTINUE = 1 << 0,
+};
+/*
+ * This is the update record format used to store the updates in
+ * disk. All updates of the operation will be stored in ur_ops.
+ * All of parameters for updates of the operation will be stored
+ * in ur_params.
+ * To save the space of the record, parameters in ur_ops will only
+ * remember their offset in ur_params, so to avoid storing duplicate
+ * parameters in ur_params, which can help us save a lot space for
+ * operation like creating striped directory.
+ */
+struct update_records {
+ __u64 ur_master_transno;
+ __u64 ur_batchid;
+ __u32 ur_flags;
+ /* If the operation includes multiple updates, then ur_index
+ * means the index of the update inside the whole updates. */
+ __u32 ur_index;
+ __u32 ur_update_count;
+ __u32 ur_param_count;
+ struct update_ops ur_ops;
+ /* Note ur_ops has a variable size, so comment out
+ * the following ur_params, in case some use it directly
+ * update_records->ur_params
+ *
+ * struct update_params ur_params;
+ */
+};
+
+struct llog_update_record {
+ struct llog_rec_hdr lur_hdr;
+ struct update_records lur_update_rec;
+ /* Note ur_update_rec has a variable size, so comment out
+ * the following ur_tail, in case someone use it directly
+ *
+ * struct llog_rec_tail lur_tail;
+ */
+};
+
+
#endif
/** @} lustreidl */