/* MDS number — presumably an index identifying a metadata server; TODO confirm. */
typedef __u64 mdsno_t;
/* FID sequence number (matches lu_fid::f_seq / lu_range values). */
typedef __u64 seqno_t;
+
/*
 * A half-open interval [lr_start, lr_end) of sequence numbers
 * (see range_within(): s >= lr_start && s < lr_end).
 */
struct lu_range {
        __u64 lr_start; /* first value in the range (inclusive) */
        __u64 lr_end;   /* one past the last value (exclusive) */
};
+
+static inline __u64 range_space(struct lu_range *r)
+{
+ return r->lr_end - r->lr_start;
+}
+
+static inline void range_zero(struct lu_range *r)
+{
+ r->lr_start = r->lr_end = 0;
+}
+
+static inline int range_within(struct lu_range *r,
+ __u64 s)
+{
+ return s >= r->lr_start && s < r->lr_end;
+}
+
+static inline void range_alloc(struct lu_range *r,
+ struct lu_range *s,
+ __u64 w)
+{
+ r->lr_start = s->lr_start;
+ r->lr_end = s->lr_start + w;
+ s->lr_start += w;
+}
+
+static inline int range_is_sane(struct lu_range *r)
+{
+ return (r->lr_end >= r->lr_start);
+}
+
+static inline int range_is_zero(struct lu_range *r)
+{
+ return (r->lr_start == 0 && r->lr_end == 0);
+}
+
+static inline int range_is_exhausted(struct lu_range *r)
+{
+ return range_space(r) == 0;
+}
+
/* printf format for a struct lu_range: "[<start>-<end>]", 16 hex digits each. */
#define DRANGE "[%#16.16"LPF64"x-%#16.16"LPF64"x]"

/* Argument list matching DRANGE. */
#define PRANGE(range) \
        (range)->lr_start, \
        (range)->lr_end
+
struct lu_fid {
        __u64 f_seq; /* fid sequence number. Lustre should support 2^64
                      * objects, so this value can grow even if each
                      * sequence held only one object. */
        __u32 f_oid; /* object id within its sequence */
        __u32 f_ver; /* object version; must be 0 for all non-igif fids
                      * (asserted by lu_fid_eq() and the endian
                      * converters below) */
};
+
/*
 * fid constants
 */
enum {
        /* Sequence used for the root fid; also the marker sequence
         * recognised by fid_is_igif(). */
        LUSTRE_ROOT_FID_SEQ = 1ULL, /* XXX: should go into mkfs. */

        /* initial fid id value */
        LUSTRE_FID_INIT_OID = 1UL
};
+
+/* get object sequence */
+static inline __u64 fid_seq(const struct lu_fid *fid)
+{
+ return fid->f_seq;
+}
+
+/* get object id */
+static inline __u32 fid_oid(const struct lu_fid *fid)
+{
+ return fid->f_oid;
+}
+
+/* get object version */
+static inline __u32 fid_ver(const struct lu_fid *fid)
+{
+ return fid->f_ver;
+}
+
+static inline int fid_seq_is_sane(__u64 seq)
+{
+ return seq != 0;
+}
+
+static inline void fid_zero(struct lu_fid *fid)
+{
+ memset(fid, 0, sizeof(*fid));
+}
+
+static inline int fid_is_igif(const struct lu_fid *fid)
+{
+ return fid_seq(fid) == LUSTRE_ROOT_FID_SEQ;
+}
+
/* printf format for a struct lu_fid: "[<seq>/<oid>:<ver>]" in hex. */
#define DFID "[0x%16.16"LPF64"x/0x%8.8x:0x%8.8x]"

/* Argument list matching DFID. */
#define PFID(fid) \
        fid_seq(fid), \
        fid_oid(fid), \
        fid_ver(fid)
+
+static inline void fid_cpu_to_le(struct lu_fid *dst, const struct lu_fid *src)
+{
+ /* check that all fields are converted */
+ CLASSERT(sizeof *src ==
+ sizeof fid_seq(src) +
+ sizeof fid_oid(src) + sizeof fid_ver(src));
+ LASSERTF(fid_is_igif(src) || fid_ver(src) == 0, DFID"\n", PFID(src));
+ dst->f_seq = cpu_to_le64(fid_seq(src));
+ dst->f_oid = cpu_to_le32(fid_oid(src));
+ dst->f_ver = cpu_to_le32(fid_ver(src));
+}
+
+static inline void fid_le_to_cpu(struct lu_fid *dst, const struct lu_fid *src)
+{
+ /* check that all fields are converted */
+ CLASSERT(sizeof *src ==
+ sizeof fid_seq(src) +
+ sizeof fid_oid(src) + sizeof fid_ver(src));
+ dst->f_seq = le64_to_cpu(fid_seq(src));
+ dst->f_oid = le32_to_cpu(fid_oid(src));
+ dst->f_ver = le32_to_cpu(fid_ver(src));
+ LASSERTF(fid_is_igif(dst) || fid_ver(dst) == 0, DFID"\n", PFID(dst));
+}
+
+static inline void fid_cpu_to_be(struct lu_fid *dst, const struct lu_fid *src)
+{
+ /* check that all fields are converted */
+ CLASSERT(sizeof *src ==
+ sizeof fid_seq(src) +
+ sizeof fid_oid(src) + sizeof fid_ver(src));
+ LASSERTF(fid_is_igif(src) || fid_ver(src) == 0, DFID"\n", PFID(src));
+ dst->f_seq = cpu_to_be64(fid_seq(src));
+ dst->f_oid = cpu_to_be32(fid_oid(src));
+ dst->f_ver = cpu_to_be32(fid_ver(src));
+}
+
+static inline void fid_be_to_cpu(struct lu_fid *dst, const struct lu_fid *src)
+{
+ /* check that all fields are converted */
+ CLASSERT(sizeof *src ==
+ sizeof fid_seq(src) +
+ sizeof fid_oid(src) + sizeof fid_ver(src));
+ dst->f_seq = be64_to_cpu(fid_seq(src));
+ dst->f_oid = be32_to_cpu(fid_oid(src));
+ dst->f_ver = be32_to_cpu(fid_ver(src));
+ LASSERTF(fid_is_igif(dst) || fid_ver(dst) == 0, DFID"\n", PFID(dst));
+}
+
#ifdef __KERNEL__
/*
 * Storage representation for fids.
 *
 * Variable size, first byte contains the length of the whole record.
 */

struct lu_fid_pack {
        char fp_len;                         /* length of the whole record,
                                              * in bytes */
        char fp_area[sizeof(struct lu_fid)]; /* packed payload; at most one
                                              * full struct lu_fid */
};

/* Pack @fid into @pack. NOTE(review): @befider is presumably scratch
 * space for a byte-swapped copy — confirm against the implementation,
 * which is not visible here. */
void fid_pack(struct lu_fid_pack *pack, const struct lu_fid *fid,
              struct lu_fid *befider);
/* Unpack @pack into @fid; returns 0 or a negative error code —
 * TODO confirm error convention against the implementation. */
int fid_unpack(const struct lu_fid_pack *pack, struct lu_fid *fid);

/* __KERNEL__ */
#endif
+
+static inline int fid_is_sane(const struct lu_fid *fid)
+{
+ return
+ fid != NULL &&
+ ((fid_seq_is_sane(fid_seq(fid)) && fid_oid(fid) != 0
+ && fid_ver(fid) == 0) ||
+ fid_is_igif(fid));
+}
+
+static inline int fid_is_zero(const struct lu_fid *fid)
+{
+ return fid_seq(fid) == 0 && fid_oid(fid) == 0;
+}
+
/* In-place byte-swab helpers, implemented elsewhere (wire protocol code). */
extern void lustre_swab_lu_fid(struct lu_fid *fid);
extern void lustre_swab_lu_range(struct lu_range *range);
+
/*
 * Bitwise fid equality.
 *
 * Valid because struct lu_fid has no padding (verified by the CLASSERT
 * below), so memcmp() over the raw bytes compares exactly the three
 * fields and nothing else.
 */
static inline int lu_fid_eq(const struct lu_fid *f0,
                            const struct lu_fid *f1)
{
        /* Check that there is no alignment padding. */
        CLASSERT(sizeof *f0 ==
                 sizeof f0->f_seq + sizeof f0->f_oid + sizeof f0->f_ver);
        /* Non-igif fids must have version 0, so f_ver cannot cause a
         * spurious mismatch between otherwise-equal valid fids. */
        LASSERTF(fid_is_igif(f0) || fid_ver(f0) == 0, DFID, PFID(f0));
        LASSERTF(fid_is_igif(f1) || fid_ver(f1) == 0, DFID, PFID(f1));
        return memcmp(f0, f1, sizeof *f0) == 0;
}
+
/*
 * Layout of readdir pages, as transmitted on wire.
 *
 * One variable-length directory entry; the name follows the fixed
 * header in-line. NUL termination of lde_name is not guaranteed by
 * this declaration — TODO confirm against the producer.
 */
struct lu_dirent {
        struct lu_fid lde_fid;  /* fid of the entry */
        __u32 lde_hash;         /* presumably the directory hash of the name,
                                 * matching ldp_hash_start/end — confirm */
        __u16 lde_reclen;       /* total record length; 0 marks the last
                                 * entry of a page (see lu_dirent_next()) */
        __u16 lde_namelen;      /* name length in bytes */
        char lde_name[0];       /* name bytes follow in-line */
};
+
/* Header of one readdir page, followed by in-line lu_dirent records. */
struct lu_dirpage {
        __u32 ldp_hash_start;   /* presumably the hash of the first entry
                                 * covered by this page — confirm */
        __u32 ldp_hash_end;     /* presumably the hash upper bound — confirm */
        __u16 ldp_flags;        /* enum lu_dirpage_flags */
        __u32 ldp_pad0;         /* explicit pad field */
        /* NOTE(review): common ABIs insert 2 bytes of compiler padding
         * between ldp_flags (__u16) and ldp_pad0 (__u32); for a wire
         * structure that padding is unnamed and uninitialized — verify
         * the intended on-wire layout. */
        struct lu_dirent ldp_entries[0]; /* entries follow in-line */
};
+
enum lu_dirpage_flags {
        LDF_EMPTY = 1 << 0      /* page holds no entries; lu_dirent_start()
                                 * returns NULL when set */
};
+
+static inline struct lu_dirent *lu_dirent_start(struct lu_dirpage *dp)
+{
+ if (le16_to_cpu(dp->ldp_flags) & LDF_EMPTY)
+ return NULL;
+ else
+ return dp->ldp_entries;
+}
+
+static inline struct lu_dirent *lu_dirent_next(struct lu_dirent *ent)
+{
+ struct lu_dirent *next;
+
+ if (le16_to_cpu(ent->lde_reclen) != 0)
+ next = ((void *)ent) + le16_to_cpu(ent->lde_reclen);
+ else
+ next = NULL;
+
+ return next;
+}
+
+static inline int lu_dirent_size(struct lu_dirent *ent)
+{
+ if (le16_to_cpu(ent->lde_reclen) == 0) {
+ return (sizeof(*ent) +
+ le16_to_cpu(ent->lde_namelen) + 3) & ~3;
+ }
+ return le16_to_cpu(ent->lde_reclen);
+}
+
/* Presumably the hash offset marking end-of-directory — confirm against
 * the readdir callers; note it is one below the all-ones 32-bit value. */
#define DIR_END_OFF 0xfffffffeUL
+