* Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
*
- * Copyright (c) 2011, 2012, Whamcloud, Inc.
+ * Copyright (c) 2011, 2013, Intel Corporation.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
#ifndef _LUSTRE_IDL_H_
#define _LUSTRE_IDL_H_
-#if !defined(LASSERT) && !defined(LPU64)
-#include <libcfs/libcfs.h> /* for LASSERT, LPUX64, etc */
+#if !defined(LPU64)
+#include <libcfs/libcfs.h> /* for LPU64, etc */
#endif
/* Defn's shared with user-space. */
#include <lustre/lustre_user.h>
+#include <lustre/lustre_errno.h>
+
/*
* GENERAL STUFF
*/
//#define PTLBD_BULK_PORTAL 21
#define MDS_SETATTR_PORTAL 22
#define MDS_READPAGE_PORTAL 23
-#define MDS_MDS_PORTAL 24
-
+#define OUT_PORTAL 24
#define MGC_REPLY_PORTAL 25
#define MGS_REQUEST_PORTAL 26
#define MGS_REPLY_PORTAL 27
* Same structure is used in fld module where lsr_index field holds mdt id
* of the home mdt.
*/
-
-#define LU_SEQ_RANGE_MDT 0x0
-#define LU_SEQ_RANGE_OST 0x1
-
struct lu_seq_range {
- __u64 lsr_start;
- __u64 lsr_end;
- __u32 lsr_index;
- __u32 lsr_flags;
+ __u64 lsr_start;
+ __u64 lsr_end;
+ __u32 lsr_index;
+ __u32 lsr_flags;
};
+#define LU_SEQ_RANGE_MDT 0x0
+#define LU_SEQ_RANGE_OST 0x1
+#define LU_SEQ_RANGE_ANY 0x3
+
+#define LU_SEQ_RANGE_MASK 0x3
+
+static inline unsigned fld_range_type(const struct lu_seq_range *range)
+{
+ return range->lsr_flags & LU_SEQ_RANGE_MASK;
+}
+
+static inline int fld_range_is_ost(const struct lu_seq_range *range)
+{
+ return fld_range_type(range) == LU_SEQ_RANGE_OST;
+}
+
+static inline int fld_range_is_mdt(const struct lu_seq_range *range)
+{
+ return fld_range_type(range) == LU_SEQ_RANGE_MDT;
+}
+
+/**
+ * The ANY range type is only used when the fld client sends an fld query
+ * request but does not know whether the seq is MDT or OST; the request is
+ * sent with the ANY type, so either seq type returned by the lookup is
+ * acceptable.
+ */
+static inline unsigned fld_range_is_any(const struct lu_seq_range *range)
+{
+ return fld_range_type(range) == LU_SEQ_RANGE_ANY;
+}
+
+static inline void fld_range_set_type(struct lu_seq_range *range,
+ unsigned flags)
+{
+ range->lsr_flags |= flags;
+}
+
+static inline void fld_range_set_mdt(struct lu_seq_range *range)
+{
+ fld_range_set_type(range, LU_SEQ_RANGE_MDT);
+}
+
+static inline void fld_range_set_ost(struct lu_seq_range *range)
+{
+ fld_range_set_type(range, LU_SEQ_RANGE_OST);
+}
+
+static inline void fld_range_set_any(struct lu_seq_range *range)
+{
+ fld_range_set_type(range, LU_SEQ_RANGE_ANY);
+}
+
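+/*
+ * Usage sketch (illustrative only, not part of this patch): a caller that
+ * does not know whether a sequence belongs to an MDT or an OST marks the
+ * range ANY before the fld query, then tests the looked-up type:
+ *
+ *	struct lu_seq_range range;
+ *
+ *	range.lsr_flags = 0;
+ *	fld_range_set_any(&range);
+ *	... perform the fld lookup to fill in the range ...
+ *	if (fld_range_is_ost(&range))
+ *		... the sequence maps to an OST index ...
+ */
+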
/**
* returns width of given range \a r
*/
#define DRANGE "[%#16.16"LPF64"x-%#16.16"LPF64"x):%x:%s"
-#define PRANGE(range) \
- (range)->lsr_start, \
- (range)->lsr_end, \
- (range)->lsr_index, \
- (range)->lsr_flags == LU_SEQ_RANGE_MDT ? "mdt" : "ost"
+#define PRANGE(range) \
+ (range)->lsr_start, \
+ (range)->lsr_end, \
+ (range)->lsr_index, \
+ fld_range_is_mdt(range) ? "mdt" : "ost"
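+
+/*
+ * Typical use of the two macros above (illustrative only; "range" is a
+ * struct lu_seq_range pointer):
+ *
+ *	CDEBUG(D_INFO, "range: "DRANGE"\n", PRANGE(range));
+ */
+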
/** \defgroup lu_fid lu_fid
/**
* Flags for lustre_mdt_attrs::lma_compat and lustre_mdt_attrs::lma_incompat.
+ * Deprecated since HSM and SOM attributes are now stored in separate on-disk
+ * xattrs.
*/
enum lma_compat {
- LMAC_HSM = 0x00000001,
- LMAC_SOM = 0x00000002,
+ LMAC_HSM = 0x00000001,
+ LMAC_SOM = 0x00000002,
+ LMAC_NOT_IN_OI = 0x00000004, /* the object does NOT need OI mapping */
+ LMAC_FID_ON_OST = 0x00000008, /* For OST-object, its OI mapping is
+ * under /O/<seq>/d<x>. */
};
/**
* Masks for all features that should be supported by a Lustre version to
* access a specific file.
* This information is stored in lustre_mdt_attrs::lma_incompat.
- *
- * NOTE: No incompat feature should be added before bug #17670 is landed.
*/
-#define LMA_INCOMPAT_SUPP 0x0
-
+enum lma_incompat {
+ LMAI_RELEASED = 0x00000001, /* file is released */
+ LMAI_AGENT = 0x00000002, /* agent inode */
+ LMAI_REMOTE_PARENT = 0x00000004, /* the parent of the object
+ is on the remote MDT */
+};
+#define LMA_INCOMPAT_SUPP (LMAI_AGENT | LMAI_REMOTE_PARENT)
+
+extern void lustre_lma_swab(struct lustre_mdt_attrs *lma);
+extern void lustre_lma_init(struct lustre_mdt_attrs *lma,
+ const struct lu_fid *fid,
+ __u32 compat, __u32 incompat);
/**
- * Following struct for MDT attributes, that will be kept inode's EA.
- * Introduced in 2.0 release (please see b15993, for details)
+ * SOM on-disk attributes stored in a separate xattr.
*/
-struct lustre_mdt_attrs {
- /**
- * Bitfield for supported data in this structure. From enum lma_compat.
- * lma_self_fid and lma_flags are always available.
- */
- __u32 lma_compat;
- /**
- * Per-file incompat feature list. Lustre version should support all
- * flags set in this field. The supported feature mask is available in
- * LMA_INCOMPAT_SUPP.
- */
- __u32 lma_incompat;
- /** FID of this inode */
- struct lu_fid lma_self_fid;
- /** mdt/ost type, others */
- __u64 lma_flags;
- /* IO Epoch SOM attributes belongs to */
- __u64 lma_ioepoch;
- /** total file size in objects */
- __u64 lma_som_size;
- /** total fs blocks in objects */
- __u64 lma_som_blocks;
- /** mds mount id the size is valid for */
- __u64 lma_som_mountid;
-};
+struct som_attrs {
+ /** Bitfield for supported data in this structure. For future use. */
+ __u32 som_compat;
-/**
- * Fill \a lma with its first content.
- * Only fid is stored.
- */
-static inline void lustre_lma_init(struct lustre_mdt_attrs *lma,
- const struct lu_fid *fid)
-{
- lma->lma_compat = 0;
- lma->lma_incompat = 0;
- memcpy(&lma->lma_self_fid, fid, sizeof(*fid));
- lma->lma_flags = 0;
- lma->lma_ioepoch = 0;
- lma->lma_som_size = 0;
- lma->lma_som_blocks = 0;
- lma->lma_som_mountid = 0;
+ /** Incompat feature list. The supported feature mask is available in
+ * SOM_INCOMPAT_SUPP */
+ __u32 som_incompat;
- /* If a field is added in struct lustre_mdt_attrs, zero it explicitly
- * and change the test below. */
- LASSERT(sizeof(*lma) ==
- (offsetof(struct lustre_mdt_attrs, lma_som_mountid) +
- sizeof(lma->lma_som_mountid)));
+ /** IO epoch the SOM attributes belong to */
+ __u64 som_ioepoch;
+ /** total file size in objects */
+ __u64 som_size;
+ /** total fs blocks in objects */
+ __u64 som_blocks;
+ /** mds mount id the size is valid for */
+ __u64 som_mountid;
};
+extern void lustre_som_swab(struct som_attrs *attrs);
-extern void lustre_swab_lu_fid(struct lu_fid *fid);
+#define SOM_INCOMPAT_SUPP 0x0
/**
- * Swab, if needed, lustre_mdt_attr struct to on-disk format.
- * Otherwise, do not touch it.
+ * HSM on-disk attributes stored in a separate xattr.
*/
-static inline void lustre_lma_swab(struct lustre_mdt_attrs *lma)
-{
- /* Use LUSTRE_MSG_MAGIC to detect local endianess. */
- if (LUSTRE_MSG_MAGIC != cpu_to_le32(LUSTRE_MSG_MAGIC)) {
- __swab32s(&lma->lma_compat);
- __swab32s(&lma->lma_incompat);
- lustre_swab_lu_fid(&lma->lma_self_fid);
- __swab64s(&lma->lma_flags);
- __swab64s(&lma->lma_ioepoch);
- __swab64s(&lma->lma_som_size);
- __swab64s(&lma->lma_som_blocks);
- __swab64s(&lma->lma_som_mountid);
- }
-};
-
-/* This is the maximum number of MDTs allowed in CMD testing until such
- * a time that FID-on-OST is implemented. This is due to the limitations
- * of packing non-0-MDT numbers into the FID SEQ namespace. Once FID-on-OST
- * is implemented this limit will be virtually unlimited. */
-#define MAX_MDT_COUNT 8
+struct hsm_attrs {
+ /** Bitfield for supported data in this structure. For future use. */
+ __u32 hsm_compat;
+ /** HSM flags, see hsm_flags enum below */
+ __u32 hsm_flags;
+ /** backend archive id associated with the file */
+ __u64 hsm_arch_id;
+ /** version associated with the last archiving, if any */
+ __u64 hsm_arch_ver;
+};
+extern void lustre_hsm_swab(struct hsm_attrs *attrs);
/**
* fid constants
*/
enum {
+ /** LASTID file has zero OID */
+ LUSTRE_FID_LASTID_OID = 0UL,
/** initial fid id value */
LUSTRE_FID_INIT_OID = 1UL
};
* http://arch.lustre.org/index.php?title=Interoperability_fids_zfs#NEW.0
*/
enum fid_seq {
- FID_SEQ_OST_MDT0 = 0,
- FID_SEQ_LLOG = 1,
- FID_SEQ_ECHO = 2,
- FID_SEQ_OST_MDT1 = 3,
- FID_SEQ_OST_MAX = 9, /* Max MDT count before OST_on_FID */
- FID_SEQ_RSVD = 11,
- FID_SEQ_IGIF = 12,
- FID_SEQ_IGIF_MAX = 0x0ffffffffULL,
- FID_SEQ_IDIF = 0x100000000ULL,
- FID_SEQ_IDIF_MAX = 0x1ffffffffULL,
- /* Normal FID sequence starts from this value, i.e. 1<<33 */
- FID_SEQ_START = 0x200000000ULL,
+ FID_SEQ_OST_MDT0 = 0,
+ FID_SEQ_LLOG = 1, /* unnamed llogs */
+ FID_SEQ_ECHO = 2,
+ FID_SEQ_OST_MDT1 = 3,
+ FID_SEQ_OST_MAX = 9, /* Max MDT count before OST_on_FID */
+ FID_SEQ_LLOG_NAME = 10, /* named llogs */
+ FID_SEQ_RSVD = 11,
+ FID_SEQ_IGIF = 12,
+ FID_SEQ_IGIF_MAX = 0x0ffffffffULL,
+ FID_SEQ_IDIF = 0x100000000ULL,
+ FID_SEQ_IDIF_MAX = 0x1ffffffffULL,
+ /* Normal FID sequence starts from this value, i.e. 1<<33 */
+ FID_SEQ_START = 0x200000000ULL,
/* sequence for local pre-defined FIDs listed in local_oid */
- FID_SEQ_LOCAL_FILE = 0x200000001ULL,
- FID_SEQ_DOT_LUSTRE = 0x200000002ULL,
+ FID_SEQ_LOCAL_FILE = 0x200000001ULL,
+ FID_SEQ_DOT_LUSTRE = 0x200000002ULL,
/* sequence is used for local named objects FIDs generated
* by local_object_storage library */
- FID_SEQ_LOCAL_NAME = 0x200000003ULL,
- FID_SEQ_SPECIAL = 0x200000004ULL,
- FID_SEQ_QUOTA = 0x200000005ULL,
- FID_SEQ_QUOTA_GLB = 0x200000006ULL,
- FID_SEQ_NORMAL = 0x200000400ULL,
- FID_SEQ_LOV_DEFAULT= 0xffffffffffffffffULL
+ FID_SEQ_LOCAL_NAME = 0x200000003ULL,
+ /* Because the current FLD only caches the fid sequence on the
+ * client side, not the oid, any FID that is exposed to clients
+ * must ensure that all fids under its sequence are located on
+ * one MDT. */
+ FID_SEQ_SPECIAL = 0x200000004ULL,
+ FID_SEQ_QUOTA = 0x200000005ULL,
+ FID_SEQ_QUOTA_GLB = 0x200000006ULL,
+ FID_SEQ_ROOT = 0x200000007ULL, /* Located on MDT0 */
+ FID_SEQ_NORMAL = 0x200000400ULL,
+ FID_SEQ_LOV_DEFAULT = 0xffffffffffffffffULL
};
#define OBIF_OID_MAX_BITS 32
return (seq == FID_SEQ_OST_MDT0);
}
-static inline int fid_seq_is_cmd(const __u64 seq)
+static inline int fid_seq_is_mdt(const __u64 seq)
{
- return (seq >= FID_SEQ_OST_MDT1 && seq <= FID_SEQ_OST_MAX);
+ return seq == FID_SEQ_OST_MDT0 || seq >= FID_SEQ_NORMAL;
};
-static inline int fid_seq_is_mdt(const __u64 seq)
+static inline int fid_seq_is_echo(obd_seq seq)
{
- return seq == FID_SEQ_OST_MDT0 ||
- (seq >= FID_SEQ_OST_MDT1 && seq <= FID_SEQ_OST_MAX);
-};
+ return (seq == FID_SEQ_ECHO);
+}
+
+static inline int fid_is_echo(const struct lu_fid *fid)
+{
+ return fid_seq_is_echo(fid_seq(fid));
+}
+
+static inline int fid_seq_is_llog(obd_seq seq)
+{
+ return (seq == FID_SEQ_LLOG);
+}
+
+static inline int fid_is_llog(const struct lu_fid *fid)
+{
+ /* a file with OID == 0 is not an llog; it holds the last oid */
+ return fid_seq_is_llog(fid_seq(fid)) && fid_oid(fid) > 0;
+}
static inline int fid_seq_is_rsvd(const __u64 seq)
{
return (seq > FID_SEQ_OST_MDT0 && seq <= FID_SEQ_RSVD);
};
+static inline int fid_seq_is_special(const __u64 seq)
+{
+ return seq == FID_SEQ_SPECIAL;
+};
+
+static inline int fid_seq_is_local_file(const __u64 seq)
+{
+ return seq == FID_SEQ_LOCAL_FILE ||
+ seq == FID_SEQ_LOCAL_NAME;
+};
+
+static inline int fid_seq_is_root(const __u64 seq)
+{
+ return seq == FID_SEQ_ROOT;
+}
+
+static inline int fid_seq_is_dot(const __u64 seq)
+{
+ return seq == FID_SEQ_DOT_LUSTRE;
+}
+
+static inline int fid_seq_is_default(const __u64 seq)
+{
+ return seq == FID_SEQ_LOV_DEFAULT;
+}
+
static inline int fid_is_mdt0(const struct lu_fid *fid)
{
return fid_seq_is_mdt0(fid_seq(fid));
}
+static inline void lu_root_fid(struct lu_fid *fid)
+{
+ fid->f_seq = FID_SEQ_ROOT;
+ fid->f_oid = 1;
+ fid->f_ver = 0;
+}
+
/**
* Check if a fid is igif or not.
* \param fid the fid to be tested.
return fid_seq_is_idif(fid_seq(fid));
}
-struct ost_id {
- obd_id oi_id;
- obd_seq oi_seq;
-};
+static inline int fid_is_local_file(const struct lu_fid *fid)
+{
+ return fid_seq_is_local_file(fid_seq(fid));
+}
static inline int fid_seq_is_norm(const __u64 seq)
{
/* extract ost index from IDIF FID */
static inline __u32 fid_idif_ost_idx(const struct lu_fid *fid)
{
- LASSERT(fid_is_idif(fid));
return (fid_seq(fid) >> 16) & 0xffff;
}
-/* unpack an ostid (id/seq) from a wire/disk structure into an IDIF FID */
-static inline void ostid_idif_unpack(struct ost_id *ostid,
- struct lu_fid *fid, __u32 ost_idx)
+/* extract OST sequence (group) from a wire ost_id (id/seq) pair */
+static inline obd_seq ostid_seq(const struct ost_id *ostid)
{
- fid->f_seq = fid_idif_seq(ostid->oi_id, ost_idx);
- fid->f_oid = ostid->oi_id; /* truncate to 32 bits by assignment */
- fid->f_ver = ostid->oi_id >> 48; /* in theory, not currently used */
-}
+ if (fid_seq_is_mdt0(ostid->oi.oi_seq))
+ return FID_SEQ_OST_MDT0;
-/* unpack an ostid (id/seq) from a wire/disk structure into a non-IDIF FID */
-static inline void ostid_fid_unpack(struct ost_id *ostid, struct lu_fid *fid)
-{
- fid->f_seq = ostid->oi_seq;
- fid->f_oid = ostid->oi_id; /* truncate to 32 bits by assignment */
- fid->f_ver = ostid->oi_id >> 32; /* in theory, not currently used */
+ if (fid_seq_is_default(ostid->oi.oi_seq))
+ return FID_SEQ_LOV_DEFAULT;
+
+ if (fid_is_idif(&ostid->oi_fid))
+ return FID_SEQ_OST_MDT0;
+
+ return fid_seq(&ostid->oi_fid);
}
-/* Unpack an OST object id/seq (group) into a FID. This is needed for
- * converting all obdo, lmm, lsm, etc. 64-bit id/seq pairs into proper
- * FIDs. Note that if an id/seq is already in FID/IDIF format it will
- * be passed through unchanged. Only legacy OST objects in "group 0"
- * will be mapped into the IDIF namespace so that they can fit into the
- * struct lu_fid fields without loss. For reference see:
- * http://arch.lustre.org/index.php?title=Interoperability_fids_zfs
- */
-static inline int fid_ostid_unpack(struct lu_fid *fid, struct ost_id *ostid,
- __u32 ost_idx)
+/* extract OST objid from a wire ost_id (id/seq) pair */
+static inline obd_id ostid_id(const struct ost_id *ostid)
{
- if (ost_idx > 0xffff) {
- CERROR("bad ost_idx, seq:"LPU64" id:"LPU64" ost_idx:%u\n",
- ostid->oi_seq, ostid->oi_id, ost_idx);
- return -EBADF;
- }
+ if (fid_seq_is_mdt0(ostid_seq(ostid)))
+ return ostid->oi.oi_id & IDIF_OID_MASK;
- if (fid_seq_is_mdt0(ostid->oi_seq)) {
- /* This is a "legacy" (old 1.x/2.early) OST object in "group 0"
- * that we map into the IDIF namespace. It allows up to 2^48
- * objects per OST, as this is the object namespace that has
- * been in production for years. This can handle create rates
- * of 1M objects/s/OST for 9 years, or combinations thereof. */
- if (ostid->oi_id >= IDIF_MAX_OID) {
- CERROR("bad MDT0 id, seq:"LPU64" id:"LPU64" ost_idx:%u\n",
- ostid->oi_seq, ostid->oi_id, ost_idx);
- return -EBADF;
- }
- ostid_idif_unpack(ostid, fid, ost_idx);
-
- } else if (fid_seq_is_rsvd(ostid->oi_seq)) {
- /* These are legacy OST objects for LLOG/ECHO and CMD testing.
- * We only support 2^32 objects in these groups, and cannot
- * uniquely identify them in the system (i.e. they are the
- * duplicated on all OSTs), but this is not strictly required
- * for the old object protocol, which has a separate ost_idx. */
- if (ostid->oi_id >= 0xffffffffULL) {
- CERROR("bad RSVD id, seq:"LPU64" id:"LPU64" ost_idx:%u\n",
- ostid->oi_seq, ostid->oi_id, ost_idx);
- return -EBADF;
- }
- ostid_fid_unpack(ostid, fid);
-
- } else if (unlikely(fid_seq_is_igif(ostid->oi_seq))) {
- /* This is an MDT inode number, which should never collide with
- * proper OST object IDs, and is probably a broken filesystem */
- CERROR("bad IGIF, seq:"LPU64" id:"LPU64" ost_idx:%u\n",
- ostid->oi_seq, ostid->oi_id, ost_idx);
- return -EBADF;
-
- } else /* if (fid_seq_is_idif(seq) || fid_seq_is_norm(seq)) */ {
- /* This is either an IDIF object, which identifies objects across
- * all OSTs, or a regular FID. The IDIF namespace maps legacy
- * OST objects into the FID namespace. In both cases, we just
- * pass the FID through, no conversion needed. */
- ostid_fid_unpack(ostid, fid);
- }
+ if (fid_is_idif(&ostid->oi_fid))
+ return fid_idif_id(fid_seq(&ostid->oi_fid),
+ fid_oid(&ostid->oi_fid), 0);
- return 0;
+ return fid_oid(&ostid->oi_fid);
}
-/* pack an IDIF FID into an ostid (id/seq) for the wire/disk */
-static inline void ostid_idif_pack(const struct lu_fid *fid,
- struct ost_id *ostid)
-{
- ostid->oi_seq = FID_SEQ_OST_MDT0;
- ostid->oi_id = fid_idif_id(fid->f_seq, fid->f_oid, fid->f_ver);
+static inline void ostid_set_seq(struct ost_id *oi, __u64 seq)
+{
+ if (fid_seq_is_mdt0(seq) || fid_seq_is_default(seq)) {
+ oi->oi.oi_seq = seq;
+ } else {
+ oi->oi_fid.f_seq = seq;
+ /* Note: if f_oid and f_ver are both zero, init f_oid
+ * to 1; otherwise ostid_seq() would treat this as an
+ * old ostid (oi_seq == 0). */
+ if (oi->oi_fid.f_oid == 0 && oi->oi_fid.f_ver == 0)
+ oi->oi_fid.f_oid = LUSTRE_FID_INIT_OID;
+ }
}
-/* pack a non-IDIF FID into an ostid (id/seq) for the wire/disk */
-static inline void ostid_fid_pack(const struct lu_fid *fid,
- struct ost_id *ostid)
+static inline void ostid_set_seq_mdt0(struct ost_id *oi)
{
- ostid->oi_seq = fid_seq(fid);
- ostid->oi_id = fid_ver_oid(fid);
+ ostid_set_seq(oi, FID_SEQ_OST_MDT0);
}
-/* pack any OST FID into an ostid (id/seq) for the wire/disk */
-static inline int fid_ostid_pack(const struct lu_fid *fid,
- struct ost_id *ostid)
+static inline void ostid_set_seq_echo(struct ost_id *oi)
{
- if (unlikely(fid_seq_is_igif(fid->f_seq))) {
- CERROR("bad IGIF, "DFID"\n", PFID(fid));
- return -EBADF;
- }
-
- if (fid_is_idif(fid))
- ostid_idif_pack(fid, ostid);
- else
- ostid_fid_pack(fid, ostid);
-
- return 0;
+ ostid_set_seq(oi, FID_SEQ_ECHO);
}
-/* extract OST sequence (group) from a wire ost_id (id/seq) pair */
-static inline obd_seq ostid_seq(struct ost_id *ostid)
+static inline void ostid_set_seq_llog(struct ost_id *oi)
{
- if (unlikely(fid_seq_is_igif(ostid->oi_seq)))
- CWARN("bad IGIF, oi_seq: "LPU64" oi_id: "LPX64"\n",
- ostid->oi_seq, ostid->oi_id);
+ ostid_set_seq(oi, FID_SEQ_LLOG);
+}
- if (unlikely(fid_seq_is_idif(ostid->oi_seq)))
- return FID_SEQ_OST_MDT0;
+/**
+ * Note: we need to check oi_seq to decide where to store oi_id,
+ * so oi_seq must always be set before oi_id.
+ */
+static inline void ostid_set_id(struct ost_id *oi, __u64 oid)
+{
+ if (fid_seq_is_mdt0(ostid_seq(oi))) {
+ if (oid >= IDIF_MAX_OID) {
+ CERROR("Bad "LPU64" to set "DOSTID"\n",
+ oid, POSTID(oi));
+ return;
+ }
+ oi->oi.oi_id = oid;
+ } else {
+ if (oid > OBIF_MAX_OID) {
+ CERROR("Bad "LPU64" to set "DOSTID"\n",
+ oid, POSTID(oi));
+ return;
+ }
+ oi->oi_fid.f_oid = oid;
+ }
+}
- return ostid->oi_seq;
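+/*
+ * Ordering sketch (illustrative only): ostid_set_id() inspects the stored
+ * sequence to decide which union member receives the id, so the sequence
+ * must be stored first:
+ *
+ *	struct ost_id oi;
+ *
+ *	ostid_set_seq_mdt0(&oi);   (selects the wire oi.oi layout)
+ *	ostid_set_id(&oi, 42);     (stored in oi.oi.oi_id)
+ */
+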
+static inline void ostid_inc_id(struct ost_id *oi)
+{
+ if (fid_seq_is_mdt0(ostid_seq(oi))) {
+ if (unlikely(ostid_id(oi) + 1 > IDIF_MAX_OID)) {
+ CERROR("Bad inc "DOSTID"\n", POSTID(oi));
+ return;
+ }
+ oi->oi.oi_id++;
+ } else {
+ oi->oi_fid.f_oid++;
+ }
}
-/* extract OST objid from a wire ost_id (id/seq) pair */
-static inline obd_id ostid_id(struct ost_id *ostid)
+static inline void ostid_dec_id(struct ost_id *oi)
{
- if (ostid->oi_seq == FID_SEQ_OST_MDT0)
- return ostid->oi_id & IDIF_OID_MASK;
+ if (fid_seq_is_mdt0(ostid_seq(oi)))
+ oi->oi.oi_id--;
+ else
+ oi->oi_fid.f_oid--;
+}
+
+/**
+ * Unpack an OST object id/seq (group) into a FID. This is needed for
+ * converting all obdo, lmm, lsm, etc. 64-bit id/seq pairs into proper
+ * FIDs. Note that if an id/seq is already in FID/IDIF format it will
+ * be passed through unchanged. Only legacy OST objects in "group 0"
+ * will be mapped into the IDIF namespace so that they can fit into the
+ * struct lu_fid fields without loss. For reference see:
+ * http://arch.lustre.org/index.php?title=Interoperability_fids_zfs
+ */
+static inline int ostid_to_fid(struct lu_fid *fid, struct ost_id *ostid,
+ __u32 ost_idx)
+{
+ if (ost_idx > 0xffff) {
+ CERROR("bad ost_idx, "DOSTID" ost_idx:%u\n", POSTID(ostid),
+ ost_idx);
+ return -EBADF;
+ }
+
+ if (fid_seq_is_mdt0(ostid_seq(ostid))) {
+ /* This is a "legacy" (old 1.x/2.early) OST object in "group 0"
+ * that we map into the IDIF namespace. It allows up to 2^48
+ * objects per OST, as this is the object namespace that has
+ * been in production for years. This can handle create rates
+ * of 1M objects/s/OST for 9 years, or combinations thereof. */
+ if (ostid_id(ostid) >= IDIF_MAX_OID) {
+ CERROR("bad MDT0 id, "DOSTID" ost_idx:%u\n",
+ POSTID(ostid), ost_idx);
+ return -EBADF;
+ }
+ fid->f_seq = fid_idif_seq(ostid_id(ostid), ost_idx);
+ /* truncate to 32 bits by assignment */
+ fid->f_oid = ostid_id(ostid);
+ /* in theory, not currently used */
+ fid->f_ver = ostid_id(ostid) >> 48;
+ } else /* if (fid_seq_is_idif(seq) || fid_seq_is_norm(seq)) */ {
+ /* This is either an IDIF object, which identifies objects across
+ * all OSTs, or a regular FID. The IDIF namespace maps legacy
+ * OST objects into the FID namespace. In both cases, we just
+ * pass the FID through, no conversion needed. */
+ if (ostid->oi_fid.f_ver != 0) {
+ CERROR("bad MDT0 id, "DOSTID" ost_idx:%u\n",
+ POSTID(ostid), ost_idx);
+ return -EBADF;
+ }
+ *fid = ostid->oi_fid;
+ }
- if (fid_seq_is_rsvd(ostid->oi_seq))
- return ostid->oi_id & OBIF_OID_MASK;
+ return 0;
+}
- if (fid_seq_is_idif(ostid->oi_seq))
- return fid_idif_id(ostid->oi_seq, ostid->oi_id, 0);
+/* pack any OST FID into an ostid (id/seq) for the wire/disk */
+static inline int fid_to_ostid(const struct lu_fid *fid, struct ost_id *ostid)
+{
+ if (unlikely(fid_seq_is_igif(fid->f_seq))) {
+ CERROR("bad IGIF, "DFID"\n", PFID(fid));
+ return -EBADF;
+ }
+
+ if (fid_is_idif(fid)) {
+ ostid_set_seq_mdt0(ostid);
+ ostid_set_id(ostid, fid_idif_id(fid_seq(fid), fid_oid(fid),
+ fid_ver(fid)));
+ } else {
+ ostid->oi_fid = *fid;
+ }
+
+ return 0;
+}
- return ostid->oi_id;
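+/*
+ * Round-trip sketch (illustrative only; ost_idx is the target OST index,
+ * assumed known by the caller): a legacy "group 0" object id is mapped
+ * into the IDIF namespace and back without loss:
+ *
+ *	struct ost_id oi;
+ *	struct lu_fid fid;
+ *
+ *	ostid_set_seq_mdt0(&oi);
+ *	ostid_set_id(&oi, 42);
+ *	if (ostid_to_fid(&fid, &oi, ost_idx) == 0)
+ *		fid_to_ostid(&fid, &oi);
+ */
+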
+/* Check whether the fid is for LAST_ID */
+static inline int fid_is_last_id(const struct lu_fid *fid)
+{
+ return (fid_oid(fid) == 0);
}
/**
return fid_seq(fid);
}
-/**
- * Build igif from the inode number/generation.
- */
-#define LU_IGIF_BUILD(fid, ino, gen) \
-do { \
- fid->f_seq = ino; \
- fid->f_oid = gen; \
- fid->f_ver = 0; \
-} while(0)
-static inline void lu_igif_build(struct lu_fid *fid, __u32 ino, __u32 gen)
-{
- LU_IGIF_BUILD(fid, ino, gen);
- LASSERT(fid_is_igif(fid));
-}
+extern void lustre_swab_ost_id(struct ost_id *oid);
/**
* Get inode generation from a igif.
return fid_oid(fid);
}
+/**
+ * Build igif from the inode number/generation.
+ */
+static inline void lu_igif_build(struct lu_fid *fid, __u32 ino, __u32 gen)
+{
+ fid->f_seq = ino;
+ fid->f_oid = gen;
+ fid->f_ver = 0;
+}
+
/*
* Fids are transmitted across network (in the sender byte-ordering),
* and stored on disk in big-endian order.
*/
static inline void fid_cpu_to_le(struct lu_fid *dst, const struct lu_fid *src)
{
- /* check that all fields are converted */
- CLASSERT(sizeof *src ==
- sizeof fid_seq(src) +
- sizeof fid_oid(src) + sizeof fid_ver(src));
- LASSERTF(fid_is_igif(src) || fid_ver(src) == 0, DFID"\n", PFID(src));
- dst->f_seq = cpu_to_le64(fid_seq(src));
- dst->f_oid = cpu_to_le32(fid_oid(src));
- dst->f_ver = cpu_to_le32(fid_ver(src));
+ dst->f_seq = cpu_to_le64(fid_seq(src));
+ dst->f_oid = cpu_to_le32(fid_oid(src));
+ dst->f_ver = cpu_to_le32(fid_ver(src));
}
static inline void fid_le_to_cpu(struct lu_fid *dst, const struct lu_fid *src)
{
- /* check that all fields are converted */
- CLASSERT(sizeof *src ==
- sizeof fid_seq(src) +
- sizeof fid_oid(src) + sizeof fid_ver(src));
- dst->f_seq = le64_to_cpu(fid_seq(src));
- dst->f_oid = le32_to_cpu(fid_oid(src));
- dst->f_ver = le32_to_cpu(fid_ver(src));
- LASSERTF(fid_is_igif(dst) || fid_ver(dst) == 0, DFID"\n", PFID(dst));
+ dst->f_seq = le64_to_cpu(fid_seq(src));
+ dst->f_oid = le32_to_cpu(fid_oid(src));
+ dst->f_ver = le32_to_cpu(fid_ver(src));
}
static inline void fid_cpu_to_be(struct lu_fid *dst, const struct lu_fid *src)
{
- /* check that all fields are converted */
- CLASSERT(sizeof *src ==
- sizeof fid_seq(src) +
- sizeof fid_oid(src) + sizeof fid_ver(src));
- LASSERTF(fid_is_igif(src) || fid_ver(src) == 0, DFID"\n", PFID(src));
- dst->f_seq = cpu_to_be64(fid_seq(src));
- dst->f_oid = cpu_to_be32(fid_oid(src));
- dst->f_ver = cpu_to_be32(fid_ver(src));
+ dst->f_seq = cpu_to_be64(fid_seq(src));
+ dst->f_oid = cpu_to_be32(fid_oid(src));
+ dst->f_ver = cpu_to_be32(fid_ver(src));
}
static inline void fid_be_to_cpu(struct lu_fid *dst, const struct lu_fid *src)
{
- /* check that all fields are converted */
- CLASSERT(sizeof *src ==
- sizeof fid_seq(src) +
- sizeof fid_oid(src) + sizeof fid_ver(src));
- dst->f_seq = be64_to_cpu(fid_seq(src));
- dst->f_oid = be32_to_cpu(fid_oid(src));
- dst->f_ver = be32_to_cpu(fid_ver(src));
- LASSERTF(fid_is_igif(dst) || fid_ver(dst) == 0, DFID"\n", PFID(dst));
+ dst->f_seq = be64_to_cpu(fid_seq(src));
+ dst->f_oid = be32_to_cpu(fid_oid(src));
+ dst->f_ver = be32_to_cpu(fid_ver(src));
}
static inline int fid_is_sane(const struct lu_fid *fid)
{
- return
- fid != NULL &&
- ((fid_seq(fid) >= FID_SEQ_START && fid_oid(fid) != 0
- && fid_ver(fid) == 0) ||
- fid_is_igif(fid) || fid_seq_is_rsvd(fid_seq(fid)));
+ return fid != NULL &&
+ ((fid_seq(fid) >= FID_SEQ_START && fid_ver(fid) == 0) ||
+ fid_is_igif(fid) || fid_is_idif(fid) ||
+ fid_seq_is_rsvd(fid_seq(fid)));
}
static inline int fid_is_zero(const struct lu_fid *fid)
extern void lustre_swab_lu_fid(struct lu_fid *fid);
extern void lustre_swab_lu_seq_range(struct lu_seq_range *range);
-static inline int lu_fid_eq(const struct lu_fid *f0,
- const struct lu_fid *f1)
+static inline int lu_fid_eq(const struct lu_fid *f0, const struct lu_fid *f1)
{
- /* Check that there is no alignment padding. */
- CLASSERT(sizeof *f0 ==
- sizeof f0->f_seq + sizeof f0->f_oid + sizeof f0->f_ver);
- LASSERTF((fid_is_igif(f0) || fid_is_idif(f0)) ||
- fid_ver(f0) == 0, DFID, PFID(f0));
- LASSERTF((fid_is_igif(f1) || fid_is_idif(f1)) ||
- fid_ver(f1) == 0, DFID, PFID(f1));
- return memcmp(f0, f1, sizeof *f0) == 0;
+ return memcmp(f0, f1, sizeof *f0) == 0;
}
#define __diff_normalize(val0, val1) \
__diff_normalize(fid_ver(f0), fid_ver(f1));
}
+static inline void ostid_cpu_to_le(const struct ost_id *src_oi,
+ struct ost_id *dst_oi)
+{
+ if (fid_seq_is_mdt0(ostid_seq(src_oi))) {
+ dst_oi->oi.oi_id = cpu_to_le64(src_oi->oi.oi_id);
+ dst_oi->oi.oi_seq = cpu_to_le64(src_oi->oi.oi_seq);
+ } else {
+ fid_cpu_to_le(&dst_oi->oi_fid, &src_oi->oi_fid);
+ }
+}
+
+static inline void ostid_le_to_cpu(const struct ost_id *src_oi,
+ struct ost_id *dst_oi)
+{
+ if (fid_seq_is_mdt0(ostid_seq(src_oi))) {
+ dst_oi->oi.oi_id = le64_to_cpu(src_oi->oi.oi_id);
+ dst_oi->oi.oi_seq = le64_to_cpu(src_oi->oi.oi_seq);
+ } else {
+ fid_le_to_cpu(&dst_oi->oi_fid, &src_oi->oi_fid);
+ }
+}
+
/** @} lu_fid */
/** \defgroup lu_dir lu_dir
* enumeration.
*/
enum lu_dirent_attrs {
- LUDA_FID = 0x0001,
- LUDA_TYPE = 0x0002,
- LUDA_64BITHASH = 0x0004,
+ LUDA_FID = 0x0001,
+ LUDA_TYPE = 0x0002,
+ LUDA_64BITHASH = 0x0004,
+
+ /* The following attrs are for MDT internal use only,
+ * and are not visible to the client */
+
+ /* Verify the dirent consistency */
+ LUDA_VERIFY = 0x8000,
+ /* Only check but not repair the dirent inconsistency */
+ LUDA_VERIFY_DRYRUN = 0x4000,
+ /* The dirent has been repaired, or is to be repaired (dryrun). */
+ LUDA_REPAIR = 0x2000,
+ /* The system has been upgraded; the entry has been or is to be repaired (dryrun). */
+ LUDA_UPGRADE = 0x1000,
+ /* Ignore this record, go to next directly. */
+ LUDA_IGNORE = 0x0800,
};
+#define LU_DIRENT_ATTRS_MASK 0xf800
+
/**
* Layout of readdir pages, as transmitted on wire.
*/
* MDS_READPAGE page size
*
* This is the directory page size packed in MDS_READPAGE RPC.
- * It's different than CFS_PAGE_SIZE because the client needs to
+ * It's different than PAGE_CACHE_SIZE because the client needs to
* access the struct lu_dirpage header packed at the beginning of
* the "page" and without this there isn't any way to know find the
- * lu_dirpage header is if client and server CFS_PAGE_SIZE differ.
+ * lu_dirpage header is if client and server PAGE_CACHE_SIZE differ.
*/
#define LU_PAGE_SHIFT 12
#define LU_PAGE_SIZE (1UL << LU_PAGE_SHIFT)
#define LU_PAGE_MASK (~(LU_PAGE_SIZE - 1))
-#define LU_PAGE_COUNT (1 << (CFS_PAGE_SHIFT - LU_PAGE_SHIFT))
+#define LU_PAGE_COUNT (1 << (PAGE_CACHE_SHIFT - LU_PAGE_SHIFT))
/** @} lu_dir */
* RPC error properly */
#define OBD_CONNECT_GRANT_PARAM 0x100000000000ULL/* extra grant params used for
* finer space reservation */
-#define OBD_CONNECT_NANOSEC_TIME 0x200000000000ULL /* nanosecond timestamps */
+#define OBD_CONNECT_FLOCK_OWNER 0x200000000000ULL /* for the fixed 1.8
+ * policy and 2.x server */
#define OBD_CONNECT_LVB_TYPE 0x400000000000ULL /* variable type of LVB */
+#define OBD_CONNECT_NANOSEC_TIME 0x800000000000ULL /* nanosecond timestamps */
#define OBD_CONNECT_LIGHTWEIGHT 0x1000000000000ULL/* lightweight connection */
+#define OBD_CONNECT_SHORTIO 0x2000000000000ULL/* short io */
+#define OBD_CONNECT_PINGLESS 0x4000000000000ULL/* pings not required */
+#define OBD_CONNECT_FLOCK_DEAD 0x8000000000000ULL/* improved flock deadlock detection */
+
/* XXX README XXX:
* Please DO NOT add flag values here before first ensuring that this same
* flag value is not in use on some other branch. Please clear any such
* changes with senior engineers before starting to use a new flag. Then,
- * submit a small patch against EVERY branch that ONLY adds the new flag
- * and updates obd_connect_names[] for lprocfs_rd_connect_flags(), so it
+ * submit a small patch against EVERY branch that ONLY adds the new flag,
+ * updates obd_connect_names[] for lprocfs_rd_connect_flags(), adds the
+ * flag to check_obd_connect_data(), and updates wiretests accordingly, so it
* can be approved and landed easily to reserve the flag for future use. */
/* The MNE_SWAB flag is overloading the MDS_MDS bit only for the MGS
OBD_CONNECT_SOM | OBD_CONNECT_FULL20 | \
OBD_CONNECT_64BITHASH | OBD_CONNECT_JOBSTATS | \
OBD_CONNECT_EINPROGRESS | \
- OBD_CONNECT_LIGHTWEIGHT)
+ OBD_CONNECT_LIGHTWEIGHT | OBD_CONNECT_UMASK | \
+ OBD_CONNECT_LVB_TYPE | OBD_CONNECT_LAYOUTLOCK |\
+ OBD_CONNECT_PINGLESS | OBD_CONNECT_MAX_EASIZE |\
+ OBD_CONNECT_FLOCK_DEAD)
#define OST_CONNECT_SUPPORTED (OBD_CONNECT_SRVLOCK | OBD_CONNECT_GRANT | \
OBD_CONNECT_REQPORTAL | OBD_CONNECT_VERSION | \
OBD_CONNECT_TRUNCLOCK | OBD_CONNECT_INDEX | \
OBD_CONNECT_64BITHASH | OBD_CONNECT_MAXBYTES | \
OBD_CONNECT_MAX_EASIZE | \
OBD_CONNECT_EINPROGRESS | \
- OBD_CONNECT_JOBSTATS | OBD_CONNECT_LIGHTWEIGHT)
+ OBD_CONNECT_JOBSTATS | \
+ OBD_CONNECT_LIGHTWEIGHT | OBD_CONNECT_LVB_TYPE|\
+ OBD_CONNECT_LAYOUTLOCK | OBD_CONNECT_FID | \
+ OBD_CONNECT_PINGLESS)
#define ECHO_CONNECT_SUPPORTED (0)
#define MGS_CONNECT_SUPPORTED (OBD_CONNECT_VERSION | OBD_CONNECT_AT | \
OBD_CONNECT_FULL20 | OBD_CONNECT_IMP_RECOV | \
- OBD_CONNECT_MNE_SWAB)
+ OBD_CONNECT_MNE_SWAB | OBD_CONNECT_PINGLESS)
/* Features required for this version of the client to work with server */
#define CLIENT_CONNECT_MDT_REQD (OBD_CONNECT_IBITS | OBD_CONNECT_FID | \
* If we eventually have separate connect data for different types, which we
* almost certainly will, then perhaps we stick a union in here. */
struct obd_connect_data_v1 {
- __u64 ocd_connect_flags; /* OBD_CONNECT_* per above */
- __u32 ocd_version; /* lustre release version number */
- __u32 ocd_grant; /* initial cache grant amount (bytes) */
- __u32 ocd_index; /* LOV index to connect to */
- __u32 ocd_brw_size; /* Maximum BRW size in bytes */
+ __u64 ocd_connect_flags; /* OBD_CONNECT_* per above */
+ __u32 ocd_version; /* lustre release version number */
+ __u32 ocd_grant; /* initial cache grant amount (bytes) */
+ __u32 ocd_index; /* LOV index to connect to */
+ __u32 ocd_brw_size; /* Maximum BRW size in bytes, must be 2^n */
__u64 ocd_ibits_known; /* inode bits this client understands */
__u8 ocd_blocksize; /* log2 of the backend filesystem blocksize */
__u8 ocd_inodespace; /* log2 of the per-inode space consumption */
};
struct obd_connect_data {
- __u64 ocd_connect_flags; /* OBD_CONNECT_* per above */
- __u32 ocd_version; /* lustre release version number */
- __u32 ocd_grant; /* initial cache grant amount (bytes) */
- __u32 ocd_index; /* LOV index to connect to */
- __u32 ocd_brw_size; /* Maximum BRW size in bytes */
+ __u64 ocd_connect_flags; /* OBD_CONNECT_* per above */
+ __u32 ocd_version; /* lustre release version number */
+ __u32 ocd_grant; /* initial cache grant amount (bytes) */
+ __u32 ocd_index; /* LOV index to connect to */
+ __u32 ocd_brw_size; /* Maximum BRW size in bytes */
__u64 ocd_ibits_known; /* inode bits this client understands */
__u8 ocd_blocksize; /* log2 of the backend filesystem blocksize */
__u8 ocd_inodespace; /* log2 of the per-inode space consumption */
#define LOV_MAGIC_V1_DEF 0x0CD10BD0
#define LOV_MAGIC_V3_DEF 0x0CD30BD0
-#define LOV_PATTERN_RAID0 0x001 /* stripes are used round-robin */
-#define LOV_PATTERN_RAID1 0x002 /* stripes are mirrors of each other */
-#define LOV_PATTERN_FIRST 0x100 /* first stripe is not in round-robin */
-#define LOV_PATTERN_CMOBD 0x200
+#define LOV_PATTERN_RAID0 0x001 /* stripes are used round-robin */
+#define LOV_PATTERN_RAID1 0x002 /* stripes are mirrors of each other */
+#define LOV_PATTERN_FIRST 0x100 /* first stripe is not in round-robin */
+#define LOV_PATTERN_CMOBD 0x200
+
+#define LOV_PATTERN_F_MASK 0xffff0000
+#define LOV_PATTERN_F_RELEASED 0x80000000 /* HSM released file */
+
+#define lov_pattern(pattern) ((pattern) & ~LOV_PATTERN_F_MASK)
+#define lov_pattern_flags(pattern) ((pattern) & LOV_PATTERN_F_MASK)
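+
+/*
+ * For example (illustrative only), an HSM-released RAID0 file carries
+ * lmm_pattern = LOV_PATTERN_RAID0 | LOV_PATTERN_F_RELEASED, so that
+ * lov_pattern() yields LOV_PATTERN_RAID0 and lov_pattern_flags() yields
+ * LOV_PATTERN_F_RELEASED.
+ */
+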
#define lov_ost_data lov_ost_data_v1
struct lov_ost_data_v1 { /* per-stripe data structure (little-endian)*/
- __u64 l_object_id; /* OST object ID */
- __u64 l_object_seq; /* OST object seq number */
- __u32 l_ost_gen; /* generation of this l_ost_idx */
- __u32 l_ost_idx; /* OST index in LOV (lov_tgt_desc->tgts) */
+ struct ost_id l_ost_oi; /* OST object ID */
+ __u32 l_ost_gen; /* generation of this l_ost_idx */
+ __u32 l_ost_idx; /* OST index in LOV (lov_tgt_desc->tgts) */
};
#define lov_mds_md lov_mds_md_v1
struct lov_mds_md_v1 { /* LOV EA mds/wire data (little-endian) */
- __u32 lmm_magic; /* magic number = LOV_MAGIC_V1 */
- __u32 lmm_pattern; /* LOV_PATTERN_RAID0, LOV_PATTERN_RAID1 */
- __u64 lmm_object_id; /* LOV object ID */
- __u64 lmm_object_seq; /* LOV object seq number */
- __u32 lmm_stripe_size; /* size of stripe in bytes */
- /* lmm_stripe_count used to be __u32 */
- __u16 lmm_stripe_count; /* num stripes in use for this object */
- __u16 lmm_layout_gen; /* layout generation number */
- struct lov_ost_data_v1 lmm_objects[0]; /* per-stripe data */
+ __u32 lmm_magic; /* magic number = LOV_MAGIC_V1 */
+ __u32 lmm_pattern; /* LOV_PATTERN_RAID0, LOV_PATTERN_RAID1 */
+ struct ost_id lmm_oi; /* LOV object ID */
+ __u32 lmm_stripe_size; /* size of stripe in bytes */
+ /* lmm_stripe_count used to be __u32 */
+ __u16 lmm_stripe_count; /* num stripes in use for this object */
+ __u16 lmm_layout_gen; /* layout generation number */
+ struct lov_ost_data_v1 lmm_objects[0]; /* per-stripe data */
};
+/**
+ * Sigh, because pre-2.4 uses
+ * struct lov_mds_md_v1 {
+ * ........
+ * __u64 lmm_object_id;
+ * __u64 lmm_object_seq;
+ * ......
+ * }
+ * to identify the LOV(MDT) object, and lmm_object_seq will
+ * be a normal fid, which makes it hard to fold these conversions
+ * into ostid_to_fid(), so lmm_oi/fid conversion is done separately.
+ *
+ * The lmm_oi can be told apart as follows:
+ * 1.8: lmm_object_id = {inode}, lmm_object_gr = 0
+ * 2.1: lmm_object_id = {oid < 128k}, lmm_object_seq = FID_SEQ_NORMAL
+ * 2.4: lmm_oi.f_seq = FID_SEQ_NORMAL, lmm_oi.f_oid = {oid < 128k},
+ * lmm_oi.f_ver = 0
+ *
+ * But currently lmm_oi/lsm_oi does not have any "real" usages,
+ * except for printing some information, and the user can always
+ * get the real FID from the LMA; besides, this multiple-case check
+ * would make swabbing more complicated. So we keep using id/seq for
+ * lmm_oi.
+ */
+
+static inline void fid_to_lmm_oi(const struct lu_fid *fid,
+ struct ost_id *oi)
+{
+ oi->oi.oi_id = fid_oid(fid);
+ oi->oi.oi_seq = fid_seq(fid);
+}
+
+static inline void lmm_oi_set_seq(struct ost_id *oi, __u64 seq)
+{
+ oi->oi.oi_seq = seq;
+}
+
+static inline __u64 lmm_oi_id(struct ost_id *oi)
+{
+ return oi->oi.oi_id;
+}
+
+static inline __u64 lmm_oi_seq(struct ost_id *oi)
+{
+ return oi->oi.oi_seq;
+}
+
+static inline void lmm_oi_le_to_cpu(struct ost_id *dst_oi,
+ struct ost_id *src_oi)
+{
+ dst_oi->oi.oi_id = le64_to_cpu(src_oi->oi.oi_id);
+ dst_oi->oi.oi_seq = le64_to_cpu(src_oi->oi.oi_seq);
+}
+
+static inline void lmm_oi_cpu_to_le(struct ost_id *dst_oi,
+ struct ost_id *src_oi)
+{
+ dst_oi->oi.oi_id = cpu_to_le64(src_oi->oi.oi_id);
+ dst_oi->oi.oi_seq = cpu_to_le64(src_oi->oi.oi_seq);
+}
+
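+/*
+ * Usage sketch (illustrative only; "lmm" is assumed to point at a valid
+ * struct lov_mds_md_v1 EA): per the note above, lmm_oi stores id/seq
+ * rather than a real FID:
+ *
+ *	fid_to_lmm_oi(fid, &lmm->lmm_oi);
+ *	seq = lmm_oi_seq(&lmm->lmm_oi);  (FID_SEQ_NORMAL or above for 2.4+)
+ */
+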
/* extern void lustre_swab_lov_mds_md(struct lov_mds_md *llm); */
#define MAX_MD_SIZE (sizeof(struct lov_mds_md) + 4 * sizeof(struct lov_ost_data))
#define XATTR_NAME_LINK "trusted.link"
#define XATTR_NAME_FID "trusted.fid"
#define XATTR_NAME_VERSION "trusted.version"
-
+#define XATTR_NAME_SOM "trusted.som"
+#define XATTR_NAME_HSM "trusted.hsm"
+#define XATTR_NAME_LFSCK_NAMESPACE "trusted.lfsck_namespace"
struct lov_mds_md_v3 { /* LOV EA mds/wire data (little-endian) */
- __u32 lmm_magic; /* magic number = LOV_MAGIC_V3 */
- __u32 lmm_pattern; /* LOV_PATTERN_RAID0, LOV_PATTERN_RAID1 */
- __u64 lmm_object_id; /* LOV object ID */
- __u64 lmm_object_seq; /* LOV object seq number */
- __u32 lmm_stripe_size; /* size of stripe in bytes */
- /* lmm_stripe_count used to be __u32 */
- __u16 lmm_stripe_count; /* num stripes in use for this object */
- __u16 lmm_layout_gen; /* layout generation number */
- char lmm_pool_name[LOV_MAXPOOLNAME]; /* must be 32bit aligned */
- struct lov_ost_data_v1 lmm_objects[0]; /* per-stripe data */
-};
+ __u32 lmm_magic; /* magic number = LOV_MAGIC_V3 */
+ __u32 lmm_pattern; /* LOV_PATTERN_RAID0, LOV_PATTERN_RAID1 */
+ struct ost_id lmm_oi; /* LOV object ID */
+ __u32 lmm_stripe_size; /* size of stripe in bytes */
+ /* lmm_stripe_count used to be __u32 */
+ __u16 lmm_stripe_count; /* num stripes in use for this object */
+ __u16 lmm_layout_gen; /* layout generation number */
+ char lmm_pool_name[LOV_MAXPOOLNAME]; /* must be 32bit aligned */
+ struct lov_ost_data_v1 lmm_objects[0]; /* per-stripe data */
+};
+
+static inline __u32 lov_mds_md_size(__u16 stripes, __u32 lmm_magic)
+{
+ if (lmm_magic == LOV_MAGIC_V3)
+ return sizeof(struct lov_mds_md_v3) +
+ stripes * sizeof(struct lov_ost_data_v1);
+ else
+ return sizeof(struct lov_mds_md_v1) +
+ stripes * sizeof(struct lov_ost_data_v1);
+}
#define OBD_MD_FLID (0x00000001ULL) /* object ID */
#define OBD_MD_MDS (0x0000000100000000ULL) /* where an inode lives on */
#define OBD_MD_REINT (0x0000000200000000ULL) /* reintegrate oa */
#define OBD_MD_MEA (0x0000000400000000ULL) /* CMD split EA */
-#define OBD_MD_MDTIDX (0x0000000800000000ULL) /* Get MDT index */
+#define OBD_MD_TSTATE (0x0000000800000000ULL) /* transient state field */
#define OBD_MD_FLXATTR (0x0000001000000000ULL) /* xattr */
#define OBD_MD_FLXATTRLS (0x0000002000000000ULL) /* xattr list */
#define OBD_MD_FLCKSPLIT (0x0000080000000000ULL) /* Check split on server */
#define OBD_MD_FLCROSSREF (0x0000100000000000ULL) /* Cross-ref case */
#define OBD_MD_FLGETATTRLOCK (0x0000200000000000ULL) /* Get IOEpoch attributes
- * under lock */
+ * under lock; for xattr
+ * requests it means the
+ * client holds the lock */
#define OBD_MD_FLOBJCOUNT (0x0000400000000000ULL) /* for multiple destroy */
#define OBD_MD_FLRMTLSETFACL (0x0001000000000000ULL) /* lfs lsetfacl case */
#define OBD_MD_FLRMTRGETFACL (0x0008000000000000ULL) /* lfs rgetfacl case */
#define OBD_MD_FLDATAVERSION (0x0010000000000000ULL) /* iversion sum */
+#define OBD_MD_FLRELEASED (0x0020000000000000ULL) /* file released */
#define OBD_MD_FLGETATTR (OBD_MD_FLID | OBD_MD_FLATIME | OBD_MD_FLMTIME | \
OBD_MD_FLCTIME | OBD_MD_FLSIZE | OBD_MD_FLBLKSZ | \
OBD_MD_FLGID | OBD_MD_FLFLAGS | OBD_MD_FLNLINK | \
OBD_MD_FLGENER | OBD_MD_FLRDEV | OBD_MD_FLGROUP)
+#define OBD_MD_FLXATTRLOCKED OBD_MD_FLGETATTRLOCK
+#define OBD_MD_FLXATTRALL (OBD_MD_FLXATTR | OBD_MD_FLXATTRLS)
+
/* don't forget obdo_fid which is way down at the bottom so it can
* come after the definition of llog_cookie */
+enum hss_valid {
+ HSS_SETMASK = 0x01,
+ HSS_CLEARMASK = 0x02,
+ HSS_ARCHIVE_ID = 0x04,
+};
+
+struct hsm_state_set {
+ __u32 hss_valid;
+ __u32 hss_archive_id;
+ __u64 hss_setmask;
+ __u64 hss_clearmask;
+};
+
+extern void lustre_swab_hsm_user_state(struct hsm_user_state *hus);
+extern void lustre_swab_hsm_state_set(struct hsm_state_set *hss);
extern void lustre_swab_obd_statfs (struct obd_statfs *os);
#define OST_MAX_PRECREATE 20000
struct obd_ioobj {
- obd_id ioo_id;
- obd_seq ioo_seq;
- __u32 ioo_type;
- __u32 ioo_bufcnt;
+ struct ost_id ioo_oid; /* object ID, if multi-obj BRW */
+ __u32 ioo_max_brw; /* low 16 bits were o_mode before 2.4,
+ * now (PTLRPC_BULK_OPS_COUNT - 1) in
+ * high 16 bits in 2.4 and later */
+ __u32 ioo_bufcnt; /* number of niobufs for this object */
};
+#define IOOBJ_MAX_BRW_BITS 16
+#define IOOBJ_TYPE_MASK ((1U << IOOBJ_MAX_BRW_BITS) - 1)
+#define ioobj_max_brw_get(ioo) (((ioo)->ioo_max_brw >> IOOBJ_MAX_BRW_BITS) + 1)
+#define ioobj_max_brw_set(ioo, num) \
+do { (ioo)->ioo_max_brw = ((num) - 1) << IOOBJ_MAX_BRW_BITS; } while (0)
+
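+/*
+ * Packing sketch (illustrative only): a client that will use 4 bulk RPCs
+ * for one BRW stores 3 in the high 16 bits, which pre-2.4 peers read as
+ * 0 in the low 16 bits that used to carry o_mode:
+ *
+ *	struct obd_ioobj ioo;
+ *
+ *	ioobj_max_brw_set(&ioo, 4);	(ioo.ioo_max_brw == 3 << 16)
+ *	n = ioobj_max_brw_get(&ioo);	(n == 4)
+ */
+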
extern void lustre_swab_obd_ioobj (struct obd_ioobj *ioo);
/* multiple of 8 bytes => can array */
do { blocks = OST_LVB_ERR_INIT + rc; } while (0)
#define OST_LVB_GET_ERR(blocks) (int)(blocks - OST_LVB_ERR_INIT)
+struct ost_lvb_v1 {
+ __u64 lvb_size;
+ obd_time lvb_mtime;
+ obd_time lvb_atime;
+ obd_time lvb_ctime;
+ __u64 lvb_blocks;
+};
+
+extern void lustre_swab_ost_lvb_v1(struct ost_lvb_v1 *lvb);
+
struct ost_lvb {
- __u64 lvb_size;
- obd_time lvb_mtime;
- obd_time lvb_atime;
- obd_time lvb_ctime;
- __u64 lvb_blocks;
+ __u64 lvb_size;
+ obd_time lvb_mtime;
+ obd_time lvb_atime;
+ obd_time lvb_ctime;
+ __u64 lvb_blocks;
+ __u32 lvb_mtime_ns;
+ __u32 lvb_atime_ns;
+ __u32 lvb_ctime_ns;
+ __u32 lvb_padding;
};
+extern void lustre_swab_ost_lvb(struct ost_lvb *lvb);
+
/*
* lquota data structures
*/
__u64 gl_ver; /* new index version */
__u64 gl_hardlimit; /* new hardlimit or qunit value */
__u64 gl_softlimit; /* new softlimit */
- __u64 gl_pad1;
+ __u64 gl_time;
__u64 gl_pad2;
};
#define gl_qunit gl_hardlimit /* current qunit value used when
__u64 lvb_pad1;
};
+extern void lustre_swab_lquota_lvb(struct lquota_lvb *lvb);
+
/* LVB used with global quota lock */
#define lvb_glb_ver lvb_id_may_rel /* current version of the global index */
/* opcodes */
typedef enum {
- MDS_GETATTR = 33,
- MDS_GETATTR_NAME = 34,
- MDS_CLOSE = 35,
- MDS_REINT = 36,
- MDS_READPAGE = 37,
- MDS_CONNECT = 38,
- MDS_DISCONNECT = 39,
- MDS_GETSTATUS = 40,
- MDS_STATFS = 41,
- MDS_PIN = 42,
- MDS_UNPIN = 43,
- MDS_SYNC = 44,
- MDS_DONE_WRITING = 45,
- MDS_SET_INFO = 46,
- MDS_QUOTACHECK = 47,
- MDS_QUOTACTL = 48,
- MDS_GETXATTR = 49,
- MDS_SETXATTR = 50, /* obsolete, now it's MDS_REINT op */
- MDS_WRITEPAGE = 51,
- MDS_IS_SUBDIR = 52,
- MDS_GET_INFO = 53,
- MDS_LAST_OPC
+ MDS_GETATTR = 33,
+ MDS_GETATTR_NAME = 34,
+ MDS_CLOSE = 35,
+ MDS_REINT = 36,
+ MDS_READPAGE = 37,
+ MDS_CONNECT = 38,
+ MDS_DISCONNECT = 39,
+ MDS_GETSTATUS = 40,
+ MDS_STATFS = 41,
+ MDS_PIN = 42,
+ MDS_UNPIN = 43,
+ MDS_SYNC = 44,
+ MDS_DONE_WRITING = 45,
+ MDS_SET_INFO = 46,
+ MDS_QUOTACHECK = 47,
+ MDS_QUOTACTL = 48,
+ MDS_GETXATTR = 49,
+ MDS_SETXATTR = 50, /* obsolete, now it's MDS_REINT op */
+ MDS_WRITEPAGE = 51,
+ MDS_IS_SUBDIR = 52,
+ MDS_GET_INFO = 53,
+ MDS_HSM_STATE_GET = 54,
+ MDS_HSM_STATE_SET = 55,
+ MDS_HSM_ACTION = 56,
+ MDS_HSM_PROGRESS = 57,
+ MDS_HSM_REQUEST = 58,
+ MDS_HSM_CT_REGISTER = 59,
+ MDS_HSM_CT_UNREGISTER = 60,
+ MDS_SWAP_LAYOUTS = 61,
+ MDS_LAST_OPC
} mds_cmd_t;
#define MDS_FIRST_OPC MDS_GETATTR
+
+/* opcodes for object update */
+typedef enum {
+ UPDATE_OBJ = 1000,
+ UPDATE_LAST_OPC
+} update_cmd_t;
+
+#define UPDATE_FIRST_OPC UPDATE_OBJ
+
/*
* Do not exceed 63
*/
typedef enum {
- REINT_SETATTR = 1,
- REINT_CREATE = 2,
- REINT_LINK = 3,
- REINT_UNLINK = 4,
- REINT_RENAME = 5,
- REINT_OPEN = 6,
- REINT_SETXATTR = 7,
-// REINT_CLOSE = 8,
+ REINT_SETATTR = 1,
+ REINT_CREATE = 2,
+ REINT_LINK = 3,
+ REINT_UNLINK = 4,
+ REINT_RENAME = 5,
+ REINT_OPEN = 6,
+ REINT_SETXATTR = 7,
+ REINT_RMENTRY = 8,
// REINT_WRITE = 9,
REINT_MAX
} mds_reint_t, mdt_reint_t;
#define DISP_ENQ_OPEN_REF 0x00800000
#define DISP_ENQ_CREATE_REF 0x01000000
#define DISP_OPEN_LOCK 0x02000000
+#define DISP_OPEN_LEASE 0x04000000
/* INODE LOCK PARTS */
#define MDS_INODELOCK_LOOKUP 0x000001 /* dentry, mode, owner, group */
#define MDS_INODELOCK_UPDATE 0x000002 /* size, links, timestamps */
#define MDS_INODELOCK_OPEN 0x000004 /* For opened files */
#define MDS_INODELOCK_LAYOUT 0x000008 /* for layout */
+#define MDS_INODELOCK_PERM 0x000010 /* for permission */
+#define MDS_INODELOCK_XATTR 0x000020 /* extended attributes */
-/* Do not forget to increase MDS_INODELOCK_MAXSHIFT when adding new bits
- * XXX: MDS_INODELOCK_MAXSHIFT should be increased to 3 once the layout lock is
- * supported */
-#define MDS_INODELOCK_MAXSHIFT 2
+#define MDS_INODELOCK_MAXSHIFT 5
/* This FULL lock is useful to take on unlink sort of operations */
#define MDS_INODELOCK_FULL ((1<<(MDS_INODELOCK_MAXSHIFT+1))-1)
/* mdt_thread_info.mti_flags. */
enum md_op_flags {
- /* The flag indicates Size-on-MDS attributes are changed. */
- MF_SOM_CHANGE = (1 << 0),
- /* Flags indicates an epoch opens or closes. */
- MF_EPOCH_OPEN = (1 << 1),
- MF_EPOCH_CLOSE = (1 << 2),
- MF_MDC_CANCEL_FID1 = (1 << 3),
- MF_MDC_CANCEL_FID2 = (1 << 4),
- MF_MDC_CANCEL_FID3 = (1 << 5),
- MF_MDC_CANCEL_FID4 = (1 << 6),
- /* There is a pending attribute update. */
- MF_SOM_AU = (1 << 7),
- /* Cancel OST locks while getattr OST attributes. */
- MF_GETATTR_LOCK = (1 << 8),
+ /* The flag indicates Size-on-MDS attributes are changed. */
+ MF_SOM_CHANGE = (1 << 0),
+ /* Flags indicates an epoch opens or closes. */
+ MF_EPOCH_OPEN = (1 << 1),
+ MF_EPOCH_CLOSE = (1 << 2),
+ MF_MDC_CANCEL_FID1 = (1 << 3),
+ MF_MDC_CANCEL_FID2 = (1 << 4),
+ MF_MDC_CANCEL_FID3 = (1 << 5),
+ MF_MDC_CANCEL_FID4 = (1 << 6),
+ /* There is a pending attribute update. */
+ MF_SOM_AU = (1 << 7),
+ /* Cancel OST locks while getattr OST attributes. */
+ MF_GETATTR_LOCK = (1 << 8),
+ MF_GET_MDT_IDX = (1 << 9),
};
#define MF_SOM_LOCAL_FLAGS (MF_SOM_CHANGE | MF_EPOCH_OPEN | MF_EPOCH_CLOSE)
}
#endif
+/* 64 possible states */
+enum md_transient_state {
+ MS_RESTORE = (1 << 0), /* restore is running */
+};
+
struct mdt_body {
struct lu_fid fid1;
struct lu_fid fid2;
obd_time ctime;
__u64 blocks; /* XID, in the case of MDS_READPAGE */
__u64 ioepoch;
- __u64 ino;
+ __u64 t_state; /* transient file state defined in
+ * enum md_transient_state
+ * was "ino" until 2.4.0 */
__u32 fsuid;
__u32 fsgid;
__u32 capability;
__u32 flags; /* from vfs for pin/unpin, LUSTRE_BFLAG close */
__u32 rdev;
__u32 nlink; /* #bytes to read in the case of MDS_READPAGE */
- __u32 generation;
+ __u32 unused2; /* was "generation" until 2.4.0 */
__u32 suppgid;
__u32 eadatasize;
__u32 aclsize;
obd_time sa_ctime;
__u32 sa_attr_flags;
__u32 sa_mode;
- __u32 sa_padding_2;
+ __u32 sa_bias; /* some operation flags */
__u32 sa_padding_3;
__u32 sa_padding_4;
__u32 sa_padding_5;
* anymore, reserve this flags
* just for preventing such bit
* to be reused. */
-#define MDS_CREATE_RMT_ACL 01000000000 /* indicate create on remote server
- * with default ACL */
-#define MDS_CREATE_SLAVE_OBJ 02000000000 /* indicate create slave object
- * actually, this is for create, not
- * conflict with other open flags */
+
#define MDS_OPEN_LOCK 04000000000 /* This open requires open lock */
#define MDS_OPEN_HAS_EA 010000000000 /* specify object create pattern */
#define MDS_OPEN_HAS_OBJS 020000000000 /* Just set the EA the obj exist */
#define MDS_OPEN_NORESTORE 0100000000000ULL /* Do not restore file at open */
#define MDS_OPEN_NEWSTRIPE 0200000000000ULL /* New stripe needed (restripe or
* hsm restore) */
+#define MDS_OPEN_VOLATILE 0400000000000ULL /* File is volatile = created
+ unlinked */
+#define MDS_OPEN_LEASE 01000000000000ULL /* Open the file and grant lease
+ * delegation, succeed if it's not
+ * being opened with conflict mode.
+ */
+#define MDS_OPEN_RELEASE 02000000000000ULL /* Open the file for HSM release */
/* permission for create non-directory file */
#define MAY_CREATE (1 << 7)
/* lfs rgetfacl permission check */
#define MAY_RGETFACL (1 << 14)
-enum {
- MDS_CHECK_SPLIT = 1 << 0,
- MDS_CROSS_REF = 1 << 1,
- MDS_VTX_BYPASS = 1 << 2,
- MDS_PERM_BYPASS = 1 << 3,
- MDS_SOM = 1 << 4,
- MDS_QUOTA_IGNORE = 1 << 5,
- MDS_CLOSE_CLEANUP = 1 << 6,
- MDS_KEEP_ORPHAN = 1 << 7,
- MDS_RECOV_OPEN = 1 << 8,
+enum mds_op_bias {
+ MDS_CHECK_SPLIT = 1 << 0,
+ MDS_CROSS_REF = 1 << 1,
+ MDS_VTX_BYPASS = 1 << 2,
+ MDS_PERM_BYPASS = 1 << 3,
+ MDS_SOM = 1 << 4,
+ MDS_QUOTA_IGNORE = 1 << 5,
+ MDS_CLOSE_CLEANUP = 1 << 6,
+ MDS_KEEP_ORPHAN = 1 << 7,
+ MDS_RECOV_OPEN = 1 << 8,
+ MDS_DATA_MODIFIED = 1 << 9,
+ MDS_CREATE_VOLATILE = 1 << 10,
+ MDS_OWNEROVERRIDE = 1 << 11,
+ MDS_HSM_RELEASE = 1 << 12,
};
/* instance of mdt_reint_rec */
* extend cr_flags size without breaking 1.8 compat */
__u32 cr_flags_l; /* for use with open, low 32 bits */
__u32 cr_flags_h; /* for use with open, high 32 bits */
- __u32 cr_padding_3; /* rr_padding_3 */
+ __u32 cr_umask; /* umask for create */
__u32 cr_padding_4; /* rr_padding_4 */
};
* rr_padding_x fields, then update lustre_swab_mdt_rec_reint() also.
*/
struct mdt_rec_reint {
- __u32 rr_opcode;
- __u32 rr_cap;
- __u32 rr_fsuid;
- __u32 rr_fsuid_h;
- __u32 rr_fsgid;
- __u32 rr_fsgid_h;
- __u32 rr_suppgid1;
- __u32 rr_suppgid1_h;
- __u32 rr_suppgid2;
- __u32 rr_suppgid2_h;
- struct lu_fid rr_fid1;
- struct lu_fid rr_fid2;
- obd_time rr_mtime;
- obd_time rr_atime;
- obd_time rr_ctime;
- __u64 rr_size;
- __u64 rr_blocks;
- __u32 rr_bias;
- __u32 rr_mode;
- __u32 rr_flags;
- __u32 rr_padding_2; /* also fix lustre_swab_mdt_rec_reint */
- __u32 rr_padding_3; /* also fix lustre_swab_mdt_rec_reint */
- __u32 rr_padding_4; /* also fix lustre_swab_mdt_rec_reint */
+ __u32 rr_opcode;
+ __u32 rr_cap;
+ __u32 rr_fsuid;
+ __u32 rr_fsuid_h;
+ __u32 rr_fsgid;
+ __u32 rr_fsgid_h;
+ __u32 rr_suppgid1;
+ __u32 rr_suppgid1_h;
+ __u32 rr_suppgid2;
+ __u32 rr_suppgid2_h;
+ struct lu_fid rr_fid1;
+ struct lu_fid rr_fid2;
+ obd_time rr_mtime;
+ obd_time rr_atime;
+ obd_time rr_ctime;
+ __u64 rr_size;
+ __u64 rr_blocks;
+ __u32 rr_bias;
+ __u32 rr_mode;
+ __u32 rr_flags;
+ __u32 rr_flags_h;
+ __u32 rr_umask;
+ __u32 rr_padding_4; /* also fix lustre_swab_mdt_rec_reint */
};
extern void lustre_swab_mdt_rec_reint(struct mdt_rec_reint *rr);
__u64 name[RES_NAME_SIZE];
};
+#define DLDLMRES "["LPX64":"LPX64":"LPX64"]."LPX64
+#define PLDLMRES(res) (res)->lr_name.name[0], (res)->lr_name.name[1], \
+ (res)->lr_name.name[2], (res)->lr_name.name[3]
+
extern void lustre_swab_ldlm_res_id (struct ldlm_res_id *id);
static inline int ldlm_res_eq(const struct ldlm_res_id *res0,
extern void lustre_swab_ldlm_policy_data (ldlm_wire_policy_data_t *d);
-/* Similarly to ldlm_wire_policy_data_t, there is one common swabber for all
- * LVB types. As a result, any new LVB structure must match the fields of the
- * ost_lvb structure. */
-union ldlm_wire_lvb {
- struct ost_lvb l_ost;
- struct lquota_lvb l_lquota;
-};
-
-extern void lustre_swab_lvb(union ldlm_wire_lvb *);
-
union ldlm_gl_desc {
struct ldlm_gl_lquota_desc lquota_desc;
};
extern void lustre_swab_ldlm_reply (struct ldlm_reply *r);
+#define ldlm_flags_to_wire(flags) ((__u32)(flags))
+#define ldlm_flags_from_wire(flags) ((__u64)(flags))
+
/*
* Opcodes for mountconf (mgs and mgc)
*/
/** Identifier for a single log object */
struct llog_logid {
- __u64 lgl_oid;
- __u64 lgl_oseq;
+ struct ost_id lgl_oi;
__u32 lgl_ogen;
} __attribute__((packed));
/* LLOG_JOIN_REC = LLOG_OP_MAGIC | 0x50000, obsolete 1.8.0 */
CHANGELOG_REC = LLOG_OP_MAGIC | 0x60000,
CHANGELOG_USER_REC = LLOG_OP_MAGIC | 0x70000,
+ HSM_AGENT_REC = LLOG_OP_MAGIC | 0x80000,
LLOG_HDR_MAGIC = LLOG_OP_MAGIC | 0x45539,
LLOG_LOGID_MAGIC = LLOG_OP_MAGIC | 0x4553b,
} llog_op_type;
__u32 lrt_index;
};
+/* Where the record data follows, just after the header */
+#define REC_DATA(ptr) \
+ ((void *)((char *)(ptr) + sizeof(struct llog_rec_hdr)))
+
+#define REC_DATA_LEN(rec) \
+ ((rec)->lrh_len - sizeof(struct llog_rec_hdr) - \
+ sizeof(struct llog_rec_tail))
+
struct llog_logid_rec {
struct llog_rec_hdr lid_hdr;
struct llog_logid lid_id;
struct llog_setattr64_rec {
struct llog_rec_hdr lsr_hdr;
- obd_id lsr_oid;
- obd_seq lsr_oseq;
+ struct ost_id lsr_oi;
__u32 lsr_uid;
__u32 lsr_uid_h;
__u32 lsr_gid;
struct llog_rec_tail cur_tail;
} __attribute__((packed));
+enum agent_req_status {
+ ARS_WAITING,
+ ARS_STARTED,
+ ARS_FAILED,
+ ARS_CANCELED,
+ ARS_SUCCEED,
+};
+
+static inline char *agent_req_status2name(enum agent_req_status ars)
+{
+ switch (ars) {
+ case ARS_WAITING:
+ return "WAITING";
+ case ARS_STARTED:
+ return "STARTED";
+ case ARS_FAILED:
+ return "FAILED";
+ case ARS_CANCELED:
+ return "CANCELED";
+ case ARS_SUCCEED:
+ return "SUCCEED";
+ default:
+ return "UNKNOWN";
+ }
+}
+
+static inline bool agent_req_in_final_state(enum agent_req_status ars)
+{
+ return ((ars == ARS_SUCCEED) || (ars == ARS_FAILED) ||
+ (ars == ARS_CANCELED));
+}
+
+struct llog_agent_req_rec {
+ struct llog_rec_hdr arr_hdr; /**< record header */
+ __u32 arr_status; /**< status of the request */
+ /* must match enum
+ * agent_req_status */
+ __u32 arr_archive_id; /**< backend archive number */
+ __u64 arr_flags; /**< req flags */
+ __u64 arr_compound_id; /**< compound cookie */
+ __u64 arr_req_create; /**< req. creation time */
+ __u64 arr_req_change; /**< req. status change time */
+ struct hsm_action_item arr_hai; /**< req. to the agent */
+ struct llog_rec_tail arr_tail; /**< record tail for_sizeof_only */
+} __attribute__((packed));
+
/* Old llog gen for compatibility */
struct llog_gen {
__u64 mnt_cnt;
/* Note: 64-bit types are 64-bit aligned in structure */
struct obdo {
obd_valid o_valid; /* hot fields in this obdo */
- struct ost_id o_oi;
+ struct ost_id o_oi;
obd_id o_parent_seq;
obd_size o_size; /* o_size-o_blocks == ost_lvb */
obd_time o_mtime;
obd_flag o_flags;
obd_count o_nlink; /* brw: checksum */
obd_count o_parent_oid;
- obd_count o_misc; /* brw: o_dropped */
+ obd_count o_misc; /* brw: o_dropped */
+
__u64 o_ioepoch; /* epoch in ost writes */
__u32 o_stripe_idx; /* holds stripe idx */
__u32 o_parent_ver;
* locks */
struct llog_cookie o_lcookie; /* destroy: unlink cookie from
* MDS */
- __u32 o_uid_h;
- __u32 o_gid_h;
+ __u32 o_uid_h;
+ __u32 o_gid_h;
- __u64 o_data_version; /* getattr: sum of iversion for
- * each stripe.
- * brw: grant space consumed on
- * the client for the write */
- __u64 o_padding_4;
- __u64 o_padding_5;
- __u64 o_padding_6;
+ __u64 o_data_version; /* getattr: sum of iversion for
+ * each stripe.
+ * brw: grant space consumed on
+ * the client for the write */
+ __u64 o_padding_4;
+ __u64 o_padding_5;
+ __u64 o_padding_6;
};
-#define o_id o_oi.oi_id
-#define o_seq o_oi.oi_seq
#define o_dirty o_blocks
#define o_undirty o_mode
#define o_dropped o_misc
#define o_cksum o_nlink
#define o_grant_used o_data_version
-static inline void lustre_set_wire_obdo(struct obdo *wobdo, struct obdo *lobdo)
-{
- memcpy(wobdo, lobdo, sizeof(*lobdo));
- wobdo->o_flags &= ~OBD_FL_LOCAL_MASK;
+static inline void lustre_set_wire_obdo(struct obd_connect_data *ocd,
+ struct obdo *wobdo,
+ const struct obdo *lobdo)
+{
+ *wobdo = *lobdo;
+ wobdo->o_flags &= ~OBD_FL_LOCAL_MASK;
+ if (ocd == NULL)
+ return;
+
+ if (unlikely(!(ocd->ocd_connect_flags & OBD_CONNECT_FID)) &&
+ fid_seq_is_echo(ostid_seq(&lobdo->o_oi))) {
+ /* Currently OBD_FL_OSTID is only used when a 2.4 echo
+ * client communicates with a pre-2.4 server */
+ wobdo->o_oi.oi.oi_id = fid_oid(&lobdo->o_oi.oi_fid);
+ wobdo->o_oi.oi.oi_seq = fid_seq(&lobdo->o_oi.oi_fid);
+ }
}
-static inline void lustre_get_wire_obdo(struct obdo *lobdo, struct obdo *wobdo)
+static inline void lustre_get_wire_obdo(struct obd_connect_data *ocd,
+ struct obdo *lobdo,
+ const struct obdo *wobdo)
{
obd_flag local_flags = 0;
if (lobdo->o_valid & OBD_MD_FLFLAGS)
local_flags = lobdo->o_flags & OBD_FL_LOCAL_MASK;
- LASSERT(!(wobdo->o_flags & OBD_FL_LOCAL_MASK));
-
- memcpy(lobdo, wobdo, sizeof(*lobdo));
- if (local_flags != 0) {
- lobdo->o_valid |= OBD_MD_FLFLAGS;
- lobdo->o_flags &= ~OBD_FL_LOCAL_MASK;
- lobdo->o_flags |= local_flags;
- }
+ *lobdo = *wobdo;
+ if (local_flags != 0) {
+ lobdo->o_valid |= OBD_MD_FLFLAGS;
+ lobdo->o_flags &= ~OBD_FL_LOCAL_MASK;
+ lobdo->o_flags |= local_flags;
+ }
+ if (ocd == NULL)
+ return;
+
+ if (unlikely(!(ocd->ocd_connect_flags & OBD_CONNECT_FID)) &&
+ fid_seq_is_echo(wobdo->o_oi.oi.oi_seq)) {
+ /* see above */
+ lobdo->o_oi.oi_fid.f_seq = wobdo->o_oi.oi.oi_seq;
+ lobdo->o_oi.oi_fid.f_oid = wobdo->o_oi.oi.oi_id;
+ lobdo->o_oi.oi_fid.f_ver = 0;
+ }
}
extern void lustre_swab_obdo (struct obdo *o);
extern void lustre_swab_llog_hdr (struct llog_log_hdr *h);
extern void lustre_swab_llogd_conn_body (struct llogd_conn_body *d);
extern void lustre_swab_llog_rec(struct llog_rec_hdr *rec);
+extern void lustre_swab_llog_id(struct llog_logid *lid);
struct lustre_cfg;
extern void lustre_swab_lustre_cfg(struct lustre_cfg *lcfg);
* For the time being, we only support fixed-size key & record. */
char lip_entries[0];
};
+extern void lustre_swab_lip_header(struct lu_idxpage *lip);
#define LIP_HDR_SIZE (offsetof(struct lu_idxpage, lip_entries))
void lustre_swab_fid2path (struct getinfo_fid2path *gf);
+enum {
+ LAYOUT_INTENT_ACCESS = 0,
+ LAYOUT_INTENT_READ = 1,
+ LAYOUT_INTENT_WRITE = 2,
+ LAYOUT_INTENT_GLIMPSE = 3,
+ LAYOUT_INTENT_TRUNC = 4,
+ LAYOUT_INTENT_RELEASE = 5,
+ LAYOUT_INTENT_RESTORE = 6
+};
+
+/* enqueue layout lock with intent */
+struct layout_intent {
+ __u32 li_opc; /* intent operation for enqueue, read, write etc */
+ __u32 li_flags;
+ __u64 li_start;
+ __u64 li_end;
+};
+
+void lustre_swab_layout_intent(struct layout_intent *li);
+
+/**
+ * On the wire version of hsm_progress structure.
+ *
+ * Contains the userspace hsm_progress and some internal fields.
+ */
+struct hsm_progress_kernel {
+ /* Field taken from struct hsm_progress */
+ lustre_fid hpk_fid;
+ __u64 hpk_cookie;
+ struct hsm_extent hpk_extent;
+ __u16 hpk_flags;
+ __u16 hpk_errval; /* positive val */
+ __u32 hpk_padding1;
+ /* Additional fields */
+ __u64 hpk_data_version;
+ __u64 hpk_padding2;
+} __attribute__((packed));
+
+extern void lustre_swab_hsm_current_action(struct hsm_current_action *action);
+extern void lustre_swab_hsm_progress_kernel(struct hsm_progress_kernel *hpk);
+extern void lustre_swab_hsm_user_item(struct hsm_user_item *hui);
+extern void lustre_swab_hsm_request(struct hsm_request *hr);
+
+/**
+ * These are the object update opcodes under UPDATE_OBJ, which are currently
+ * used by cross-ref operations between MDTs.
+ *
+ * During a cross-ref operation, the master MDT, which the client sent the
+ * request to, will disassemble the operation into object updates, then OSP
+ * will send these updates to the remote MDT to be executed.
+ *
+ * Update request format
+ * magic: UPDATE_BUFFER_MAGIC_V1
+ * Count: How many updates in the req.
+ * bufs[0] : following are packets of object.
+ * update[0]:
+ * type: object_update_op, the op code of the update
+ * fid: The object fid of the update.
+ * lens/bufs: other parameters of the update.
+ * update[1]:
+ * type: object_update_op, the op code of the update
+ * fid: The object fid of the update.
+ * lens/bufs: other parameters of the update.
+ * ..........
+ * update[7]:
+ * type: object_update_op, the op code of the update
+ * fid: The object fid of the update.
+ * lens/bufs: other parameters of the update.
+ * Currently at most 8 updates are packed per object update request.
+ *
+ *******************************************************************
+ * update reply format:
+ *
+ * ur_version: UPDATE_REPLY_V1
+ * ur_count: The count of replies, usually equal to the number
+ * of updates in the request.
+ * ur_lens: The reply lengths of each object update.
+ *
+ * replies: 1st update reply [4bytes_ret: other body]
+ * 2nd update reply [4bytes_ret: other body]
+ * .....
+ * nth update reply [4bytes_ret: other body]
+ *
+ * For each reply of an update, the format is
+ * result(4 bytes):Other stuff
+ */
+
+#define UPDATE_MAX_OPS 10
+#define UPDATE_BUFFER_MAGIC_V1 0xBDDE0001
+#define UPDATE_BUFFER_MAGIC UPDATE_BUFFER_MAGIC_V1
+#define UPDATE_BUF_COUNT 8
+enum object_update_op {
+ OBJ_CREATE = 1,
+ OBJ_DESTROY = 2,
+ OBJ_REF_ADD = 3,
+ OBJ_REF_DEL = 4,
+ OBJ_ATTR_SET = 5,
+ OBJ_ATTR_GET = 6,
+ OBJ_XATTR_SET = 7,
+ OBJ_XATTR_GET = 8,
+ OBJ_INDEX_LOOKUP = 9,
+ OBJ_INDEX_INSERT = 10,
+ OBJ_INDEX_DELETE = 11,
+ OBJ_LAST
+};
+
+struct update {
+ __u32 u_type;
+ __u32 u_batchid;
+ struct lu_fid u_fid;
+ __u32 u_lens[UPDATE_BUF_COUNT];
+ __u32 u_bufs[0];
+};
+
+struct update_buf {
+ __u32 ub_magic;
+ __u32 ub_count;
+ __u32 ub_bufs[0];
+};
+
+#define UPDATE_REPLY_V1 0x00BD0001
+struct update_reply {
+ __u32 ur_version;
+ __u32 ur_count;
+ __u32 ur_lens[0];
+};
+
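+/*
+ * Reply layout sketch (illustrative only; any alignment padding between
+ * reply bodies is ignored here): the first reply body follows the
+ * ur_lens[ur_count] array, and its leading 4 bytes carry the result:
+ *
+ *	body = (char *)ur + sizeof(*ur) + ur->ur_count * sizeof(__u32);
+ *	rc   = *(__u32 *)body;
+ */
+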
+void lustre_swab_update_buf(struct update_buf *ub);
+void lustre_swab_update_reply_buf(struct update_reply *ur);
+
+/** layout swap request structure
+ * fid1 and fid2 are in mdt_body
+ */
+struct mdc_swap_layouts {
+ __u64 msl_flags;
+} __packed;
+
+void lustre_swab_swap_layouts(struct mdc_swap_layouts *msl);
+
+struct close_data {
+ struct lustre_handle cd_handle;
+ struct lu_fid cd_fid;
+ __u64 cd_data_version;
+ __u64 cd_reserved[8];
+};
+
+void lustre_swab_close_data(struct close_data *data);
#endif
/** @} lustreidl */