* GPL HEADER END
*/
/*
- * Copyright 2008 Sun Microsystems, Inc. All rights reserved
+ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
*/
/*
#ifndef _LUSTRE_IDL_H_
#define _LUSTRE_IDL_H_
-#include <libcfs/libcfs.h>
+#include <libcfs/libcfs.h> /* for LASSERT, LPU64, LPX64, etc */
/* Defn's shared with user-space. */
#include <lustre/lustre_user.h>
-#include <lustre/ll_fiemap.h>
/*
* GENERAL STUFF
/* Portal 63 is reserved for the Cray Inc DVS - nic@cray.com, roe@cray.com, n8851@cray.com */
-#define SVC_KILLED 1
-#define SVC_EVENT 2
-#define SVC_SIGNAL 4
-#define SVC_RUNNING 8
-#define SVC_STOPPING 16
-#define SVC_STOPPED 32
-
/* packet types */
#define PTL_RPC_MSG_REQUEST 4711
#define PTL_RPC_MSG_ERR 4712
typedef __u32 mdsno_t;
typedef __u64 seqno_t;
+typedef __u64 obd_id;
+typedef __u64 obd_seq;
+typedef __s64 obd_time;
+typedef __u64 obd_size;
+typedef __u64 obd_off;
+typedef __u64 obd_blocks;
+typedef __u64 obd_valid;
+typedef __u32 obd_blksize;
+typedef __u32 obd_mode;
+typedef __u32 obd_uid;
+typedef __u32 obd_gid;
+typedef __u32 obd_flag;
+typedef __u32 obd_count;
/**
* Describes a range of sequence, lsr_start is included but lsr_end is
* not in the range.
- * Same structure is used in fld module where lsr_mdt field holds mdt id
+ * Same structure is used in fld module where lsr_index field holds mdt id
* of the home mdt.
*/
+#define LU_SEQ_RANGE_MDT 0x0
+#define LU_SEQ_RANGE_OST 0x1
+
struct lu_seq_range {
__u64 lsr_start;
__u64 lsr_end;
- __u32 lsr_mdt;
- __u32 lsr_padding;
+ __u32 lsr_index;
+ __u32 lsr_flags;
};
/**
static inline void range_init(struct lu_seq_range *range)
{
- range->lsr_start = range->lsr_end = range->lsr_mdt = 0;
+ range->lsr_start = range->lsr_end = range->lsr_index = 0;
}
/**
return s >= range->lsr_start && s < range->lsr_end;
}
-/**
- * allocate \a w units of sequence from range \a from.
- */
-static inline void range_alloc(struct lu_seq_range *to,
- struct lu_seq_range *from,
- __u64 width)
-{
- to->lsr_start = from->lsr_start;
- to->lsr_end = from->lsr_start + width;
- from->lsr_start += width;
-}
-
static inline int range_is_sane(const struct lu_seq_range *range)
{
return (range->lsr_end >= range->lsr_start);
return range_space(range) == 0;
}
-#define DRANGE "[%#16.16"LPF64"x-%#16.16"LPF64"x):%x"
+/* return 0 if the two ranges have the same location */
+static inline int range_compare_loc(const struct lu_seq_range *r1,
+ const struct lu_seq_range *r2)
+{
+ return r1->lsr_index != r2->lsr_index ||
+ r1->lsr_flags != r2->lsr_flags;
+}
+
+#define DRANGE "[%#16.16"LPF64"x-%#16.16"LPF64"x):%x:%x"
#define PRANGE(range) \
(range)->lsr_start, \
(range)->lsr_end, \
- (range)->lsr_mdt
+ (range)->lsr_index, \
+ (range)->lsr_flags
/** \defgroup lu_fid lu_fid
* @{ */
}
};
+/* This is the maximum number of MDTs allowed in CMD testing until such
+ * a time that FID-on-OST is implemented. This is due to the limitations
+ * of packing non-0-MDT numbers into the FID SEQ namespace. Once FID-on-OST
+ * is implemented this limit will be virtually unlimited. */
+#define MAX_MDT_COUNT 8
+
/**
* fid constants
memset(fid, 0, sizeof(*fid));
}
-/* Normal FID sequence starts from this value, i.e. 1<<33 */
-#define FID_SEQ_START 0x200000000ULL
+static inline obd_id fid_ver_oid(const struct lu_fid *fid)
+{
+ return ((__u64)fid_ver(fid) << 32 | fid_oid(fid));
+}
+
+/**
+ * Note that ldiskfs reserves inode numbers 0-11 for internal use, so
+ * the IGIF namespace (seq == inode number) never generates SEQ numbers
+ * below 12; those SEQ numbers are free for other uses without risking
+ *
+ * Different FID Format
+ * http://arch.lustre.org/index.php?title=Interoperability_fids_zfs#NEW.0
+ */
+enum fid_seq {
+ FID_SEQ_OST_MDT0 = 0,
+ FID_SEQ_LLOG = 1,
+ FID_SEQ_ECHO = 2,
+ FID_SEQ_OST_MDT1 = 3,
+ FID_SEQ_OST_MAX = 9, /* Max MDT count before OST_on_FID */
+ FID_SEQ_RSVD = 11,
+ FID_SEQ_IGIF = 12,
+ FID_SEQ_IGIF_MAX = 0x0ffffffffULL,
+ FID_SEQ_IDIF = 0x100000000ULL,
+ FID_SEQ_IDIF_MAX = 0x1ffffffffULL,
+ /* Normal FID sequence starts from this value, i.e. 1<<33 */
+ FID_SEQ_START = 0x200000000ULL,
+ FID_SEQ_LOCAL_FILE = 0x200000001ULL,
+ FID_SEQ_DOT_LUSTRE = 0x200000002ULL,
+ FID_SEQ_NORMAL = 0x200000400ULL
+};
+
+#define OBIF_OID_MAX_BITS 32
+#define OBIF_MAX_OID (1ULL << OBIF_OID_MAX_BITS)
+#define OBIF_OID_MASK ((1ULL << OBIF_OID_MAX_BITS) - 1)
+#define IDIF_OID_MAX_BITS 48
+#define IDIF_MAX_OID (1ULL << IDIF_OID_MAX_BITS)
+#define IDIF_OID_MASK ((1ULL << IDIF_OID_MAX_BITS) - 1)
+
+
+static inline int fid_seq_is_mdt0(obd_seq seq)
+{
+ return (seq == FID_SEQ_OST_MDT0);
+}
+
+static inline int fid_seq_is_cmd(const __u64 seq)
+{
+ return (seq >= FID_SEQ_OST_MDT1 && seq <= FID_SEQ_OST_MAX);
+};
+
+static inline int fid_seq_is_mdt(const __u64 seq)
+{
+ return seq == FID_SEQ_OST_MDT0 ||
+ (seq >= FID_SEQ_OST_MDT1 && seq <= FID_SEQ_OST_MAX);
+};
-/* IDIF sequence starts from this value, i.e. 1<<32 */
-#define IDIF_SEQ_START 0x100000000ULL
+static inline int fid_seq_is_rsvd(const __u64 seq)
+{
+ return seq <= FID_SEQ_RSVD;
+};
+
+static inline int fid_is_mdt0(const struct lu_fid *fid)
+{
+ return fid_seq_is_mdt0(fid_seq(fid));
+}
/**
* Check if a fid is igif or not.
* \param fid the fid to be tested.
* \return true if the fid is a igif; otherwise false.
*/
+static inline int fid_seq_is_igif(const __u64 seq)
+{
+ return seq >= FID_SEQ_IGIF && seq <= FID_SEQ_IGIF_MAX;
+}
+
static inline int fid_is_igif(const struct lu_fid *fid)
{
- return fid_seq(fid) > 0 && fid_seq(fid) < IDIF_SEQ_START;
+ return fid_seq_is_igif(fid_seq(fid));
}
/**
* \param fid the fid to be tested.
* \return true if the fid is a idif; otherwise false.
*/
+static inline int fid_seq_is_idif(const __u64 seq)
+{
+ return seq >= FID_SEQ_IDIF && seq <= FID_SEQ_IDIF_MAX;
+}
+
static inline int fid_is_idif(const struct lu_fid *fid)
{
- return fid_seq(fid) >= IDIF_SEQ_START && fid_seq(fid) < FID_SEQ_START;
+ return fid_seq_is_idif(fid_seq(fid));
+}
+
+struct ost_id {
+ obd_id oi_id;
+ obd_seq oi_seq;
+};
+
+static inline int fid_seq_is_norm(const __u64 seq)
+{
+ return (seq >= FID_SEQ_NORMAL);
+}
+
+static inline int fid_is_norm(const struct lu_fid *fid)
+{
+ return fid_seq_is_norm(fid_seq(fid));
+}
+
+/* convert an OST objid into an IDIF FID SEQ number */
+static inline obd_seq fid_idif_seq(obd_id id, __u32 ost_idx)
+{
+ return FID_SEQ_IDIF | (ost_idx << 16) | ((id >> 32) & 0xffff);
+}
+
+/* convert a packed IDIF FID into an OST objid */
+static inline obd_id fid_idif_id(obd_seq seq, __u32 oid, __u32 ver)
+{
+ return ((__u64)ver << 48) | ((seq & 0xffff) << 32) | oid;
+}
+
+/* unpack an ostid (id/seq) from a wire/disk structure into an IDIF FID */
+static inline void ostid_idif_unpack(struct ost_id *ostid,
+ struct lu_fid *fid, __u32 ost_idx)
+{
+ fid->f_seq = fid_idif_seq(ostid->oi_id, ost_idx);
+ fid->f_oid = ostid->oi_id; /* truncate to 32 bits by assignment */
+ fid->f_ver = ostid->oi_id >> 48; /* in theory, not currently used */
+}
+
+/* unpack an ostid (id/seq) from a wire/disk structure into a non-IDIF FID */
+static inline void ostid_fid_unpack(struct ost_id *ostid, struct lu_fid *fid)
+{
+ fid->f_seq = ostid->oi_seq;
+ fid->f_oid = ostid->oi_id; /* truncate to 32 bits by assignment */
+ fid->f_ver = ostid->oi_id >> 32; /* in theory, not currently used */
+}
+
+/* Unpack an OST object id/seq (group) into a FID. This is needed for
+ * converting all obdo, lmm, lsm, etc. 64-bit id/seq pairs into proper
+ * FIDs. Note that if an id/seq is already in FID/IDIF format it will
+ * be passed through unchanged. Only legacy OST objects in "group 0"
+ * will be mapped into the IDIF namespace so that they can fit into the
+ * struct lu_fid fields without loss. For reference see:
+ * http://arch.lustre.org/index.php?title=Interoperability_fids_zfs
+ */
+static inline int fid_ostid_unpack(struct lu_fid *fid, struct ost_id *ostid,
+ __u32 ost_idx)
+{
+ if (ost_idx > 0xffff) {
+ CERROR("bad ost_idx, seq:"LPU64" id:"LPU64" ost_idx:%u\n",
+ ostid->oi_seq, ostid->oi_id, ost_idx);
+ return -EBADF;
+ }
+
+ if (fid_seq_is_mdt0(ostid->oi_seq)) {
+ /* This is a "legacy" (old 1.x/2.early) OST object in "group 0"
+ * that we map into the IDIF namespace. It allows up to 2^48
+ * objects per OST, as this is the object namespace that has
+ * been in production for years. This can handle create rates
+ * of 1M objects/s/OST for 9 years, or combinations thereof. */
+ if (ostid->oi_id >= IDIF_MAX_OID) {
+ CERROR("bad MDT0 id, seq:"LPU64" id:"LPU64" ost_idx:%u\n",
+ ostid->oi_seq, ostid->oi_id, ost_idx);
+ return -EBADF;
+ }
+ ostid_idif_unpack(ostid, fid, ost_idx);
+
+ } else if (fid_seq_is_rsvd(ostid->oi_seq)) {
+ /* These are legacy OST objects for LLOG/ECHO and CMD testing.
+ * We only support 2^32 objects in these groups, and cannot
+ * uniquely identify them in the system (i.e. they are the
+ * duplicated on all OSTs), but this is not strictly required
+ * for the old object protocol, which has a separate ost_idx. */
+ if (ostid->oi_id >= 0xffffffffULL) {
+ CERROR("bad RSVD id, seq:"LPU64" id:"LPU64" ost_idx:%u\n",
+ ostid->oi_seq, ostid->oi_id, ost_idx);
+ return -EBADF;
+ }
+ ostid_fid_unpack(ostid, fid);
+
+ } else if (unlikely(fid_seq_is_igif(ostid->oi_seq))) {
+ /* This is an MDT inode number, which should never collide with
+ * proper OST object IDs, and is probably a broken filesystem */
+ CERROR("bad IGIF, seq:"LPU64" id:"LPU64" ost_idx:%u\n",
+ ostid->oi_seq, ostid->oi_id, ost_idx);
+ return -EBADF;
+
+ } else /* if (fid_seq_is_idif(seq) || fid_seq_is_norm(seq)) */ {
+ /* This is either an IDIF object, which identifies objects across
+ * all OSTs, or a regular FID. The IDIF namespace maps legacy
+ * OST objects into the FID namespace. In both cases, we just
+ * pass the FID through, no conversion needed. */
+ ostid_fid_unpack(ostid, fid);
+ }
+
+ return 0;
+}
+
+/* pack an IDIF FID into an ostid (id/seq) for the wire/disk */
+static inline void ostid_idif_pack(struct lu_fid *fid, struct ost_id *ostid)
+{
+ ostid->oi_seq = FID_SEQ_OST_MDT0;
+ ostid->oi_id = fid_idif_id(fid->f_seq, fid->f_oid, fid->f_ver);
+}
+
+/* pack a non-IDIF FID into an ostid (id/seq) for the wire/disk */
+static inline void ostid_fid_pack(struct lu_fid *fid, struct ost_id *ostid)
+{
+ ostid->oi_seq = fid_seq(fid);
+ ostid->oi_id = fid_ver_oid(fid);
+}
+
+/* pack any OST FID into an ostid (id/seq) for the wire/disk */
+static inline int fid_ostid_pack(struct lu_fid *fid, struct ost_id *ostid)
+{
+ if (unlikely(fid_seq_is_igif(fid->f_seq))) {
+ CERROR("bad IGIF, "DFID"\n", PFID(fid));
+ return -EBADF;
+ }
+
+ if (fid_is_idif(fid))
+ ostid_idif_pack(fid, ostid);
+ else
+ ostid_fid_pack(fid, ostid);
+
+ return 0;
+}
+
+/* extract OST sequence (group) from a wire ost_id (id/seq) pair */
+static inline obd_seq ostid_seq(struct ost_id *ostid)
+{
+ if (unlikely(fid_seq_is_igif(ostid->oi_seq)))
+ CWARN("bad IGIF, oi_seq: "LPU64" oi_id: "LPX64"\n",
+ ostid->oi_seq, ostid->oi_id);
+
+ if (unlikely(fid_seq_is_idif(ostid->oi_seq)))
+ return FID_SEQ_OST_MDT0;
+
+ return ostid->oi_seq;
+}
+
+/* extract OST objid from a wire ost_id (id/seq) pair */
+static inline obd_id ostid_id(struct ost_id *ostid)
+{
+ if (ostid->oi_seq == FID_SEQ_OST_MDT0)
+ return ostid->oi_id & IDIF_OID_MASK;
+
+ if (fid_seq_is_rsvd(ostid->oi_seq))
+ return ostid->oi_id & OBIF_OID_MASK;
+
+ if (fid_seq_is_idif(ostid->oi_seq))
+ return fid_idif_id(ostid->oi_seq, ostid->oi_id, 0);
+
+ return ostid->oi_id;
}
/**
}
/**
+ * Build igif from the inode number/generation.
+ */
+#define LU_IGIF_BUILD(fid, ino, gen) \
+do { \
+ fid->f_seq = ino; \
+ fid->f_oid = gen; \
+ fid->f_ver = 0; \
+} while(0)
+static inline void lu_igif_build(struct lu_fid *fid, __u32 ino, __u32 gen)
+{
+ LU_IGIF_BUILD(fid, ino, gen);
+ LASSERT(fid_is_igif(fid));
+}
+
+/**
* Get inode generation from a igif.
* \param fid a igif to get inode generation from.
* \return inode generation for the igif.
/* flags for lm_flags */
#define MSGHDR_AT_SUPPORT 0x1
+#define MSGHDR_CKSUM_INCOMPAT18 0x2
#define lustre_msg lustre_msg_v2
/* we depend on this structure to be 8-byte aligned */
#define OBD_CONNECT_LOV_V3 0x100000000ULL /*client supports LOV v3 EA */
#define OBD_CONNECT_GRANT_SHRINK 0x200000000ULL /* support grant shrink */
#define OBD_CONNECT_SKIP_ORPHAN 0x400000000ULL /* don't reuse orphan objids */
+#define OBD_CONNECT_MAX_EASIZE 0x800000000ULL /* preserved for large EA */
+#define OBD_CONNECT_FULL20 0x1000000000ULL /* it is 2.0 client */
+#define OBD_CONNECT_LAYOUTLOCK 0x2000000000ULL /* client supports layout lock */
/* also update obd_connect_names[] for lprocfs_rd_connect_flags()
* and lustre/utils/wirecheck.c */
OBD_CONNECT_MDS_CAPA | OBD_CONNECT_OSS_CAPA | \
OBD_CONNECT_MDS_MDS | OBD_CONNECT_FID | \
LRU_RESIZE_CONNECT_FLAG | OBD_CONNECT_VBR | \
- OBD_CONNECT_LOV_V3 | OBD_CONNECT_SOM)
+ OBD_CONNECT_LOV_V3 | OBD_CONNECT_SOM | \
+ OBD_CONNECT_FULL20)
#define OST_CONNECT_SUPPORTED (OBD_CONNECT_SRVLOCK | OBD_CONNECT_GRANT | \
OBD_CONNECT_REQPORTAL | OBD_CONNECT_VERSION | \
OBD_CONNECT_TRUNCLOCK | OBD_CONNECT_INDEX | \
OBD_CONNECT_OSS_CAPA | OBD_CONNECT_RMT_CLIENT | \
OBD_CONNECT_RMT_CLIENT_FORCE | OBD_CONNECT_VBR | \
OBD_CONNECT_MDS | OBD_CONNECT_SKIP_ORPHAN | \
- OBD_CONNECT_GRANT_SHRINK)
+ OBD_CONNECT_GRANT_SHRINK | OBD_CONNECT_FULL20)
#define ECHO_CONNECT_SUPPORTED (0)
-#define MGS_CONNECT_SUPPORTED (OBD_CONNECT_VERSION | OBD_CONNECT_AT)
+#define MGS_CONNECT_SUPPORTED (OBD_CONNECT_VERSION | OBD_CONNECT_AT | \
+ OBD_CONNECT_FULL20)
#define OBD_OCD_VERSION(major,minor,patch,fix) (((major)<<24) + ((minor)<<16) +\
((patch)<<8) + (fix))
} ost_cmd_t;
#define OST_FIRST_OPC OST_REPLY
-typedef __u64 obd_id;
-typedef __u64 obd_gr;
-typedef __u64 obd_time;
-typedef __u64 obd_size;
-typedef __u64 obd_off;
-typedef __u64 obd_blocks;
-typedef __u64 obd_valid;
-typedef __u32 obd_blksize;
-typedef __u32 obd_mode;
-typedef __u32 obd_uid;
-typedef __u32 obd_gid;
-typedef __u32 obd_flag;
-typedef __u32 obd_count;
-
enum obdo_flags {
OBD_FL_INLINEDATA = 0x00000001,
OBD_FL_OBDMDEXISTS = 0x00000002,
OBD_FL_NO_USRQUOTA = 0x00000100, /* the object's owner is over quota */
OBD_FL_NO_GRPQUOTA = 0x00000200, /* the object's group is over quota */
OBD_FL_CREATE_CROW = 0x00000400, /* object should be create on write */
- OBD_FL_TRUNCLOCK = 0x00000800, /* delegate DLM locking during punch*/
+ OBD_FL_SRVLOCK = 0x00000800, /* delegate DLM locking to server */
OBD_FL_CKSUM_CRC32 = 0x00001000, /* CRC32 checksum type */
OBD_FL_CKSUM_ADLER = 0x00002000, /* ADLER checksum type */
OBD_FL_CKSUM_RSVD1 = 0x00004000, /* for future cksum types */
OBD_FL_CKSUM_RSVD2 = 0x00008000, /* for future cksum types */
OBD_FL_CKSUM_RSVD3 = 0x00010000, /* for future cksum types */
OBD_FL_SHRINK_GRANT = 0x00020000, /* object shrink the grant */
+ OBD_FL_MMAP = 0x00040000, /* object is mmapped on the client */
OBD_FL_CKSUM_ALL = OBD_FL_CKSUM_CRC32 | OBD_FL_CKSUM_ADLER,
/* mask for local-only flag, which won't be sent over network */
OBD_FL_LOCAL_MASK = 0xF0000000,
- /* temporary OBDO used by osc_brw_async (see bug 18364) */
- OBD_FL_TEMPORARY = 0x10000000,
};
#define LOV_MAGIC_V1 0x0BD10BD0
#define lov_ost_data lov_ost_data_v1
struct lov_ost_data_v1 { /* per-stripe data structure (little-endian)*/
__u64 l_object_id; /* OST object ID */
- __u64 l_object_gr; /* OST object group (creating MDS number) */
+ __u64 l_object_seq; /* OST object seq number */
__u32 l_ost_gen; /* generation of this l_ost_idx */
__u32 l_ost_idx; /* OST index in LOV (lov_tgt_desc->tgts) */
};
__u32 lmm_magic; /* magic number = LOV_MAGIC_V1 */
__u32 lmm_pattern; /* LOV_PATTERN_RAID0, LOV_PATTERN_RAID1 */
__u64 lmm_object_id; /* LOV object ID */
- __u64 lmm_object_gr; /* LOV object group */
+ __u64 lmm_object_seq; /* LOV object seq number */
__u32 lmm_stripe_size; /* size of stripe in bytes */
__u32 lmm_stripe_count; /* num stripes in use for this object */
struct lov_ost_data_v1 lmm_objects[0]; /* per-stripe data */
__u32 lmm_magic; /* magic number = LOV_MAGIC_V3 */
__u32 lmm_pattern; /* LOV_PATTERN_RAID0, LOV_PATTERN_RAID1 */
__u64 lmm_object_id; /* LOV object ID */
- __u64 lmm_object_gr; /* LOV object group */
+ __u64 lmm_object_seq; /* LOV object seq number */
__u32 lmm_stripe_size; /* size of stripe in bytes */
__u32 lmm_stripe_count; /* num stripes in use for this object */
char lmm_pool_name[LOV_MAXPOOLNAME]; /* must be 32bit aligned */
#define OBD_MD_MDS (0x0000000100000000ULL) /* where an inode lives on */
#define OBD_MD_REINT (0x0000000200000000ULL) /* reintegrate oa */
#define OBD_MD_MEA (0x0000000400000000ULL) /* CMD split EA */
+#define OBD_MD_MDTIDX (0x0000000800000000ULL) /* Get MDT index */
#define OBD_MD_FLXATTR (0x0000001000000000ULL) /* xattr */
#define OBD_MD_FLXATTRLS (0x0000002000000000ULL) /* xattr list */
#define OBD_MD_FLOSSCAPA (0x0000040000000000ULL) /* OSS capability */
#define OBD_MD_FLCKSPLIT (0x0000080000000000ULL) /* Check split on server */
#define OBD_MD_FLCROSSREF (0x0000100000000000ULL) /* Cross-ref case */
-
+#define OBD_MD_FLGETATTRLOCK (0x0000200000000000ULL) /* Get IOEpoch attributes
+ * under lock */
#define OBD_FL_TRUNC (0x0000200000000000ULL) /* for filter_truncate */
#define OBD_MD_FLRMTLSETFACL (0x0001000000000000ULL) /* lfs lsetfacl case */
#define OBD_BRW_NOCACHE 0x80 /* this page is a part of non-cached IO */
#define OBD_BRW_NOQUOTA 0x100
#define OBD_BRW_SRVLOCK 0x200 /* Client holds no lock over this page */
+#define OBD_BRW_ASYNC 0x400 /* Server may delay commit to disk */
#define OBD_BRW_MEMALLOC 0x800 /* Client runs in the "kswapd" context */
#define OBD_OBJECT_EOF 0xffffffffffffffffULL
struct obd_ioobj {
obd_id ioo_id;
- obd_gr ioo_gr;
+ obd_seq ioo_seq;
__u32 ioo_type;
__u32 ioo_bufcnt;
};
#define OST_LVB_GET_ERR(blocks) (int)(blocks - OST_LVB_ERR_INIT)
struct ost_lvb {
- __u64 lvb_size;
- __u64 lvb_mtime;
- __u64 lvb_atime;
- __u64 lvb_ctime;
- __u64 lvb_blocks;
+ __u64 lvb_size;
+ obd_time lvb_mtime;
+ obd_time lvb_atime;
+ obd_time lvb_ctime;
+ __u64 lvb_blocks;
};
extern void lustre_swab_ost_lvb(struct ost_lvb *);
MF_MDC_CANCEL_FID2 = (1 << 4),
MF_MDC_CANCEL_FID3 = (1 << 5),
MF_MDC_CANCEL_FID4 = (1 << 6),
+ /* There is a pending attribute update. */
+ MF_SOM_AU = (1 << 7),
+ /* Cancel OST locks while getattr OST attributes. */
+ MF_GETATTR_LOCK = (1 << 8),
};
#define MF_SOM_LOCAL_FLAGS (MF_SOM_CHANGE | MF_EPOCH_OPEN | MF_EPOCH_CLOSE)
#define MDS_BFLAG_UNCOMMITTED_WRITES 0x1
-#define MDS_BFLAG_EXT_FLAGS 0x80000000 /* == EXT3_RESERVED_FL */
/* these should be identical to their EXT3_*_FL counterparts, and are
* redefined here only to avoid dragging in ext3_fs.h */
#define MDS_DIRSYNC_FL 0x00010000 /* dirsync behaviour (dir only) */
#ifdef __KERNEL__
-/* If MDS_BFLAG_IOC_FLAGS is set it means we requested EXT3_*_FL inode flags
- * and we need to decode these into local S_* flags in the inode. Otherwise
- * we pass flags straight through (see bug 9486). */
+/* Convert wire MDS_*_FL to corresponding client local VFS S_* values
+ * for the client inode i_flags. The MDS_*_FL are the Lustre wire
+ * protocol equivalents of LDISKFS_*_FL values stored on disk, while
+ * the S_* flags are kernel-internal values that change between kernel
+ * versions. These flags are set/cleared via FSFILT_IOC_{GET,SET}_FLAGS.
+ * See b=16526 for a full history. */
static inline int ll_ext_to_inode_flags(int flags)
{
- return (flags & MDS_BFLAG_EXT_FLAGS) ?
- (((flags & MDS_SYNC_FL) ? S_SYNC : 0) |
+ return (((flags & MDS_SYNC_FL) ? S_SYNC : 0) |
((flags & MDS_NOATIME_FL) ? S_NOATIME : 0) |
((flags & MDS_APPEND_FL) ? S_APPEND : 0) |
#if defined(S_DIRSYNC)
((flags & MDS_DIRSYNC_FL) ? S_DIRSYNC : 0) |
#endif
- ((flags & MDS_IMMUTABLE_FL) ? S_IMMUTABLE : 0)) :
- (flags & ~MDS_BFLAG_EXT_FLAGS);
+ ((flags & MDS_IMMUTABLE_FL) ? S_IMMUTABLE : 0));
}
-/* If MDS_BFLAG_EXT_FLAGS is set it means we requested EXT3_*_FL inode flags
- * and we pass these straight through. Otherwise we need to convert from
- * S_* flags to their EXT3_*_FL equivalents (see bug 9486). */
-static inline int ll_inode_to_ext_flags(int oflags, int iflags)
+static inline int ll_inode_to_ext_flags(int iflags)
{
- return (oflags & MDS_BFLAG_EXT_FLAGS) ? (oflags & ~MDS_BFLAG_EXT_FLAGS):
- (((iflags & S_SYNC) ? MDS_SYNC_FL : 0) |
+ return (((iflags & S_SYNC) ? MDS_SYNC_FL : 0) |
((iflags & S_NOATIME) ? MDS_NOATIME_FL : 0) |
((iflags & S_APPEND) ? MDS_APPEND_FL : 0) |
#if defined(S_DIRSYNC)
struct lustre_handle handle;
__u64 valid;
__u64 size; /* Offset, in the case of MDS_READPAGE */
- __u64 mtime;
- __u64 atime;
- __u64 ctime;
+ obd_time mtime;
+ obd_time atime;
+ obd_time ctime;
__u64 blocks; /* XID, in the case of MDS_READPAGE */
__u64 io_epoch;
__u64 ino;
struct lustre_handle handle;
__u64 valid;
__u64 size; /* Offset, in the case of MDS_READPAGE */
- __u64 mtime;
- __u64 atime;
- __u64 ctime;
+ obd_time mtime;
+ obd_time atime;
+ obd_time ctime;
__u64 blocks; /* XID, in the case of MDS_READPAGE */
__u64 ioepoch;
__u64 ino; /* for 1.6 compatibility */
__u32 aclsize;
__u32 max_mdsize;
__u32 max_cookiesize;
- __u32 padding_4; /* also fix lustre_swab_mdt_body */
- __u64 padding_5;
+ __u32 uid_h; /* high 32-bits of uid, for FUID */
+ __u32 gid_h; /* high 32-bits of gid, for FUID */
+ __u32 padding_5; /* also fix lustre_swab_mdt_body */
__u64 padding_6;
__u64 padding_7;
__u64 padding_8;
#define LQUOTA_FLAGS_ADJINO 8UL /* adjust the inode qunit size */
#define LQUOTA_FLAGS_CHG_QS 16UL /* indicate whether it has capability of
* OBD_CONNECT_CHANGE_QS */
+#define LQUOTA_FLAGS_RECOVERY 32UL /* recovery is going on a uid/gid */
+#define LQUOTA_FLAGS_SETQUOTA 64UL /* being setquota on a uid/gid */
/* flags is specific for quota_adjust_qunit */
#define LQUOTA_QAQ_CREATE_LQS (1 << 31) /* when it is set, need create lqs */
__u32 rp_uid;
__u32 rp_gid;
__u32 rp_fsuid;
+ __u32 rp_fsuid_h;
__u32 rp_fsgid;
+ __u32 rp_fsgid_h;
__u32 rp_access_perm; /* MAY_READ/WRITE/EXEC */
};
__u32 sa_gid;
__u64 sa_size;
__u64 sa_blocks;
- __u64 sa_mtime;
- __u64 sa_atime;
- __u64 sa_ctime;
+ obd_time sa_mtime;
+ obd_time sa_atime;
+ obd_time sa_ctime;
__u32 sa_attr_flags;
__u32 sa_mode;
__u32 sa_padding_2;
#define MDS_OPEN_LOCK 04000000000 /* This open requires open lock */
#define MDS_OPEN_HAS_EA 010000000000 /* specify object create pattern */
#define MDS_OPEN_HAS_OBJS 020000000000 /* Just set the EA the obj exist */
+#define MDS_OPEN_NORESTORE 0100000000000ULL /* Do not restore file at open */
+#define MDS_OPEN_NEWSTRIPE 0200000000000ULL /* New stripe needed (restripe or
+ * hsm restore) */
/* permission for create non-directory file */
#define MAY_CREATE (1 << 7)
MDS_CLOSE_CLEANUP = 1 << 6
};
+/* instance of mdt_reint_rec */
struct mdt_rec_create {
__u32 cr_opcode;
__u32 cr_cap;
struct lu_fid cr_fid1;
struct lu_fid cr_fid2;
struct lustre_handle cr_old_handle; /* handle in case of open replay */
- __u64 cr_time;
+ obd_time cr_time;
__u64 cr_rdev;
__u64 cr_ioepoch;
- __u64 cr_padding_1; /* pad for 64 bits*/
+ __u64 cr_padding_1; /* rr_blocks */
__u32 cr_mode;
__u32 cr_bias;
- __u32 cr_flags; /* for use with open */
- __u32 cr_padding_2;
- __u32 cr_padding_3;
- __u32 cr_padding_4;
+ /* the set/get_mrc_cr_flags() helpers must be used to access the
+ * 64-bit cr_flags value split across [cr_flags_l, cr_flags_h]; this
+ * extends the cr_flags size without breaking 1.8 wire compat */
+ __u32 cr_flags_l; /* for use with open, low 32 bits */
+ __u32 cr_flags_h; /* for use with open, high 32 bits */
+ __u32 cr_padding_3; /* rr_padding_3 */
+ __u32 cr_padding_4; /* rr_padding_4 */
};
+static inline void set_mrc_cr_flags(struct mdt_rec_create *mrc, __u64 flags)
+{
+ mrc->cr_flags_l = (__u32)(flags & 0xFFFFFFFFUll);
+ mrc->cr_flags_h = (__u32)(flags >> 32);
+}
+
+static inline __u64 get_mrc_cr_flags(struct mdt_rec_create *mrc)
+{
+ return ((__u64)(mrc->cr_flags_l) | ((__u64)mrc->cr_flags_h << 32));
+}
+
+/* instance of mdt_reint_rec */
struct mdt_rec_link {
__u32 lk_opcode;
__u32 lk_cap;
__u32 lk_suppgid2_h;
struct lu_fid lk_fid1;
struct lu_fid lk_fid2;
- __u64 lk_time;
- __u64 lk_padding_1;
- __u64 lk_padding_2;
- __u64 lk_padding_3;
- __u64 lk_padding_4;
+ obd_time lk_time;
+ __u64 lk_padding_1; /* rr_atime */
+ __u64 lk_padding_2; /* rr_ctime */
+ __u64 lk_padding_3; /* rr_size */
+ __u64 lk_padding_4; /* rr_blocks */
__u32 lk_bias;
- __u32 lk_padding_5;
- __u32 lk_padding_6;
- __u32 lk_padding_7;
- __u32 lk_padding_8;
- __u32 lk_padding_9;
+ __u32 lk_padding_5; /* rr_mode */
+ __u32 lk_padding_6; /* rr_flags */
+ __u32 lk_padding_7; /* rr_padding_2 */
+ __u32 lk_padding_8; /* rr_padding_3 */
+ __u32 lk_padding_9; /* rr_padding_4 */
};
+/* instance of mdt_reint_rec */
struct mdt_rec_unlink {
__u32 ul_opcode;
__u32 ul_cap;
__u32 ul_suppgid2_h;
struct lu_fid ul_fid1;
struct lu_fid ul_fid2;
- __u64 ul_time;
- __u64 ul_padding_2;
- __u64 ul_padding_3;
- __u64 ul_padding_4;
- __u64 ul_padding_5;
+ obd_time ul_time;
+ __u64 ul_padding_2; /* rr_atime */
+ __u64 ul_padding_3; /* rr_ctime */
+ __u64 ul_padding_4; /* rr_size */
+ __u64 ul_padding_5; /* rr_blocks */
__u32 ul_bias;
__u32 ul_mode;
- __u32 ul_padding_6;
- __u32 ul_padding_7;
- __u32 ul_padding_8;
- __u32 ul_padding_9;
+ __u32 ul_padding_6; /* rr_flags */
+ __u32 ul_padding_7; /* rr_padding_2 */
+ __u32 ul_padding_8; /* rr_padding_3 */
+ __u32 ul_padding_9; /* rr_padding_4 */
};
+/* instance of mdt_reint_rec */
struct mdt_rec_rename {
__u32 rn_opcode;
__u32 rn_cap;
__u32 rn_suppgid2_h;
struct lu_fid rn_fid1;
struct lu_fid rn_fid2;
- __u64 rn_time;
- __u64 rn_padding_1;
- __u64 rn_padding_2;
- __u64 rn_padding_3;
- __u64 rn_padding_4;
- __u32 rn_bias; /* some operation flags */
- __u32 rn_mode; /* cross-ref rename has mode */
- __u32 rn_padding_5;
- __u32 rn_padding_6;
- __u32 rn_padding_7;
- __u32 rn_padding_8;
-};
-
+ obd_time rn_time;
+ __u64 rn_padding_1; /* rr_atime */
+ __u64 rn_padding_2; /* rr_ctime */
+ __u64 rn_padding_3; /* rr_size */
+ __u64 rn_padding_4; /* rr_blocks */
+ __u32 rn_bias; /* some operation flags */
+ __u32 rn_mode; /* cross-ref rename has mode */
+ __u32 rn_padding_5; /* rr_flags */
+ __u32 rn_padding_6; /* rr_padding_2 */
+ __u32 rn_padding_7; /* rr_padding_3 */
+ __u32 rn_padding_8; /* rr_padding_4 */
+};
+
+/* instance of mdt_reint_rec */
struct mdt_rec_setxattr {
__u32 sx_opcode;
__u32 sx_cap;
__u32 sx_suppgid2;
__u32 sx_suppgid2_h;
struct lu_fid sx_fid;
- __u64 sx_padding_1; /* These three members are lu_fid size */
+ __u64 sx_padding_1; /* These three are rr_fid2 */
__u32 sx_padding_2;
__u32 sx_padding_3;
__u64 sx_valid;
- __u64 sx_time;
- __u64 sx_padding_5;
- __u64 sx_padding_6;
- __u64 sx_padding_7;
+ obd_time sx_time;
+ __u64 sx_padding_5; /* rr_ctime */
+ __u64 sx_padding_6; /* rr_size */
+ __u64 sx_padding_7; /* rr_blocks */
__u32 sx_size;
__u32 sx_flags;
- __u32 sx_padding_8;
- __u32 sx_padding_9;
- __u32 sx_padding_10;
- __u32 sx_padding_11;
+ __u32 sx_padding_8; /* rr_flags */
+ __u32 sx_padding_9; /* rr_padding_2 */
+ __u32 sx_padding_10; /* rr_padding_3 */
+ __u32 sx_padding_11; /* rr_padding_4 */
};
+/*
+ * mdt_rec_reint is the template for all mdt_reint_xxx structures.
+ * Do NOT change the size of various members, otherwise the value
+ * will be broken in lustre_swab_mdt_rec_reint().
+ *
+ * If you add new members in other mdt_reint_xxx structures and need to use the
+ * rr_padding_x fields, then update lustre_swab_mdt_rec_reint() also.
+ */
struct mdt_rec_reint {
__u32 rr_opcode;
__u32 rr_cap;
__u32 rr_suppgid2_h;
struct lu_fid rr_fid1;
struct lu_fid rr_fid2;
- __u64 rr_mtime;
- __u64 rr_atime;
- __u64 rr_ctime;
+ obd_time rr_mtime;
+ obd_time rr_atime;
+ obd_time rr_ctime;
__u64 rr_size;
__u64 rr_blocks;
__u32 rr_bias;
__u32 rr_mode;
- __u32 rr_padding_1; /* also fix lustre_swab_mdt_rec_reint */
+ __u32 rr_flags;
__u32 rr_padding_2; /* also fix lustre_swab_mdt_rec_reint */
__u32 rr_padding_3; /* also fix lustre_swab_mdt_rec_reint */
__u32 rr_padding_4; /* also fix lustre_swab_mdt_rec_reint */
__u32 cm_flags;
__u32 cm_vers; /* lustre release version number */
__u32 padding; /* 64 bit align */
- __u64 cm_createtime; /*when this record was first created */
- __u64 cm_canceltime; /*when this record is no longer valid*/
+ obd_time cm_createtime; /*when this record was first created */
+ obd_time cm_canceltime; /*when this record is no longer valid*/
char cm_tgtname[MTI_NAME_MAXLEN];
char cm_comment[MTI_NAME_MAXLEN];
};
/** Identifier for a single log object */
struct llog_logid {
__u64 lgl_oid;
- __u64 lgl_ogr;
+ __u64 lgl_oseq;
__u32 lgl_ogen;
} __attribute__((packed));
struct llog_rec_hdr lcr_hdr;
struct ll_fid lcr_fid;
obd_id lcr_oid;
- obd_count lcr_ogr;
+ obd_count lcr_oseq;
__u32 padding;
struct llog_rec_tail lcr_tail;
} __attribute__((packed));
struct llog_unlink_rec {
struct llog_rec_hdr lur_hdr;
obd_id lur_oid;
- obd_count lur_ogr;
+ obd_count lur_oseq;
obd_count lur_count;
struct llog_rec_tail lur_tail;
} __attribute__((packed));
struct llog_setattr_rec {
struct llog_rec_hdr lsr_hdr;
obd_id lsr_oid;
- obd_count lsr_ogr;
+ obd_count lsr_oseq;
__u32 lsr_uid;
__u32 lsr_gid;
__u32 padding;
struct llog_setattr64_rec {
struct llog_rec_hdr lsr_hdr;
obd_id lsr_oid;
- obd_count lsr_ogr;
+ obd_count lsr_oseq;
__u32 padding;
__u32 lsr_uid;
__u32 lsr_uid_h;
/** bits covering all \a changelog_rec_type's */
#define CHANGELOG_ALLMASK 0XFFFFFFFF
/** default \a changelog_rec_type mask */
-#define CHANGELOG_DEFMASK CHANGELOG_ALLMASK
+#define CHANGELOG_DEFMASK CHANGELOG_ALLMASK & ~(1 << CL_ATIME)
/* changelog llog name, needed by client replicators */
#define CHANGELOG_CATALOG "changelog_catalog"
__u32 cs_id;
} __attribute__((packed));
-struct changelog_show {
- __u64 cs_startrec;
- __u32 cs_pid;
- __u32 cs_flags;
-} __attribute__((packed));
-
/** changelog record */
struct llog_changelog_rec {
struct llog_rec_hdr cr_hdr;
struct llog_log_hdr {
struct llog_rec_hdr llh_hdr;
- __u64 llh_timestamp;
+ obd_time llh_timestamp;
__u32 llh_count;
__u32 llh_bitmap_offset;
__u32 llh_size;
/* Note: 64-bit types are 64-bit aligned in structure */
struct obdo {
obd_valid o_valid; /* hot fields in this obdo */
- obd_id o_id;
- obd_gr o_gr;
- obd_id o_fid;
+ struct ost_id o_oi;
+ obd_id o_parent_seq;
obd_size o_size; /* o_size-o_blocks == ost_lvb */
obd_time o_mtime;
obd_time o_atime;
obd_gid o_gid;
obd_flag o_flags;
obd_count o_nlink; /* brw: checksum */
- obd_count o_generation;
+ obd_count o_parent_oid;
obd_count o_misc; /* brw: o_dropped */
__u64 o_ioepoch; /* epoch in ost writes */
__u32 o_stripe_idx; /* holds stripe idx */
- __u32 o_padding_1;
+ __u32 o_parent_ver;
struct lustre_handle o_handle; /* brw: lock handle to prolong locks */
struct llog_cookie o_lcookie; /* destroy: unlink cookie from MDS */
- __u64 o_padding_2;
+ __u32 o_uid_h;
+ __u32 o_gid_h;
__u64 o_padding_3;
__u64 o_padding_4;
__u64 o_padding_5;
__u64 o_padding_6;
};
+#define o_id o_oi.oi_id
+#define o_seq o_oi.oi_seq
#define o_dirty o_blocks
#define o_undirty o_mode
#define o_dropped o_misc
#define CAPA_HMAC_ALG_MASK 0xff000000
struct lustre_capa_key {
- __u64 lk_mdsid; /**< mds# */
+ __u64 lk_seq; /**< mds# */
__u32 lk_keyid; /**< key# */
__u32 lk_padding;
__u8 lk_key[CAPA_HMAC_KEY_MAX_LEN]; /**< key */
* Stored in this crazy struct for maximum packing and endian-neutrality
*/
struct link_ea_entry {
- struct lu_fid lee_parent_fid;
/** __u16 stored big-endian, unaligned */
- char lee_reclen[2];
- __u16 lee_padding;
+ unsigned char lee_reclen[2];
+ unsigned char lee_parent_fid[sizeof(struct lu_fid)];
char lee_name[0];
}__attribute__((packed));
void lustre_swab_fid2path (struct getinfo_fid2path *gf);
-extern void lustre_swab_lnlh(struct lnl_hdr *);
-
#endif
/** @} lustreidl */