*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
* Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
*
- * Copyright (c) 2011, 2013, Intel Corporation.
+ * Copyright (c) 2011, 2017, Intel Corporation.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
*/
#include <libcfs/libcfs.h>
-#include <lustre/lustre_idl.h>
+#include <uapi/linux/lustre/lustre_fid.h>
+#include <uapi/linux/lustre/lustre_idl.h>
+#include <uapi/linux/lustre/lustre_ostid.h>
struct lu_env;
struct lu_site;
extern const struct lu_fid LU_OBF_FID;
extern const struct lu_fid LU_LPF_FID;
extern const struct lu_fid LU_DOT_LUSTRE_FID;
+extern const struct lu_fid LU_BACKEND_LPF_FID;
enum {
/*
LUSTRE_SEQ_SUPER_WIDTH = ((1ULL << 30ULL) * LUSTRE_SEQ_META_WIDTH)
};
-enum {
- /** 2^6 FIDs for OI containers */
- OSD_OI_FID_OID_BITS = 6,
- /** reserve enough FIDs in case we want more in the future */
- OSD_OI_FID_OID_BITS_MAX = 10,
-};
-
/** special OID for local objects */
enum local_oid {
/** \see fld_mod_init */
ACCT_GROUP_OID = 16UL,
LFSCK_BOOKMARK_OID = 17UL,
OTABLE_IT_OID = 18UL,
- /* These two definitions are obsolete
- * OFD_GROUP0_LAST_OID = 20UL,
- * OFD_GROUP4K_LAST_OID = 20UL+4096,
- */
+ OSD_LPF_OID = 19UL,
+ REPLY_DATA_OID = 21UL,
+ ACCT_PROJECT_OID = 22UL,
+ INDEX_BACKUP_OID = 4116UL,
OFD_LAST_GROUP_OID = 4117UL,
LLOG_CATALOGS_OID = 4118UL,
MGS_CONFIGS_OID = 4119UL,
MDD_LOV_OBJ_OSEQ = 4121UL,
LFSCK_NAMESPACE_OID = 4122UL,
REMOTE_PARENT_DIR_OID = 4123UL,
- SLAVE_LLOG_CATALOGS_OID = 4124UL,
+ /* This definition is obsolete
+ * SLAVE_LLOG_CATALOGS_OID = 4124UL,
+ */
+ BATCHID_COMMITTED_OID = 4125UL,
};
static inline void lu_local_obj_fid(struct lu_fid *fid, __u32 oid)
/* True iff \a fid is the well-known FID of the filesystem root object. */
static inline int fid_is_root(const struct lu_fid *fid)
{
	return unlikely(fid_seq(fid) == FID_SEQ_ROOT &&
			fid_oid(fid) == FID_OID_ROOT);
}
static inline int fid_is_dot_lustre(const struct lu_fid *fid)
fid_oid(fid) == OTABLE_IT_OID);
}
+static inline int fid_oid_is_quota(const struct lu_fid *fid)
+{
+ switch (fid_oid(fid)) {
+ case ACCT_USER_OID:
+ case ACCT_GROUP_OID:
+ case ACCT_PROJECT_OID:
+ return 1;
+ default:
+ return 0;
+ }
+}
+
/* True iff \a fid is a local quota accounting object. */
static inline int fid_is_acct(const struct lu_fid *fid)
{
	if (fid_seq(fid) != FID_SEQ_LOCAL_FILE)
		return 0;

	return fid_oid_is_quota(fid);
}
static inline int fid_is_quota(const struct lu_fid *fid)
fid_seq_is_root(seq) || fid_seq_is_dot(seq);
}
+static inline void ost_layout_cpu_to_le(struct ost_layout *dst,
+ const struct ost_layout *src)
+{
+ dst->ol_stripe_size = __cpu_to_le32(src->ol_stripe_size);
+ dst->ol_stripe_count = __cpu_to_le32(src->ol_stripe_count);
+ dst->ol_comp_start = __cpu_to_le64(src->ol_comp_start);
+ dst->ol_comp_end = __cpu_to_le64(src->ol_comp_end);
+ dst->ol_comp_id = __cpu_to_le32(src->ol_comp_id);
+}
+
+static inline void ost_layout_le_to_cpu(struct ost_layout *dst,
+ const struct ost_layout *src)
+{
+ dst->ol_stripe_size = __le32_to_cpu(src->ol_stripe_size);
+ dst->ol_stripe_count = __le32_to_cpu(src->ol_stripe_count);
+ dst->ol_comp_start = __le64_to_cpu(src->ol_comp_start);
+ dst->ol_comp_end = __le64_to_cpu(src->ol_comp_end);
+ dst->ol_comp_id = __le32_to_cpu(src->ol_comp_id);
+}
+
/**
 * Convert a filter_fid from CPU to little-endian byte order.
 *
 * \param[out] dst   converted filter_fid for wire/disk
 * \param[in]  src   CPU-order filter_fid
 * \param[in]  size  number of bytes of the structure actually being stored;
 *                   when smaller than the full structure only the parent FID
 *                   is converted and the layout part of \a dst is zeroed
 *
 * NOTE(review): \a size is signed while sizeof() is unsigned, so a negative
 * size would be promoted and take the else branch — callers are presumed to
 * pass non-negative sizes; confirm at call sites.
 */
static inline void filter_fid_cpu_to_le(struct filter_fid *dst,
					const struct filter_fid *src, int size)
{
	fid_cpu_to_le(&dst->ff_parent, &src->ff_parent);

	if (size < sizeof(struct filter_fid)) {
		/* Short (old-format) record: layout fields are not stored. */
		memset(&dst->ff_layout, 0, sizeof(dst->ff_layout));
	} else {
		ost_layout_cpu_to_le(&dst->ff_layout, &src->ff_layout);
		dst->ff_layout_version = cpu_to_le32(src->ff_layout_version);
		dst->ff_range = cpu_to_le32(src->ff_range);
	}

	/* XXX: Add more if filter_fid is enlarged in the future. */
}
+
/**
 * Convert a filter_fid from little-endian to CPU byte order.
 *
 * \param[out] dst   CPU-order filter_fid
 * \param[in]  src   wire/disk-order filter_fid
 * \param[in]  size  number of bytes of the structure actually read; when
 *                   smaller than the full structure only the parent FID is
 *                   converted and the layout part of \a dst is zeroed
 *
 * NOTE(review): same signed/unsigned comparison caveat as
 * filter_fid_cpu_to_le() — negative \a size takes the else branch.
 */
static inline void filter_fid_le_to_cpu(struct filter_fid *dst,
					const struct filter_fid *src, int size)
{
	fid_le_to_cpu(&dst->ff_parent, &src->ff_parent);

	if (size < sizeof(struct filter_fid)) {
		/* Short (old-format) record: layout fields were not stored. */
		memset(&dst->ff_layout, 0, sizeof(dst->ff_layout));
	} else {
		ost_layout_le_to_cpu(&dst->ff_layout, &src->ff_layout);
		dst->ff_layout_version = le32_to_cpu(src->ff_layout_version);
		dst->ff_range = le32_to_cpu(src->ff_range);
	}

	/* XXX: Add more if filter_fid is enlarged in the future. */
}
+
static inline void lu_last_id_fid(struct lu_fid *fid, __u64 seq, __u32 ost_idx)
{
if (fid_seq_is_mdt0(seq)) {
fid->f_seq = fid_idif_seq(0, ost_idx);
} else {
LASSERTF(fid_seq_is_norm(seq) || fid_seq_is_echo(seq) ||
- fid_seq_is_idif(seq), LPX64"\n", seq);
+ fid_seq_is_idif(seq), "%#llx\n", seq);
fid->f_seq = seq;
}
fid->f_oid = 0;
*/
struct lu_seq_range lcs_space;
- /* Seq related proc */
- struct proc_dir_entry *lcs_proc_dir;
+ /* Seq related debugfs */
+ struct dentry *lcs_debugfs_entry;
/* This holds last allocated fid in last obtained seq */
struct lu_fid lcs_fid;
/*
* Service uuid, passed from MDT + seq name to form unique seq name to
- * use it with procfs.
+ * use it with debugfs.
*/
char lcs_name[80];
/* /seq file object device */
struct dt_object *lss_obj;
- /* Seq related proc */
- struct proc_dir_entry *lss_proc_dir;
+ /* Seq related debugfs */
+ struct dentry *lss_debugfs_entry;
/* LUSTRE_SEQ_SERVER or LUSTRE_SEQ_CONTROLLER */
enum lu_mgr_type lss_type;
- /* Client interafce to request controller */
+ /* Client interface to request controller */
struct lu_client_seq *lss_cli;
/* Mutex for protecting allocation */
/*
* Service uuid, passed from MDT + seq name to form unique seq name to
- * use it with procfs.
+ * use it with debugfs.
*/
char lss_name[80];
struct seq_server_site *lss_site;
};
/**
 * Per-site sequence/FID service state: binds the local lu_site to the FID
 * location database (FLD) and to the sequence managers this node runs or
 * talks to.
 */
struct seq_server_site {
	struct lu_site	     *ss_lu;
	/**
	 * mds number of this site.
	 */
	u32		      ss_node_id;
	/**
	 * Fid location database
	 */
	struct lu_server_fld *ss_server_fld;
	/* FLD client side — presumably for lookups against remote servers;
	 * confirm against fld usage. */
	struct lu_client_fld *ss_client_fld;

	/**
	 * Server Seq Manager
	 */
	struct lu_server_seq *ss_server_seq;

	/**
	 * Controller Seq Manager
	 */
	struct lu_server_seq *ss_control_seq;
	/* Export used to reach the controller node. */
	struct obd_export    *ss_control_exp;

	/**
	 * Client Seq Manager
	 */
	struct lu_client_seq *ss_client_seq;
};
+
/* Server methods */
int seq_server_init(const struct lu_env *env,
struct lu_server_seq *seq,
struct lu_client_seq *cli);
+int seq_server_check_and_alloc_super(const struct lu_env *env,
+ struct lu_server_seq *seq);
/* Client methods */
int seq_client_init(struct lu_client_seq *seq,
struct obd_export *exp,
}
}
+/**
+ * Note: we need check oi_seq to decide where to set oi_id,
+ * so oi_seq should always be set ahead of oi_id.
+ */
+static inline int ostid_set_id(struct ost_id *oi, __u64 oid)
+{
+ if (fid_seq_is_mdt0(oi->oi.oi_seq)) {
+ if (oid >= IDIF_MAX_OID)
+ return -E2BIG;
+ oi->oi.oi_id = oid;
+ } else if (fid_is_idif(&oi->oi_fid)) {
+ if (oid >= IDIF_MAX_OID)
+ return -E2BIG;
+ oi->oi_fid.f_seq = fid_idif_seq(oid,
+ fid_idif_ost_idx(&oi->oi_fid));
+ oi->oi_fid.f_oid = oid;
+ oi->oi_fid.f_ver = oid >> 48;
+ } else {
+ if (oid >= OBIF_MAX_OID)
+ return -E2BIG;
+ oi->oi_fid.f_oid = oid;
+ }
+ return 0;
+}
+
+/* pack any OST FID into an ostid (id/seq) for the wire/disk */
+static inline int fid_to_ostid(const struct lu_fid *fid, struct ost_id *ostid)
+{
+ int rc = 0;
+
+ if (fid_seq_is_igif(fid->f_seq))
+ return -EBADF;
+
+ if (fid_is_idif(fid)) {
+ ostid_set_seq_mdt0(ostid);
+ rc = ostid_set_id(ostid, fid_idif_id(fid_seq(fid),
+ fid_oid(fid), fid_ver(fid)));
+ } else {
+ ostid->oi_fid = *fid;
+ }
+
+ return rc;
+}
+
/* The same as osc_build_res_name() */
static inline void ost_fid_build_resid(const struct lu_fid *fid,
struct ldlm_res_id *resname)
if (fid_seq_is_mdt0(name->name[LUSTRE_RES_ID_VER_OID_OFF])) {
/* old resid */
struct ost_id oi;
+
+ memset(&oi, 0, sizeof(oi));
ostid_set_seq(&oi, name->name[LUSTRE_RES_ID_VER_OID_OFF]);
- ostid_set_id(&oi, name->name[LUSTRE_RES_ID_SEQ_OFF]);
+ if (ostid_set_id(&oi, name->name[LUSTRE_RES_ID_SEQ_OFF])) {
+ CERROR("Bad %llu to set " DOSTID "\n",
+ name->name[LUSTRE_RES_ID_SEQ_OFF], POSTID(&oi));
+ }
ostid_to_fid(fid, &oi, ost_idx);
} else {
/* new resid */
*/
static inline __u64 fid_flatten(const struct lu_fid *fid)
{
	__u64 seq;
	__u64 ino;

	/* IGIF FIDs carry the inode number directly. */
	if (fid_is_igif(fid))
		return lu_igif_ino(fid);

	/* Fold sequence and OID into a single 64-bit value. */
	seq = fid_seq(fid);
	ino = (seq << 24) + ((seq >> 24) & 0xffffff0000ULL) + fid_oid(fid);

	/* Fall back to the OID when the folded value happens to be zero. */
	return ino ? ino : fid_oid(fid);
}
static inline __u32 fid_hash(const struct lu_fid *f, int bits)
* map fid to 32 bit value for ino on 32bit systems. */
static inline __u32 fid_flatten32(const struct lu_fid *fid)
{
- __u32 ino;
- __u64 seq;
+ __u32 ino;
+ __u64 seq;
- if (fid_is_igif(fid)) {
- ino = lu_igif_ino(fid);
- RETURN(ino);
- }
+ if (fid_is_igif(fid)) {
+ ino = lu_igif_ino(fid);
+ return ino;
+ }
- seq = fid_seq(fid) - FID_SEQ_START;
+ seq = fid_seq(fid) - FID_SEQ_START;
- /* Map the high bits of the OID into higher bits of the inode number so
- * that inodes generated at about the same time have a reduced chance
- * of collisions. This will give a period of 2^12 = 1024 unique clients
- * (from SEQ) and up to min(LUSTRE_SEQ_MAX_WIDTH, 2^20) = 128k objects
- * (from OID), or up to 128M inodes without collisions for new files. */
- ino = ((seq & 0x000fffffULL) << 12) + ((seq >> 8) & 0xfffff000) +
- (seq >> (64 - (40-8)) & 0xffffff00) +
- (fid_oid(fid) & 0xff000fff) + ((fid_oid(fid) & 0x00fff000) << 8);
+ /* Map the high bits of the OID into higher bits of the inode number so
+ * that inodes generated at about the same time have a reduced chance
+ * of collisions. This will give a period of 2^12 = 1024 unique clients
+ * (from SEQ) and up to min(LUSTRE_SEQ_MAX_WIDTH, 2^20) = 128k objects
+ * (from OID), or up to 128M inodes without collisions for new files. */
+ ino = ((seq & 0x000fffffULL) << 12) + ((seq >> 8) & 0xfffff000) +
+ (seq >> (64 - (40-8)) & 0xffffff00) +
+ (fid_oid(fid) & 0xff000fff) + ((fid_oid(fid) & 0x00fff000) << 8);
- RETURN(ino ? ino : fid_oid(fid));
+ return ino ?: fid_oid(fid);
}
static inline int
return fid_oid(fid1) - fid_oid(fid2);
}
/**
 * Replace the object ID of an existing FID in place.
 *
 * \param[in,out] fid  FID to update; its current sequence selects the
 *                     encoding (IDIF vs regular)
 * \param[in]     oid  new object ID
 *
 * \retval 0       on success
 * \retval -EBADF  for IGIF FIDs, or when \a oid does not fit the FID
 */
static inline int fid_set_id(struct lu_fid *fid, u64 oid)
{
	/* IGIF FIDs encode an inode/generation pair; their oid cannot be
	 * replaced independently. */
	if (unlikely(fid_seq_is_igif(fid->f_seq))) {
		CERROR("bad IGIF, "DFID"\n", PFID(fid));
		return -EBADF;
	}

	if (fid_is_idif(fid)) {
		if (oid >= IDIF_MAX_OID) {
			CERROR("Too large OID %#llx to set IDIF "DFID"\n",
			       (unsigned long long)oid, PFID(fid));
			return -EBADF;
		}
		/* IDIF keeps the high bits of the object ID inside the
		 * sequence together with the OST index. */
		fid->f_seq = fid_idif_seq(oid, fid_idif_ost_idx(fid));
		fid->f_oid = oid;
		fid->f_ver = oid >> 48;
	} else {
		/* Regular FID: object ID must fit in the 32-bit f_oid. */
		if (oid > OBIF_MAX_OID) {
			CERROR("Too large OID %#llx to set REG "DFID"\n",
			       (unsigned long long)oid, PFID(fid));
			return -EBADF;
		}
		fid->f_oid = oid;
	}
	return 0;
}
+
#define LUSTRE_SEQ_SRV_NAME "seq_srv"
#define LUSTRE_SEQ_CTL_NAME "seq_ctl"