*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
* Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
*
- * Copyright (c) 2011, 2012, Intel Corporation.
+ * Copyright (c) 2011, 2017, Intel Corporation.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
* Author: Yury Umanets <umka@clusterfs.com>
*/
-#ifndef __LINUX_FID_H
-#define __LINUX_FID_H
+#ifndef __LUSTRE_FID_H
+#define __LUSTRE_FID_H
/** \defgroup fid fid
*
*/
#include <libcfs/libcfs.h>
-#include <lustre/lustre_idl.h>
-#include <lustre_req_layout.h>
-#include <lustre_mdt.h>
-#include <obd.h>
-
+#include <uapi/linux/lustre/lustre_fid.h>
+#include <uapi/linux/lustre/lustre_idl.h>
+#include <uapi/linux/lustre/lustre_ostid.h>
+struct lu_env;
struct lu_site;
struct lu_context;
+struct obd_device;
+struct obd_export;
/* Whole sequences space range and zero range definitions */
extern const struct lu_seq_range LUSTRE_SEQ_SPACE_RANGE;
extern const struct lu_seq_range LUSTRE_SEQ_ZERO_RANGE;
extern const struct lu_fid LUSTRE_BFL_FID;
extern const struct lu_fid LU_OBF_FID;
+extern const struct lu_fid LU_LPF_FID;
extern const struct lu_fid LU_DOT_LUSTRE_FID;
+extern const struct lu_fid LU_BACKEND_LPF_FID;
enum {
/*
LUSTRE_SEQ_SUPER_WIDTH = ((1ULL << 30ULL) * LUSTRE_SEQ_META_WIDTH)
};
-enum {
- /** 2^6 FIDs for OI containers */
- OSD_OI_FID_OID_BITS = 6,
- /** reserve enough FIDs in case we want more in the future */
- OSD_OI_FID_OID_BITS_MAX = 10,
-};
-
/** special OID for local objects */
enum local_oid {
/** \see fld_mod_init */
FID_SEQ_CTL_OID = 4UL,
FID_SEQ_SRV_OID = 5UL,
/** \see mdd_mod_init */
- MDD_ROOT_INDEX_OID = 6UL,
- MDD_ORPHAN_OID = 7UL,
+ MDD_ROOT_INDEX_OID = 6UL, /* deprecated in 2.4 */
+ MDD_ORPHAN_OID = 7UL, /* deprecated in 2.4 */
MDD_LOV_OBJ_OID = 8UL,
MDD_CAPA_KEYS_OID = 9UL,
/** \see mdt_mod_init */
- MDT_LAST_RECV_OID = 11UL,
+ LAST_RECV_OID = 11UL,
OSD_FS_ROOT_OID = 13UL,
ACCT_USER_OID = 15UL,
ACCT_GROUP_OID = 16UL,
LFSCK_BOOKMARK_OID = 17UL,
OTABLE_IT_OID = 18UL,
- OFD_LAST_RECV_OID = 19UL,
- /* These two definitions are obsolete
- * OFD_GROUP0_LAST_OID = 20UL,
- * OFD_GROUP4K_LAST_OID = 20UL+4096,
- */
+ OSD_LPF_OID = 19UL,
+ REPLY_DATA_OID = 21UL,
+ ACCT_PROJECT_OID = 22UL,
+ INDEX_BACKUP_OID = 4116UL,
OFD_LAST_GROUP_OID = 4117UL,
LLOG_CATALOGS_OID = 4118UL,
MGS_CONFIGS_OID = 4119UL,
OFD_HEALTH_CHECK_OID = 4120UL,
MDD_LOV_OBJ_OSEQ = 4121UL,
+ LFSCK_NAMESPACE_OID = 4122UL,
+ REMOTE_PARENT_DIR_OID = 4123UL,
+ /* This definition is obsolete
+ * SLAVE_LLOG_CATALOGS_OID = 4124UL,
+ */
+ BATCHID_COMMITTED_OID = 4125UL,
};
static inline void lu_local_obj_fid(struct lu_fid *fid, __u32 oid)
fid->f_ver = 0;
}
+/* For new FS (>= 2.4), the root FID will be changed to
+ * [FID_SEQ_ROOT:1:0], for existing FS, (upgraded to 2.4),
+ * the root FID will still be IGIF (an IGIF root will NOT match here) */
+static inline int fid_is_root(const struct lu_fid *fid)
+{
+ return unlikely((fid_seq(fid) == FID_SEQ_ROOT &&
+ fid_oid(fid) == FID_OID_ROOT));
+}
+
+/* True if @fid is the special ".lustre" directory FID. */
+static inline int fid_is_dot_lustre(const struct lu_fid *fid)
+{
+ return unlikely(fid_seq(fid) == FID_SEQ_DOT_LUSTRE &&
+ fid_oid(fid) == FID_OID_DOT_LUSTRE);
+}
+
+/* True if @fid is the OBF object [FID_SEQ_DOT_LUSTRE:FID_OID_DOT_LUSTRE_OBF]. */
+static inline int fid_is_obf(const struct lu_fid *fid)
+{
+ return unlikely(fid_seq(fid) == FID_SEQ_DOT_LUSTRE &&
+ fid_oid(fid) == FID_OID_DOT_LUSTRE_OBF);
+}
+
+/* True if @fid is the local object-table iterator anchor
+ * [FID_SEQ_LOCAL_FILE:OTABLE_IT_OID]. */
static inline int fid_is_otable_it(const struct lu_fid *fid)
{
return unlikely(fid_seq(fid) == FID_SEQ_LOCAL_FILE &&
fid_oid(fid) == OTABLE_IT_OID);
}
+/* True if the OID names one of the quota accounting objects
+ * (user/group/project). Checks the OID only; the sequence check
+ * is done by the caller (see fid_is_acct()). */
+static inline int fid_oid_is_quota(const struct lu_fid *fid)
+{
+ switch (fid_oid(fid)) {
+ case ACCT_USER_OID:
+ case ACCT_GROUP_OID:
+ case ACCT_PROJECT_OID:
+ return 1;
+ default:
+ return 0;
+ }
+}
+
+/* True if @fid is a local quota accounting file
+ * [FID_SEQ_LOCAL_FILE:ACCT_{USER,GROUP,PROJECT}_OID]. */
static inline int fid_is_acct(const struct lu_fid *fid)
{
return fid_seq(fid) == FID_SEQ_LOCAL_FILE &&
- (fid_oid(fid) == ACCT_USER_OID ||
- fid_oid(fid) == ACCT_GROUP_OID);
+ fid_oid_is_quota(fid);
}
static inline int fid_is_quota(const struct lu_fid *fid)
fid_seq(fid) == FID_SEQ_QUOTA_GLB;
}
-static inline void lu_last_id_fid(struct lu_fid *fid, __u64 seq)
+/* True if @fid belongs to the named-llog sequence (FID_SEQ_LLOG_NAME). */
+static inline int fid_is_name_llog(const struct lu_fid *fid)
+{
+ return fid_seq(fid) == FID_SEQ_LLOG_NAME;
+}
+
+/* True if @fid may appear in the client-visible namespace: a normal or
+ * IGIF FID (excluding LAST_ID objects), the root FID, or a ".lustre"
+ * sequence FID. */
+static inline int fid_is_namespace_visible(const struct lu_fid *fid)
+{
+ const __u64 seq = fid_seq(fid);
+
+ /* Here, we cannot distinguish whether the normal FID is for OST
+ * object or not. It is caller's duty to check more if needed. */
+ return (!fid_is_last_id(fid) &&
+ (fid_seq_is_norm(seq) || fid_seq_is_igif(seq))) ||
+ fid_is_root(fid) || fid_seq_is_dot(seq);
+}
+
+/* True if sequences of this type are tracked in the FLD
+ * (FID location) database: IGIF, normal, root, or ".lustre". */
+static inline int fid_seq_in_fldb(__u64 seq)
+{
+ return fid_seq_is_igif(seq) || fid_seq_is_norm(seq) ||
+ fid_seq_is_root(seq) || fid_seq_is_dot(seq);
+}
+
+/* Convert struct ost_layout from CPU to little-endian byte order, field by field. */
+static inline void ost_layout_cpu_to_le(struct ost_layout *dst,
+ const struct ost_layout *src)
+{
+ dst->ol_stripe_size = __cpu_to_le32(src->ol_stripe_size);
+ dst->ol_stripe_count = __cpu_to_le32(src->ol_stripe_count);
+ dst->ol_comp_start = __cpu_to_le64(src->ol_comp_start);
+ dst->ol_comp_end = __cpu_to_le64(src->ol_comp_end);
+ dst->ol_comp_id = __cpu_to_le32(src->ol_comp_id);
+}
+
+/* Convert struct ost_layout from little-endian to CPU byte order, field by field. */
+static inline void ost_layout_le_to_cpu(struct ost_layout *dst,
+ const struct ost_layout *src)
+{
+ dst->ol_stripe_size = __le32_to_cpu(src->ol_stripe_size);
+ dst->ol_stripe_count = __le32_to_cpu(src->ol_stripe_count);
+ dst->ol_comp_start = __le64_to_cpu(src->ol_comp_start);
+ dst->ol_comp_end = __le64_to_cpu(src->ol_comp_end);
+ dst->ol_comp_id = __le32_to_cpu(src->ol_comp_id);
+}
+
+/* Convert struct filter_fid from CPU to little-endian byte order.
+ * @size is the record size: when it is smaller than the full structure
+ * only ff_parent is converted and the layout fields in @dst are zeroed. */
+static inline void filter_fid_cpu_to_le(struct filter_fid *dst,
+ const struct filter_fid *src, int size)
+{
+ fid_cpu_to_le(&dst->ff_parent, &src->ff_parent);
+
+ if (size < sizeof(struct filter_fid)) {
+ memset(&dst->ff_layout, 0, sizeof(dst->ff_layout));
+ } else {
+ ost_layout_cpu_to_le(&dst->ff_layout, &src->ff_layout);
+ dst->ff_layout_version = cpu_to_le32(src->ff_layout_version);
+ dst->ff_range = cpu_to_le32(src->ff_range);
+ }
+
+ /* XXX: Add more if filter_fid is enlarged in the future. */
+}
+
+/* Convert struct filter_fid from little-endian to CPU byte order.
+ * @size is the record size: when it is smaller than the full structure
+ * only ff_parent is converted and the layout fields in @dst are zeroed. */
+static inline void filter_fid_le_to_cpu(struct filter_fid *dst,
+ const struct filter_fid *src, int size)
+{
+ fid_le_to_cpu(&dst->ff_parent, &src->ff_parent);
+
+ if (size < sizeof(struct filter_fid)) {
+ memset(&dst->ff_layout, 0, sizeof(dst->ff_layout));
+ } else {
+ ost_layout_le_to_cpu(&dst->ff_layout, &src->ff_layout);
+ dst->ff_layout_version = le32_to_cpu(src->ff_layout_version);
+ dst->ff_range = le32_to_cpu(src->ff_range);
+ }
+
+ /* XXX: Add more if filter_fid is enlarged in the future. */
+}
+
+/* Build the FID (f_oid = 0) of the LAST_ID object for sequence @seq;
+ * for MDT0 sequences @ost_idx selects the IDIF sequence of the target OST. */
+static inline void lu_last_id_fid(struct lu_fid *fid, __u64 seq, __u32 ost_idx)
{
if (fid_seq_is_mdt0(seq)) {
- fid->f_seq = fid_idif_seq(0, 0);
+ fid->f_seq = fid_idif_seq(0, ost_idx);
} else {
LASSERTF(fid_seq_is_norm(seq) || fid_seq_is_echo(seq) ||
- fid_seq_is_idif(seq), LPX64"\n", seq);
+ fid_seq_is_idif(seq), "%#llx\n", seq);
fid->f_seq = seq;
}
fid->f_oid = 0;
fid->f_ver = 0;
}
+/* True if @fid names an object that metadata operations may act on:
+ * an MDT0, IGIF, normal, or root FID. */
+static inline bool fid_is_md_operative(const struct lu_fid *fid)
+{
+ return fid_is_mdt0(fid) || fid_is_igif(fid) ||
+ fid_is_norm(fid) || fid_is_root(fid);
+}
+
+/* seq client type */
+enum lu_cli_type {
+ LUSTRE_SEQ_METADATA = 1, /* sequence client used for metadata */
+ LUSTRE_SEQ_DATA /* sequence client used for data */
+};
+
enum lu_mgr_type {
LUSTRE_SEQ_SERVER,
LUSTRE_SEQ_CONTROLLER
*/
struct lu_seq_range lcs_space;
- /* Seq related proc */
- cfs_proc_dir_entry_t *lcs_proc_dir;
+ /* Seq related debugfs */
+ struct dentry *lcs_debugfs_entry;
/* This holds last allocated fid in last obtained seq */
struct lu_fid lcs_fid;
/*
* Service uuid, passed from MDT + seq name to form unique seq name to
- * use it with procfs.
+ * use it with debugfs.
*/
char lcs_name[80];
/* Seq-server for direct talking */
struct lu_server_seq *lcs_srv;
- /* wait queue for fid allocation and update indicator */
- cfs_waitq_t lcs_waitq;
- int lcs_update;
+ /* wait queue for fid allocation and update indicator */
+ wait_queue_head_t lcs_waitq;
+ int lcs_update;
};
/* server sequence manager interface */
/* /seq file object device */
struct dt_object *lss_obj;
- /* Seq related proc */
- cfs_proc_dir_entry_t *lss_proc_dir;
+ /* Seq related debugfs */
+ struct dentry *lss_debugfs_entry;
/* LUSTRE_SEQ_SERVER or LUSTRE_SEQ_CONTROLLER */
enum lu_mgr_type lss_type;
- /* Client interafce to request controller */
+ /* Client interface to request controller */
struct lu_client_seq *lss_cli;
/* Mutex for protecting allocation */
/*
* Service uuid, passed from MDT + seq name to form unique seq name to
- * use it with procfs.
+ * use it with debugfs.
*/
char lss_name[80];
struct seq_server_site *lss_site;
};
-int seq_query(struct com_thread_info *info);
+/* Groups the seq and FLD server/client instances attached to one lu_site. */
+struct seq_server_site {
+ struct lu_site *ss_lu;
+ /**
+ * mds number of this site.
+ */
+ u32 ss_node_id;
+ /**
+ * Fid location database
+ */
+ struct lu_server_fld *ss_server_fld;
+ struct lu_client_fld *ss_client_fld;
+
+ /**
+ * Server Seq Manager
+ */
+ struct lu_server_seq *ss_server_seq;
+
+ /**
+ * Controller Seq Manager
+ */
+ struct lu_server_seq *ss_control_seq;
+ struct obd_export *ss_control_exp;
+
+ /**
+ * Client Seq Manager
+ */
+ struct lu_client_seq *ss_client_seq;
+};
/* Server methods */
-int seq_server_init(struct lu_server_seq *seq,
+
+int seq_server_init(const struct lu_env *env,
+ struct lu_server_seq *seq,
struct dt_device *dev,
const char *prefix,
enum lu_mgr_type type,
- struct seq_server_site *ss,
- const struct lu_env *env);
+ struct seq_server_site *ss);
void seq_server_fini(struct lu_server_seq *seq,
const struct lu_env *env);
struct lu_seq_range *out,
const struct lu_env *env);
-int seq_server_set_cli(struct lu_server_seq *seq,
- struct lu_client_seq *cli,
- const struct lu_env *env);
+int seq_server_set_cli(const struct lu_env *env,
+ struct lu_server_seq *seq,
+ struct lu_client_seq *cli);
+int seq_server_check_and_alloc_super(const struct lu_env *env,
+ struct lu_server_seq *seq);
/* Client methods */
int seq_client_init(struct lu_client_seq *seq,
struct obd_export *exp,
int seq_client_alloc_fid(const struct lu_env *env, struct lu_client_seq *seq,
struct lu_fid *fid);
int seq_client_get_seq(const struct lu_env *env, struct lu_client_seq *seq,
- seqno_t *seqnr);
-
+ u64 *seqnr);
int seq_site_fini(const struct lu_env *env, struct seq_server_site *ss);
/* Fids common stuff */
int fid_is_local(const struct lu_env *env,
struct lu_site *site, const struct lu_fid *fid);
-int client_fid_init(struct obd_export *exp, enum lu_cli_type type);
-int client_fid_fini(struct obd_export *exp);
+enum lu_cli_type;
+int client_fid_init(struct obd_device *obd, struct obd_export *exp,
+ enum lu_cli_type type);
+int client_fid_fini(struct obd_device *obd);
/* fid locking */
* but was moved into name[1] along with the OID to avoid consuming the
* renaming name[2,3] fields that need to be used for the quota identifier.
*/
-static inline struct ldlm_res_id *
-fid_build_reg_res_name(const struct lu_fid *f,
- struct ldlm_res_id *name)
+/* Pack @fid into a DLM resource name: name[0] = sequence,
+ * name[1] = (version << 32) | oid. */
+static inline void
+fid_build_reg_res_name(const struct lu_fid *fid, struct ldlm_res_id *res)
{
- memset(name, 0, sizeof *name);
- name->name[LUSTRE_RES_ID_SEQ_OFF] = fid_seq(f);
- name->name[LUSTRE_RES_ID_VER_OID_OFF] = fid_ver_oid(f);
- return name;
+ memset(res, 0, sizeof(*res));
+ res->name[LUSTRE_RES_ID_SEQ_OFF] = fid_seq(fid);
+ res->name[LUSTRE_RES_ID_VER_OID_OFF] = fid_ver_oid(fid);
+}
+
+/*
+ * Return true if resource is for object identified by FID.
+ * (Inverse check of fid_build_reg_res_name().)
+ */
+static inline int fid_res_name_eq(const struct lu_fid *fid,
+ const struct ldlm_res_id *res)
+{
+ return res->name[LUSTRE_RES_ID_SEQ_OFF] == fid_seq(fid) &&
+ res->name[LUSTRE_RES_ID_VER_OID_OFF] == fid_ver_oid(fid);
+}
+
+/*
+ * Extract FID from LDLM resource. Reverse of fid_build_reg_res_name().
+ * LASSERTs that the extracted FID round-trips back to @res.
+ */
+static inline void
+fid_extract_from_res_name(struct lu_fid *fid, const struct ldlm_res_id *res)
+{
+ fid->f_seq = res->name[LUSTRE_RES_ID_SEQ_OFF];
+ fid->f_oid = (__u32)(res->name[LUSTRE_RES_ID_VER_OID_OFF]);
+ fid->f_ver = (__u32)(res->name[LUSTRE_RES_ID_VER_OID_OFF] >> 32);
+ LASSERT(fid_res_name_eq(fid, res));
+}
/*
* Build (DLM) resource identifier from global quota FID and quota ID.
*/
+/* The quota ID's FID occupies the QUOTA_* name slots after the regular
+ * FID fields filled by fid_build_reg_res_name(). */
-static inline struct ldlm_res_id *
-fid_build_quota_resid(const struct lu_fid *glb_fid, union lquota_id *qid,
+static inline void
+fid_build_quota_res_name(const struct lu_fid *glb_fid, union lquota_id *qid,
struct ldlm_res_id *res)
{
fid_build_reg_res_name(glb_fid, res);
res->name[LUSTRE_RES_ID_QUOTA_SEQ_OFF] = fid_seq(&qid->qid_fid);
res->name[LUSTRE_RES_ID_QUOTA_VER_OID_OFF] = fid_ver_oid(&qid->qid_fid);
- return res;
}
/*
* Extract global FID and quota ID from resource name
*/
+/* Inverse of fid_build_quota_res_name(); the global-FID part is
+ * delegated to fid_extract_from_res_name(). */
-static inline void fid_extract_quota_resid(struct ldlm_res_id *res,
- struct lu_fid *glb_fid,
- union lquota_id *qid)
+static inline void fid_extract_from_quota_res(struct lu_fid *glb_fid,
+ union lquota_id *qid,
+ const struct ldlm_res_id *res)
{
- glb_fid->f_seq = res->name[LUSTRE_RES_ID_SEQ_OFF];
- glb_fid->f_oid = (__u32)res->name[LUSTRE_RES_ID_VER_OID_OFF];
- glb_fid->f_ver = (__u32)(res->name[LUSTRE_RES_ID_VER_OID_OFF] >> 32);
-
+ fid_extract_from_res_name(glb_fid, res);
qid->qid_fid.f_seq = res->name[LUSTRE_RES_ID_QUOTA_SEQ_OFF];
qid->qid_fid.f_oid = (__u32)res->name[LUSTRE_RES_ID_QUOTA_VER_OID_OFF];
qid->qid_fid.f_ver =
(__u32)(res->name[LUSTRE_RES_ID_QUOTA_VER_OID_OFF] >> 32);
}
-/*
- * Return true if resource is for object identified by fid.
+/* Build a PDO lock resource name: the regular FID resource name with
+ * @hash stored at LUSTRE_RES_ID_HSH_OFF. */
+static inline void
+fid_build_pdo_res_name(const struct lu_fid *fid, unsigned int hash,
+ struct ldlm_res_id *res)
+{
+ fid_build_reg_res_name(fid, res);
+ res->name[LUSTRE_RES_ID_HSH_OFF] = hash;
+}
+
+/**
+ * Build DLM resource name from object id & seq, which will be removed
+ * finally, when we replace ost_id with FID in data stack.
+ *
+ * Currently, resid from the old client, whose res[0] = object_id,
+ * res[1] = object_seq, is just opposite with Metadata
+ * resid, where, res[0] = fid->f_seq, res[1] = fid->f_oid.
+ * To unify the resid identification, we will reverse the data
+ * resid to keep it same with Metadata resid, i.e.
+ *
+ * For resid from the old client,
+ * res[0] = objid, res[1] = 0, still keep the original order,
+ * for compatibility.
+ *
+ * For new resid
+ * res will be built from normal FID directly, i.e. res[0] = f_seq,
+ * res[1] = f_oid + f_ver.
+ */
-static inline int fid_res_name_eq(const struct lu_fid *f,
- const struct ldlm_res_id *name)
+static inline void ostid_build_res_name(const struct ost_id *oi,
+ struct ldlm_res_id *name)
{
- return name->name[LUSTRE_RES_ID_SEQ_OFF] == fid_seq(f) &&
- name->name[LUSTRE_RES_ID_VER_OID_OFF] == fid_ver_oid(f);
+ memset(name, 0, sizeof *name);
+ if (fid_seq_is_mdt0(ostid_seq(oi))) {
+ name->name[LUSTRE_RES_ID_SEQ_OFF] = ostid_id(oi);
+ name->name[LUSTRE_RES_ID_VER_OID_OFF] = ostid_seq(oi);
+ } else {
+ fid_build_reg_res_name(&oi->oi_fid, name);
+ }
+}
+
+/**
+ * Return true if the resource is for the object identified by this id & group.
+ * Handles both the old (MDT0: id/seq swapped) and new (FID-ordered) layouts.
+ */
+static inline bool ostid_res_name_eq(const struct ost_id *oi,
+ const struct ldlm_res_id *name)
+{
+ /* Note: it is just a trick here to save some effort, probably the
+ * correct way would be turn them into the FID and compare */
+ if (fid_seq_is_mdt0(ostid_seq(oi))) {
+ return name->name[LUSTRE_RES_ID_SEQ_OFF] == ostid_id(oi) &&
+ name->name[LUSTRE_RES_ID_VER_OID_OFF] == ostid_seq(oi);
+ } else {
+ return name->name[LUSTRE_RES_ID_SEQ_OFF] == ostid_seq(oi) &&
+ name->name[LUSTRE_RES_ID_VER_OID_OFF] == ostid_id(oi);
+ }
+}
+
+/**
+ * Note: we need check oi_seq to decide where to set oi_id,
+ * so oi_seq should always be set ahead of oi_id.
+ * Returns 0 on success, -E2BIG when @oid exceeds the limit of the FID type.
+ */
+static inline int ostid_set_id(struct ost_id *oi, __u64 oid)
+{
+ if (fid_seq_is_mdt0(oi->oi.oi_seq)) {
+ if (oid >= IDIF_MAX_OID)
+ return -E2BIG;
+ oi->oi.oi_id = oid;
+ } else if (fid_is_idif(&oi->oi_fid)) {
+ if (oid >= IDIF_MAX_OID)
+ return -E2BIG;
+ oi->oi_fid.f_seq = fid_idif_seq(oid,
+ fid_idif_ost_idx(&oi->oi_fid));
+ oi->oi_fid.f_oid = oid;
+ oi->oi_fid.f_ver = oid >> 48;
+ } else {
+ if (oid >= OBIF_MAX_OID)
+ return -E2BIG;
+ oi->oi_fid.f_oid = oid;
+ }
+ return 0;
}
-/* reverse function of fid_build_reg_res_name() */
-static inline void fid_build_from_res_name(struct lu_fid *f,
- const struct ldlm_res_id *name)
+/* Pack any OST FID into an ostid (id/seq) for the wire/disk.
+ * Returns 0, -EBADF for IGIF FIDs, or -E2BIG propagated from
+ * ostid_set_id() for oversized IDIF ids. */
+static inline int fid_to_ostid(const struct lu_fid *fid, struct ost_id *ostid)
{
- fid_zero(f);
- f->f_seq = name->name[LUSTRE_RES_ID_SEQ_OFF];
- f->f_oid = name->name[LUSTRE_RES_ID_VER_OID_OFF] & 0xffffffff;
- f->f_ver = name->name[LUSTRE_RES_ID_VER_OID_OFF] >> 32;
- LASSERT(fid_res_name_eq(f, name));
+ int rc = 0;
+
+ if (fid_seq_is_igif(fid->f_seq))
+ return -EBADF;
+
+ if (fid_is_idif(fid)) {
+ ostid_set_seq_mdt0(ostid);
+ rc = ostid_set_id(ostid, fid_idif_id(fid_seq(fid),
+ fid_oid(fid), fid_ver(fid)));
+ } else {
+ ostid->oi_fid = *fid;
+ }
+
+ return rc;
}
-static inline struct ldlm_res_id *
-fid_build_pdo_res_name(const struct lu_fid *f,
- unsigned int hash,
- struct ldlm_res_id *name)
+/* The same as osc_build_res_name().
+ * NOTE(review): if fid_to_ostid() fails, @resname is returned unmodified —
+ * presumably unreachable for MDT0/IDIF FIDs, but confirm callers cope. */
+static inline void ost_fid_build_resid(const struct lu_fid *fid,
+ struct ldlm_res_id *resname)
{
- fid_build_reg_res_name(f, name);
- name->name[LUSTRE_RES_ID_HSH_OFF] = hash;
- return name;
+ if (fid_is_mdt0(fid) || fid_is_idif(fid)) {
+ struct ost_id oi;
+ oi.oi.oi_id = 0; /* gcc 4.7.2 complains otherwise */
+ if (fid_to_ostid(fid, &oi) != 0)
+ return;
+ ostid_build_res_name(&oi, resname);
+ } else {
+ fid_build_reg_res_name(fid, resname);
+ }
}
+/* Inverse of ost_fid_build_resid(): rebuild an OST object FID from a DLM
+ * resource name. Old-style (MDT0 id/seq) resources are converted through
+ * ostid_to_fid() with @ost_idx; new-style resources are extracted directly. */
+static inline void ost_fid_from_resid(struct lu_fid *fid,
+ const struct ldlm_res_id *name,
+ int ost_idx)
+{
+ if (fid_seq_is_mdt0(name->name[LUSTRE_RES_ID_VER_OID_OFF])) {
+ /* old resid */
+ struct ost_id oi;
+
+ memset(&oi, 0, sizeof(oi));
+ ostid_set_seq(&oi, name->name[LUSTRE_RES_ID_VER_OID_OFF]);
+ if (ostid_set_id(&oi, name->name[LUSTRE_RES_ID_SEQ_OFF])) {
+ CERROR("Bad %llu to set " DOSTID "\n",
+ name->name[LUSTRE_RES_ID_SEQ_OFF], POSTID(&oi));
+ }
+ ostid_to_fid(fid, &oi, ost_idx);
+ } else {
+ /* new resid */
+ fid_extract_from_res_name(fid, name);
+ }
+}
/**
* Flatten 128-bit FID values into a 64-bit value for use as an inode number.
*/
static inline __u64 fid_flatten(const struct lu_fid *fid)
{
- __u64 ino;
- __u64 seq;
+ __u64 ino;
+ __u64 seq;
- if (fid_is_igif(fid)) {
- ino = lu_igif_ino(fid);
- RETURN(ino);
- }
+ if (fid_is_igif(fid)) {
+ ino = lu_igif_ino(fid);
+ return ino;
+ }
- seq = fid_seq(fid);
+ seq = fid_seq(fid);
- ino = (seq << 24) + ((seq >> 24) & 0xffffff0000ULL) + fid_oid(fid);
+ ino = (seq << 24) + ((seq >> 24) & 0xffffff0000ULL) + fid_oid(fid);
- RETURN(ino ? ino : fid_oid(fid));
+ return ino ?: fid_oid(fid);
}
static inline __u32 fid_hash(const struct lu_fid *f, int bits)
{
- /* all objects with same id and different versions will belong to same
- * collisions list. */
- return cfs_hash_long(fid_flatten(f), bits);
+ /* all objects with same id and different versions will belong to same
+ * collisions list. */
+ return hash_long(fid_flatten(f), bits);
}
/**
* map fid to 32 bit value for ino on 32bit systems. */
static inline __u32 fid_flatten32(const struct lu_fid *fid)
{
- __u32 ino;
- __u64 seq;
+ __u32 ino;
+ __u64 seq;
+
+ if (fid_is_igif(fid)) {
+ ino = lu_igif_ino(fid);
+ return ino;
+ }
+
+ seq = fid_seq(fid) - FID_SEQ_START;
+
+ /* Map the high bits of the OID into higher bits of the inode number so
+ * that inodes generated at about the same time have a reduced chance
+ * of collisions. This will give a period of 2^12 = 1024 unique clients
+ * (from SEQ) and up to min(LUSTRE_SEQ_MAX_WIDTH, 2^20) = 128k objects
+ * (from OID), or up to 128M inodes without collisions for new files. */
+ ino = ((seq & 0x000fffffULL) << 12) + ((seq >> 8) & 0xfffff000) +
+ (seq >> (64 - (40-8)) & 0xffffff00) +
+ (fid_oid(fid) & 0xff000fff) + ((fid_oid(fid) & 0x00fff000) << 8);
+
+ return ino ?: fid_oid(fid);
+}
+
+static inline int
+lu_fid_diff(const struct lu_fid *fid1, const struct lu_fid *fid2)
+{
+ LASSERTF(fid_seq(fid1) == fid_seq(fid2), "fid1:"DFID", fid2:"DFID"\n",
+ PFID(fid1), PFID(fid2));
- if (fid_is_igif(fid)) {
- ino = lu_igif_ino(fid);
- RETURN(ino);
- }
+ if (fid_is_idif(fid1) && fid_is_idif(fid2))
+ return fid_idif_id(fid1->f_seq, fid1->f_oid, fid1->f_ver) -
+ fid_idif_id(fid2->f_seq, fid2->f_oid, fid2->f_ver);
- seq = fid_seq(fid) - FID_SEQ_START;
+ return fid_oid(fid1) - fid_oid(fid2);
+}
- /* Map the high bits of the OID into higher bits of the inode number so
- * that inodes generated at about the same time have a reduced chance
- * of collisions. This will give a period of 2^12 = 1024 unique clients
- * (from SEQ) and up to min(LUSTRE_SEQ_MAX_WIDTH, 2^20) = 128k objects
- * (from OID), or up to 128M inodes without collisions for new files. */
- ino = ((seq & 0x000fffffULL) << 12) + ((seq >> 8) & 0xfffff000) +
- (seq >> (64 - (40-8)) & 0xffffff00) +
- (fid_oid(fid) & 0xff000fff) + ((fid_oid(fid) & 0x00fff000) << 8);
+static inline int fid_set_id(struct lu_fid *fid, u64 oid)
+{
+ if (unlikely(fid_seq_is_igif(fid->f_seq))) {
+ CERROR("bad IGIF, "DFID"\n", PFID(fid));
+ return -EBADF;
+ }
- RETURN(ino ? ino : fid_oid(fid));
+ if (fid_is_idif(fid)) {
+ if (oid >= IDIF_MAX_OID) {
+ CERROR("Too large OID %#llx to set IDIF "DFID"\n",
+ (unsigned long long)oid, PFID(fid));
+ return -EBADF;
+ }
+ fid->f_seq = fid_idif_seq(oid, fid_idif_ost_idx(fid));
+ fid->f_oid = oid;
+ fid->f_ver = oid >> 48;
+ } else {
+ if (oid > OBIF_MAX_OID) {
+ CERROR("Too large OID %#llx to set REG "DFID"\n",
+ (unsigned long long)oid, PFID(fid));
+ return -EBADF;
+ }
+ fid->f_oid = oid;
+ }
+ return 0;
}
#define LUSTRE_SEQ_SRV_NAME "seq_srv"
#define LUSTRE_SEQ_CTL_NAME "seq_ctl"
/* Range common stuff */
-static inline void range_cpu_to_le(struct lu_seq_range *dst, const struct lu_seq_range *src)
+/* lu_seq_range: CPU to little-endian byte order. */
+static inline void
+range_cpu_to_le(struct lu_seq_range *dst, const struct lu_seq_range *src)
{
dst->lsr_start = cpu_to_le64(src->lsr_start);
dst->lsr_end = cpu_to_le64(src->lsr_end);
dst->lsr_flags = cpu_to_le32(src->lsr_flags);
}
-static inline void range_le_to_cpu(struct lu_seq_range *dst, const struct lu_seq_range *src)
+/* lu_seq_range: little-endian to CPU byte order. */
+static inline void
+range_le_to_cpu(struct lu_seq_range *dst, const struct lu_seq_range *src)
{
dst->lsr_start = le64_to_cpu(src->lsr_start);
dst->lsr_end = le64_to_cpu(src->lsr_end);
dst->lsr_flags = le32_to_cpu(src->lsr_flags);
}
-static inline void range_cpu_to_be(struct lu_seq_range *dst, const struct lu_seq_range *src)
+/* lu_seq_range: CPU to big-endian byte order. */
+static inline void
+range_cpu_to_be(struct lu_seq_range *dst, const struct lu_seq_range *src)
{
dst->lsr_start = cpu_to_be64(src->lsr_start);
dst->lsr_end = cpu_to_be64(src->lsr_end);
dst->lsr_flags = cpu_to_be32(src->lsr_flags);
}
-static inline void range_be_to_cpu(struct lu_seq_range *dst, const struct lu_seq_range *src)
+/* lu_seq_range: big-endian to CPU byte order. */
+static inline void
+range_be_to_cpu(struct lu_seq_range *dst, const struct lu_seq_range *src)
{
dst->lsr_start = be64_to_cpu(src->lsr_start);
dst->lsr_end = be64_to_cpu(src->lsr_end);
dst->lsr_flags = be32_to_cpu(src->lsr_flags);
}
+/* Convert a whole lu_seq_range_array to little-endian, lsra_count included. */
+static inline void range_array_cpu_to_le(struct lu_seq_range_array *dst,
+ const struct lu_seq_range_array *src)
+{
+ __u32 i;
+
+ for (i = 0; i < src->lsra_count; i++)
+ range_cpu_to_le(&dst->lsra_lsr[i], &src->lsra_lsr[i]);
+
+ dst->lsra_count = cpu_to_le32(src->lsra_count);
+}
+
+/* Convert a lu_seq_range_array from little-endian; lsra_count is decoded
+ * first so it can bound the element conversion loop. */
+static inline void range_array_le_to_cpu(struct lu_seq_range_array *dst,
+ const struct lu_seq_range_array *src)
+{
+ __u32 i;
+
+ dst->lsra_count = le32_to_cpu(src->lsra_count);
+ for (i = 0; i < dst->lsra_count; i++)
+ range_le_to_cpu(&dst->lsra_lsr[i], &src->lsra_lsr[i]);
+}
+
/** @} fid */
-#endif /* __LINUX_FID_H */
+#endif /* __LUSTRE_FID_H */