* Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
*
- * Copyright (c) 2011, 2013, Intel Corporation.
+ * Copyright (c) 2011, 2014, Intel Corporation.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
extern const struct lu_seq_range LUSTRE_SEQ_ZERO_RANGE;
extern const struct lu_fid LUSTRE_BFL_FID;
extern const struct lu_fid LU_OBF_FID;
+extern const struct lu_fid LU_LPF_FID;
extern const struct lu_fid LU_DOT_LUSTRE_FID;
+extern const struct lu_fid LU_BACKEND_LPF_FID;
enum {
/*
ACCT_GROUP_OID = 16UL,
LFSCK_BOOKMARK_OID = 17UL,
OTABLE_IT_OID = 18UL,
+ OSD_LPF_OID = 19UL,
/* These two definitions are obsolete
* OFD_GROUP0_LAST_OID = 20UL,
* OFD_GROUP4K_LAST_OID = 20UL+4096,
MDD_LOV_OBJ_OSEQ = 4121UL,
LFSCK_NAMESPACE_OID = 4122UL,
REMOTE_PARENT_DIR_OID = 4123UL,
+ /* This definition is obsolete
+ * SLAVE_LLOG_CATALOGS_OID = 4124UL,
+ */
};
static inline void lu_local_obj_fid(struct lu_fid *fid, __u32 oid)
/*
 * Test whether @fid is the filesystem root FID, i.e. sequence FID_SEQ_ROOT
 * with the root object id.  The change replaces the magic constant 1 with
 * the named macro FID_OID_ROOT -- NOTE(review): presumably defined as 1;
 * verify against the macro's definition before landing.
 */
static inline int fid_is_root(const struct lu_fid *fid)
{
return unlikely((fid_seq(fid) == FID_SEQ_ROOT &&
- fid_oid(fid) == 1));
+ fid_oid(fid) == FID_OID_ROOT));
}
static inline int fid_is_dot_lustre(const struct lu_fid *fid)
* object or not. It is caller's duty to check more if needed. */
return (!fid_is_last_id(fid) &&
(fid_seq_is_norm(seq) || fid_seq_is_igif(seq))) ||
- fid_is_root(fid) || fid_is_dot_lustre(fid);
+ fid_is_root(fid) || fid_seq_is_dot(seq);
}
static inline int fid_seq_in_fldb(__u64 seq)
fid->f_ver = 0;
}
+/*
+ * NOTE(review): new helper added wholesale by this patch.  Returns true
+ * when @fid falls in one of the namespaces accepted here: MDT0, IGIF,
+ * normal, or the root FID.  The name suggests "valid target for MDT
+ * metadata operations" -- confirm intended semantics with the callers
+ * introduced elsewhere in this series.
+ */
+static inline bool fid_is_md_operative(const struct lu_fid *fid)
+{
+ return fid_is_mdt0(fid) || fid_is_igif(fid) ||
+ fid_is_norm(fid) || fid_is_root(fid);
+}
+
/* seq client type */
enum lu_cli_type {
LUSTRE_SEQ_METADATA = 1,
struct lu_seq_range lcs_space;
/* Seq related proc */
- cfs_proc_dir_entry_t *lcs_proc_dir;
+ struct proc_dir_entry *lcs_proc_dir;
/* This holds last allocated fid in last obtained seq */
struct lu_fid lcs_fid;
struct dt_object *lss_obj;
/* Seq related proc */
- cfs_proc_dir_entry_t *lss_proc_dir;
+ struct proc_dir_entry *lss_proc_dir;
/* LUSTRE_SEQ_SERVER or LUSTRE_SEQ_CONTROLLER */
enum lu_mgr_type lss_type;
- /* Client interafce to request controller */
+ /* Client interface to request controller */
struct lu_client_seq *lss_cli;
/* Mutex for protecting allocation */
struct seq_server_site *lss_site;
};
+/*
+ * NOTE(review): struct added wholesale by this patch.  Per-site state
+ * bundling the sequence managers and FID-location (FLD) handles for one
+ * target; referenced via lss_site in lu_server_seq above.  Presumably
+ * relocated here from another header rather than newly invented --
+ * confirm the old definition is removed in the same series.
+ */
+struct seq_server_site {
+ struct lu_site *ss_lu;
+ /**
+ * mds number of this site.
+ */
+ u32 ss_node_id;
+ /**
+ * Fid location database
+ */
+ struct lu_server_fld *ss_server_fld;
+ /* client-side FLD handle -- NOTE(review): undocumented in the patch;
+ * presumably used to query remote FLD servers.  Confirm. */
+ struct lu_client_fld *ss_client_fld;
+
+ /**
+ * Server Seq Manager
+ */
+ struct lu_server_seq *ss_server_seq;
+
+ /**
+ * Controller Seq Manager
+ */
+ struct lu_server_seq *ss_control_seq;
+ struct obd_export *ss_control_exp;
+
+ /**
+ * Client Seq Manager
+ */
+ struct lu_client_seq *ss_client_seq;
+};
+
/* Server methods */
int seq_server_init(const struct lu_env *env,
int seq_client_alloc_fid(const struct lu_env *env, struct lu_client_seq *seq,
struct lu_fid *fid);
int seq_client_get_seq(const struct lu_env *env, struct lu_client_seq *seq,
- seqno_t *seqnr);
+ u64 *seqnr);
int seq_site_fini(const struct lu_env *env, struct seq_server_site *ss);
/* Fids common stuff */
int fid_is_local(const struct lu_env *env,
* but was moved into name[1] along with the OID to avoid consuming the
* renaming name[2,3] fields that need to be used for the quota identifier.
*/
/*
 * Build a DLM resource name from @fid: name[SEQ_OFF] = f_seq and
 * name[VER_OID_OFF] = packed version/oid word (via fid_ver_oid());
 * all other words are zeroed by the memset.
 *
 * NOTE(review): the patch changes the return type from the res pointer
 * to void and drops "return res".  Every caller that chained on the
 * returned pointer must be converted in the same series -- verify.
 */
-static inline struct ldlm_res_id *
+static inline void
fid_build_reg_res_name(const struct lu_fid *fid, struct ldlm_res_id *res)
{
memset(res, 0, sizeof(*res));
res->name[LUSTRE_RES_ID_SEQ_OFF] = fid_seq(fid);
res->name[LUSTRE_RES_ID_VER_OID_OFF] = fid_ver_oid(fid);
-
- return res;
}
/*
/*
* Extract FID from LDLM resource. Reverse of fid_build_reg_res_name().
*/
/*
 * Inverse of fid_build_reg_res_name(): f_seq comes from name[SEQ_OFF],
 * f_oid from the low 32 bits and f_ver from the high 32 bits of
 * name[VER_OID_OFF].  The trailing LASSERT round-trips the result
 * through fid_res_name_eq() as a sanity check.
 *
 * NOTE(review): return type changed to void and "return fid" dropped --
 * same caller-conversion caveat as fid_build_reg_res_name().
 */
-static inline struct lu_fid *
+static inline void
fid_extract_from_res_name(struct lu_fid *fid, const struct ldlm_res_id *res)
{
fid->f_seq = res->name[LUSTRE_RES_ID_SEQ_OFF];
fid->f_oid = (__u32)(res->name[LUSTRE_RES_ID_VER_OID_OFF]);
fid->f_ver = (__u32)(res->name[LUSTRE_RES_ID_VER_OID_OFF] >> 32);
LASSERT(fid_res_name_eq(fid, res));
-
- return fid;
}
/*
* Build (DLM) resource identifier from global quota FID and quota ID.
*/
/*
 * Build a quota DLM resource name: the first two words come from the
 * global quota FID via fid_build_reg_res_name(), then the quota
 * identifier's FID fills the QUOTA_SEQ/QUOTA_VER_OID words.
 *
 * NOTE(review): return type changed to void, "return res" dropped --
 * same caller-conversion caveat as the other *_res_name helpers.
 */
-static inline struct ldlm_res_id *
+static inline void
fid_build_quota_res_name(const struct lu_fid *glb_fid, union lquota_id *qid,
struct ldlm_res_id *res)
{
fid_build_reg_res_name(glb_fid, res);
res->name[LUSTRE_RES_ID_QUOTA_SEQ_OFF] = fid_seq(&qid->qid_fid);
res->name[LUSTRE_RES_ID_QUOTA_VER_OID_OFF] = fid_ver_oid(&qid->qid_fid);
-
- return res;
}
/*
(__u32)(res->name[LUSTRE_RES_ID_QUOTA_VER_OID_OFF] >> 32);
}
/*
 * Build a parallel-directory-operation (pdo) resource name: the regular
 * FID-based name plus @hash stored in the HSH_OFF word.
 *
 * NOTE(review): return type changed to void, "return res" dropped --
 * same caller-conversion caveat as the other *_res_name helpers.
 */
-static inline struct ldlm_res_id *
+static inline void
fid_build_pdo_res_name(const struct lu_fid *fid, unsigned int hash,
struct ldlm_res_id *res)
{
fid_build_reg_res_name(fid, res);
res->name[LUSTRE_RES_ID_HSH_OFF] = hash;
-
- return res;
}
/**
* res will be built from normal FID directly, i.e. res[0] = f_seq,
* res[1] = f_oid + f_ver.
*/
-static inline void ostid_build_res_name(struct ost_id *oi,
+static inline void ostid_build_res_name(const struct ost_id *oi,
struct ldlm_res_id *name)
{
memset(name, 0, sizeof *name);
/**
* Return true if the resource is for the object identified by this id & group.
*/
-static inline int ostid_res_name_eq(struct ost_id *oi,
- struct ldlm_res_id *name)
+static inline bool ostid_res_name_eq(const struct ost_id *oi,
+ const struct ldlm_res_id *name)
{
/* Note: it is just a trick here to save some effort, probably the
* correct way would be turn them into the FID and compare */
RETURN(ino ? ino : fid_oid(fid));
}
-static inline int lu_fid_diff(struct lu_fid *fid1, struct lu_fid *fid2)
+static inline int
+lu_fid_diff(const struct lu_fid *fid1, const struct lu_fid *fid2)
{
LASSERTF(fid_seq(fid1) == fid_seq(fid2), "fid1:"DFID", fid2:"DFID"\n",
PFID(fid1), PFID(fid2));
#define LUSTRE_SEQ_CTL_NAME "seq_ctl"
/* Range common stuff */
/* Convert a lu_seq_range from CPU to little-endian byte order, field by
 * field (start/end are 64-bit, flags 32-bit).  The patch only rewraps
 * the over-80-column signature; no behavior change. */
-static inline void range_cpu_to_le(struct lu_seq_range *dst, const struct lu_seq_range *src)
+static inline void
+range_cpu_to_le(struct lu_seq_range *dst, const struct lu_seq_range *src)
{
dst->lsr_start = cpu_to_le64(src->lsr_start);
dst->lsr_end = cpu_to_le64(src->lsr_end);
dst->lsr_flags = cpu_to_le32(src->lsr_flags);
}
/* Convert a lu_seq_range from little-endian to CPU byte order; inverse
 * of range_cpu_to_le().  Signature rewrap only; no behavior change. */
-static inline void range_le_to_cpu(struct lu_seq_range *dst, const struct lu_seq_range *src)
+static inline void
+range_le_to_cpu(struct lu_seq_range *dst, const struct lu_seq_range *src)
{
dst->lsr_start = le64_to_cpu(src->lsr_start);
dst->lsr_end = le64_to_cpu(src->lsr_end);
dst->lsr_flags = le32_to_cpu(src->lsr_flags);
}
/* Convert a lu_seq_range from CPU to big-endian byte order, field by
 * field.  Signature rewrap only; no behavior change. */
-static inline void range_cpu_to_be(struct lu_seq_range *dst, const struct lu_seq_range *src)
+static inline void
+range_cpu_to_be(struct lu_seq_range *dst, const struct lu_seq_range *src)
{
dst->lsr_start = cpu_to_be64(src->lsr_start);
dst->lsr_end = cpu_to_be64(src->lsr_end);
dst->lsr_flags = cpu_to_be32(src->lsr_flags);
}
-static inline void range_be_to_cpu(struct lu_seq_range *dst, const struct lu_seq_range *src)
+static inline void
+range_be_to_cpu(struct lu_seq_range *dst, const struct lu_seq_range *src)
{
dst->lsr_start = be64_to_cpu(src->lsr_start);
dst->lsr_end = be64_to_cpu(src->lsr_end);
static inline void range_array_cpu_to_le(struct lu_seq_range_array *dst,
const struct lu_seq_range_array *src)
{
- int i;
+ __u32 i;
for (i = 0; i < src->lsra_count; i++)
range_cpu_to_le(&dst->lsra_lsr[i], &src->lsra_lsr[i]);
static inline void range_array_le_to_cpu(struct lu_seq_range_array *dst,
const struct lu_seq_range_array *src)
{
- int i;
+ __u32 i;
dst->lsra_count = le32_to_cpu(src->lsra_count);
for (i = 0; i < dst->lsra_count; i++)