#ifndef __LINUX_FID_H
#define __LINUX_FID_H
+/** \defgroup fid fid
+ *
+ * @{
+ */
+
/*
* struct lu_fid
*/
struct lu_context;
/* Whole sequences space range and zero range definitions */
-extern const struct lu_range LUSTRE_SEQ_SPACE_RANGE;
-extern const struct lu_range LUSTRE_SEQ_ZERO_RANGE;
+extern const struct lu_seq_range LUSTRE_SEQ_SPACE_RANGE;
+extern const struct lu_seq_range LUSTRE_SEQ_ZERO_RANGE;
extern const struct lu_fid LUSTRE_BFL_FID;
+extern const struct lu_fid LU_OBF_FID;
+extern const struct lu_fid LU_DOT_LUSTRE_FID;
enum {
        /*
         * This is how many FIDs may be allocated in one sequence. 1024
         * for now.
         */
-        LUSTRE_SEQ_MAX_WIDTH = 0x0000000000004000ULL,
+        LUSTRE_SEQ_MAX_WIDTH = 0x0000000000000400ULL,
        /*
         * This is how many sequences may be in one super-sequence allocated
         * to MDTs.
         */
-        LUSTRE_SEQ_SUPER_WIDTH = (LUSTRE_SEQ_META_WIDTH * LUSTRE_SEQ_META_WIDTH)
+        LUSTRE_SEQ_SUPER_WIDTH = ((1ULL << 30) * LUSTRE_SEQ_META_WIDTH)
};
/** special fid seq: used for local object create. */
#define FID_SEQ_LOCAL_FILE (FID_SEQ_START + 1)
+/** special fid seq: used for .lustre objects. */
+#define LU_DOT_LUSTRE_SEQ (FID_SEQ_START + 0x02ULL)
+
+/* Note that reserved SEQ numbers below 12 will conflict with ldiskfs
+ * inodes in the IGIF namespace, so these reserved SEQ numbers must be
+ * used sparingly until ldiskfs-based MDT backends and/or IGIF FIDs
+ * have been completely removed. */
+
+/** fid sequence for distributed fs objects */
+#define FID_SEQ_DISTRIBUTED_START (FID_SEQ_START + 0x400ULL)
+
/** special OID for local objects */
enum {
/** \see osd_oi_index_create */
- OSD_OI_FID_SMALL_OID = 1UL,
- OSD_OI_FID_OTHER_OID = 2UL,
+ OSD_OI_FID_16_OID = 2UL,
/** \see fld_mod_init */
FLD_INDEX_OID = 3UL,
/** \see fid_mod_init */
struct lu_client_seq {
/* Sequence-controller export. */
struct obd_export *lcs_exp;
- struct semaphore lcs_sem;
+ cfs_semaphore_t lcs_sem;
/*
 * Range of sequences allowed for allocation. When lu_client_seq is used
 * on clients, this contains the meta-sequence range; on servers it
 * contains the super-sequence range.
 */
- struct lu_range lcs_space;
+ struct lu_seq_range lcs_space;
/* Seq related proc */
cfs_proc_dir_entry_t *lcs_proc_dir;
/* server sequence manager interface */
struct lu_server_seq {
/* Available sequences space */
- struct lu_range lss_space;
+ struct lu_seq_range lss_space;
/*
* Device for server side seq manager needs (saving sequences to backing
struct lu_client_seq *lss_cli;
/* Semaphore for protecting allocation */
- struct semaphore lss_sem;
+ cfs_semaphore_t lss_sem;
/*
 * Service uuid, passed from MDT + seq name, forms a unique seq name
 * used with procfs. The allocation width defaults are
 * LUSTRE_SEQ_SUPER_WIDTH and LUSTRE_SEQ_META_WIDTH.
 */
__u64 lss_width;
+
+ /**
+ * Pointer to site object, required to access site fld.
+ */
+ struct md_site *lss_site;
};
int seq_query(struct com_thread_info *info);
struct dt_device *dev,
const char *prefix,
enum lu_mgr_type type,
+ struct md_site *ls,
const struct lu_env *env);
void seq_server_fini(struct lu_server_seq *seq,
const struct lu_env *env);
int seq_server_alloc_super(struct lu_server_seq *seq,
- struct lu_range *in,
- struct lu_range *out,
+ struct lu_seq_range *in,
+ struct lu_seq_range *out,
const struct lu_env *env);
int seq_server_alloc_meta(struct lu_server_seq *seq,
- struct lu_range *in,
- struct lu_range *out,
+ struct lu_seq_range *in,
+ struct lu_seq_range *out,
const struct lu_env *env);
int seq_server_set_cli(struct lu_server_seq *seq,
struct lu_fid *fid);
/* Fids common stuff */
-int fid_is_local(struct lu_site *site, const struct lu_fid *fid);
+int fid_is_local(const struct lu_env *env,
+ struct lu_site *site, const struct lu_fid *fid);
/* fid locking */
#define LUSTRE_SEQ_CTL_NAME "seq_ctl"
/* Range common stuff */
-void range_cpu_to_le(struct lu_range *dst, const struct lu_range *src);
-void range_cpu_to_be(struct lu_range *dst, const struct lu_range *src);
-void range_le_to_cpu(struct lu_range *dst, const struct lu_range *src);
-void range_be_to_cpu(struct lu_range *dst, const struct lu_range *src);
+/**
+ * Convert a sequence range from CPU byte order to little-endian,
+ * field by field (lsr_start, lsr_end, lsr_mdt). \a dst and \a src
+ * may point to the same range.
+ */
+static inline void range_cpu_to_le(struct lu_seq_range *dst, const struct lu_seq_range *src)
+{
+ dst->lsr_start = cpu_to_le64(src->lsr_start);
+ dst->lsr_end = cpu_to_le64(src->lsr_end);
+ dst->lsr_mdt = cpu_to_le32(src->lsr_mdt);
+}
+
+/**
+ * Convert a sequence range from little-endian to CPU byte order,
+ * field by field (lsr_start, lsr_end, lsr_mdt). Inverse of
+ * range_cpu_to_le(); \a dst and \a src may point to the same range.
+ */
+static inline void range_le_to_cpu(struct lu_seq_range *dst, const struct lu_seq_range *src)
+{
+ dst->lsr_start = le64_to_cpu(src->lsr_start);
+ dst->lsr_end = le64_to_cpu(src->lsr_end);
+ dst->lsr_mdt = le32_to_cpu(src->lsr_mdt);
+}
+
+/**
+ * Convert a sequence range from CPU byte order to big-endian,
+ * field by field (lsr_start, lsr_end, lsr_mdt). \a dst and \a src
+ * may point to the same range.
+ */
+static inline void range_cpu_to_be(struct lu_seq_range *dst, const struct lu_seq_range *src)
+{
+ dst->lsr_start = cpu_to_be64(src->lsr_start);
+ dst->lsr_end = cpu_to_be64(src->lsr_end);
+ dst->lsr_mdt = cpu_to_be32(src->lsr_mdt);
+}
+
+/**
+ * Convert a sequence range from big-endian to CPU byte order,
+ * field by field (lsr_start, lsr_end, lsr_mdt). Inverse of
+ * range_cpu_to_be(); \a dst and \a src may point to the same range.
+ */
+static inline void range_be_to_cpu(struct lu_seq_range *dst, const struct lu_seq_range *src)
+{
+ dst->lsr_start = be64_to_cpu(src->lsr_start);
+ dst->lsr_end = be64_to_cpu(src->lsr_end);
+ dst->lsr_mdt = be32_to_cpu(src->lsr_mdt);
+}
+
+/** @} fid */
#endif /* __LINUX_FID_H */