struct proc_dir_entry;
struct lustre_cfg;
struct lprocfs_stats;
+struct obd_type;
/** \defgroup lu lu
* lu_* data-types represent server-side entities shared by data and meta-data
*/
const struct lu_device_type_operations *ldt_ops;
/**
- * \todo XXX: temporary pointer to associated obd_type.
- */
- struct obd_type *ldt_obd_type;
- /**
* \todo XXX: temporary: context tags used by obd_*() calls.
*/
__u32 ldt_ctx_tags;
/**
* Mark this object has already been taken out of cache.
*/
- LU_OBJECT_UNHASHED = 1,
+ LU_OBJECT_UNHASHED = 1,
+ /**
+ * Object is initialized. When an object is found in cache, it may not be
+ * initialized yet; the object allocator will initialize it.
+ */
+ LU_OBJECT_INITED = 2
};
enum lu_object_header_attr {
return test_bit(LU_OBJECT_HEARD_BANSHEE, &h->loh_flags);
}
+/**
+ * Return true if object is initialized.
+ *
+ * Tests the LU_OBJECT_INITED bit in loh_flags, which (per the enum
+ * comment above) is set once the object allocator has finished
+ * initializing a cached object.
+ */
+static inline int lu_object_is_inited(const struct lu_object_header *h)
+{
+	return test_bit(LU_OBJECT_INITED, &h->loh_flags);
+}
+
void lu_object_put(const struct lu_env *env, struct lu_object *o);
void lu_object_put_nocache(const struct lu_env *env, struct lu_object *o);
void lu_object_unhash(const struct lu_env *env, struct lu_object *o);
return lu_device_is_cl(o->lo_dev);
}
-/* Generic subset of OSTs */
-struct ost_pool {
+/* Generic subset of tgts: a packed, growable array of target indices,
+ * usable for either OSTs or MDTs (see lu_tgt_descs below)
+ */
+struct lu_tgt_pool {
	__u32		   *op_array;	/* array of index of
					 * lov_obd->lov_tgts */
-	unsigned int	    op_count;	/* number of OSTs in the array */
-	unsigned int	    op_size;	/* allocated size of lp_array */
-	struct rw_semaphore op_rw_sem;	/* to protect ost_pool use */
+	unsigned int	    op_count;	/* number of tgts in the array */
+	unsigned int	    op_size;	/* allocated size of op_array */
+	struct rw_semaphore op_rw_sem;	/* to protect lu_tgt_pool use */
};
/* round-robin QoS data for LOD/LMV */
__u32 lqr_start_idx; /* start index of new inode */
__u32 lqr_offset_idx;/* aliasing for start_idx */
int lqr_start_count;/* reseed counter */
- struct ost_pool lqr_pool; /* round-robin optimized list */
+ struct lu_tgt_pool lqr_pool; /* round-robin optimized list */
unsigned long lqr_dirty:1; /* recalc round-robin list */
};
ltd_connecting:1; /* target is connecting */
};
+/* number of pointers at 1st level of the two-level tgt table */
+#define TGT_PTRS		(PAGE_SIZE / sizeof(void *))
+/* number of pointers at 2nd level */
+#define TGT_PTRS_PER_BLOCK	(PAGE_SIZE / sizeof(void *))
+
+/* One 2nd-level block of the tgt descriptor table: a page-sized array of
+ * lu_tgt_desc pointers, reached via lu_tgt_descs::ltd_tgt_idx.
+ */
+struct lu_tgt_desc_idx {
+	struct lu_tgt_desc *ldi_tgt[TGT_PTRS_PER_BLOCK];
+};
+
/* QoS data for LOD/LMV */
struct lu_qos {
struct list_head lq_svr_list; /* lu_svr_qos list */
lq_reset:1; /* zero current penalties */
};
-int lqos_add_tgt(struct lu_qos *qos, struct lu_tgt_desc *ltd);
-int lqos_del_tgt(struct lu_qos *qos, struct lu_tgt_desc *ltd);
+/*
+ * Descriptor table of all known targets, shared by LOD (OSTs) and
+ * LMV (MDTs).  Targets are reachable both through the two-level
+ * ltd_tgt_idx index (see LTD_TGT()) and through the packed
+ * ltd_tgt_pool array.
+ */
+struct lu_tgt_descs {
+	union {
+		struct lov_desc	  ltd_lov_desc;
+		struct lmv_desc	  ltd_lmv_desc;
+	};
+	/* list of known TGTs */
+	struct lu_tgt_desc_idx	*ltd_tgt_idx[TGT_PTRS];
+	/* Size of the tgt table, guaranteed to be a power of 2 */
+	__u32			ltd_tgts_size;
+	/* bitmap of TGTs available */
+	struct cfs_bitmap	*ltd_tgt_bitmap;
+	/* TGTs scheduled to be deleted */
+	__u32			ltd_death_row;
+	/* Table refcount used for delayed deletion */
+	int			ltd_refcount;
+	/* mutex to serialize concurrent updates to the tgt table */
+	struct mutex		ltd_mutex;
+	/* read/write semaphore used for array relocation */
+	struct rw_semaphore	ltd_rw_sem;
+	/* QoS */
+	struct lu_qos		ltd_qos;
+	/* all tgts in a packed array */
+	struct lu_tgt_pool	ltd_tgt_pool;
+	/* true if tgt is MDT */
+	bool			ltd_is_mdt;
+};
+
+/*
+ * Look up a tgt descriptor by index through the two-level table:
+ * the 1st level selects a lu_tgt_desc_idx block, the 2nd level the
+ * entry within it.
+ */
+#define LTD_TGT(ltd, index)						\
+	 (ltd)->ltd_tgt_idx[(index) /					\
+		TGT_PTRS_PER_BLOCK]->ldi_tgt[(index) % TGT_PTRS_PER_BLOCK]
+
+/* QoS helpers shared by LOD and LMV */
+u64 lu_prandom_u64_max(u64 ep_ro);
+void lu_qos_rr_init(struct lu_qos_rr *lqr);
+int lu_qos_add_tgt(struct lu_qos *qos, struct lu_tgt_desc *ltd);
+void lu_tgt_qos_weight_calc(struct lu_tgt_desc *tgt);
+
+/* lifecycle and membership of the tgt descriptor table */
+int lu_tgt_descs_init(struct lu_tgt_descs *ltd, bool is_mdt);
+void lu_tgt_descs_fini(struct lu_tgt_descs *ltd);
+int ltd_add_tgt(struct lu_tgt_descs *ltd, struct lu_tgt_desc *tgt);
+void ltd_del_tgt(struct lu_tgt_descs *ltd, struct lu_tgt_desc *tgt);
+bool ltd_qos_is_usable(struct lu_tgt_descs *ltd);
+int ltd_qos_penalties_calc(struct lu_tgt_descs *ltd);
+int ltd_qos_update(struct lu_tgt_descs *ltd, struct lu_tgt_desc *tgt,
+		   __u64 *total_wt);
+
+
+/**
+ * Return the first available tgt (lowest set bit in ltd_tgt_bitmap),
+ * or NULL if no bit is set.
+ */
+static inline struct lu_tgt_desc *ltd_first_tgt(struct lu_tgt_descs *ltd)
+{
+	int index;
+
+	index = find_first_bit(ltd->ltd_tgt_bitmap->data,
+			       ltd->ltd_tgt_bitmap->size);
+	return (index < ltd->ltd_tgt_bitmap->size) ? LTD_TGT(ltd, index) : NULL;
+}
+
+/**
+ * Return the available tgt following \a tgt (next set bit in
+ * ltd_tgt_bitmap after tgt's ltd_index), or NULL when \a tgt is NULL
+ * or was the last available one.
+ */
+static inline struct lu_tgt_desc *ltd_next_tgt(struct lu_tgt_descs *ltd,
+					       struct lu_tgt_desc *tgt)
+{
+	int index;
+
+	if (!tgt)
+		return NULL;
+
+	index = tgt->ltd_index;
+	LASSERT(index < ltd->ltd_tgt_bitmap->size);
+	index = find_next_bit(ltd->ltd_tgt_bitmap->data,
+			      ltd->ltd_tgt_bitmap->size, index + 1);
+	return (index < ltd->ltd_tgt_bitmap->size) ? LTD_TGT(ltd, index) : NULL;
+}
+
+/* Iterate over all available tgts in \a ltd */
+#define ltd_foreach_tgt(ltd, tgt) \
+	for (tgt = ltd_first_tgt(ltd); tgt; tgt = ltd_next_tgt(ltd, tgt))
+
+/* Deletion-safe variant: \a tmp prefetches the successor before the loop
+ * body runs, so the body may remove \a tgt from the table.
+ */
+#define ltd_foreach_tgt_safe(ltd, tgt, tmp)				  \
+	for (tgt = ltd_first_tgt(ltd), tmp = ltd_next_tgt(ltd, tgt); tgt; \
+	     tgt = tmp, tmp = ltd_next_tgt(ltd, tgt))
/** @} lu */
#endif /* __LUSTRE_LU_OBJECT_H */