#ifndef __LUSTRE_LU_OBJECT_H
#define __LUSTRE_LU_OBJECT_H
+#ifdef HAVE_LINUX_STDARG_HEADER
+#include <linux/stdarg.h>
+#else
#include <stdarg.h>
+#endif
#include <libcfs/libcfs.h>
#include <uapi/linux/lustre/lustre_idl.h>
#include <lu_ref.h>
#include <linux/percpu_counter.h>
#include <linux/rhashtable.h>
#include <linux/ctype.h>
-#include <obd_target.h>
struct seq_file;
struct proc_dir_entry;
return o->lo_header->loh_attr & S_IFMT;
}
+/*
+ * Tag a reference on the object's header using the atomic variant of
+ * lu_ref_add().  NOTE(review): presumably usable from atomic context
+ * (unlike lu_object_ref_add()) -- confirm against lu_ref_add_atomic()
+ * in lu_ref.h.
+ */
+static inline void lu_object_ref_add_atomic(struct lu_object *o,
+ const char *scope,
+ const void *source)
+{
+ lu_ref_add_atomic(&o->lo_header->loh_reference, scope, source);
+}
+
static inline void lu_object_ref_add(struct lu_object *o,
const char *scope,
const void *source)
LU_XATTR_CREATE = BIT(1),
LU_XATTR_MERGE = BIT(2),
LU_XATTR_SPLIT = BIT(3),
+ LU_XATTR_PURGE = BIT(4),
};
/** @} helpers */
return lu_device_is_cl(o->lo_dev);
}
+/* Generic subset of tgts */
+struct lu_tgt_pool {
+	__u32 *op_array; /* array of index of
+ * lov_obd->lov_tgts
+ */
+ unsigned int op_count; /* number of tgts in the array */
+ unsigned int op_size; /* allocated size of op_array */
+ struct rw_semaphore op_rw_sem; /* to protect lu_tgt_pool use */
+};
+
+/*
+ * lu_tgt_pool API.  NOTE(review): semantics below are inferred from the
+ * names and the struct fields -- confirm against the implementation:
+ * _init allocates op_array for @count entries, _add/_remove edit the
+ * index set, _extend grows op_array to at least @min_count, _free
+ * releases op_array, and _check_index tests membership of @idx.
+ */
+int lu_tgt_pool_init(struct lu_tgt_pool *op, unsigned int count);
+int lu_tgt_pool_add(struct lu_tgt_pool *op, __u32 idx, unsigned int min_count);
+int lu_tgt_pool_remove(struct lu_tgt_pool *op, __u32 idx);
+void lu_tgt_pool_free(struct lu_tgt_pool *op);
+int lu_tgt_check_index(int idx, struct lu_tgt_pool *osts);
+int lu_tgt_pool_extend(struct lu_tgt_pool *op, unsigned int min_count);
+
/* bitflags used in rr / qos allocation */
enum lq_flag {
	LQ_DIRTY	= 0, /* recalc qos data */
	LQ_SAME_SPACE,	/* the OSTs all have approx.
			 * the same space avail */
	LQ_RESET,	/* zero current penalties */
+	/* these are bit numbers, manipulated with test_bit()/set_bit()
+	 * on lq_flags / lqr_flags */
+	LQ_SF_PROGRESS,	/* statfs op in progress */
};
+#ifdef HAVE_SERVER_SUPPORT
/* round-robin QoS data for LOD/LMV */
struct lu_qos_rr {
	spinlock_t		 lqr_alloc;	/* protect allocation index */
-	__u32			 lqr_start_idx;	/* start index of new inode */
+	atomic_t		 lqr_start_idx;	/* start index of new inode */
+	/* NOTE(review): lqr_start_idx is atomic so it can presumably be
+	 * advanced without holding lqr_alloc -- confirm callers. */
	__u32			 lqr_offset_idx;/* aliasing for start_idx */
	int			 lqr_start_count;/* reseed counter */
	struct lu_tgt_pool	 lqr_pool;	/* round-robin optimized list */
	unsigned long		 lqr_flags;
};
+/*
+ * One-time setup of round-robin QoS state: initialize the allocation
+ * spinlock and set LQ_DIRTY so the qos data is recalculated before
+ * first use (LQ_DIRTY == "recalc qos data").
+ */
+static inline void lu_qos_rr_init(struct lu_qos_rr *lqr)
+{
+	spin_lock_init(&lqr->lqr_alloc);
+	set_bit(LQ_DIRTY, &lqr->lqr_flags);
+}
+
+#endif /* HAVE_SERVER_SUPPORT */
+
/* QoS data per MDS/OSS */
struct lu_svr_qos {
struct obd_uuid lsq_uuid; /* ptlrpc's c_remote_uuid */
struct list_head lsq_svr_list; /* link to lq_svr_list */
__u64 lsq_bavail; /* total bytes avail on svr */
- __u64 lsq_iavail; /* tital inode avail on svr */
+ __u64 lsq_iavail; /* total inode avail on svr */
__u64 lsq_penalty; /* current penalty */
__u64 lsq_penalty_per_obj; /* penalty decrease
* every obj*/
__u64 ltq_penalty; /* current penalty */
__u64 ltq_penalty_per_obj; /* penalty decrease
* every obj*/
+ __u64 ltq_avail; /* bytes/inode avail */
__u64 ltq_weight; /* net weighting */
time64_t ltq_used; /* last used time, seconds */
bool ltq_usable:1; /* usable for striping */
};
/* target descriptor */
+/*
+ * Default QoS tunables for LOV (OST striping) and LMV (MDT striping).
+ * NOTE(review): semantics inferred from names -- confirm against the
+ * lod/lmv qos code: *_THRESHOLD_RR_PCT is presumably the imbalance
+ * percentage below which plain round-robin is used instead of weighted
+ * QoS, and *_PRIO_FREE the relative priority given to free space.
+ */
+#define LOV_QOS_DEF_THRESHOLD_RR_PCT 17
+#define LMV_QOS_DEF_THRESHOLD_RR_PCT 5
+
+#define LOV_QOS_DEF_PRIO_FREE 90
+#define LMV_QOS_DEF_PRIO_FREE 90
+
struct lu_tgt_desc {
union {
struct dt_device *ltd_tgt;
struct lu_tgt_desc *ldi_tgt[TGT_PTRS_PER_BLOCK];
};
+
/* QoS data for LOD/LMV */
+#define QOS_THRESHOLD_MAX 256 /* should be power of two */
struct lu_qos {
struct list_head lq_svr_list; /* lu_svr_qos list */
struct rw_semaphore lq_rw_sem;
__u32 lq_active_svr_count;
unsigned int lq_prio_free; /* priority for free space */
unsigned int lq_threshold_rr;/* priority for rr */
+#ifdef HAVE_SERVER_SUPPORT
struct lu_qos_rr lq_rr; /* round robin qos data */
+#endif
unsigned long lq_flags;
#if 0
unsigned long lq_dirty:1, /* recalc qos data */
ldi_tgt[(index) % TGT_PTRS_PER_BLOCK]
u64 lu_prandom_u64_max(u64 ep_ro);
-void lu_qos_rr_init(struct lu_qos_rr *lqr);
int lu_qos_add_tgt(struct lu_qos *qos, struct lu_tgt_desc *ltd);
void lu_tgt_qos_weight_calc(struct lu_tgt_desc *tgt);
void lu_tgt_descs_fini(struct lu_tgt_descs *ltd);
int ltd_add_tgt(struct lu_tgt_descs *ltd, struct lu_tgt_desc *tgt);
void ltd_del_tgt(struct lu_tgt_descs *ltd, struct lu_tgt_desc *tgt);
-bool ltd_qos_is_usable(struct lu_tgt_descs *ltd);
int ltd_qos_penalties_calc(struct lu_tgt_descs *ltd);
int ltd_qos_update(struct lu_tgt_descs *ltd, struct lu_tgt_desc *tgt,
__u64 *total_wt);
+/**
+ * Whether MDT inode and space usages are balanced.
+ *
+ * True when the QoS data is up to date (LQ_DIRTY clear) and all
+ * targets report approximately the same available space
+ * (LQ_SAME_SPACE set).
+ */
+static inline bool ltd_qos_is_balanced(struct lu_tgt_descs *ltd)
+{
+	return !test_bit(LQ_DIRTY, &ltd->ltd_qos.lq_flags) &&
+	       test_bit(LQ_SAME_SPACE, &ltd->ltd_qos.lq_flags);
+}
+
+/**
+ * Whether QoS data is up-to-date and QoS can be applied.
+ *
+ * Weighted QoS allocation is declined when usage is already balanced
+ * (round-robin suffices) or when fewer than two targets are active
+ * (nothing to balance between).
+ */
+static inline bool ltd_qos_is_usable(struct lu_tgt_descs *ltd)
+{
+ if (ltd_qos_is_balanced(ltd))
+ return false;
+
+ if (ltd->ltd_lov_desc.ld_active_tgt_count < 2)
+ return false;
+
+ return true;
+}
+
static inline struct lu_tgt_desc *ltd_first_tgt(struct lu_tgt_descs *ltd)
{
int index;