*/
/*
* This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
*/
#ifndef __LUSTRE_LU_OBJECT_H
#define __LUSTRE_LU_OBJECT_H
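+/* When the configure check finds <linux/stdarg.h> (HAVE_LINUX_STDARG_HEADER),
+ * use it; otherwise fall back to the compiler-provided <stdarg.h>.
+ */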
+#ifdef HAVE_LINUX_STDARG_HEADER
+#include <linux/stdarg.h>
+#else
#include <stdarg.h>
+#endif
#include <libcfs/libcfs.h>
#include <uapi/linux/lustre/lustre_idl.h>
#include <lu_ref.h>
#include <linux/percpu_counter.h>
+#include <linux/rhashtable.h>
#include <linux/ctype.h>
-#include <obd_target.h>
struct seq_file;
struct proc_dir_entry;
 * initialized yet, the object allocator will initialize it.
*/
LU_OBJECT_INITED = 2,
- /**
- * Object is being purged, so mustn't be returned by
- * htable_lookup()
- */
- LU_OBJECT_PURGING = 3,
};
enum lu_object_header_attr {
* it is created for things like not-yet-existing child created by mkdir or
* create calls. lu_object_operations::loo_exists() can be used to check
* whether object is backed by persistent storage entity.
+ * Any object containing this structure which might be placed in an
+ * rhashtable via loh_hash MUST be freed using call_rcu() or kfree_rcu().
*/
struct lu_object_header {
/**
*/
__u32 loh_attr;
/**
- * Linkage into per-site hash table. Protected by lu_site::ls_guard.
+ * Linkage into per-site hash table.
*/
- struct hlist_node loh_hash;
+ struct rhash_head loh_hash;
/**
* Linkage into per-site LRU list. Protected by lu_site::ls_guard.
*/
/**
* objects hash table
*/
- struct cfs_hash *ls_obj_hash;
+ struct rhashtable ls_obj_hash;
/*
* buckets for summary data
*/
void lu_device_fini (struct lu_device *d);
int lu_object_header_init(struct lu_object_header *h);
void lu_object_header_fini(struct lu_object_header *h);
+void lu_object_header_free(struct lu_object_header *h);
int lu_object_init (struct lu_object *o,
struct lu_object_header *h, struct lu_device *d);
void lu_object_fini (struct lu_object *o);
void lu_object_add_top (struct lu_object_header *h, struct lu_object *o);
void lu_object_add (struct lu_object *before, struct lu_object *o);
-
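+/* Return the slice of @h that belongs to device @dev, with a reference
+ * held on the object (see the implementation for the exact semantics).
+ */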
+struct lu_object *lu_object_get_first(struct lu_object_header *h,
+ struct lu_device *dev);
void lu_dev_add_linkage(struct lu_site *s, struct lu_device *d);
void lu_dev_del_linkage(struct lu_site *s, struct lu_device *d);
return lu_site_purge_objects(env, s, nr, 1);
}
-void lu_site_print(const struct lu_env *env, struct lu_site *s, void *cookie,
- lu_printer_t printer);
+void lu_site_print(const struct lu_env *env, struct lu_site *s, atomic_t *ref,
+ int msg_flags, lu_printer_t printer);
struct lu_object *lu_object_find(const struct lu_env *env,
struct lu_device *dev, const struct lu_fid *f,
const struct lu_object_conf *conf);
static inline struct lu_object *lu_object_top(struct lu_object_header *h)
{
LASSERT(!list_empty(&h->loh_layers));
- return container_of0(h->loh_layers.next, struct lu_object, lo_linkage);
+ return container_of(h->loh_layers.next, struct lu_object, lo_linkage);
}
/**
*/
static inline struct lu_object *lu_object_next(const struct lu_object *o)
{
- return container_of0(o->lo_linkage.next, struct lu_object, lo_linkage);
+ return container_of(o->lo_linkage.next, struct lu_object, lo_linkage);
}
/**
return o->lo_header->loh_attr & S_IFMT;
}
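+/* Variant of lu_object_ref_add() usable in atomic context: it calls
+ * lu_ref_add_atomic() on the header reference instead of lu_ref_add().
+ */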
+static inline void lu_object_ref_add_atomic(struct lu_object *o,
+ const char *scope,
+ const void *source)
+{
+ lu_ref_add_atomic(&o->lo_header->loh_reference, scope, source);
+}
+
static inline void lu_object_ref_add(struct lu_object *o,
const char *scope,
const void *source)
LU_XATTR_CREATE = BIT(1),
LU_XATTR_MERGE = BIT(2),
LU_XATTR_SPLIT = BIT(3),
+ LU_XATTR_PURGE = BIT(4),
};
/** @} helpers */
void lu_context_key_degister(struct lu_context_key *key);
void *lu_context_key_get (const struct lu_context *ctx,
const struct lu_context_key *key);
-void lu_context_key_quiesce (struct lu_context_key *key);
-void lu_context_key_revive (struct lu_context_key *key);
+void lu_context_key_quiesce(struct lu_device_type *t,
+ struct lu_context_key *key);
+void lu_context_key_revive(struct lu_context_key *key);
/*
} \
struct __##mod##_dummy_type_start {;}
-#define LU_TYPE_STOP(mod, ...) \
- static void mod##_type_stop(struct lu_device_type *t) \
- { \
- lu_context_key_quiesce_many(__VA_ARGS__, NULL); \
- } \
- struct __##mod##_dummy_type_stop {;}
+#define LU_TYPE_STOP(mod, ...) \
+ static void mod##_type_stop(struct lu_device_type *t) \
+ { \
+ lu_context_key_quiesce_many(t, __VA_ARGS__, NULL); \
+ } \
+ struct __##mod##_dummy_type_stop { }
int lu_context_key_register_many(struct lu_context_key *k, ...);
void lu_context_key_degister_many(struct lu_context_key *k, ...);
void lu_context_key_revive_many (struct lu_context_key *k, ...);
-void lu_context_key_quiesce_many (struct lu_context_key *k, ...);
+void lu_context_key_quiesce_many(struct lu_device_type *t,
+ struct lu_context_key *k, ...);
/*
* update/clear ctx/ses tags.
return lu_device_is_cl(o->lo_dev);
}
+/* Generic subset of tgts */
+struct lu_tgt_pool {
+	__u32 *op_array;		/* array of indices of
+ * lov_obd->lov_tgts
+ */
+ unsigned int op_count; /* number of tgts in the array */
+ unsigned int op_size; /* allocated size of op_array */
+ struct rw_semaphore op_rw_sem; /* to protect lu_tgt_pool use */
+};
+
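+/* Helpers for the shared target-index array; lu_tgt_pool_extend() grows
+ * op_array so it can hold at least min_count entries.
+ */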
+int lu_tgt_pool_init(struct lu_tgt_pool *op, unsigned int count);
+int lu_tgt_pool_add(struct lu_tgt_pool *op, __u32 idx, unsigned int min_count);
+int lu_tgt_pool_remove(struct lu_tgt_pool *op, __u32 idx);
+void lu_tgt_pool_free(struct lu_tgt_pool *op);
+int lu_tgt_check_index(int idx, struct lu_tgt_pool *osts);
+int lu_tgt_pool_extend(struct lu_tgt_pool *op, unsigned int min_count);
+
+/* bitflags used in rr / qos allocation */
+enum lq_flag {
+ LQ_DIRTY = 0, /* recalc qos data */
+ LQ_SAME_SPACE, /* the OSTs all have approx.
+ * the same space avail */
+ LQ_RESET, /* zero current penalties */
+ LQ_SF_PROGRESS, /* statfs op in progress */
+};
+
+#ifdef HAVE_SERVER_SUPPORT
/* round-robin QoS data for LOD/LMV */
struct lu_qos_rr {
spinlock_t lqr_alloc; /* protect allocation index */
- __u32 lqr_start_idx; /* start index of new inode */
+ atomic_t lqr_start_idx; /* start index of new inode */
__u32 lqr_offset_idx;/* aliasing for start_idx */
int lqr_start_count;/* reseed counter */
struct lu_tgt_pool lqr_pool; /* round-robin optimized list */
- unsigned long lqr_dirty:1; /* recalc round-robin list */
+ unsigned long lqr_flags;
};
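+
+/* Reset round-robin allocation state; setting LQ_DIRTY forces the
+ * round-robin pool to be recalculated before the next allocation.
+ */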
+static inline void lu_qos_rr_init(struct lu_qos_rr *lqr)
+{
+ spin_lock_init(&lqr->lqr_alloc);
+ set_bit(LQ_DIRTY, &lqr->lqr_flags);
+}
+
+#endif /* HAVE_SERVER_SUPPORT */
+
/* QoS data per MDS/OSS */
struct lu_svr_qos {
struct obd_uuid lsq_uuid; /* ptlrpc's c_remote_uuid */
struct list_head lsq_svr_list; /* link to lq_svr_list */
__u64 lsq_bavail; /* total bytes avail on svr */
- __u64 lsq_iavail; /* tital inode avail on svr */
+ __u64 lsq_iavail; /* total inode avail on svr */
__u64 lsq_penalty; /* current penalty */
__u64 lsq_penalty_per_obj; /* penalty decrease
* every obj*/
__u64 ltq_penalty; /* current penalty */
__u64 ltq_penalty_per_obj; /* penalty decrease
* every obj*/
+ __u64 ltq_avail; /* bytes/inode avail */
__u64 ltq_weight; /* net weighting */
time64_t ltq_used; /* last used time, seconds */
bool ltq_usable:1; /* usable for striping */
};
/* target descriptor */
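+/* Default QoS tunables (percentages): lq_threshold_rr is the free-space
+ * imbalance below which allocation falls back to round-robin, and
+ * lq_prio_free is the weight given to free space in the QoS calculation.
+ */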
+#define LOV_QOS_DEF_THRESHOLD_RR_PCT 17
+#define LMV_QOS_DEF_THRESHOLD_RR_PCT 5
+
+#define LOV_QOS_DEF_PRIO_FREE 90
+#define LMV_QOS_DEF_PRIO_FREE 90
+
struct lu_tgt_desc {
union {
struct dt_device *ltd_tgt;
ltd_connecting:1; /* target is connecting */
};
-/* number of pointers at 1st level */
-#define TGT_PTRS (PAGE_SIZE / sizeof(void *))
/* number of pointers at 2nd level */
#define TGT_PTRS_PER_BLOCK (PAGE_SIZE / sizeof(void *))
+/* number of pointers at 1st level - only need as many as max OST/MDT count */
+#define TGT_PTRS ((LOV_ALL_STRIPES + 1) / TGT_PTRS_PER_BLOCK)
struct lu_tgt_desc_idx {
struct lu_tgt_desc *ldi_tgt[TGT_PTRS_PER_BLOCK];
};
+
/* QoS data for LOD/LMV */
+#define QOS_THRESHOLD_MAX 256 /* should be a power of two */
struct lu_qos {
struct list_head lq_svr_list; /* lu_svr_qos list */
struct rw_semaphore lq_rw_sem;
__u32 lq_active_svr_count;
unsigned int lq_prio_free; /* priority for free space */
unsigned int lq_threshold_rr;/* priority for rr */
+#ifdef HAVE_SERVER_SUPPORT
struct lu_qos_rr lq_rr; /* round robin qos data */
+#endif
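+	/* QoS state bits from enum lq_flag (replaces the old lq_* bitfields) */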
+ unsigned long lq_flags;
-	unsigned long	lq_dirty:1,	/* recalc qos data */
-			lq_same_space:1,/* the servers all have approx.
-					 * the same space avail */
-			lq_reset:1;	/* zero current penalties */
};
struct lu_tgt_descs {
/* Size of the lu_tgts array, granted to be a power of 2 */
__u32 ltd_tgts_size;
/* bitmap of TGTs available */
- struct cfs_bitmap *ltd_tgt_bitmap;
+ unsigned long *ltd_tgt_bitmap;
/* TGTs scheduled to be deleted */
__u32 ltd_death_row;
/* Table refcount used for delayed deletion */
};
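+/* Map a target index to its lu_tgt_desc through the two-level
+ * ltd_tgt_idx[] / ldi_tgt[] table.
+ */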
#define LTD_TGT(ltd, index) \
- (ltd)->ltd_tgt_idx[(index) / \
- TGT_PTRS_PER_BLOCK]->ldi_tgt[(index) % TGT_PTRS_PER_BLOCK]
+ (ltd)->ltd_tgt_idx[(index) / TGT_PTRS_PER_BLOCK]-> \
+ ldi_tgt[(index) % TGT_PTRS_PER_BLOCK]
u64 lu_prandom_u64_max(u64 ep_ro);
-void lu_qos_rr_init(struct lu_qos_rr *lqr);
int lu_qos_add_tgt(struct lu_qos *qos, struct lu_tgt_desc *ltd);
void lu_tgt_qos_weight_calc(struct lu_tgt_desc *tgt);
void lu_tgt_descs_fini(struct lu_tgt_descs *ltd);
int ltd_add_tgt(struct lu_tgt_descs *ltd, struct lu_tgt_desc *tgt);
void ltd_del_tgt(struct lu_tgt_descs *ltd, struct lu_tgt_desc *tgt);
-bool ltd_qos_is_usable(struct lu_tgt_descs *ltd);
int ltd_qos_penalties_calc(struct lu_tgt_descs *ltd);
int ltd_qos_update(struct lu_tgt_descs *ltd, struct lu_tgt_desc *tgt,
__u64 *total_wt);
+/**
+ * Whether MDT inode and space usages are balanced.
+ */
+static inline bool ltd_qos_is_balanced(struct lu_tgt_descs *ltd)
+{
+	return !test_bit(LQ_DIRTY, &ltd->ltd_qos.lq_flags) &&
+	       test_bit(LQ_SAME_SPACE, &ltd->ltd_qos.lq_flags);
+}
+
+/**
+ * Whether QoS data is up-to-date and QoS can be applied.
+ */
+static inline bool ltd_qos_is_usable(struct lu_tgt_descs *ltd)
+{
+ if (ltd_qos_is_balanced(ltd))
+ return false;
+
+ if (ltd->ltd_lov_desc.ld_active_tgt_count < 2)
+ return false;
+
+ return true;
+}
+
static inline struct lu_tgt_desc *ltd_first_tgt(struct lu_tgt_descs *ltd)
{
int index;
- index = find_first_bit(ltd->ltd_tgt_bitmap->data,
- ltd->ltd_tgt_bitmap->size);
- return (index < ltd->ltd_tgt_bitmap->size) ? LTD_TGT(ltd, index) : NULL;
+ index = find_first_bit(ltd->ltd_tgt_bitmap,
+ ltd->ltd_tgts_size);
+ return (index < ltd->ltd_tgts_size) ? LTD_TGT(ltd, index) : NULL;
}
static inline struct lu_tgt_desc *ltd_next_tgt(struct lu_tgt_descs *ltd,
return NULL;
index = tgt->ltd_index;
- LASSERT(index < ltd->ltd_tgt_bitmap->size);
- index = find_next_bit(ltd->ltd_tgt_bitmap->data,
- ltd->ltd_tgt_bitmap->size, index + 1);
- return (index < ltd->ltd_tgt_bitmap->size) ? LTD_TGT(ltd, index) : NULL;
+ LASSERT(index < ltd->ltd_tgts_size);
+ index = find_next_bit(ltd->ltd_tgt_bitmap,
+ ltd->ltd_tgts_size, index + 1);
+ return (index < ltd->ltd_tgts_size) ? LTD_TGT(ltd, index) : NULL;
}
#define ltd_foreach_tgt(ltd, tgt) \