#ifndef __LUSTRE_LU_OBJECT_H
#define __LUSTRE_LU_OBJECT_H
+#ifdef HAVE_LINUX_STDARG_HEADER
+#include <linux/stdarg.h>
+#else
#include <stdarg.h>
+#endif
#include <libcfs/libcfs.h>
#include <uapi/linux/lustre/lustre_idl.h>
#include <lu_ref.h>
return o->lo_header->loh_attr & S_IFMT;
}
+/*
+ * Add a tracked reference on the object's header via the atomic variant
+ * of lu_ref_add(); counterpart to lu_object_ref_add() below.
+ * NOTE(review): presumably safe to call from contexts where the plain
+ * lu_ref_add() is not (e.g. without sleeping) — confirm against lu_ref.h.
+ */
+static inline void lu_object_ref_add_atomic(struct lu_object *o,
+ const char *scope,
+ const void *source)
+{
+ lu_ref_add_atomic(&o->lo_header->loh_reference, scope, source);
+}
+
static inline void lu_object_ref_add(struct lu_object *o,
const char *scope,
const void *source)
struct rw_semaphore op_rw_sem; /* to protect lu_tgt_pool use */
};
-int tgt_pool_init(struct lu_tgt_pool *op, unsigned int count);
-int tgt_pool_add(struct lu_tgt_pool *op, __u32 idx, unsigned int min_count);
-int tgt_pool_remove(struct lu_tgt_pool *op, __u32 idx);
-int tgt_pool_free(struct lu_tgt_pool *op);
-int tgt_check_index(int idx, struct lu_tgt_pool *osts);
-int tgt_pool_extend(struct lu_tgt_pool *op, unsigned int min_count);
+int lu_tgt_pool_init(struct lu_tgt_pool *op, unsigned int count);
+int lu_tgt_pool_add(struct lu_tgt_pool *op, __u32 idx, unsigned int min_count);
+int lu_tgt_pool_remove(struct lu_tgt_pool *op, __u32 idx);
+void lu_tgt_pool_free(struct lu_tgt_pool *op);
+int lu_tgt_check_index(int idx, struct lu_tgt_pool *osts);
+int lu_tgt_pool_extend(struct lu_tgt_pool *op, unsigned int min_count);
/* bitflags used in rr / qos allocation */
enum lq_flag {
LQ_SAME_SPACE, /* the OSTs all have approx.
* the same space avail */
LQ_RESET, /* zero current penalties */
+ LQ_SF_PROGRESS, /* statfs op in progress */
};
#ifdef HAVE_SERVER_SUPPORT
/* round-robin QoS data for LOD/LMV */
struct lu_qos_rr {
spinlock_t lqr_alloc; /* protect allocation index */
- __u32 lqr_start_idx; /* start index of new inode */
+ atomic_t lqr_start_idx; /* start index of new inode */
__u32 lqr_offset_idx;/* aliasing for start_idx */
int lqr_start_count;/* reseed counter */
struct lu_tgt_pool lqr_pool; /* round-robin optimized list */
struct obd_uuid lsq_uuid; /* ptlrpc's c_remote_uuid */
struct list_head lsq_svr_list; /* link to lq_svr_list */
__u64 lsq_bavail; /* total bytes avail on svr */
- __u64 lsq_iavail; /* tital inode avail on svr */
+ __u64 lsq_iavail; /* total inode avail on svr */
__u64 lsq_penalty; /* current penalty */
__u64 lsq_penalty_per_obj; /* penalty decrease
* every obj*/
__u64 ltq_penalty; /* current penalty */
__u64 ltq_penalty_per_obj; /* penalty decrease
* every obj*/
+ __u64 ltq_avail; /* bytes/inode avail */
__u64 ltq_weight; /* net weighting */
time64_t ltq_used; /* last used time, seconds */
bool ltq_usable:1; /* usable for striping */
struct lu_tgt_desc *ldi_tgt[TGT_PTRS_PER_BLOCK];
};
+
/* QoS data for LOD/LMV */
+#define QOS_THRESHOLD_MAX 256 /* should be power of two */
struct lu_qos {
struct list_head lq_svr_list; /* lu_svr_qos list */
struct rw_semaphore lq_rw_sem;