struct rw_semaphore op_rw_sem; /* to protect lu_tgt_pool use */
};
-int tgt_pool_init(struct lu_tgt_pool *op, unsigned int count);
-int tgt_pool_add(struct lu_tgt_pool *op, __u32 idx, unsigned int min_count);
-int tgt_pool_remove(struct lu_tgt_pool *op, __u32 idx);
-int tgt_pool_free(struct lu_tgt_pool *op);
-int tgt_check_index(int idx, struct lu_tgt_pool *osts);
-int tgt_pool_extend(struct lu_tgt_pool *op, unsigned int min_count);
+int lu_tgt_pool_init(struct lu_tgt_pool *op, unsigned int count);
+int lu_tgt_pool_add(struct lu_tgt_pool *op, __u32 idx, unsigned int min_count);
+int lu_tgt_pool_remove(struct lu_tgt_pool *op, __u32 idx);
+int lu_tgt_pool_free(struct lu_tgt_pool *op);
+int lu_tgt_check_index(int idx, struct lu_tgt_pool *osts);
+int lu_tgt_pool_extend(struct lu_tgt_pool *op, unsigned int min_count);
/* bitflags used in rr / qos allocation */
enum lq_flag {
struct obd_uuid lsq_uuid; /* ptlrpc's c_remote_uuid */
struct list_head lsq_svr_list; /* link to lq_svr_list */
__u64 lsq_bavail; /* total bytes avail on svr */
- __u64 lsq_iavail; /* tital inode avail on svr */
+ __u64 lsq_iavail; /* total inode avail on svr */
__u64 lsq_penalty; /* current penalty */
__u64 lsq_penalty_per_obj; /* penalty decrease
					     * every obj */
__u64 ltq_penalty; /* current penalty */
__u64 ltq_penalty_per_obj; /* penalty decrease
					     * every obj */
+ __u64 ltq_avail; /* bytes/inode avail */
__u64 ltq_weight; /* net weighting */
time64_t ltq_used; /* last used time, seconds */
bool ltq_usable:1; /* usable for striping */