struct fld;
-struct lu_site_bkt_data {
- /**
- * number of object in this bucket on the lsb_lru list.
- */
- long lsb_lru_len;
- /**
- * LRU list, updated on each access to object. Protected by
- * bucket lock of lu_site::ls_obj_hash.
- *
- * "Cold" end of LRU is lu_site::ls_lru.next. Accessed object are
- * moved to the lu_site::ls_lru.prev (this is due to the non-existence
- * of list_for_each_entry_safe_reverse()).
- */
- struct list_head lsb_lru;
- /**
- * Wait-queue signaled when an object in this site is ultimately
- * destroyed (lu_object_free()). It is used by lu_object_find() to
- * wait before re-trying when object in the process of destruction is
- * found in the hash table.
- *
- * \see htable_lookup().
- */
- wait_queue_head_t lsb_marche_funebre;
-};
-
enum {
LU_SS_CREATED = 0,
LU_SS_CACHE_HIT,
struct percpu_counter ls_lru_len_counter;
};
-static inline struct lu_site_bkt_data *
-lu_site_bkt_from_fid(struct lu_site *site, struct lu_fid *fid)
-{
- struct cfs_hash_bd bd;
-
- cfs_hash_bd_get(site->ls_obj_hash, fid, &bd);
- return cfs_hash_bd_extra_get(site->ls_obj_hash, &bd);
-}
+wait_queue_head_t *
+lu_site_wq_from_fid(struct lu_site *site, struct lu_fid *fid);
static inline struct seq_server_site *lu_site2seq(const struct lu_site *s)
{
LU_XATTR_REPLACE = (1 << 0),
LU_XATTR_CREATE = (1 << 1),
LU_XATTR_MERGE = (1 << 2),
+ LU_XATTR_SPLIT = (1 << 3),
};
/** @} helpers */
enum lu_context_state {
LCS_INITIALIZED = 1,
LCS_ENTERED,
+ LCS_LEAVING,
LCS_LEFT,
LCS_FINALIZED
};
int lu_env_refill(struct lu_env *env);
int lu_env_refill_by_tags(struct lu_env *env, __u32 ctags, __u32 stags);
+struct lu_env *lu_env_find(void);
+int lu_env_add(struct lu_env *env);
+void lu_env_remove(struct lu_env *env);
+
/** @} lu_context */
/**
int ln_namelen;
};
+/* Return true iff the first namelen bytes of name are "." or "..". */
+static inline bool name_is_dot_or_dotdot(const char *name, int namelen)
+{
+ return name[0] == '.' &&
+ (namelen == 1 || (namelen == 2 && name[1] == '.'));
+}
+
+/* struct lu_name wrapper around name_is_dot_or_dotdot(). */
+static inline bool lu_name_is_dot_or_dotdot(const struct lu_name *lname)
+{
+ return name_is_dot_or_dotdot(lname->ln_name, lname->ln_namelen);
+}
+
+/*
+ * Check that (name, name_len) is a sane path component: non-NULL,
+ * non-empty, length below INT_MAX, NUL-terminated exactly at name_len
+ * (implied by the strlen() check) and containing no '/' separators.
+ */
+static inline bool lu_name_is_valid_len(const char *name, size_t name_len)
+{
+ return name != NULL &&
+ name_len > 0 &&
+ name_len < INT_MAX &&
+ strlen(name) == name_len &&
+ memchr(name, '/', name_len) == NULL;
+}
+
/**
* Validate names (path components)
*
*/
static inline bool lu_name_is_valid_2(const char *name, size_t name_len)
{
- return name != NULL &&
- name_len > 0 &&
- name_len < INT_MAX &&
- name[name_len] == '\0' &&
- strlen(name) == name_len &&
- memchr(name, '/', name_len) == NULL;
+ /* Delegate the common checks; keep an explicit NUL check at name_len. */
+ return lu_name_is_valid_len(name, name_len) && name[name_len] == '\0';
}
static inline bool lu_name_is_valid(const struct lu_name *ln)
return lu_device_is_cl(o->lo_dev);
}
+/* Generic subset of OSTs */
+struct ost_pool {
+ __u32 *op_array; /* array of indices into
+ * lov_obd->lov_tgts */
+ unsigned int op_count; /* number of OSTs in the array */
+ unsigned int op_size; /* allocated size of op_array */
+ struct rw_semaphore op_rw_sem; /* to protect ost_pool use */
+};
+
+/* round-robin QoS data for LOD/LMV */
+struct lu_qos_rr {
+ spinlock_t lqr_alloc; /* protect allocation index */
+ __u32 lqr_start_idx; /* start index of new inode */
+ __u32 lqr_offset_idx; /* aliasing for start_idx */
+ int lqr_start_count; /* reseed counter */
+ struct ost_pool lqr_pool; /* round-robin optimized list */
+ unsigned long lqr_dirty:1; /* recalc round-robin list */
+};
+
+/* QoS data per MDS/OSS */
+struct lu_svr_qos {
+ struct obd_uuid lsq_uuid; /* ptlrpc's c_remote_uuid */
+ struct list_head lsq_svr_list; /* link to lq_svr_list */
+ __u64 lsq_bavail; /* total bytes avail on svr */
+ __u64 lsq_iavail; /* total inodes avail on svr */
+ __u64 lsq_penalty; /* current penalty */
+ __u64 lsq_penalty_per_obj; /* penalty decrease
+ * every obj */
+ time64_t lsq_used; /* last used time, seconds */
+ __u32 lsq_tgt_count; /* number of tgts on this svr */
+ __u32 lsq_id; /* unique svr id */
+};
+
+/* QoS data per MDT/OST */
+struct lu_tgt_qos {
+ struct lu_svr_qos *ltq_svr; /* svr info */
+ __u64 ltq_penalty; /* current penalty */
+ __u64 ltq_penalty_per_obj; /* penalty decrease
+ * every obj */
+ __u64 ltq_weight; /* net weighting */
+ time64_t ltq_used; /* last used time, seconds */
+ bool ltq_usable:1; /* usable for striping */
+};
+
+/* target descriptor */
+struct lu_tgt_desc {
+ union {
+ /* same target device, viewed as dt or obd (anonymous union) */
+ struct dt_device *ltd_tgt;
+ struct obd_device *ltd_obd;
+ };
+ struct obd_export *ltd_exp;
+ struct obd_uuid ltd_uuid;
+ __u32 ltd_index;
+ __u32 ltd_gen;
+ struct list_head ltd_kill;
+ struct ptlrpc_thread *ltd_recovery_thread;
+ struct mutex ltd_fid_mutex;
+ struct lu_tgt_qos ltd_qos; /* qos info per target */
+ struct obd_statfs ltd_statfs;
+ time64_t ltd_statfs_age;
+ unsigned long ltd_active:1, /* is this target up for requests */
+ ltd_activate:1, /* should target be activated */
+ ltd_reap:1, /* should this target be deleted */
+ ltd_got_update_log:1, /* already got update log */
+ ltd_connecting:1; /* target is connecting */
+};
+
+/* QoS data for LOD/LMV */
+struct lu_qos {
+ struct list_head lq_svr_list; /* lu_svr_qos list */
+ struct rw_semaphore lq_rw_sem;
+ __u32 lq_active_svr_count;
+ unsigned int lq_prio_free; /* priority for free space */
+ unsigned int lq_threshold_rr; /* priority for rr */
+ struct lu_qos_rr lq_rr; /* round robin qos data */
+ unsigned long lq_dirty:1, /* recalc qos data */
+ lq_same_space:1, /* the servers all have approx.
+ * the same space avail */
+ lq_reset:1; /* zero current penalties */
+};
+
+void lu_qos_rr_init(struct lu_qos_rr *lqr);
+int lqos_add_tgt(struct lu_qos *qos, struct lu_tgt_desc *ltd);
+int lqos_del_tgt(struct lu_qos *qos, struct lu_tgt_desc *ltd);
+u64 lu_prandom_u64_max(u64 ep_ro);
+
/** @} lu */
#endif /* __LUSTRE_LU_OBJECT_H */