*/
/*
* This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
*/
/*
* lustre/lod/lod_pool.c
#include <libcfs/libcfs.h>
#include <libcfs/linux/linux-hash.h>
+#include <libcfs/linux/linux-fs.h>
#include <obd.h>
#include "lod_internal.h"
if (atomic_dec_and_test(&pool->pool_refcount)) {
LASSERT(list_empty(&pool->pool_list));
LASSERT(pool->pool_proc_entry == NULL);
- lod_tgt_pool_free(&(pool->pool_rr.lqr_pool));
- lod_tgt_pool_free(&(pool->pool_obds));
+ lu_tgt_pool_free(&(pool->pool_rr.lqr_pool));
+ lu_tgt_pool_free(&(pool->pool_obds));
kfree_rcu(pool, pool_rcu);
EXIT;
}
LASSERTF(iter->lpi_magic == POOL_IT_MAGIC, "%08X\n", iter->lpi_magic);
+ (*pos)++;
/* test if end of file */
- if (*pos >= pool_tgt_count(iter->lpi_pool))
+ if (*pos > pool_tgt_count(iter->lpi_pool))
return NULL;
- OBD_FAIL_TIMEOUT(OBD_FAIL_OST_LIST_ASSERT, cfs_fail_val);
+ CFS_FAIL_TIMEOUT(OBD_FAIL_OST_LIST_ASSERT, cfs_fail_val);
/* iterate to find a non empty entry */
prev_idx = iter->lpi_idx;
iter->lpi_idx = prev_idx; /* we stay on the last entry */
return NULL;
}
- (*pos)++;
+
/* return != NULL to continue */
return iter;
}
rc = seq_open(file, &pool_proc_ops);
if (!rc) {
struct seq_file *seq = file->private_data;
- seq->private = PDE_DATA(inode);
+ seq->private = pde_data(inode);
}
return rc;
}
-static struct file_operations pool_proc_operations = {
- .open = pool_proc_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = seq_release,
+/*
+ * seq_file hooks for the per-pool /proc entry; kernels >= 5.6 take a
+ * struct proc_ops here instead of struct file_operations.
+ * Note: storage-class specifier must precede the qualifier ("static const",
+ * not "const static") per C11 6.11.5 / kernel checkpatch.
+ */
+static const struct proc_ops pool_proc_operations = {
+ .proc_open = pool_proc_open,
+ .proc_read = seq_read,
+ .proc_lseek = seq_lseek,
+ .proc_release = seq_release,
};
/**
lod_pool_putref(pool);
}
-/**
- * Initialize the pool data structures at startup.
- *
- * Allocate and initialize the pool data structures with the specified
- * array size. If pool count is not specified (\a count == 0), then
- * POOL_INIT_COUNT will be used. Allocating a non-zero initial array
- * size avoids the need to reallocate as new pools are added.
- *
- * \param[in] op pool structure
- * \param[in] count initial size of the target op_array[] array
- *
- * \retval 0 indicates successful pool initialization
- * \retval negative error number on failure
- */
-#define POOL_INIT_COUNT 2
-int lod_tgt_pool_init(struct lu_tgt_pool *op, unsigned int count)
+/*
+ * rhashtable free_fn callback: drop the hash table's reference on a
+ * pool while the table is torn down.
+ * NOTE(review): @data is unused here; the parameter exists to match the
+ * rhashtable_free_and_destroy() callback signature.
+ */
+static void pools_hash_exit(void *vpool, void *data)
{
- ENTRY;
+ struct pool_desc *pool = vpool;
- if (count == 0)
- count = POOL_INIT_COUNT;
- op->op_array = NULL;
- op->op_count = 0;
- init_rwsem(&op->op_rw_sem);
- op->op_size = count * sizeof(op->op_array[0]);
- OBD_ALLOC(op->op_array, op->op_size);
- if (op->op_array == NULL) {
- op->op_size = 0;
- RETURN(-ENOMEM);
- }
- EXIT;
- return 0;
+ lod_pool_putref(pool);
}
-/**
- * Increase the op_array size to hold more targets in this pool.
- *
- * The size is increased to at least \a min_count, but may be larger
- * for an existing pool since ->op_array[] is growing exponentially.
- * Caller must hold write op_rwlock.
- *
- * \param[in] op pool structure
- * \param[in] min_count minimum number of entries to handle
- *
- * \retval 0 on success
- * \retval negative error number on failure.
- */
-int lod_tgt_pool_extend(struct lu_tgt_pool *op, unsigned int min_count)
+/*
+ * Initialize the device's pool-name -> pool_desc rhashtable.
+ * Returns 0 on success or the negative errno from rhashtable_init().
+ */
+int lod_pool_hash_init(struct rhashtable *tbl)
{
- __u32 *new;
- __u32 new_size;
-
- LASSERT(min_count != 0);
-
- if (op->op_count * sizeof(op->op_array[0]) < op->op_size)
- return 0;
-
- new_size = max_t(__u32, min_count * sizeof(op->op_array[0]),
- 2 * op->op_size);
- OBD_ALLOC(new, new_size);
- if (new == NULL)
- return -ENOMEM;
-
- /* copy old array to new one */
- memcpy(new, op->op_array, op->op_size);
- OBD_FREE(op->op_array, op->op_size);
- op->op_array = new;
- op->op_size = new_size;
-
- return 0;
+ return rhashtable_init(tbl, &pools_hash_params);
}
-/**
- * Add a new target to an existing pool.
- *
- * Add a new target device to the pool previously created and returned by
- * lod_pool_new(). Each target can only be in each pool at most one time.
- *
- * \param[in] op target pool to add new entry
- * \param[in] idx pool index number to add to the \a op array
- * \param[in] min_count minimum number of entries to expect in the pool
- *
- * \retval 0 if target could be added to the pool
- * \retval negative error if target \a idx was not added
- */
-int lod_tgt_pool_add(struct lu_tgt_pool *op, __u32 idx, unsigned int min_count)
+/*
+ * Tear down the pool hash table; pools_hash_exit() is invoked for every
+ * remaining entry and drops the table's reference on each pool.
+ */
+void lod_pool_hash_destroy(struct rhashtable *tbl)
{
- unsigned int i;
- int rc = 0;
- ENTRY;
-
- down_write(&op->op_rw_sem);
-
- rc = lod_tgt_pool_extend(op, min_count);
- if (rc)
- GOTO(out, rc);
-
- /* search ost in pool array */
- for (i = 0; i < op->op_count; i++) {
- if (op->op_array[i] == idx)
- GOTO(out, rc = -EEXIST);
- }
- /* ost not found we add it */
- op->op_array[op->op_count] = idx;
- op->op_count++;
- EXIT;
-out:
- up_write(&op->op_rw_sem);
- return rc;
+ rhashtable_free_and_destroy(tbl, pools_hash_exit, NULL);
}
-/**
- * Remove an existing pool from the system.
- *
- * The specified pool must have previously been allocated by
- * lod_pool_new() and not have any target members in the pool.
- * If the removed target is not the last, compact the array
- * to remove empty spaces.
- *
- * \param[in] op pointer to the original data structure
- * \param[in] idx target index to be removed
- *
- * \retval 0 on success
- * \retval negative error number on failure
- */
-int lod_tgt_pool_remove(struct lu_tgt_pool *op, __u32 idx)
+/*
+ * Return true iff a pool named @poolname exists on @lod.
+ * No reference is taken: the answer is only a point-in-time snapshot,
+ * valid under RCU during the lookup.
+ */
+bool lod_pool_exists(struct lod_device *lod, char *poolname)
{
- unsigned int i;
- ENTRY;
-
- down_write(&op->op_rw_sem);
-
- for (i = 0; i < op->op_count; i++) {
- if (op->op_array[i] == idx) {
- memmove(&op->op_array[i], &op->op_array[i + 1],
- (op->op_count - i - 1) *
- sizeof(op->op_array[0]));
- op->op_count--;
- up_write(&op->op_rw_sem);
- EXIT;
- return 0;
- }
- }
+ struct pool_desc *pool;
- up_write(&op->op_rw_sem);
- RETURN(-EINVAL);
+ /* RCU-protected lookup; pool is only tested, never dereferenced later */
+ rcu_read_lock();
+ pool = rhashtable_lookup(&lod->lod_pools_hash_body,
+ poolname,
+ pools_hash_params);
+ rcu_read_unlock();
+ return pool != NULL;
}
-/**
- * Free the pool after it was emptied and removed from /proc.
- *
- * Note that all of the child/target entries referenced by this pool
- * must have been removed by lod_ost_pool_remove() before it can be
- * deleted from memory.
- *
- * \param[in] op pool to be freed.
- *
- * \retval 0 on success or if pool was already freed
- */
-int lod_tgt_pool_free(struct lu_tgt_pool *op)
+/*
+ * Find a pool by name and take a reference on it.
+ *
+ * Returns the pool with pool_refcount elevated, or NULL when the pool
+ * does not exist or is already being freed (refcount reached zero).
+ * The caller must drop the reference with lod_pool_putref().
+ */
+struct pool_desc *lod_pool_find(struct lod_device *lod, char *poolname)
{
- ENTRY;
-
- if (op->op_size == 0)
- RETURN(0);
-
- down_write(&op->op_rw_sem);
-
- OBD_FREE(op->op_array, op->op_size);
- op->op_array = NULL;
- op->op_count = 0;
- op->op_size = 0;
+ struct pool_desc *pool;
- up_write(&op->op_rw_sem);
- RETURN(0);
+ rcu_read_lock();
+ pool = rhashtable_lookup(&lod->lod_pools_hash_body,
+ poolname,
+ pools_hash_params);
+ /* refuse a ref on a pool whose refcount already dropped to zero */
+ if (pool && !atomic_inc_not_zero(&pool->pool_refcount))
+ pool = NULL;
+ rcu_read_unlock();
+ return pool;
}
-static void pools_hash_exit(void *vpool, void *data)
+/* debugfs qos_ost_weights: show the QoS weights of this pool's OSTs. */
+static int lod_ost_pool_weights_seq_show(struct seq_file *m, void *data)
{
- struct pool_desc *pool = vpool;
+ struct pool_desc *pool = m->private;
+ struct lod_device *lod = lu2lod_dev(pool->pool_lobd->obd_lu_dev);
- lod_pool_putref(pool);
+ return lod_tgt_weights_seq_show(m, lod, &pool->pool_obds, false);
}
-int lod_pool_hash_init(struct rhashtable *tbl)
+/* debugfs qos_ost_weights: write handler, delegates to the shared helper. */
+static ssize_t
+lod_ost_pool_weights_seq_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *off)
{
- return rhashtable_init(tbl, &pools_hash_params);
-}
+ struct seq_file *m = file->private_data;
+ struct pool_desc *pool = m->private;
+ struct lod_device *lod = lu2lod_dev(pool->pool_lobd->obd_lu_dev);
-void lod_pool_hash_destroy(struct rhashtable *tbl)
-{
- rhashtable_free_and_destroy(tbl, pools_hash_exit, NULL);
+ return lod_tgt_weights_seq_write(m, buf, count, lod, &pool->pool_obds,
+ false);
}
+LDEBUGFS_SEQ_FOPS(lod_ost_pool_weights);
+
+/* per-pool debugfs files, registered under the pool's debugfs directory */
+static struct ldebugfs_vars ldebugfs_lod_pool_vars[] = {
+ { .name = "qos_ost_weights",
+ .fops = &lod_ost_pool_weights_fops,
+ .proc_mode = 0444 },
+ { 0 }
+};
/**
* Allocate a new pool for the specified device.
RETURN(-ENAMETOOLONG);
/* OBD_ALLOC_* doesn't work with direct kfree_rcu use */
- new_pool = kmalloc(sizeof(*new_pool), GFP_KERNEL);
+ new_pool = kmalloc(sizeof(*new_pool), __GFP_ZERO);
if (new_pool == NULL)
RETURN(-ENOMEM);
strlcpy(new_pool->pool_name, poolname, sizeof(new_pool->pool_name));
+ new_pool->pool_spill_target[0] = '\0';
+ atomic_set(&new_pool->pool_spill_hit, 0);
new_pool->pool_lobd = obd;
atomic_set(&new_pool->pool_refcount, 1);
- rc = lod_tgt_pool_init(&new_pool->pool_obds, 0);
+ rc = lu_tgt_pool_init(&new_pool->pool_obds, 0);
if (rc)
- GOTO(out_err, rc);
+ GOTO(out_free_pool, rc);
lu_qos_rr_init(&new_pool->pool_rr);
- rc = lod_tgt_pool_init(&new_pool->pool_rr.lqr_pool, 0);
+ rc = lu_tgt_pool_init(&new_pool->pool_rr.lqr_pool, 0);
if (rc)
GOTO(out_free_pool_obds, rc);
new_pool->pool_proc_entry = NULL;
lod_pool_putref(new_pool);
}
+
+ pool_getref(new_pool);
+ new_pool->pool_spill_proc_entry =
+ lprocfs_register(poolname, lod->lod_spill_proc_entry,
+ lprocfs_lod_spill_vars, new_pool);
+ if (IS_ERR(new_pool->pool_spill_proc_entry)) {
+ rc = PTR_ERR(new_pool->pool_spill_proc_entry);
+ new_pool->pool_proc_entry = NULL;
+ lod_pool_putref(new_pool);
+ }
+
CDEBUG(D_INFO, "pool %p - proc %p\n", new_pool,
new_pool->pool_proc_entry);
#endif
GOTO(out_err, rc);
}
+ new_pool->pool_debugfs = debugfs_create_dir(poolname,
+ lod->lod_pool_debugfs);
+ ldebugfs_add_vars(new_pool->pool_debugfs, ldebugfs_lod_pool_vars,
+ new_pool);
+
CDEBUG(D_CONFIG, LOV_POOLNAMEF" is pool #%d\n",
poolname, lod->lod_pool_count);
lod->lod_pool_count--;
spin_unlock(&obd->obd_dev_lock);
+ lprocfs_remove(&new_pool->pool_spill_proc_entry);
lprocfs_remove(&new_pool->pool_proc_entry);
- lod_tgt_pool_free(&new_pool->pool_rr.lqr_pool);
+ lu_tgt_pool_free(&new_pool->pool_rr.lqr_pool);
out_free_pool_obds:
- lod_tgt_pool_free(&new_pool->pool_obds);
+ lu_tgt_pool_free(&new_pool->pool_obds);
+out_free_pool:
OBD_FREE_PTR(new_pool);
return rc;
}
if (!pool)
RETURN(-ENOENT);
+ debugfs_remove_recursive(pool->pool_debugfs);
+
if (pool->pool_proc_entry != NULL) {
CDEBUG(D_INFO, "proc entry %p\n", pool->pool_proc_entry);
lprocfs_remove(&pool->pool_proc_entry);
lod_pool_putref(pool);
}
+ if (pool->pool_spill_proc_entry != NULL) {
+ CDEBUG(D_INFO, "proc entry %p\n", pool->pool_spill_proc_entry);
+ lprocfs_remove(&pool->pool_spill_proc_entry);
+ lod_pool_putref(pool);
+ }
spin_lock(&obd->obd_dev_lock);
list_del_init(&pool->pool_list);
int rc = -EINVAL;
ENTRY;
- rcu_read_lock();
- pool = rhashtable_lookup(&lod->lod_pools_hash_body, poolname,
- pools_hash_params);
- if (pool && !atomic_inc_not_zero(&pool->pool_refcount))
- pool = NULL;
- rcu_read_unlock();
+ pool = lod_pool_find(lod, poolname);
if (!pool)
RETURN(-ENOENT);
if (rc)
GOTO(out, rc);
- rc = lod_tgt_pool_add(&pool->pool_obds, tgt->ltd_index,
- lod->lod_ost_count);
+ rc = lu_tgt_pool_add(&pool->pool_obds, tgt->ltd_index,
+ lod->lod_ost_count);
if (rc)
GOTO(out, rc);
- pool->pool_rr.lqr_dirty = 1;
+ set_bit(LQ_DIRTY, &pool->pool_rr.lqr_flags);
CDEBUG(D_CONFIG, "Added %s to "LOV_POOLNAMEF" as member %d\n",
ostname, poolname, pool_tgt_count(pool));
ENTRY;
/* lookup and kill hash reference */
- rcu_read_lock();
- pool = rhashtable_lookup(&lod->lod_pools_hash_body, poolname,
- pools_hash_params);
- if (pool && !atomic_inc_not_zero(&pool->pool_refcount))
- pool = NULL;
- rcu_read_unlock();
+ pool = lod_pool_find(lod, poolname);
if (!pool)
RETURN(-ENOENT);
if (rc)
GOTO(out, rc);
- lod_tgt_pool_remove(&pool->pool_obds, ost->ltd_index);
- pool->pool_rr.lqr_dirty = 1;
+ lu_tgt_pool_remove(&pool->pool_obds, ost->ltd_index);
+ set_bit(LQ_DIRTY, &pool->pool_rr.lqr_flags);
CDEBUG(D_CONFIG, "%s removed from "LOV_POOLNAMEF"\n", ostname,
poolname);
*/
int lod_check_index_in_pool(__u32 idx, struct pool_desc *pool)
{
- unsigned int i;
int rc;
- ENTRY;
pool_getref(pool);
-
- down_read(&pool_tgt_rw_sem(pool));
-
- for (i = 0; i < pool_tgt_count(pool); i++) {
- if (pool_tgt_array(pool)[i] == idx)
- GOTO(out, rc = 0);
- }
- rc = -ENOENT;
- EXIT;
-out:
- up_read(&pool_tgt_rw_sem(pool));
-
+ /* pool ref held across the check so the target array cannot be freed */
+ rc = lu_tgt_check_index(idx, &pool->pool_obds);
lod_pool_putref(pool);
return rc;
}
{
struct pool_desc *pool;
- pool = NULL;
- if (poolname[0] != '\0') {
- rcu_read_lock();
- pool = rhashtable_lookup(&lod->lod_pools_hash_body, poolname,
- pools_hash_params);
- if (pool && !atomic_inc_not_zero(&pool->pool_refcount))
- pool = NULL;
- rcu_read_unlock();
- if (!pool)
- CDEBUG(D_CONFIG,
- "%s: request for an unknown pool (" LOV_POOLNAMEF ")\n",
- lod->lod_child_exp->exp_obd->obd_name, poolname);
- if (pool != NULL && pool_tgt_count(pool) == 0) {
- CDEBUG(D_CONFIG, "%s: request for an empty pool ("
- LOV_POOLNAMEF")\n",
- lod->lod_child_exp->exp_obd->obd_name, poolname);
- /* pool is ignored, so we remove ref on it */
- lod_pool_putref(pool);
- pool = NULL;
- }
+ if (poolname[0] == '\0' || lov_pool_is_reserved(poolname))
+ return NULL;
+
+ pool = lod_pool_find(lod, poolname);
+ if (!pool)
+ CDEBUG(D_CONFIG,
+ "%s: request for an unknown pool (" LOV_POOLNAMEF ")\n",
+ lod->lod_child_exp->exp_obd->obd_name, poolname);
+ if (pool != NULL && pool_tgt_count(pool) == 0) {
+ CDEBUG(D_CONFIG, "%s: request for an empty pool ("
+ LOV_POOLNAMEF")\n",
+ lod->lod_child_exp->exp_obd->obd_name, poolname);
+ /* pool is ignored, so we remove ref on it */
+ lod_pool_putref(pool);
+ pool = NULL;
}
+
return pool;
}
+/*
+ * Refresh the cached "pool is over its spill threshold" state.
+ *
+ * Sums available and total bytes over the pool's active OSTs and sets
+ * pool_spill_is_active when used space reaches pool_spill_threshold_pct
+ * of the total.  The result is cached until pool_spill_expire (qos maxage
+ * seconds ahead) so the allocation path stays cheap.
+ */
+void lod_spill_target_refresh(const struct lu_env *env, struct lod_device *lod,
+ struct pool_desc *pool)
+{
+ __u64 avail_bytes = 0, total_bytes = 0;
+ struct lu_tgt_pool *osts;
+ int i;
+
+ /* cached verdict still fresh? */
+ if (ktime_get_seconds() < pool->pool_spill_expire)
+ return;
+
+ /* threshold 0 means spilling is disabled for this pool */
+ if (pool->pool_spill_threshold_pct == 0)
+ return;
+
+ lod_qos_statfs_update(env, lod, &lod->lod_ost_descs);
+
+ down_write(&pool_tgt_rw_sem(pool));
+ /* re-check under the lock: a racing thread may have just refreshed */
+ if (ktime_get_seconds() < pool->pool_spill_expire)
+ goto out_sem;
+ pool->pool_spill_expire = ktime_get_seconds() +
+ lod->lod_ost_descs.ltd_lov_desc.ld_qos_maxage;
+
+ osts = &(pool->pool_obds);
+ for (i = 0; i < osts->op_count; i++) {
+ int idx = osts->op_array[i];
+ struct lod_tgt_desc *tgt;
+ struct obd_statfs *sfs;
+
+ /* skip targets that were removed or are currently inactive */
+ if (!test_bit(idx, lod->lod_ost_bitmap))
+ continue;
+ tgt = OST_TGT(lod, idx);
+ if (!tgt->ltd_active)
+ continue;
+ sfs = &tgt->ltd_statfs;
+
+ avail_bytes += sfs->os_bavail * sfs->os_bsize;
+ total_bytes += sfs->os_blocks * sfs->os_bsize;
+ }
+ /* used >= threshold% of total -> redirect new allocations */
+ if (total_bytes - avail_bytes >=
+ total_bytes * pool->pool_spill_threshold_pct / 100)
+ pool->pool_spill_is_active = true;
+ else
+ pool->pool_spill_is_active = false;
+
+out_sem:
+ up_write(&pool_tgt_rw_sem(pool));
+}
+
+/* max spill-target hops to follow before giving up (guards against cycles) */
+#define LOD_SPILL_MAX_HOPS 16
+
+/*
+ * Transparently redirect an allocation away from an over-full pool.
+ *
+ * If the pool named in *poolname has spilling configured and is over its
+ * threshold (see lod_spill_target_refresh()), rewrite *poolname to the
+ * pool's spill target and account the hit.  Spill targets may chain, so
+ * the walk repeats; a hop counter bounds it so that a misconfigured
+ * cycle (e.g. A -> B -> A with both pools full) cannot loop forever.
+ */
+void lod_check_and_spill_pool(const struct lu_env *env, struct lod_device *lod,
+ char **poolname)
+{
+ struct pool_desc *pool;
+ int hops = 0;
+
+ if (!poolname || !*poolname || (*poolname)[0] == '\0')
+ return;
+repeat:
+ pool = lod_pool_find(lod, *poolname);
+ if (!pool)
+ return;
+
+ lod_spill_target_refresh(env, lod, pool);
+ if (pool->pool_spill_is_active) {
+ if (++hops > LOD_SPILL_MAX_HOPS) {
+ CDEBUG(D_CONFIG,
+ "%s: spill chain at "LOV_POOLNAMEF" exceeds %d hops, possible loop\n",
+ lod->lod_child_exp->exp_obd->obd_name,
+ *poolname, LOD_SPILL_MAX_HOPS);
+ lod_pool_putref(pool);
+ return;
+ }
+ lod_set_pool(poolname, pool->pool_spill_target);
+ atomic_inc(&pool->pool_spill_hit);
+ lod_pool_putref(pool);
+ goto repeat;
+ }
+
+ lod_pool_putref(pool);
+}