*/
/*
* This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
*/
/*
* lustre/lod/lod_pool.c
#include <libcfs/libcfs.h>
#include <libcfs/linux/linux-hash.h>
+#include <libcfs/linux/linux-fs.h>
#include <obd.h>
#include "lod_internal.h"
if (atomic_dec_and_test(&pool->pool_refcount)) {
LASSERT(list_empty(&pool->pool_list));
LASSERT(pool->pool_proc_entry == NULL);
- tgt_pool_free(&(pool->pool_rr.lqr_pool));
- tgt_pool_free(&(pool->pool_obds));
+ lu_tgt_pool_free(&(pool->pool_rr.lqr_pool));
+ lu_tgt_pool_free(&(pool->pool_obds));
kfree_rcu(pool, pool_rcu);
EXIT;
}
return rc;
}
-static struct file_operations pool_proc_operations = {
- .open = pool_proc_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = seq_release,
+/*
+ * Kernel 5.6+ procfs entries take struct proc_ops instead of
+ * struct file_operations.  Keep the conventional "static const"
+ * specifier order ("const static" is legal C but non-idiomatic and
+ * warned about by -Wold-style-declaration).
+ */
+static const struct proc_ops pool_proc_operations = {
+ .proc_open = pool_proc_open,
+ .proc_read = seq_read,
+ .proc_lseek = seq_lseek,
+ .proc_release = seq_release,
};
/**
rhashtable_free_and_destroy(tbl, pools_hash_exit, NULL);
}
+/**
+ * lod_pool_exists() - check whether a pool with this name is registered.
+ * @lod:      LOD device whose pool hash table is searched
+ * @poolname: NUL-terminated pool name used as the hash key
+ *
+ * Lockless lookup under rcu_read_lock(); no reference is taken on the
+ * pool, so the result is only a point-in-time snapshot — the pool may
+ * be gone by the time the caller acts on it.
+ *
+ * Return: true if the pool is currently in the hash, false otherwise.
+ */
+bool lod_pool_exists(struct lod_device *lod, char *poolname)
+{
+ struct pool_desc *pool;
+
+ rcu_read_lock();
+ pool = rhashtable_lookup(&lod->lod_pools_hash_body,
+ poolname,
+ pools_hash_params);
+ rcu_read_unlock();
+ return pool != NULL;
+}
+
+/**
+ * lod_pool_find() - look up a pool by name and take a reference on it.
+ * @lod:      LOD device whose pool hash table is searched
+ * @poolname: NUL-terminated pool name used as the hash key
+ *
+ * Unlike lod_pool_exists(), a successful lookup pins the pool:
+ * atomic_inc_not_zero() is used so that a pool whose refcount already
+ * dropped to zero (i.e. is being freed) is treated as not found.
+ * The caller must drop the reference with lod_pool_putref().
+ *
+ * Return: referenced pool on success, NULL if not found or being freed.
+ */
+static struct pool_desc *lod_pool_find(struct lod_device *lod, char *poolname)
+{
+ struct pool_desc *pool;
+
+ rcu_read_lock();
+ pool = rhashtable_lookup(&lod->lod_pools_hash_body,
+ poolname,
+ pools_hash_params);
+ if (pool && !atomic_inc_not_zero(&pool->pool_refcount))
+ pool = NULL;
+ rcu_read_unlock();
+ return pool;
+}
/**
* Allocate a new pool for the specified device.
*
RETURN(-ENOMEM);
strlcpy(new_pool->pool_name, poolname, sizeof(new_pool->pool_name));
+ new_pool->pool_spill_expire = 0;
+ new_pool->pool_spill_is_active = false;
+ new_pool->pool_spill_threshold_pct = 0;
+ new_pool->pool_spill_target[0] = '\0';
new_pool->pool_lobd = obd;
atomic_set(&new_pool->pool_refcount, 1);
- rc = tgt_pool_init(&new_pool->pool_obds, 0);
+ rc = lu_tgt_pool_init(&new_pool->pool_obds, 0);
if (rc)
GOTO(out_err, rc);
lu_qos_rr_init(&new_pool->pool_rr);
- rc = tgt_pool_init(&new_pool->pool_rr.lqr_pool, 0);
+ rc = lu_tgt_pool_init(&new_pool->pool_rr.lqr_pool, 0);
if (rc)
GOTO(out_free_pool_obds, rc);
new_pool->pool_proc_entry = NULL;
lod_pool_putref(new_pool);
}
+
+ pool_getref(new_pool);
+ new_pool->pool_spill_proc_entry =
+ lprocfs_register(poolname, lod->lod_spill_proc_entry,
+ lprocfs_lod_spill_vars, new_pool);
+ if (IS_ERR(new_pool->pool_spill_proc_entry)) {
+ rc = PTR_ERR(new_pool->pool_spill_proc_entry);
+ /* reset the *spill* entry (copy-paste from the pool_proc_entry
+ * path above set the wrong field): an ERR_PTR left here would
+ * be handed to lprocfs_remove() on the error path.
+ */
+ new_pool->pool_spill_proc_entry = NULL;
+ lod_pool_putref(new_pool);
+ }
+
CDEBUG(D_INFO, "pool %p - proc %p\n", new_pool,
new_pool->pool_proc_entry);
#endif
lod->lod_pool_count--;
spin_unlock(&obd->obd_dev_lock);
+ lprocfs_remove(&new_pool->pool_spill_proc_entry);
lprocfs_remove(&new_pool->pool_proc_entry);
- tgt_pool_free(&new_pool->pool_rr.lqr_pool);
+ lu_tgt_pool_free(&new_pool->pool_rr.lqr_pool);
out_free_pool_obds:
- tgt_pool_free(&new_pool->pool_obds);
+ lu_tgt_pool_free(&new_pool->pool_obds);
OBD_FREE_PTR(new_pool);
return rc;
}
lprocfs_remove(&pool->pool_proc_entry);
lod_pool_putref(pool);
}
+ if (pool->pool_spill_proc_entry != NULL) {
+ CDEBUG(D_INFO, "proc entry %p\n", pool->pool_spill_proc_entry);
+ lprocfs_remove(&pool->pool_spill_proc_entry);
+ lod_pool_putref(pool);
+ }
spin_lock(&obd->obd_dev_lock);
list_del_init(&pool->pool_list);
int rc = -EINVAL;
ENTRY;
- rcu_read_lock();
- pool = rhashtable_lookup(&lod->lod_pools_hash_body, poolname,
- pools_hash_params);
- if (pool && !atomic_inc_not_zero(&pool->pool_refcount))
- pool = NULL;
- rcu_read_unlock();
+ pool = lod_pool_find(lod, poolname);
if (!pool)
RETURN(-ENOENT);
if (rc)
GOTO(out, rc);
- rc = tgt_pool_add(&pool->pool_obds, tgt->ltd_index,
- lod->lod_ost_count);
+ rc = lu_tgt_pool_add(&pool->pool_obds, tgt->ltd_index,
+ lod->lod_ost_count);
if (rc)
GOTO(out, rc);
- pool->pool_rr.lqr_dirty = 1;
+ set_bit(LQ_DIRTY, &pool->pool_rr.lqr_flags);
CDEBUG(D_CONFIG, "Added %s to "LOV_POOLNAMEF" as member %d\n",
ostname, poolname, pool_tgt_count(pool));
ENTRY;
/* lookup and kill hash reference */
- rcu_read_lock();
- pool = rhashtable_lookup(&lod->lod_pools_hash_body, poolname,
- pools_hash_params);
- if (pool && !atomic_inc_not_zero(&pool->pool_refcount))
- pool = NULL;
- rcu_read_unlock();
+ pool = lod_pool_find(lod, poolname);
if (!pool)
RETURN(-ENOENT);
if (rc)
GOTO(out, rc);
- tgt_pool_remove(&pool->pool_obds, ost->ltd_index);
- pool->pool_rr.lqr_dirty = 1;
+ lu_tgt_pool_remove(&pool->pool_obds, ost->ltd_index);
+ set_bit(LQ_DIRTY, &pool->pool_rr.lqr_flags);
CDEBUG(D_CONFIG, "%s removed from "LOV_POOLNAMEF"\n", ostname,
poolname);
int rc;
pool_getref(pool);
- rc = tgt_check_index(idx, &pool->pool_obds);
+ rc = lu_tgt_check_index(idx, &pool->pool_obds);
lod_pool_putref(pool);
return rc;
}
pool = NULL;
if (poolname[0] != '\0') {
- rcu_read_lock();
- pool = rhashtable_lookup(&lod->lod_pools_hash_body, poolname,
- pools_hash_params);
- if (pool && !atomic_inc_not_zero(&pool->pool_refcount))
- pool = NULL;
- rcu_read_unlock();
+ pool = lod_pool_find(lod, poolname);
if (!pool)
CDEBUG(D_CONFIG,
"%s: request for an unknown pool (" LOV_POOLNAMEF ")\n",
return pool;
}
+/**
+ * lod_spill_target_refresh() - recompute whether a pool should spill.
+ * @env:  execution environment
+ * @lod:  LOD device the pool belongs to
+ * @pool: pool whose fill level is evaluated
+ *
+ * Sums used vs. total bytes over the pool's active OSTs and sets
+ * pool_spill_is_active when usage reaches pool_spill_threshold_pct.
+ * The result is cached until pool_spill_expire (qos maxage seconds);
+ * the expiry is rechecked under pool_tgt_rw_sem so only one thread
+ * performs the refresh per interval.
+ */
+void lod_spill_target_refresh(const struct lu_env *env, struct lod_device *lod,
+ struct pool_desc *pool)
+{
+ __u64 avail_bytes = 0, total_bytes = 0;
+ struct lu_tgt_pool *osts;
+ int i;
+
+ if (ktime_get_seconds() < pool->pool_spill_expire)
+ return;
+
+ /* threshold 0 means spilling is disabled for this pool */
+ if (pool->pool_spill_threshold_pct == 0)
+ return;
+
+ lod_qos_statfs_update(env, lod, &lod->lod_ost_descs);
+
+ down_write(&pool_tgt_rw_sem(pool));
+ /* someone else may have refreshed while we waited for the sem */
+ if (ktime_get_seconds() < pool->pool_spill_expire)
+ goto out_sem;
+ pool->pool_spill_expire = ktime_get_seconds() +
+ lod->lod_ost_descs.ltd_lov_desc.ld_qos_maxage;
+
+ osts = &(pool->pool_obds);
+ for (i = 0; i < osts->op_count; i++) {
+ int idx = osts->op_array[i];
+ struct lod_tgt_desc *tgt;
+ struct obd_statfs *sfs;
+
+ if (!test_bit(idx, lod->lod_ost_bitmap))
+ continue;
+ tgt = OST_TGT(lod, idx);
+ if (tgt->ltd_active == 0)
+ continue;
+ sfs = &tgt->ltd_statfs;
+
+ avail_bytes += sfs->os_bavail * sfs->os_bsize;
+ total_bytes += sfs->os_blocks * sfs->os_bsize;
+ }
+ /* with no active target total_bytes is 0 and the unguarded
+ * "0 - 0 >= 0" comparison would spuriously enable spilling;
+ * treat an empty/inactive pool as not spilling.
+ */
+ if (total_bytes == 0)
+ pool->pool_spill_is_active = false;
+ else if (total_bytes - avail_bytes >=
+ total_bytes * pool->pool_spill_threshold_pct / 100)
+ pool->pool_spill_is_active = true;
+ else
+ pool->pool_spill_is_active = false;
+
+out_sem:
+ up_write(&pool_tgt_rw_sem(pool));
+}
+
+/*
+ * to prevent infinite loops during spilling, lets limit number of passes
+ */
+#define LOD_SPILL_MAX 10
+
+/*
+ * XXX: consider a better schema to detect loops
+ */
+/**
+ * lod_check_and_spill_pool() - follow a pool's spill chain if active.
+ * @env:      execution environment
+ * @lod:      LOD device the pools belong to
+ * @poolname: in/out pool name; replaced by the spill target name each
+ *            time an over-threshold pool is encountered
+ *
+ * Repeatedly resolves *poolname; whenever the resolved pool is spilling
+ * (per lod_spill_target_refresh()), *poolname is redirected to
+ * pool_spill_target via lod_set_pool() and the lookup restarts.
+ * Chains of spill targets are followed at most LOD_SPILL_MAX times to
+ * bound cycles (NOTE(review): lod_set_pool() presumably swaps the name
+ * string in place — confirm against its definition).
+ */
+void lod_check_and_spill_pool(const struct lu_env *env, struct lod_device *lod,
+ char **poolname)
+{
+ struct pool_desc *pool;
+ int replaced = 0;
+
+ /* no pool requested: nothing to redirect */
+ if (!poolname || !*poolname || (*poolname)[0] == '\0')
+ return;
+repeat:
+ pool = lod_pool_find(lod, *poolname);
+ if (!pool)
+ return;
+
+ lod_spill_target_refresh(env, lod, pool);
+ if (pool->pool_spill_is_active) {
+ /* warn once the pass budget is exhausted, then stop below */
+ if (++replaced >= LOD_SPILL_MAX)
+ CWARN("%s: more than %d levels of pool spill for '%s->%s'\n",
+ lod2obd(lod)->obd_name, LOD_SPILL_MAX,
+ *poolname, pool->pool_spill_target);
+ lod_set_pool(poolname, pool->pool_spill_target);
+ /* reference from lod_pool_find() dropped before re-lookup */
+ lod_pool_putref(pool);
+ if (replaced >= LOD_SPILL_MAX)
+ return;
+ goto repeat;
+ }
+
+ lod_pool_putref(pool);
+}