* Copyright 2009 Sun Microsystems, Inc. All rights reserved
* Use is subject to license terms.
*
- * Copyright (c) 2011, 2012, Whamcloud, Inc.
+ * Copyright (c) 2012, Intel Corporation.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
* Lustre is a trademark of Sun Microsystems, Inc.
*/
#define TGT_BAVAIL(i) (OST_TGT(lod,i)->ltd_statfs.os_bavail * \
OST_TGT(lod,i)->ltd_statfs.os_bsize)
-int qos_add_tgt(struct lod_device *lod, struct lod_ost_desc *ost_desc)
+int qos_add_tgt(struct lod_device *lod, struct lod_tgt_desc *ost_desc)
{
struct lov_qos_oss *oss = NULL, *temposs;
struct obd_export *exp = ost_desc->ltd_exp;
cfs_list_t *list;
ENTRY;
- cfs_down_write(&lod->lod_qos.lq_rw_sem);
+ down_write(&lod->lod_qos.lq_rw_sem);
/*
* a bit hacky approach to learn NID of corresponding connection
* but there is no official API to access information like this
* with OSD API.
*/
lod->lod_qos.lq_rr.lqr_dirty = 1;
out:
- cfs_up_write(&lod->lod_qos.lq_rw_sem);
+ up_write(&lod->lod_qos.lq_rw_sem);
RETURN(rc);
}
-int qos_del_tgt(struct lod_device *lod, struct lod_ost_desc *ost_desc)
+int qos_del_tgt(struct lod_device *lod, struct lod_tgt_desc *ost_desc)
{
struct lov_qos_oss *oss;
int rc = 0;
ENTRY;
- cfs_down_write(&lod->lod_qos.lq_rw_sem);
+ down_write(&lod->lod_qos.lq_rw_sem);
oss = ost_desc->ltd_qos.ltq_oss;
if (!oss)
GOTO(out, rc = -ENOENT);
lod->lod_qos.lq_dirty = 1;
lod->lod_qos.lq_rr.lqr_dirty = 1;
out:
- cfs_up_write(&lod->lod_qos.lq_rw_sem);
+ up_write(&lod->lod_qos.lq_rw_sem);
RETURN(rc);
}
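Both hunks above are part of the same mechanical change: the legacy libcfs wrappers cfs_down_write()/cfs_up_write() are replaced by the kernel's native rw_semaphore primitives of the same shape. A minimal sketch of the pattern the renamed calls map onto, using a hypothetical qos_data structure in place of lod_qos (an illustration, not Lustre code):

#include <linux/rwsem.h>

struct qos_data {
	struct rw_semaphore lq_rw_sem;	/* serializes updates to QoS state */
	int lq_dirty;			/* penalties need recalculation */
};

static void qos_data_init(struct qos_data *q)
{
	init_rwsem(&q->lq_rw_sem);
	q->lq_dirty = 1;
}

static void qos_mark_dirty(struct qos_data *q)
{
	down_write(&q->lq_rw_sem);	/* writer: exclusive access */
	q->lq_dirty = 1;
	up_write(&q->lq_rw_sem);
}

Readers that only consult the state take down_read()/up_read() instead, as the allocators further down do.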
static int lod_statfs_and_check(const struct lu_env *env, struct lod_device *d,
int index, struct obd_statfs *sfs)
{
- struct lod_ost_desc *ost;
+ struct lod_tgt_desc *ost;
int rc;
LASSERT(d);
/* check whether device has changed state (active, inactive) */
if (rc != 0 && ost->ltd_active) {
/* turned inactive? */
- cfs_spin_lock(&d->lod_desc_lock);
+ spin_lock(&d->lod_desc_lock);
if (ost->ltd_active) {
ost->ltd_active = 0;
LASSERT(d->lod_desc.ld_active_tgt_count > 0);
d->lod_desc.ld_active_tgt_count--;
CDEBUG(D_CONFIG, "%s: turns inactive\n",
ost->ltd_exp->exp_obd->obd_name);
}
- cfs_spin_unlock(&d->lod_desc_lock);
+ spin_unlock(&d->lod_desc_lock);
} else if (rc == 0 && ost->ltd_active == 0) {
/* turned active? */
LASSERT(d->lod_desc.ld_active_tgt_count < d->lod_ostnr);
- cfs_spin_lock(&d->lod_desc_lock);
+ spin_lock(&d->lod_desc_lock);
if (ost->ltd_active == 0) {
ost->ltd_active = 1;
d->lod_desc.ld_active_tgt_count++;
CDEBUG(D_CONFIG, "%s: turns active\n",
ost->ltd_exp->exp_obd->obd_name);
}
- cfs_spin_unlock(&d->lod_desc_lock);
+ spin_unlock(&d->lod_desc_lock);
}
return rc;
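The transition code above follows a check/lock/re-check discipline: ltd_active is tested once without the lock (a cheap fast path), then re-tested under lod_desc_lock so that two racing threads cannot both account the same transition in ld_active_tgt_count. A hedged sketch of the same idiom on a hypothetical target structure:

#include <linux/spinlock.h>

struct tgt_state {
	spinlock_t lock;	/* guards the two fields below */
	int active;
	int active_count;
};

static void tgt_set_inactive(struct tgt_state *t)
{
	if (!t->active)			/* unlocked fast-path check */
		return;
	spin_lock(&t->lock);
	if (t->active) {		/* re-check: a racer may have won */
		t->active = 0;
		t->active_count--;	/* accounted exactly once */
	}
	spin_unlock(&t->lock);
}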
/* statfs data are quite recent, don't need to refresh it */
RETURN_EXIT;
- cfs_down_write(&lod->lod_qos.lq_rw_sem);
+ down_write(&lod->lod_qos.lq_rw_sem);
if (cfs_time_beforeq_64(max_age, obd->obd_osfs_age))
GOTO(out, rc = 0);
obd->obd_osfs_age = cfs_time_current_64();
out:
- cfs_up_write(&lod->lod_qos.lq_rw_sem);
+ up_write(&lod->lod_qos.lq_rw_sem);
}
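This fragment is age-based caching of statfs results with a double check: the age is compared once before taking lq_rw_sem and once again after, because another thread may have refreshed the data while this one slept on the semaphore. A minimal sketch of the idiom, with hypothetical names (statfs_cache, refresh_cb):

#include <linux/jiffies.h>
#include <linux/rwsem.h>

struct statfs_cache {
	struct rw_semaphore sem;
	unsigned long age;		/* jiffies of last refresh */
};

static void cache_update(struct statfs_cache *c, unsigned long max_age,
			 void (*refresh_cb)(struct statfs_cache *))
{
	if (time_before(max_age, c->age))
		return;			/* data recent enough, skip */

	down_write(&c->sem);
	if (time_before(max_age, c->age)) {
		up_write(&c->sem);	/* refreshed while we slept */
		return;
	}
	refresh_cb(c);			/* the expensive statfs sweep */
	c->age = jiffies;
	up_write(&c->sem);
}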
/* Recalculate per-object penalties for OSSs and OSTs,
* depends on the size of each OST within an OSS. */
static int lod_qos_used(struct lod_device *lod, struct ost_pool *osts,
__u32 index, __u64 *total_wt)
{
- struct lod_ost_desc *ost;
+ struct lod_tgt_desc *ost;
struct lov_qos_oss *oss;
int j;
ENTRY;
struct lov_qos_rr *lqr)
{
struct lov_qos_oss *oss;
- struct lod_ost_desc *ost;
+ struct lod_tgt_desc *ost;
unsigned placed, real_count;
int i, rc;
ENTRY;
}
/* Do actual allocation. */
- cfs_down_write(&lod->lod_qos.lq_rw_sem);
+ down_write(&lod->lod_qos.lq_rw_sem);
/*
* Check again. While we were sleeping on @lq_rw_sem something could
* change.
*/
if (!lqr->lqr_dirty) {
LASSERT(lqr->lqr_pool.op_size);
- cfs_up_write(&lod->lod_qos.lq_rw_sem);
+ up_write(&lod->lod_qos.lq_rw_sem);
RETURN(0);
}
lqr->lqr_pool.op_count = real_count;
rc = lod_ost_pool_extend(&lqr->lqr_pool, real_count);
if (rc) {
- cfs_up_write(&lod->lod_qos.lq_rw_sem);
+ up_write(&lod->lod_qos.lq_rw_sem);
RETURN(rc);
}
for (i = 0; i < lqr->lqr_pool.op_count; i++)
}
lqr->lqr_dirty = 0;
- cfs_up_write(&lod->lod_qos.lq_rw_sem);
+ up_write(&lod->lod_qos.lq_rw_sem);
if (placed != real_count) {
/* This should never happen */
int ost_idx,
struct thandle *th)
{
- struct lod_ost_desc *ost;
+ struct lod_tgt_desc *ost;
struct lu_object *o, *n;
struct lu_device *nd;
struct dt_object *dt;
/* threshold: the lesser of 0.1% of the used blocks and 1GB worth of blocks */
used = min_t(__u64, (msfs->os_blocks - msfs->os_bfree) >> 10,
- 1 << (31 - cfs_ffs(bs)));
+ 1 << (31 - ffs(bs)));
return (msfs->os_bavail < used);
}
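The min_t() expression is worth unpacking: (os_blocks - os_bfree) >> 10 is 1/1024 (about 0.1%) of the used blocks, and 1 << (31 - ffs(bs)) is exactly the number of blocks in 1GB when bs is a power of two, since ffs(bs) returns log2(bs) + 1. A small userspace check of the arithmetic (illustration only):

#include <assert.h>
#include <stdio.h>
#include <strings.h>	/* ffs() */

int main(void)
{
	int bs = 4096;	/* os_bsize, assumed to be a power of two */
	unsigned long long gb_blocks = 1ULL << (31 - ffs(bs));

	/* ffs(4096) == 13, so gb_blocks == 1 << 18 == 262144,
	 * and 262144 blocks * 4096 bytes == 2^30 bytes == 1GB */
	assert(gb_blocks * (unsigned long long)bs == 1ULL << 30);

	unsigned long long used_blocks = 1000000;
	printf("threshold = min(%llu, %llu) blocks\n",
	       used_blocks >> 10, gb_blocks);
	return 0;
}

The device counts as full once fewer blocks than that threshold remain available.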
pool = lod_find_pool(m, lo->ldo_pool);
if (pool != NULL) {
- cfs_down_read(&pool_tgt_rw_sem(pool));
+ down_read(&pool_tgt_rw_sem(pool));
osts = &(pool->pool_obds);
lqr = &(pool->pool_rr);
} else {
if (stripe_cnt > 1 && (osts->op_count % stripe_cnt) != 1)
++lqr->lqr_offset_idx;
}
- cfs_down_read(&m->lod_qos.lq_rw_sem);
+ down_read(&m->lod_qos.lq_rw_sem);
ost_start_idx_temp = lqr->lqr_start_idx;
repeat_find:
goto repeat_find;
}
- cfs_up_read(&m->lod_qos.lq_rw_sem);
+ up_read(&m->lod_qos.lq_rw_sem);
if (stripe_idx) {
lo->ldo_stripenr = stripe_idx;
out:
if (pool != NULL) {
- cfs_up_read(&pool_tgt_rw_sem(pool));
+ up_read(&pool_tgt_rw_sem(pool));
/* put back ref got by lod_find_pool() */
lod_pool_putref(pool);
}
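Every allocator in this file handles pools with the same acquire/release pairing: lod_find_pool() returns the pool with a reference held, the target list is then stabilized under down_read(&pool_tgt_rw_sem(pool)), and teardown drops the read lock before lod_pool_putref() releases the reference. A hedged sketch of that discipline with hypothetical helpers (pool_find(), pool_putref() stand in for the Lustre functions):

#include <linux/errno.h>
#include <linux/rwsem.h>

struct pool {
	struct rw_semaphore tgt_sem;	/* guards the target list */
	/* refcount managed by the helpers below */
};

struct pool *pool_find(const char *name);	/* returns a referenced pool */
void pool_putref(struct pool *p);

static int walk_pool_targets(const char *name)
{
	struct pool *p = pool_find(name);

	if (p == NULL)
		return -ENOENT;

	down_read(&p->tgt_sem);
	/* ... iterate the pool's targets safely here ... */
	up_read(&p->tgt_sem);
	pool_putref(p);		/* drop the ref taken by pool_find() */
	return 0;
}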
pool = lod_find_pool(m, lo->ldo_pool);
if (pool != NULL) {
- cfs_down_read(&pool_tgt_rw_sem(pool));
+ down_read(&pool_tgt_rw_sem(pool));
osts = &(pool->pool_obds);
} else {
osts = &(m->lod_pool_info);
rc = -EFBIG;
out:
if (pool != NULL) {
- cfs_up_read(&pool_tgt_rw_sem(pool));
+ up_read(&pool_tgt_rw_sem(pool));
/* put back ref got by lod_find_pool() */
lod_pool_putref(pool);
}
{
struct lod_device *m = lu2lod_dev(lo->ldo_obj.do_lu.lo_dev);
struct obd_statfs *sfs = &lod_env_info(env)->lti_osfs;
- struct lod_ost_desc *ost;
+ struct lod_tgt_desc *ost;
struct dt_object *o;
__u64 total_weight = 0;
int nfound, good_osts, i, rc = 0;
pool = lod_find_pool(m, lo->ldo_pool);
if (pool != NULL) {
- cfs_down_read(&pool_tgt_rw_sem(pool));
+ down_read(&pool_tgt_rw_sem(pool));
osts = &(pool->pool_obds);
} else {
osts = &(m->lod_pool_info);
GOTO(out_nolock, rc = -EAGAIN);
/* Do actual allocation, use write lock here. */
- cfs_down_write(&m->lod_qos.lq_rw_sem);
+ down_write(&m->lod_qos.lq_rw_sem);
/*
* Check again, while we were sleeping on @lq_rw_sem things could
* change.
*/
}
out:
- cfs_up_write(&m->lod_qos.lq_rw_sem);
+ up_write(&m->lod_qos.lq_rw_sem);
out_nolock:
if (pool != NULL) {
- cfs_up_read(&pool_tgt_rw_sem(pool));
+ up_read(&pool_tgt_rw_sem(pool));
/* put back ref got by lod_find_pool() */
lod_pool_putref(pool);
}
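The -EAGAIN exit above (taken when nothing is dirty and free space is evenly spread) is the QoS allocator declining to do weighted placement: round-robin would do just as well and is cheaper. The fallback itself lives in the elided caller; a sketch of how it plausibly wires together, reusing the allocator names seen in this file:

/* hypothetical caller-side fallback around the allocators in this file */
static int alloc_striping(const struct lu_env *env, struct lod_object *lo,
			  int flag, struct thandle *th)
{
	int rc;

	rc = lod_alloc_qos(env, lo, flag, th);
	if (rc == -EAGAIN)
		/* balanced space: plain round-robin is equivalent */
		rc = lod_alloc_rr(env, lo, flag, th);
	return rc;
}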
v1 = buf->lb_buf;
magic = v1->lmm_magic;
- if (magic == __swab32(LOV_USER_MAGIC_V1))
+ if (magic == __swab32(LOV_USER_MAGIC_V1)) {
lustre_swab_lov_user_md_v1(v1);
- else if (magic == __swab32(LOV_USER_MAGIC_V3))
+ magic = v1->lmm_magic;
+ } else if (magic == __swab32(LOV_USER_MAGIC_V3)) {
+ v3 = buf->lb_buf;
lustre_swab_lov_user_md_v3(v3);
+ magic = v3->lmm_magic;
+ }
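The extra assignments in this hunk are the substantive fix: lustre_swab_lov_user_md_v1()/_v3() byte-swap the buffer in place, including lmm_magic itself, so the local magic variable still holds the pre-swab value; without re-reading it, the magic != LOV_MAGIC_V1 && magic != LOV_MAGIC_V3 check that follows would always reject a swabbed buffer. A standalone illustration of the stale-copy pitfall, with a hypothetical swab_md() standing in for the Lustre swabbers:

#include <assert.h>
#include <stdint.h>

#define MAGIC_V1 0x0bd10bd0u	/* stand-in for LOV_MAGIC_V1 */

static uint32_t swab32(uint32_t x)	/* like the kernel's __swab32() */
{
	return ((x & 0x000000ffu) << 24) | ((x & 0x0000ff00u) << 8) |
	       ((x & 0x00ff0000u) >> 8)  | ((x & 0xff000000u) >> 24);
}

struct md { uint32_t lmm_magic; };

static void swab_md(struct md *m)	/* in-place, like the real swabbers */
{
	m->lmm_magic = swab32(m->lmm_magic);
}

int main(void)
{
	struct md m = { .lmm_magic = swab32(MAGIC_V1) };  /* foreign endian */
	uint32_t magic = m.lmm_magic;	/* cached copy, pre-swab */

	if (magic == swab32(MAGIC_V1)) {
		swab_md(&m);
		magic = m.lmm_magic;	/* the fix: reload after swabbing */
	}
	assert(magic == MAGIC_V1);	/* a stale copy would fail here */
	return 0;
}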
if (unlikely(magic != LOV_MAGIC_V1 && magic != LOV_MAGIC_V3)) {
/* try to use as fully defined striping */
v3 = buf->lb_buf;
lod_object_set_pool(lo, v3->lmm_pool_name);
+ /* In the function below, .hs_keycmp resolves to
+ * pool_hashkey_keycmp() */
+ /* coverity[overrun-buffer-val] */
pool = lod_find_pool(d, v3->lmm_pool_name);
if (pool != NULL) {
if (lo->ldo_def_stripe_offset !=
GOTO(out, rc = -ENOMEM);
lo->ldo_stripes_allocated = lo->ldo_stripenr;
- lod_getref(d);
+ lod_getref(&d->lod_ost_descs);
/* XXX: support for non-0 files w/o objects */
if (lo->ldo_def_stripe_offset >= d->lod_desc.ld_tgt_count) {
lod_qos_statfs_update(env, d);
rc = lod_alloc_rr(env, lo, flag, th);
} else
rc = lod_alloc_specific(env, lo, flag, th);
- lod_putref(d);
+ lod_putref(d, &d->lod_ost_descs);
} else {
/*
* lod_qos_parse_config() found supplied buf as a predefined