#define TGT_BAVAIL(i) (lov->lov_tgts[i]->ltd_exp->exp_obd->obd_osfs.os_bavail *\
lov->lov_tgts[i]->ltd_exp->exp_obd->obd_osfs.os_bsize)
-#define TGT_FFREE(i) (lov->lov_tgts[i]->ltd_exp->exp_obd->obd_osfs.os_ffree)
int qos_add_tgt(struct obd_device *obd, __u32 index)
RETURN(-ENOTCONN);
}
- down_write(&lov->lov_qos.lq_rw_sem);
- mutex_down(&lov->lov_lock);
- list_for_each_entry(oss, &lov->lov_qos.lq_oss_list, lqo_oss_list) {
+ cfs_down_write(&lov->lov_qos.lq_rw_sem);
+ cfs_mutex_down(&lov->lov_lock);
+ cfs_list_for_each_entry(oss, &lov->lov_qos.lq_oss_list, lqo_oss_list) {
if (obd_uuid_equals(&oss->lqo_uuid,
&exp->exp_connection->c_remote_uuid)) {
found++;
sizeof(oss->lqo_uuid));
} else {
/* Assume we have to move this one */
- list_del(&oss->lqo_oss_list);
+ cfs_list_del(&oss->lqo_oss_list);
}
oss->lqo_ost_count++;
/* Add sorted by # of OSTs. Find the first entry that we're
bigger than... */
- list_for_each_entry(temposs, &lov->lov_qos.lq_oss_list, lqo_oss_list) {
+ cfs_list_for_each_entry(temposs, &lov->lov_qos.lq_oss_list,
+ lqo_oss_list) {
if (oss->lqo_ost_count > temposs->lqo_ost_count)
break;
}
/* ...and add before it. If we're the first or smallest, temposs
points to the list head, and we add to the end. */
- list_add_tail(&oss->lqo_oss_list, &temposs->lqo_oss_list);
+ cfs_list_add_tail(&oss->lqo_oss_list, &temposs->lqo_oss_list);
lov->lov_qos.lq_dirty = 1;
lov->lov_qos.lq_rr.lqr_dirty = 1;
oss->lqo_ost_count);
out:
- mutex_up(&lov->lov_lock);
- up_write(&lov->lov_qos.lq_rw_sem);
+ cfs_mutex_up(&lov->lov_lock);
+ cfs_up_write(&lov->lov_qos.lq_rw_sem);
RETURN(rc);
}
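For context on the insertion logic in qos_add_tgt() above (not part of the patch): with Linux-style circular lists, list_add_tail(new, pos) links new immediately before pos, so scanning for the first OSS with a smaller lqo_ost_count and adding before it keeps the list sorted in descending order; if nothing smaller is found, the scan ends with pos at the list head and the new entry lands at the tail. A minimal standalone sketch of that behaviour; the names below (struct node, insert_sorted, the demo main) are hypothetical, only the list semantics mirror the kernel API.

#include <stdio.h>

struct node {
        struct node *prev, *next;
        int count;                      /* stands in for lqo_ost_count */
};

static void list_init(struct node *head)
{
        head->prev = head->next = head;
}

/* same semantics as the kernel's list_add_tail(): link @n just before @pos */
static void list_add_tail(struct node *n, struct node *pos)
{
        n->prev = pos->prev;
        n->next = pos;
        pos->prev->next = n;
        pos->prev = n;
}

static void insert_sorted(struct node *head, struct node *n)
{
        struct node *cur;

        /* find the first entry that we're bigger than... */
        for (cur = head->next; cur != head; cur = cur->next)
                if (n->count > cur->count)
                        break;
        /* ...and add before it; if nothing is smaller, @cur is the list
         * head and @n ends up at the tail, keeping descending order */
        list_add_tail(n, cur);
}

int main(void)
{
        struct node head, a = { .count = 2 }, b = { .count = 5 }, c = { .count = 3 };
        struct node *cur;

        list_init(&head);
        insert_sorted(&head, &a);
        insert_sorted(&head, &b);
        insert_sorted(&head, &c);
        for (cur = head.next; cur != &head; cur = cur->next)
                printf("%d ", cur->count);      /* prints: 5 3 2 */
        printf("\n");
        return 0;
}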
int rc = 0;
ENTRY;
- down_write(&lov->lov_qos.lq_rw_sem);
+ cfs_down_write(&lov->lov_qos.lq_rw_sem);
oss = tgt->ltd_qos.ltq_oss;
if (!oss)
if (oss->lqo_ost_count == 0) {
CDEBUG(D_QOS, "removing OSS %s\n",
obd_uuid2str(&oss->lqo_uuid));
- list_del(&oss->lqo_oss_list);
+ cfs_list_del(&oss->lqo_oss_list);
OBD_FREE_PTR(oss);
}
lov->lov_qos.lq_dirty = 1;
lov->lov_qos.lq_rr.lqr_dirty = 1;
out:
- up_write(&lov->lov_qos.lq_rw_sem);
+ cfs_up_write(&lov->lov_qos.lq_rw_sem);
RETURN(rc);
}
GOTO(out, rc = -EAGAIN);
/* find bavail on each OSS */
- list_for_each_entry(oss, &lov->lov_qos.lq_oss_list, lqo_oss_list) {
+ cfs_list_for_each_entry(oss, &lov->lov_qos.lq_oss_list, lqo_oss_list) {
oss->lqo_bavail = 0;
}
lov->lov_qos.lq_active_oss_count = 0;
}
/* Per-OSS penalty is prio * oss_avail / oss_osts / (num_oss - 1) / 2 */
- list_for_each_entry(oss, &lov->lov_qos.lq_oss_list, lqo_oss_list) {
+ cfs_list_for_each_entry(oss, &lov->lov_qos.lq_oss_list, lqo_oss_list) {
temp = oss->lqo_bavail >> 1;
do_div(temp, oss->lqo_ost_count * num_active);
oss->lqo_penalty_per_obj = (temp * prio_wide) >> 8;
lov->lov_qos.lq_active_oss_count;
/* Decrease all OSS penalties */
- list_for_each_entry(oss, &lov->lov_qos.lq_oss_list, lqo_oss_list) {
+ cfs_list_for_each_entry(oss, &lov->lov_qos.lq_oss_list, lqo_oss_list) {
if (oss->lqo_penalty < oss->lqo_penalty_per_obj)
oss->lqo_penalty = 0;
else
}
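A worked example of the per-OSS penalty formula a few lines above (illustrative only, not part of the patch). Reading the code against the comment "prio * oss_avail / oss_osts / (num_oss - 1) / 2": the ">> 1" supplies the trailing "/ 2", do_div() divides by the OST count times num_active (which, per the formula, should be the active OSS count minus one; an assumption, since its setup is not shown in this excerpt), and ">> 8" scales away prio_wide, the priority expressed as a fraction of 256. With made-up numbers:

#include <stdio.h>

int main(void)
{
        /* hypothetical values, chosen only to make the arithmetic visible */
        unsigned long long bavail     = 8ULL << 40;  /* 8 TiB free on this OSS        */
        unsigned long long prio_wide  = 205;         /* ~80% priority, scaled by 256  */
        unsigned long long ost_count  = 4;           /* OSTs served by this OSS       */
        unsigned long long num_active = 2;           /* assumed: active OSS count - 1 */
        unsigned long long temp, penalty_per_obj;

        temp = bavail >> 1;                          /* the trailing "/ 2"            */
        temp /= ost_count * num_active;              /* userspace stand-in for do_div() */
        penalty_per_obj = (temp * prio_wide) >> 8;   /* "* prio", prio scaled by 256  */

        /* 8 TiB / 2 / (4 * 2) * 205/256 ~= 0.4 TiB charged per new object */
        printf("penalty_per_obj = %llu bytes\n", penalty_per_obj);
        return 0;
}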
/* Do actual allocation. */
- down_write(&lov->lov_qos.lq_rw_sem);
+ cfs_down_write(&lov->lov_qos.lq_rw_sem);
/*
* Check again. While we were sleeping on @lq_rw_sem something could
* change.
*/
if (!lqr->lqr_dirty) {
LASSERT(lqr->lqr_pool.op_size);
- up_write(&lov->lov_qos.lq_rw_sem);
+ cfs_up_write(&lov->lov_qos.lq_rw_sem);
RETURN(0);
}
lqr->lqr_pool.op_count = real_count;
rc = lov_ost_pool_extend(&lqr->lqr_pool, real_count);
if (rc) {
- up_write(&lov->lov_qos.lq_rw_sem);
+ cfs_up_write(&lov->lov_qos.lq_rw_sem);
RETURN(rc);
}
for (i = 0; i < lqr->lqr_pool.op_count; i++)
/* Place all the OSTs from 1 OSS at the same time. */
placed = 0;
- list_for_each_entry(oss, &lov->lov_qos.lq_oss_list, lqo_oss_list) {
+ cfs_list_for_each_entry(oss, &lov->lov_qos.lq_oss_list, lqo_oss_list) {
int j = 0;
for (i = 0; i < lqr->lqr_pool.op_count; i++) {
if (lov->lov_tgts[src_pool->op_array[i]] &&
}
lqr->lqr_dirty = 0;
- up_write(&lov->lov_qos.lq_rw_sem);
+ cfs_up_write(&lov->lov_qos.lq_rw_sem);
if (placed != real_count) {
/* This should never happen */
osts = &(lov->lov_packed);
lqr = &(lov->lov_qos.lq_rr);
} else {
- down_read(&pool_tgt_rw_sem(pool));
+ cfs_down_read(&pool_tgt_rw_sem(pool));
osts = &(pool->pool_obds);
lqr = &(pool->pool_rr);
}
if (*stripe_cnt > 1 && (osts->op_count % (*stripe_cnt)) != 1)
++lqr->lqr_offset_idx;
}
- down_read(&lov->lov_qos.lq_rw_sem);
+ cfs_down_read(&lov->lov_qos.lq_rw_sem);
ost_start_idx_temp = lqr->lqr_start_idx;
repeat_find:
goto repeat_find;
}
- up_read(&lov->lov_qos.lq_rw_sem);
+ cfs_up_read(&lov->lov_qos.lq_rw_sem);
*stripe_cnt = idx_pos - idx_arr;
out:
if (pool != NULL) {
- up_read(&pool_tgt_rw_sem(pool));
+ cfs_up_read(&pool_tgt_rw_sem(pool));
/* put back ref got by lov_find_pool() */
lov_pool_putref(pool);
}
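Aside on the "osts->op_count % (*stripe_cnt)" test in the round-robin allocator above (not part of the patch): if the start index advances by roughly the stripe count per file, the starting OSTs it can ever reach are the multiples of gcd(stripe_cnt, op_count); when op_count % stripe_cnt == 1 that gcd is 1, so the start already precesses through every OST, and otherwise lqr_offset_idx is bumped to force the drift. A toy simulation under the assumed simplification that the stride is exactly the stripe count:

#include <stdio.h>

/* Print the OST index each of the first @nfiles files would start on,
 * assuming the start index grows by exactly @stripe per file (a
 * simplification; the real loop also advances past skipped OSTs). */
static void starts(unsigned int osts, unsigned int stripe, unsigned int nfiles)
{
        unsigned int start = 0, i;

        printf("%u OSTs, stripe %u:", osts, stripe);
        for (i = 0; i < nfiles; i++) {
                printf(" %u", start % osts);
                start += stripe;
        }
        printf("\n");
}

int main(void)
{
        starts(6, 2, 8);        /* 6 % 2 != 1: starts repeat 0 2 4 0 2 4 ... */
        starts(7, 2, 8);        /* 7 % 2 == 1: starts visit every OST        */
        return 0;
}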
if (pool == NULL) {
osts = &(lov->lov_packed);
} else {
- down_read(&pool_tgt_rw_sem(pool));
+ cfs_down_read(&pool_tgt_rw_sem(pool));
osts = &(pool->pool_obds);
}
rc = -EFBIG;
out:
if (pool != NULL) {
- up_read(&pool_tgt_rw_sem(pool));
+ cfs_up_read(&pool_tgt_rw_sem(pool));
/* put back ref got by lov_find_pool() */
lov_pool_putref(pool);
}
char *poolname, int flags)
{
struct lov_obd *lov = &exp->exp_obd->u.lov;
- static time_t last_warn = 0;
- time_t now = cfs_time_current_sec();
- __u64 total_bavail, total_weight = 0;
- int nfound, good_osts, i, warn = 0, rc = 0;
+ __u64 total_weight = 0;
+ int nfound, good_osts, i, rc = 0;
int stripe_cnt_min = min_stripe_count(*stripe_cnt, flags);
struct pool_desc *pool;
struct ost_pool *osts;
osts = &(lov->lov_packed);
lqr = &(lov->lov_qos.lq_rr);
} else {
- down_read(&pool_tgt_rw_sem(pool));
+ cfs_down_read(&pool_tgt_rw_sem(pool));
osts = &(pool->pool_obds);
lqr = &(pool->pool_rr);
}
GOTO(out_nolock, rc = -EAGAIN);
/* Do actual allocation, use write lock here. */
- down_write(&lov->lov_qos.lq_rw_sem);
+ cfs_down_write(&lov->lov_qos.lq_rw_sem);
/*
* Check again, while we were sleeping on @lq_rw_sem things could
if (rc)
GOTO(out, rc);
- total_bavail = 0;
good_osts = 0;
- /* Warn users about zero available space/inode every 30 min */
- if (cfs_time_sub(now, last_warn) > 60 * 30)
- warn = 1;
/* Find all the OSTs that are valid stripe candidates */
for (i = 0; i < osts->op_count; i++) {
- __u64 bavail;
-
if (!lov->lov_tgts[osts->op_array[i]] ||
!lov->lov_tgts[osts->op_array[i]]->ltd_active)
continue;
- bavail = TGT_BAVAIL(osts->op_array[i]);
- if (!bavail) {
- if (warn) {
- CDEBUG(D_QOS, "no free space on %s\n",
- obd_uuid2str(&lov->lov_tgts[osts->op_array[i]]->ltd_uuid));
- last_warn = now;
- }
- continue;
- }
- if (!TGT_FFREE(osts->op_array[i])) {
- if (warn) {
- CDEBUG(D_QOS, "no free inodes on %s\n",
- obd_uuid2str(&lov->lov_tgts[osts->op_array[i]]->ltd_uuid));
- last_warn = now;
- }
- continue;
- }
/* Fail Check before osc_precreate() is called
so we can only 'fail' single OSC. */
lov->lov_tgts[osts->op_array[i]]->ltd_qos.ltq_usable = 1;
qos_calc_weight(lov, osts->op_array[i]);
- total_bavail += bavail;
total_weight += lov->lov_tgts[osts->op_array[i]]->ltd_qos.ltq_weight;
good_osts++;
if (good_osts < stripe_cnt_min)
GOTO(out, rc = -EAGAIN);
- if (!total_bavail)
- GOTO(out, rc = -ENOSPC);
-
/* We have enough osts */
if (good_osts < *stripe_cnt)
*stripe_cnt = good_osts;
LASSERT(nfound == *stripe_cnt);
out:
- up_write(&lov->lov_qos.lq_rw_sem);
+ cfs_up_write(&lov->lov_qos.lq_rw_sem);
out_nolock:
if (pool != NULL) {
- up_read(&pool_tgt_rw_sem(pool));
+ cfs_up_read(&pool_tgt_rw_sem(pool));
/* put back ref got by lov_find_pool() */
lov_pool_putref(pool);
}
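For context on the total_weight accumulated above (not part of the patch): it feeds what is, in effect, a weighted random pick of stripe targets, so OSTs with larger ltq_weight are proportionally more likely to be chosen; the selection loop itself is elided from this excerpt, so details may differ. A minimal standalone sketch of a cumulative-weight pick, with hypothetical names (pick_weighted, a fixed weights[] array) and rand() standing in for the kernel's random source:

#include <stdio.h>
#include <stdlib.h>

/* Return an index in [0, n) with probability proportional to weight[i]:
 * draw r uniformly in [0, total) and walk the cumulative sum until it
 * exceeds r.  In this sketch, zero-weight entries are never selected
 * while total > 0. */
static int pick_weighted(const unsigned long long *weight, int n)
{
        unsigned long long total = 0, cur = 0, r;
        int i;

        for (i = 0; i < n; i++)
                total += weight[i];
        if (total == 0)
                return 0;
        r = (unsigned long long)rand() % total;        /* demo-quality randomness */
        for (i = 0; i < n; i++) {
                cur += weight[i];
                if (cur > r)
                        return i;
        }
        return n - 1;
}

int main(void)
{
        unsigned long long weights[] = { 10, 30, 60 }; /* hypothetical ltq_weight values */
        int hits[3] = { 0, 0, 0 }, i;

        srand(1);
        for (i = 0; i < 100000; i++)
                hits[pick_weighted(weights, 3)]++;
        printf("hits: %d %d %d\n", hits[0], hits[1], hits[2]); /* roughly 10%/30%/60% */
        return 0;
}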
req->rq_stripe = i;
/* create data objects with "parent" OA */
memcpy(req->rq_oi.oi_oa, src_oa, sizeof(*req->rq_oi.oi_oa));
+ req->rq_oi.oi_cb_up = cb_create_update;
/* XXX When we start creating objects on demand, we need to
* make sure that we always create the object on the
void qos_statfs_done(struct lov_obd *lov)
{
LASSERT(lov->lov_qos.lq_statfs_in_progress);
- down_write(&lov->lov_qos.lq_rw_sem);
+ cfs_down_write(&lov->lov_qos.lq_rw_sem);
lov->lov_qos.lq_statfs_in_progress = 0;
/* wake up any threads waiting for the statfs rpcs to complete */
cfs_waitq_signal(&lov->lov_qos.lq_statfs_waitq);
- up_write(&lov->lov_qos.lq_rw_sem);
+ cfs_up_write(&lov->lov_qos.lq_rw_sem);
}
static int qos_statfs_ready(struct obd_device *obd, __u64 max_age)
struct lov_obd *lov = &obd->u.lov;
int rc;
ENTRY;
- down_read(&lov->lov_qos.lq_rw_sem);
+ cfs_down_read(&lov->lov_qos.lq_rw_sem);
rc = lov->lov_qos.lq_statfs_in_progress == 0 ||
cfs_time_beforeq_64(max_age, obd->obd_osfs_age);
- up_read(&lov->lov_qos.lq_rw_sem);
+ cfs_up_read(&lov->lov_qos.lq_rw_sem);
RETURN(rc);
}
/* statfs already in progress */
RETURN_EXIT;
- down_write(&lov->lov_qos.lq_rw_sem);
+ cfs_down_write(&lov->lov_qos.lq_rw_sem);
if (lov->lov_qos.lq_statfs_in_progress) {
- up_write(&lov->lov_qos.lq_rw_sem);
+ cfs_up_write(&lov->lov_qos.lq_rw_sem);
GOTO(out, rc = 0);
}
/* no statfs in flight, send rpcs */
lov->lov_qos.lq_statfs_in_progress = 1;
- up_write(&lov->lov_qos.lq_rw_sem);
+ cfs_up_write(&lov->lov_qos.lq_rw_sem);
if (wait)
CDEBUG(D_QOS, "%s: did not manage to get fresh statfs data "
GOTO(out_failed, rc = -ENOMEM);
rc = obd_statfs_async(obd, oinfo, max_age, set);
- if (rc || list_empty(&set->set_requests)) {
+ if (rc || cfs_list_empty(&set->set_requests)) {
if (rc)
CWARN("statfs failed with %d\n", rc);
GOTO(out_failed, rc);
GOTO(out, rc);
out_failed:
- down_write(&lov->lov_qos.lq_rw_sem);
+ cfs_down_write(&lov->lov_qos.lq_rw_sem);
lov->lov_qos.lq_statfs_in_progress = 0;
/* wake up any threads waiting for the statfs rpcs to complete */
cfs_waitq_signal(&lov->lov_qos.lq_statfs_waitq);
- up_write(&lov->lov_qos.lq_rw_sem);
+ cfs_up_write(&lov->lov_qos.lq_rw_sem);
wait = 0;
out:
if (set)