* GPL HEADER END
*/
/*
- * Copyright 2008 Sun Microsystems, Inc. All rights reserved
+ * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
*/
/*
#include <obd_class.h>
#include <obd_lov.h>
+#include <lustre/lustre_idl.h>
#include "lov_internal.h"
/* #define QOS_DEBUG 1 */
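/* available space in bytes on OST i, from the cached statfs data */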
#define TGT_BAVAIL(i) (lov->lov_tgts[i]->ltd_exp->exp_obd->obd_osfs.os_bavail *\
lov->lov_tgts[i]->ltd_exp->exp_obd->obd_osfs.os_bsize)
-#define TGT_FFREE(i) (lov->lov_tgts[i]->ltd_exp->exp_obd->obd_osfs.os_ffree)
int qos_add_tgt(struct obd_device *obd, __u32 index)
RETURN(-ENOTCONN);
}
- down_write(&lov->lov_qos.lq_rw_sem);
- mutex_down(&lov->lov_lock);
- list_for_each_entry(oss, &lov->lov_qos.lq_oss_list, lqo_oss_list) {
+ cfs_down_write(&lov->lov_qos.lq_rw_sem);
+ cfs_mutex_down(&lov->lov_lock);
+ cfs_list_for_each_entry(oss, &lov->lov_qos.lq_oss_list, lqo_oss_list) {
if (obd_uuid_equals(&oss->lqo_uuid,
&exp->exp_connection->c_remote_uuid)) {
found++;
sizeof(oss->lqo_uuid));
} else {
/* Assume we have to move this one */
- list_del(&oss->lqo_oss_list);
+ cfs_list_del(&oss->lqo_oss_list);
}
oss->lqo_ost_count++;
/* Add sorted by # of OSTs. Find the first entry that we're
bigger than... */
- list_for_each_entry(temposs, &lov->lov_qos.lq_oss_list, lqo_oss_list) {
+ cfs_list_for_each_entry(temposs, &lov->lov_qos.lq_oss_list,
+ lqo_oss_list) {
if (oss->lqo_ost_count > temposs->lqo_ost_count)
break;
}
/* ...and add before it. If we're the first or smallest, temposs
points to the list head, and we add to the end. */
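/* For example, inserting an OSS with 3 OSTs into a list with counts
[5, 3, 1] breaks at the "1" entry, yielding [5, 3, 3, 1]. */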
- list_add_tail(&oss->lqo_oss_list, &temposs->lqo_oss_list);
+ cfs_list_add_tail(&oss->lqo_oss_list, &temposs->lqo_oss_list);
lov->lov_qos.lq_dirty = 1;
lov->lov_qos.lq_rr.lqr_dirty = 1;
oss->lqo_ost_count);
out:
- mutex_up(&lov->lov_lock);
- up_write(&lov->lov_qos.lq_rw_sem);
+ cfs_mutex_up(&lov->lov_lock);
+ cfs_up_write(&lov->lov_qos.lq_rw_sem);
RETURN(rc);
}
-int qos_del_tgt(struct obd_device *obd, __u32 index)
+int qos_del_tgt(struct obd_device *obd, struct lov_tgt_desc *tgt)
{
struct lov_obd *lov = &obd->u.lov;
struct lov_qos_oss *oss;
int rc = 0;
ENTRY;
- if (!lov->lov_tgts[index])
- RETURN(0);
-
- down_write(&lov->lov_qos.lq_rw_sem);
+ cfs_down_write(&lov->lov_qos.lq_rw_sem);
- oss = lov->lov_tgts[index]->ltd_qos.ltq_oss;
+ oss = tgt->ltd_qos.ltq_oss;
if (!oss)
GOTO(out, rc = -ENOENT);
if (oss->lqo_ost_count == 0) {
CDEBUG(D_QOS, "removing OSS %s\n",
obd_uuid2str(&oss->lqo_uuid));
- list_del(&oss->lqo_oss_list);
+ cfs_list_del(&oss->lqo_oss_list);
OBD_FREE_PTR(oss);
}
lov->lov_qos.lq_dirty = 1;
lov->lov_qos.lq_rr.lqr_dirty = 1;
out:
- up_write(&lov->lov_qos.lq_rw_sem);
+ cfs_up_write(&lov->lov_qos.lq_rw_sem);
RETURN(rc);
}
__u64 ba_max, ba_min, temp;
__u32 num_active;
int rc, i, prio_wide;
+ time_t now, age;
ENTRY;
if (!lov->lov_qos.lq_dirty)
GOTO(out, rc = -EAGAIN);
/* find bavail on each OSS */
- list_for_each_entry(oss, &lov->lov_qos.lq_oss_list, lqo_oss_list) {
+ cfs_list_for_each_entry(oss, &lov->lov_qos.lq_oss_list, lqo_oss_list) {
oss->lqo_bavail = 0;
}
lov->lov_qos.lq_active_oss_count = 0;
ba_min = (__u64)(-1);
ba_max = 0;
+ now = cfs_time_current_sec();
/* Calculate OST penalty per object */
/* (lov ref taken in alloc_qos) */
for (i = 0; i < lov->desc.ld_tgt_count; i++) {
lov->lov_tgts[i]->ltd_qos.ltq_penalty_per_obj =
(temp * prio_wide) >> 8;
- if (lov->lov_qos.lq_reset == 0)
+ age = (now - lov->lov_tgts[i]->ltd_qos.ltq_used) >> 3;
+ if (lov->lov_qos.lq_reset || age > 32 * lov->desc.ld_qos_maxage)
lov->lov_tgts[i]->ltd_qos.ltq_penalty = 0;
+ else if (age > lov->desc.ld_qos_maxage)
+ /* Decay the penalty by half for every 8x the update
+ * interval that the device has been idle. That gives
+ * lots of time for the statfs information to be
+ * updated (which the penalty is only a proxy for),
+ * and avoids penalizing OSS/OSTs under light load. */
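+ /* For example, with ld_qos_maxage = 5 an OST idle for 120s has
+ * age = 120 >> 3 = 15, so its penalty is shifted right by
+ * 15 / 5 = 3, i.e. decayed to 1/8 of its previous value. */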
+ lov->lov_tgts[i]->ltd_qos.ltq_penalty >>=
+ (age / lov->desc.ld_qos_maxage);
}
num_active = lov->lov_qos.lq_active_oss_count - 1;
}
/* Per-OSS penalty is prio * oss_avail / oss_osts / (num_oss - 1) / 2 */
- list_for_each_entry(oss, &lov->lov_qos.lq_oss_list, lqo_oss_list) {
+ cfs_list_for_each_entry(oss, &lov->lov_qos.lq_oss_list, lqo_oss_list) {
temp = oss->lqo_bavail >> 1;
do_div(temp, oss->lqo_ost_count * num_active);
oss->lqo_penalty_per_obj = (temp * prio_wide) >> 8;
- if (lov->lov_qos.lq_reset == 0)
+
+ age = (now - oss->lqo_used) >> 3;
+ if (lov->lov_qos.lq_reset || age > 32 * lov->desc.ld_qos_maxage)
oss->lqo_penalty = 0;
+ else if (age > lov->desc.ld_qos_maxage)
+ /* Decay the penalty by half for every 8x the update
+ * interval that the device has been idle. That gives
+ * lots of time for the statfs information to be
+ * updated (which the penalty is only a proxy for),
+ * and avoids penalizing OSS/OSTs under light load. */
+ oss->lqo_penalty >>= (age / lov->desc.ld_qos_maxage);
}
lov->lov_qos.lq_dirty = 0;
/* If all OSTs have almost the same amount of free space,
* do rr allocation for better creation performance */
lov->lov_qos.lq_same_space = 0;
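+ /* The test below reduces to ba_min > ba_max * (1 - lq_threshold_rr / 256),
+ * i.e. the relative free-space spread is below the rr threshold */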
- temp = ba_max - ba_min;
- ba_min = (ba_min * 51) >> 8; /* 51/256 = .20 */
- if (temp < ba_min) {
- /* Difference is less than 20% */
+ if ((ba_max * (256 - lov->lov_qos.lq_threshold_rr)) >> 8 < ba_min) {
lov->lov_qos.lq_same_space = 1;
/* Reset weights for the next time we enter qos mode */
- lov->lov_qos.lq_reset = 0;
+ lov->lov_qos.lq_reset = 1;
}
rc = 0;
lov->lov_tgts[index]->ltd_qos.ltq_penalty >>= 1;
oss->lqo_penalty >>= 1;
+ /* mark the OSS and OST as recently used */
+ lov->lov_tgts[index]->ltd_qos.ltq_used =
+ oss->lqo_used = cfs_time_current_sec();
+
/* Set max penalties for this OST and OSS */
lov->lov_tgts[index]->ltd_qos.ltq_penalty +=
lov->lov_tgts[index]->ltd_qos.ltq_penalty_per_obj *
lov->lov_qos.lq_active_oss_count;
/* Decrease all OSS penalties */
- list_for_each_entry(oss, &lov->lov_qos.lq_oss_list, lqo_oss_list) {
+ cfs_list_for_each_entry(oss, &lov->lov_qos.lq_oss_list, lqo_oss_list) {
if (oss->lqo_penalty < oss->lqo_penalty_per_obj)
oss->lqo_penalty = 0;
else
}
/* Do actual allocation. */
- down_write(&lov->lov_qos.lq_rw_sem);
+ cfs_down_write(&lov->lov_qos.lq_rw_sem);
/*
* Check again. While we were sleeping on @lq_rw_sem something could
*/
if (!lqr->lqr_dirty) {
LASSERT(lqr->lqr_pool.op_size);
- up_write(&lov->lov_qos.lq_rw_sem);
+ cfs_up_write(&lov->lov_qos.lq_rw_sem);
RETURN(0);
}
lqr->lqr_pool.op_count = real_count;
rc = lov_ost_pool_extend(&lqr->lqr_pool, real_count);
if (rc) {
- up_write(&lov->lov_qos.lq_rw_sem);
+ cfs_up_write(&lov->lov_qos.lq_rw_sem);
RETURN(rc);
}
for (i = 0; i < lqr->lqr_pool.op_count; i++)
/* Place all the OSTs from 1 OSS at the same time. */
placed = 0;
- list_for_each_entry(oss, &lov->lov_qos.lq_oss_list, lqo_oss_list) {
+ cfs_list_for_each_entry(oss, &lov->lov_qos.lq_oss_list, lqo_oss_list) {
int j = 0;
for (i = 0; i < lqr->lqr_pool.op_count; i++) {
if (lov->lov_tgts[src_pool->op_array[i]] &&
}
lqr->lqr_dirty = 0;
- up_write(&lov->lov_qos.lq_rw_sem);
+ cfs_up_write(&lov->lov_qos.lq_rw_sem);
if (placed != real_count) {
/* This should never happen */
}
}
+/**
+ * Check whether we can create the object on the OST referred to by ost_idx,
+ * and create it there if no stripe of this file is on that OST yet.
+ * \retval 0: the object was created.
+ * \retval other: the object was not created.
+ */
+static int lov_check_and_create_object(struct lov_obd *lov, int ost_idx,
+ struct lov_stripe_md *lsm,
+ struct lov_request *req,
+ struct obd_trans_info *oti)
+{
+ int stripe;
+ int rc = -EIO;
+ ENTRY;
+
+ CDEBUG(D_QOS, "Check and create on idx %d\n", ost_idx);
+ if (!lov->lov_tgts[ost_idx] ||
+ !lov->lov_tgts[ost_idx]->ltd_active)
+ RETURN(rc);
+
+ /* check whether an object has already been created on this OST */
+ for (stripe = 0; stripe < lsm->lsm_stripe_count; stripe++) {
+ /* already have object at this stripe */
+ if (ost_idx == lsm->lsm_oinfo[stripe]->loi_ost_idx)
+ break;
+ }
+
+ if (stripe >= lsm->lsm_stripe_count) {
+ req->rq_idx = ost_idx;
+ rc = obd_create(lov->lov_tgts[ost_idx]->ltd_exp,
+ req->rq_oi.oi_oa, &req->rq_oi.oi_md,
+ oti);
+ }
+ RETURN(rc);
+}
+
int qos_remedy_create(struct lov_request_set *set, struct lov_request *req)
{
struct lov_stripe_md *lsm = set->set_oi->oi_md;
struct lov_obd *lov = &set->set_exp->exp_obd->u.lov;
- unsigned ost_idx, ost_count = lov->desc.ld_tgt_count;
- int stripe, i, rc = -EIO;
+ unsigned ost_idx = 0, ost_count;
+ struct pool_desc *pool;
+ struct ost_pool *osts = NULL;
+ int i, rc = -EIO;
ENTRY;
- ost_idx = (req->rq_idx + lsm->lsm_stripe_count) % ost_count;
- for (i = 0; i < ost_count; i++, ost_idx = (ost_idx + 1) % ost_count) {
- if (!lov->lov_tgts[ost_idx] ||
- !lov->lov_tgts[ost_idx]->ltd_active)
- continue;
- /* check if objects has been created on this ost */
- for (stripe = 0; stripe < lsm->lsm_stripe_count; stripe++) {
- if (stripe == req->rq_stripe)
- continue;
- if (ost_idx == lsm->lsm_oinfo[stripe]->loi_ost_idx)
+ /* First check whether we can create the objects on the pool */
+ pool = lov_find_pool(lov, lsm->lsm_pool_name);
+ if (pool != NULL) {
+ cfs_down_read(&pool_tgt_rw_sem(pool));
+ osts = &(pool->pool_obds);
+ ost_count = osts->op_count;
+ for (i = 0; i < ost_count; i++) {
+ ost_idx = osts->op_array[i];
+ rc = lov_check_and_create_object(lov, ost_idx, lsm, req,
+ set->set_oti);
+ if (rc == 0)
break;
}
+ cfs_up_read(&pool_tgt_rw_sem(pool));
+ lov_pool_putref(pool);
+ RETURN(rc);
+ }
- if (stripe >= lsm->lsm_stripe_count) {
- req->rq_idx = ost_idx;
- rc = obd_create(lov->lov_tgts[ost_idx]->ltd_exp,
- req->rq_oi.oi_oa, &req->rq_oi.oi_md,
- set->set_oti);
- if (!rc)
- break;
- }
+ ost_count = lov->desc.ld_tgt_count;
+ /* Then check whether we can create the objects on other OSTs */
+ ost_idx = (req->rq_idx + lsm->lsm_stripe_count) % ost_count;
+ for (i = 0; i < ost_count; i++, ost_idx = (ost_idx + 1) % ost_count) {
+ rc = lov_check_and_create_object(lov, ost_idx, lsm, req,
+ set->set_oti);
+
+ if (rc == 0)
+ break;
}
+
RETURN(rc);
}
osts = &(lov->lov_packed);
lqr = &(lov->lov_qos.lq_rr);
} else {
- read_lock(&pool_tgt_rwlock(pool));
+ cfs_down_read(&pool_tgt_rw_sem(pool));
osts = &(pool->pool_obds);
lqr = &(pool->pool_rr);
}
if (*stripe_cnt > 1 && (osts->op_count % (*stripe_cnt)) != 1)
++lqr->lqr_offset_idx;
}
- down_read(&lov->lov_qos.lq_rw_sem);
+ cfs_down_read(&lov->lov_qos.lq_rw_sem);
ost_start_idx_temp = lqr->lqr_start_idx;
repeat_find:
goto repeat_find;
}
- up_read(&lov->lov_qos.lq_rw_sem);
+ cfs_up_read(&lov->lov_qos.lq_rw_sem);
*stripe_cnt = idx_pos - idx_arr;
out:
- if (pool != NULL)
- read_unlock(&pool_tgt_rwlock(pool));
+ if (pool != NULL) {
+ cfs_up_read(&pool_tgt_rw_sem(pool));
+ /* put back the reference taken by lov_find_pool() */
+ lov_pool_putref(pool);
+ }
+
RETURN(rc);
}
if (pool == NULL) {
osts = &(lov->lov_packed);
} else {
- read_lock(&pool_tgt_rwlock(pool));
+ cfs_down_read(&pool_tgt_rw_sem(pool));
osts = &(pool->pool_obds);
}
if (OBD_FAIL_CHECK(OBD_FAIL_MDS_OSC_PRECREATE) && ost_idx == 0)
continue;
- /* Drop slow OSCs if we can, but not for requested start idx */
+ /* Drop slow OSCs if we can, but not for requested start idx.
+ *
+ * This means "if OSC is slow and it is not the requested
+ * start OST, then it can be skipped, otherwise skip it only
+ * if it is inactive/recovering/out-of-space." */
if ((obd_precreate(lov->lov_tgts[ost_idx]->ltd_exp) > speed) &&
- (i != 0 || speed < 2))
+ (i != 0 || speed >= 2))
continue;
*idx_pos = ost_idx;
lsm->lsm_stripe_count);
rc = -EFBIG;
out:
- if (pool != NULL)
- read_unlock(&pool_tgt_rwlock(pool));
+ if (pool != NULL) {
+ cfs_up_read(&pool_tgt_rw_sem(pool));
+ /* put back the reference taken by lov_find_pool() */
+ lov_pool_putref(pool);
+ }
+
RETURN(rc);
}
char *poolname, int flags)
{
struct lov_obd *lov = &exp->exp_obd->u.lov;
- static time_t last_warn = 0;
- time_t now = cfs_time_current_sec();
- __u64 total_bavail, total_weight = 0;
- int nfound, good_osts, i, warn = 0, rc = 0;
+ __u64 total_weight = 0;
+ int nfound, good_osts, i, rc = 0;
int stripe_cnt_min = min_stripe_count(*stripe_cnt, flags);
struct pool_desc *pool;
struct ost_pool *osts;
osts = &(lov->lov_packed);
lqr = &(lov->lov_qos.lq_rr);
} else {
- read_lock(&pool_tgt_rwlock(pool));
+ cfs_down_read(&pool_tgt_rw_sem(pool));
osts = &(pool->pool_obds);
lqr = &(pool->pool_rr);
}
- lov_getref(exp->exp_obd);
+ obd_getref(exp->exp_obd);
+
+ /* wait for fresh statfs info if needed; the rpcs are sent in
+ * lov_create() */
+ qos_statfs_update(exp->exp_obd,
+ cfs_time_shift_64(-2 * lov->desc.ld_qos_maxage), 1);
/* Detect -EAGAIN early, before expensive lock is taken. */
if (!lov->lov_qos.lq_dirty && lov->lov_qos.lq_same_space)
GOTO(out_nolock, rc = -EAGAIN);
/* Do actual allocation, use write lock here. */
- down_write(&lov->lov_qos.lq_rw_sem);
+ cfs_down_write(&lov->lov_qos.lq_rw_sem);
/*
* Check again, while we were sleeping on @lq_rw_sem things could
if (rc)
GOTO(out, rc);
- total_bavail = 0;
good_osts = 0;
- /* Warn users about zero available space/inode every 30 min */
- if (cfs_time_sub(now, last_warn) > 60 * 30)
- warn = 1;
/* Find all the OSTs that are valid stripe candidates */
for (i = 0; i < osts->op_count; i++) {
- __u64 bavail;
-
if (!lov->lov_tgts[osts->op_array[i]] ||
!lov->lov_tgts[osts->op_array[i]]->ltd_active)
continue;
- bavail = TGT_BAVAIL(osts->op_array[i]);
- if (!bavail) {
- if (warn) {
- CDEBUG(D_QOS, "no free space on %s\n",
- obd_uuid2str(&lov->lov_tgts[osts->op_array[i]]->ltd_uuid));
- last_warn = now;
- }
- continue;
- }
- if (!TGT_FFREE(osts->op_array[i])) {
- if (warn) {
- CDEBUG(D_QOS, "no free inodes on %s\n",
- obd_uuid2str(&lov->lov_tgts[osts->op_array[i]]->ltd_uuid));
- last_warn = now;
- }
- continue;
- }
/* Fail check before osc_precreate() is called
so we can 'fail' only a single OSC. */
lov->lov_tgts[osts->op_array[i]]->ltd_qos.ltq_usable = 1;
qos_calc_weight(lov, osts->op_array[i]);
- total_bavail += bavail;
total_weight += lov->lov_tgts[osts->op_array[i]]->ltd_qos.ltq_weight;
good_osts++;
if (good_osts < stripe_cnt_min)
GOTO(out, rc = -EAGAIN);
- if (!total_bavail)
- GOTO(out, rc = -ENOSPC);
-
/* We have enough osts */
if (good_osts < *stripe_cnt)
*stripe_cnt = good_osts;
LASSERT(nfound == *stripe_cnt);
out:
- up_write(&lov->lov_qos.lq_rw_sem);
+ cfs_up_write(&lov->lov_qos.lq_rw_sem);
out_nolock:
- if (pool != NULL)
- read_unlock(&pool_tgt_rwlock(pool));
+ if (pool != NULL) {
+ cfs_up_read(&pool_tgt_rw_sem(pool));
+ /* put back the reference taken by lov_find_pool() */
+ lov_pool_putref(pool);
+ }
if (rc == -EAGAIN)
rc = alloc_rr(lov, idx_arr, stripe_cnt, poolname, flags);
- lov_putref(exp->exp_obd);
+ obd_putref(exp->exp_obd);
RETURN(rc);
}
/* Find a small number of stripes we can use
(up to # of active osts). */
stripes = 1;
- lov_getref(exp->exp_obd);
for (i = 0; i < lov->desc.ld_tgt_count; i++) {
if (!lov->lov_tgts[i] ||
!lov->lov_tgts[i]->ltd_active)
break;
stripes++;
}
- lov_putref(exp->exp_obd);
if (stripes < stripes_def)
stripes = stripes_def;
lsm = set->set_oi->oi_md;
lsm->lsm_object_id = src_oa->o_id;
- lsm->lsm_object_gr = src_oa->o_gr;
+ lsm->lsm_object_seq = src_oa->o_seq;
if (!lsm->lsm_stripe_size)
lsm->lsm_stripe_size = lov->desc.ld_default_stripe_size;
req->rq_stripe = i;
/* create data objects with "parent" OA */
memcpy(req->rq_oi.oi_oa, src_oa, sizeof(*req->rq_oi.oi_oa));
+ req->rq_oi.oi_cb_up = cb_create_update;
/* XXX When we start creating objects on demand, we need to
* make sure that we always create the object on the
if (stripes < lsm->lsm_stripe_count)
qos_shrink_lsm(set);
+ if (OBD_FAIL_CHECK(OBD_FAIL_MDS_LOV_PREP_CREATE)) {
+ qos_shrink_lsm(set);
+ rc = -EIO;
+ }
if (oti && (src_oa->o_valid & OBD_MD_FLCOOKIE)) {
oti_alloc_cookies(oti, set->set_count);
ENTRY;
lov->lov_qos.lq_dirty = 1;
}
+
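+/* Clear the statfs-in-progress flag and wake any threads blocked in
+ * qos_statfs_update() waiting for the statfs rpcs to complete. */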
+void qos_statfs_done(struct lov_obd *lov)
+{
+ LASSERT(lov->lov_qos.lq_statfs_in_progress);
+ cfs_down_write(&lov->lov_qos.lq_rw_sem);
+ lov->lov_qos.lq_statfs_in_progress = 0;
+ /* wake up any threads waiting for the statfs rpcs to complete */
+ cfs_waitq_signal(&lov->lov_qos.lq_statfs_waitq);
+ cfs_up_write(&lov->lov_qos.lq_rw_sem);
+}
+
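+/* A waiter may proceed once the statfs rpcs have completed or the
+ * cached osfs data is already fresh enough for max_age. */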
+static int qos_statfs_ready(struct obd_device *obd, __u64 max_age)
+{
+ struct lov_obd *lov = &obd->u.lov;
+ int rc;
+ ENTRY;
+ cfs_down_read(&lov->lov_qos.lq_rw_sem);
+ rc = lov->lov_qos.lq_statfs_in_progress == 0 ||
+ cfs_time_beforeq_64(max_age, obd->obd_osfs_age);
+ cfs_up_read(&lov->lov_qos.lq_rw_sem);
+ RETURN(rc);
+}
+
+/*
+ * Update the statfs data if the current osfs age is older than max_age.
+ * If wait is not set, we are called from lov_create() and should just
+ * issue the rpcs without waiting for them to complete. If wait is set,
+ * we are called from alloc_qos() and must also wait for the rpcs to
+ * complete before returning.
+ */
+void qos_statfs_update(struct obd_device *obd, __u64 max_age, int wait)
+{
+ struct lov_obd *lov = &obd->u.lov;
+ struct obd_info *oinfo;
+ int rc = 0;
+ struct ptlrpc_request_set *set = NULL;
+ ENTRY;
+
+ if (cfs_time_beforeq_64(max_age, obd->obd_osfs_age))
+ /* statfs data is recent enough, no need to refresh it */
+ RETURN_EXIT;
+
+ if (!wait && lov->lov_qos.lq_statfs_in_progress)
+ /* statfs already in progress */
+ RETURN_EXIT;
+
+ cfs_down_write(&lov->lov_qos.lq_rw_sem);
+ if (lov->lov_qos.lq_statfs_in_progress) {
+ cfs_up_write(&lov->lov_qos.lq_rw_sem);
+ GOTO(out, rc = 0);
+ }
+ /* no statfs in flight, send rpcs */
+ lov->lov_qos.lq_statfs_in_progress = 1;
+ cfs_up_write(&lov->lov_qos.lq_rw_sem);
+
+ if (wait)
+ CDEBUG(D_QOS, "%s: did not manage to get fresh statfs data "
+ "in a timely manner (osfs age "LPU64", max age "LPU64")"
+ ", sending new statfs rpcs\n",
+ obd_uuid2str(&lov->desc.ld_uuid), obd->obd_osfs_age,
+ max_age);
+
+ /* need to send statfs rpcs */
+ CDEBUG(D_QOS, "sending new statfs requests\n");
+ memset(lov->lov_qos.lq_statfs_data, 0,
+ sizeof(*lov->lov_qos.lq_statfs_data));
+ oinfo = &lov->lov_qos.lq_statfs_data->lsd_oi;
+ oinfo->oi_osfs = &lov->lov_qos.lq_statfs_data->lsd_statfs;
+ oinfo->oi_flags = OBD_STATFS_NODELAY;
+ set = ptlrpc_prep_set();
+ if (!set)
+ GOTO(out_failed, rc = -ENOMEM);
+
+ rc = obd_statfs_async(obd, oinfo, max_age, set);
+ if (rc || cfs_list_empty(&set->set_requests)) {
+ if (rc)
+ CWARN("statfs failed with %d\n", rc);
+ GOTO(out_failed, rc);
+ }
+ /* send requests via ptlrpcd */
+ oinfo->oi_flags |= OBD_STATFS_PTLRPCD;
+ ptlrpcd_add_rqset(set);
+ GOTO(out, rc);
+
+out_failed:
+ cfs_down_write(&lov->lov_qos.lq_rw_sem);
+ lov->lov_qos.lq_statfs_in_progress = 0;
+ /* wake up any threads waiting for the statfs rpcs to complete */
+ cfs_waitq_signal(&lov->lov_qos.lq_statfs_waitq);
+ cfs_up_write(&lov->lov_qos.lq_rw_sem);
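+ /* no rpcs are in flight, so there is nothing to wait for below */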
+ wait = 0;
+out:
+ if (set)
+ ptlrpc_set_destroy(set);
+ if (wait) {
+ struct l_wait_info lwi = { 0 };
+ CDEBUG(D_QOS, "waiting for statfs requests to complete\n");
+ l_wait_event(lov->lov_qos.lq_statfs_waitq,
+ qos_statfs_ready(obd, max_age), &lwi);
+ if (cfs_time_before_64(obd->obd_osfs_age, max_age))
+ CDEBUG(D_QOS, "%s: still no fresh statfs data after "
+ "waiting (osfs age "LPU64", max age "
+ LPU64")\n",
+ obd_uuid2str(&lov->desc.ld_uuid),
+ obd->obd_osfs_age, max_age);
+ }
+}