#include <obd_lov.h>
#include "lov_internal.h"
-
-/* #define QOS_DEBUG 1 */
+/* #define QOS_DEBUG 1 */
#define D_QOS D_OTHER
-#define TGT_BAVAIL(i) (lov->lov_tgts[i]->ltd_exp->exp_obd->obd_osfs.os_bavail * \
- lov->lov_tgts[i]->ltd_exp->exp_obd->obd_osfs.os_bsize)
-#define TGT_FFREE(i) (lov->lov_tgts[i]->ltd_exp->exp_obd->obd_osfs.os_ffree)
+#define TGT_BAVAIL(i) (lov->lov_tgts[i]->ltd_exp->exp_obd->obd_osfs.os_bavail * \
+ lov->lov_tgts[i]->ltd_exp->exp_obd->obd_osfs.os_bsize)
+#define TGT_FFREE(i) (lov->lov_tgts[i]->ltd_exp->exp_obd->obd_osfs.os_ffree)
int qos_add_tgt(struct obd_device *obd, __u32 index)
ENTRY;
/* We only need this QOS struct on MDT, not clients - but we may not
- have registered the lov's observer yet, so there's no way to check
- here. */
-
+ have registered the LOV's observer yet, so there's no way to know */
if (!exp || !exp->exp_connection) {
CERROR("Missing connection\n");
RETURN(-ENOTCONN);
int rc = 0;
ENTRY;
+ if (!lov->lov_tgts[index])
+ RETURN(0);
+
down_write(&lov->lov_qos.lq_rw_sem);
oss = lov->lov_tgts[index]->ltd_qos.ltq_oss;
__u32 num_active;
int rc, i, prio_wide;
ENTRY;
-
+
if (!lov->lov_qos.lq_dirty)
GOTO(out, rc = 0);
-
+
num_active = lov->desc.ld_active_tgt_count - 1;
if (num_active < 1)
GOTO(out, rc = -EAGAIN);
/* If there's only 1 OSS, we can't penalize it, so instead
we have to double the OST penalty */
num_active = 1;
- for (i = 0; i < lov->desc.ld_tgt_count; i++)
+ for (i = 0; i < lov->desc.ld_tgt_count; i++) {
+ if (lov->lov_tgts[i] == NULL)
+ continue;
lov->lov_tgts[i]->ltd_qos.ltq_penalty_per_obj <<= 1;
+ }
}
/* Per-OSS penalty is prio * oss_avail / oss_osts / (num_oss - 1) / 2 */
static int qos_calc_weight(struct lov_obd *lov, int i)
{
__u64 temp, temp2;
-
+
/* Final ost weight = TGT_BAVAIL - ost_penalty - oss_penalty */
temp = TGT_BAVAIL(i);
temp2 = lov->lov_tgts[i]->ltd_qos.ltq_penalty +
lov->lov_tgts[index]->ltd_qos.ltq_usable = 0;
oss = lov->lov_tgts[index]->ltd_qos.ltq_oss;
-
+
/* Decay old penalty by half (we're adding max penalty, and don't
want it to run away.) */
lov->lov_tgts[index]->ltd_qos.ltq_penalty >>= 1;
lov->desc.ld_active_tgt_count;
oss->lqo_penalty += oss->lqo_penalty_per_obj *
lov->lov_qos.lq_active_oss_count;
-
+
/* Decrease all OSS penalties */
list_for_each_entry(oss, &lov->lov_qos.lq_oss_list, lqo_oss_list) {
if (oss->lqo_penalty < oss->lqo_penalty_per_obj)
CDEBUG(D_QOS, "recalc tgt %d avail="LPU64
" ostppo="LPU64" ostp="LPU64" ossppo="LPU64
" ossp="LPU64" wt="LPU64"\n",
- i, TGT_BAVAIL(i)>>10,
- lov->lov_tgts[i]->ltd_qos.ltq_penalty_per_obj>>10,
- lov->lov_tgts[i]->ltd_qos.ltq_penalty>>10,
- lov->lov_tgts[i]->ltd_qos.ltq_oss->lqo_penalty_per_obj>>10,
- lov->lov_tgts[i]->ltd_qos.ltq_oss->lqo_penalty>>10,
- lov->lov_tgts[i]->ltd_qos.ltq_weight>>10);
+ i, TGT_BAVAIL(i) >> 10,
+ lov->lov_tgts[i]->ltd_qos.ltq_penalty_per_obj >> 10,
+ lov->lov_tgts[i]->ltd_qos.ltq_penalty >> 10,
+ lov->lov_tgts[i]->ltd_qos.ltq_oss->lqo_penalty_per_obj >> 10,
+ lov->lov_tgts[i]->ltd_qos.ltq_oss->lqo_penalty >> 10,
+ lov->lov_tgts[i]->ltd_qos.ltq_weight >> 10);
#endif
}
static int qos_calc_rr(struct lov_obd *lov)
{
struct lov_qos_oss *oss;
- unsigned ost_count, placed;
+ unsigned ost_count, placed, real_count;
int i;
ENTRY;
RETURN(0);
}
+ /* Do actual allocation. */
down_write(&lov->lov_qos.lq_rw_sem);
+
+ /*
+ * Check again. While we were sleeping on @lq_rw_sem something could
+ * change.
+ */
+ if (!lov->lov_qos.lq_dirty_rr) {
+ LASSERT(lov->lov_qos.lq_rr_size);
+ up_write(&lov->lov_qos.lq_rw_sem);
+ RETURN(0);
+ }
+
ost_count = lov->desc.ld_tgt_count;
if (lov->lov_qos.lq_rr_size)
RETURN(-ENOMEM);
}
- for (i = 0; i < ost_count; i++)
+ real_count = 0;
+ for (i = 0; i < ost_count; i++) {
lov->lov_qos.lq_rr_array[i] = LOV_QOS_EMPTY;
+ if (lov->lov_tgts[i])
+ real_count++;
+ }
/* Place all the OSTs from 1 OSS at the same time. */
placed = 0;
list_for_each_entry(oss, &lov->lov_qos.lq_oss_list, lqo_oss_list) {
int j = 0;
for (i = 0; i < ost_count; i++) {
- if (lov->lov_tgts[i]->ltd_qos.ltq_oss == oss) {
+ if(lov->lov_tgts[i] &&
+ lov->lov_tgts[i]->ltd_qos.ltq_oss == oss) {
/* Evenly space these OSTs across arrayspace */
int next = j * ost_count / oss->lqo_ost_count;
+ LASSERT(next < ost_count);
while (lov->lov_qos.lq_rr_array[next] !=
LOV_QOS_EMPTY)
next = (next + 1) % ost_count;
lov->lov_qos.lq_rr_array[next] = i;
j++;
placed++;
- }
+ }
}
LASSERT(j == oss->lqo_ost_count);
}
lov->lov_qos.lq_dirty_rr = 0;
up_write(&lov->lov_qos.lq_rw_sem);
- if (placed != ost_count) {
+ if (placed != real_count) {
/* This should never happen */
- LCONSOLE_ERROR("Failed to place all OSTs in the round-robin "
- "list (%d of %d).\n", placed, ost_count);
+ LCONSOLE_ERROR_MSG(0x14e, "Failed to place all OSTs in the "
+ "round-robin list (%d of %d).\n",
+ placed, real_count);
for (i = 0; i < ost_count; i++) {
LCONSOLE(D_WARNING, "rr #%d ost idx=%d\n", i,
lov->lov_qos.lq_rr_array[i]);
lov->lov_qos.lq_dirty_rr = 1;
RETURN(-EAGAIN);
}
-
+
#ifdef QOS_DEBUG
for (i = 0; i < ost_count; i++) {
- LCONSOLE(D_QOS, "rr #%d ost idx=%d\n", i,
+ LCONSOLE(D_QOS, "rr #%d ost idx=%d\n", i,
lov->lov_qos.lq_rr_array[i]);
}
#endif
-
+
RETURN(0);
}
CWARN("using fewer stripes for object "LPX64": old %u new %u\n",
lsm->lsm_object_id, lsm->lsm_stripe_count, set->set_count);
+ LASSERT(lsm->lsm_stripe_count >= set->set_count);
- oldsize = lov_stripe_md_size(lsm->lsm_stripe_count);
newsize = lov_stripe_md_size(set->set_count);
OBD_ALLOC(lsm_new, newsize);
if (lsm_new != NULL) {
- memcpy(lsm_new, lsm, newsize);
+ int i;
+ memcpy(lsm_new, lsm, sizeof(*lsm));
+ for (i = 0; i < lsm->lsm_stripe_count; i++) {
+ if (i < set->set_count) {
+ lsm_new->lsm_oinfo[i] = lsm->lsm_oinfo[i];
+ continue;
+ }
+ OBD_SLAB_FREE(lsm->lsm_oinfo[i], lov_oinfo_slab,
+ sizeof(struct lov_oinfo));
+ }
lsm_new->lsm_stripe_count = set->set_count;
- OBD_FREE(lsm, oldsize);
+ OBD_FREE(lsm, sizeof(struct lov_stripe_md) +
+ lsm->lsm_stripe_count * sizeof(struct lov_oinfo *));
set->set_oi->oi_md = lsm_new;
} else {
- CWARN("'leaking' %d bytes\n", oldsize - newsize);
+ CWARN("'leaking' few bytes\n");
}
}
for (stripe = 0; stripe < lsm->lsm_stripe_count; stripe++) {
if (stripe == req->rq_stripe)
continue;
- if (ost_idx == lsm->lsm_oinfo[stripe].loi_ost_idx)
+ if (ost_idx == lsm->lsm_oinfo[stripe]->loi_ost_idx)
break;
}
RETURN(rc);
}
+static int min_stripe_count(int stripe_cnt, int flags)
+{
+ return (flags & LOV_USES_DEFAULT_STRIPE ?
+ stripe_cnt - (stripe_cnt / 4) : stripe_cnt);
+}
+
#define LOV_CREATE_RESEED_MULT 4
#define LOV_CREATE_RESEED_MIN 1000
/* Allocate objects on osts with round-robin algorithm */
-static int alloc_rr(struct lov_obd *lov, int *idx_arr, int *stripe_cnt)
+static int alloc_rr(struct lov_obd *lov, int *idx_arr, int *stripe_cnt,
+ int flags)
{
unsigned array_idx, ost_count = lov->desc.ld_tgt_count;
unsigned ost_active_count = lov->desc.ld_active_tgt_count;
- int i, *idx_pos = idx_arr;
+ int i, *idx_pos;
__u32 ost_idx;
+ int ost_start_idx_temp;
+ int speed = 0;
+ int stripe_cnt_min = min_stripe_count(*stripe_cnt, flags);
ENTRY;
i = qos_calc_rr(lov);
- if (i)
+ if (i)
RETURN(i);
if (--lov->lov_start_count <= 0) {
lov->lov_start_count =
(LOV_CREATE_RESEED_MIN / max(ost_active_count, 1U) +
LOV_CREATE_RESEED_MULT) * max(ost_active_count, 1U);
- } else if (*stripe_cnt >= ost_active_count ||
+ } else if (stripe_cnt_min >= ost_active_count ||
lov->lov_start_idx > ost_count) {
/* If we have allocated from all of the OSTs, slowly
precess the next start */
lov->lov_start_idx %= ost_count;
- ++lov->lov_offset_idx;
+ if (*stripe_cnt > 1 && (ost_active_count % (*stripe_cnt)) != 1)
+ ++lov->lov_offset_idx;
}
+ down_read(&lov->lov_qos.lq_rw_sem);
+ ost_start_idx_temp = lov->lov_start_idx;
+
+repeat_find:
array_idx = (lov->lov_start_idx + lov->lov_offset_idx) % ost_count;
+ idx_pos = idx_arr;
#ifdef QOS_DEBUG
CDEBUG(D_QOS, "want %d startidx %d startcnt %d offset %d arrayidx %d\n",
- *stripe_cnt, lov->lov_start_idx, lov->lov_start_count,
+ stripe_cnt_min, lov->lov_start_idx, lov->lov_start_count,
lov->lov_offset_idx, array_idx);
#endif
- down_read(&lov->lov_qos.lq_rw_sem);
for (i = 0; i < ost_count; i++, array_idx=(array_idx + 1) % ost_count) {
++lov->lov_start_idx;
ost_idx = lov->lov_qos.lq_rr_array[array_idx];
#ifdef QOS_DEBUG
CDEBUG(D_QOS, "#%d strt %d act %d strp %d ary %d idx %d\n",
- i, lov->lov_start_idx,
- lov->lov_tgts[ost_idx] ?
+ i, lov->lov_start_idx,
+ ((ost_idx != LOV_QOS_EMPTY) && lov->lov_tgts[ost_idx]) ?
lov->lov_tgts[ost_idx]->ltd_active : 0,
idx_pos - idx_arr, array_idx, ost_idx);
#endif
- if (!lov->lov_tgts[ost_idx] ||
- !lov->lov_tgts[ost_idx]->ltd_active)
+ if ((ost_idx == LOV_QOS_EMPTY) || !lov->lov_tgts[ost_idx] ||
+ !lov->lov_tgts[ost_idx]->ltd_active)
+ continue;
+ /* Fail check before osc_precreate() is called
+ so we can only 'fail' a single OSC. */
+ if (OBD_FAIL_CHECK(OBD_FAIL_MDS_OSC_PRECREATE) && ost_idx == 0)
+ continue;
+
+ /* Drop slow OSCs if we can */
+ if (obd_precreate(lov->lov_tgts[ost_idx]->ltd_exp) > speed)
continue;
+
*idx_pos = ost_idx;
idx_pos++;
/* We have enough stripes */
- if (idx_pos - idx_arr == *stripe_cnt)
+ if (idx_pos - idx_arr == *stripe_cnt)
break;
}
+ if ((speed < 2) && (idx_pos - idx_arr < stripe_cnt_min)) {
+ /* Try again, allowing slower OSCs */
+ speed++;
+ lov->lov_start_idx = ost_start_idx_temp;
+ goto repeat_find;
+ }
+
up_read(&lov->lov_qos.lq_rw_sem);
*stripe_cnt = idx_pos - idx_arr;
int *idx_arr)
{
unsigned ost_idx, ost_count = lov->desc.ld_tgt_count;
- int i, *idx_pos = idx_arr;
+ int i, *idx_pos;
+ int speed = 0;
ENTRY;
- ost_idx = lsm->lsm_oinfo[0].loi_ost_idx;
+repeat_find:
+ idx_pos = idx_arr;
+ ost_idx = lsm->lsm_oinfo[0]->loi_ost_idx;
for (i = 0; i < ost_count; i++, ost_idx = (ost_idx + 1) % ost_count) {
if (!lov->lov_tgts[ost_idx] ||
!lov->lov_tgts[ost_idx]->ltd_active) {
continue;
}
+
+ /* Fail check before osc_precreate() is called
+ so we can only 'fail' a single OSC. */
+ if (OBD_FAIL_CHECK(OBD_FAIL_MDS_OSC_PRECREATE) && ost_idx == 0)
+ continue;
+
+ /* Drop slow OSCs if we can */
+ if ((obd_precreate(lov->lov_tgts[ost_idx]->ltd_exp) > speed) &&
+ (i != 0 || speed < 2))
+ continue;
+
*idx_pos = ost_idx;
idx_pos++;
- /* got enough ost */
+ /* We have enough stripes */
if (idx_pos - idx_arr == lsm->lsm_stripe_count)
RETURN(0);
}
+ if (speed < 2) {
+ /* Try again, allowing slower OSCs */
+ speed++;
+ goto repeat_find;
+ }
+
/* If we were passed specific striping params, then a failure to
* meet those requirements is an error, since we can't reallocate
* that memory (it might be part of a larger array or something).
*
* We can only get here if lsm_stripe_count was originally > 1.
*/
- CERROR("can't lstripe objid "LPX64": have "LPSZ" want %u\n",
- lsm->lsm_object_id, idx_pos - idx_arr, lsm->lsm_stripe_count);
+ CERROR("can't lstripe objid "LPX64": have %d want %u\n",
+ lsm->lsm_object_id, (int)(idx_pos - idx_arr),
+ lsm->lsm_stripe_count);
RETURN(-EFBIG);
}
- free space
- network resources (shared OSS's)
*/
-static int alloc_qos(struct obd_export *exp, int *idx_arr, int *stripe_cnt)
+static int alloc_qos(struct obd_export *exp, int *idx_arr, int *stripe_cnt,
+ int flags)
{
struct lov_obd *lov = &exp->exp_obd->u.lov;
static time_t last_warn = 0;
__u64 total_bavail, total_weight = 0;
__u32 ost_count;
int nfound, good_osts, i, warn = 0, rc = 0;
+ int stripe_cnt_min = min_stripe_count(*stripe_cnt, flags);
ENTRY;
+ if (stripe_cnt_min < 1)
+ GOTO(out, rc = -EINVAL);
+
lov_getref(exp->exp_obd);
+
+ /* Detect -EAGAIN early, before expensive lock is taken. */
+ if (!lov->lov_qos.lq_dirty && lov->lov_qos.lq_same_space)
+ GOTO(out, rc = -EAGAIN);
+
+ /* Do actual allocation, use write lock here. */
down_write(&lov->lov_qos.lq_rw_sem);
+ /*
+ * Check again, while we were sleeping on @lq_rw_sem things could
+ * change.
+ */
+ if (!lov->lov_qos.lq_dirty && lov->lov_qos.lq_same_space) {
+ up_write(&lov->lov_qos.lq_rw_sem);
+ GOTO(out, rc = -EAGAIN);
+ }
ost_count = lov->desc.ld_tgt_count;
- if (lov->desc.ld_active_tgt_count < 2)
- GOTO(out, rc = -EAGAIN);
+ if (lov->desc.ld_active_tgt_count < 2)
+ GOTO(out_up_write, rc = -EAGAIN);
rc = qos_calc_ppo(exp->exp_obd);
if (rc)
- GOTO(out, rc);
+ GOTO(out_up_write, rc);
total_bavail = 0;
good_osts = 0;
bavail = TGT_BAVAIL(i);
if (!bavail) {
if (warn) {
- CDEBUG(D_QOS, "no free space on %s\n",
- obd_uuid2str(&lov->lov_tgts[i]->ltd_uuid));
+ CDEBUG(D_QOS, "no free space on %s\n",
+ obd_uuid2str(&lov->lov_tgts[i]->ltd_uuid));
last_warn = now;
}
continue;
}
if (!TGT_FFREE(i)) {
if (warn) {
- CDEBUG(D_QOS, "no free inodes on %s\n",
- obd_uuid2str(&lov->lov_tgts[i]->ltd_uuid));
+ CDEBUG(D_QOS, "no free inodes on %s\n",
+ obd_uuid2str(&lov->lov_tgts[i]->ltd_uuid));
last_warn = now;
}
continue;
}
+ /* Fail check before osc_precreate() is called
+ so we can only 'fail' a single OSC. */
+ if (OBD_FAIL_CHECK(OBD_FAIL_MDS_OSC_PRECREATE) && i == 0)
+ continue;
+
+ if (obd_precreate(lov->lov_tgts[i]->ltd_exp) > 2)
+ continue;
+
lov->lov_tgts[i]->ltd_qos.ltq_usable = 1;
qos_calc_weight(lov, i);
total_bavail += bavail;
good_osts++;
}
+ if (good_osts < stripe_cnt_min)
+ GOTO(out_up_write, rc = -EAGAIN);
+
if (!total_bavail)
- GOTO(out, rc = -ENOSPC);
+ GOTO(out_up_write, rc = -ENOSPC);
- /* if we don't have enough good OSTs, we reduce the stripe count. */
+ /* We have enough good OSTs; cap the stripe count at the number available */
if (good_osts < *stripe_cnt)
*stripe_cnt = good_osts;
if (!*stripe_cnt)
- GOTO(out, rc = -EAGAIN);
+ GOTO(out_up_write, rc = -EAGAIN);
/* Find enough OSTs with weighted random allocation. */
nfound = 0;
while (nfound < *stripe_cnt) {
- __u64 rand, cur_weight;
+ __u64 rand, cur_weight = 0;
- cur_weight = 0;
rc = -ENODEV;
if (total_weight) {
rand |= ll_rand() % (unsigned)total_weight;
else
rand |= ll_rand();
-
#else
rand = ((__u64)ll_rand() << 32 | ll_rand()) %
total_weight;
/* On average, this will hit larger-weighted osts more often.
0-weight osts will always get used last (only when rand=0).*/
for (i = 0; i < ost_count; i++) {
- if (!lov->lov_tgts[i]->ltd_qos.ltq_usable)
+ if (!lov->lov_tgts[i] ||
+ !lov->lov_tgts[i]->ltd_qos.ltq_usable)
continue;
+
cur_weight += lov->lov_tgts[i]->ltd_qos.ltq_weight;
if (cur_weight >= rand) {
#ifdef QOS_DEBUG
}
}
LASSERT(nfound == *stripe_cnt);
-
-out:
+
+out_up_write:
up_write(&lov->lov_qos.lq_rw_sem);
-
+
+out:
if (rc == -EAGAIN)
- rc = alloc_rr(lov, idx_arr, stripe_cnt);
-
+ rc = alloc_rr(lov, idx_arr, stripe_cnt, flags);
+
lov_putref(exp->exp_obd);
RETURN(rc);
}
/* return new alloced stripe count on success */
static int alloc_idx_array(struct obd_export *exp, struct lov_stripe_md *lsm,
- int newea, int **idx_arr, int *arr_cnt)
+ int newea, int **idx_arr, int *arr_cnt, int flags)
{
struct lov_obd *lov = &exp->exp_obd->u.lov;
int stripe_cnt = lsm->lsm_stripe_count;
tmp_arr[i] = -1;
if (newea ||
- lsm->lsm_oinfo[0].loi_ost_idx >= lov->desc.ld_tgt_count)
- rc = alloc_qos(exp, tmp_arr, &stripe_cnt);
+ lsm->lsm_oinfo[0]->loi_ost_idx >= lov->desc.ld_tgt_count)
+ rc = alloc_qos(exp, tmp_arr, &stripe_cnt, flags);
else
rc = alloc_specific(lov, lsm, tmp_arr);
struct obdo *src_oa = set->set_oi->oi_oa;
struct obd_trans_info *oti = set->set_oti;
int i, stripes, rc = 0, newea = 0;
- int *idx_arr, idx_cnt = 0;
+ int *idx_arr = NULL, idx_cnt = 0;
+ int flag = LOV_USES_ASSIGNED_STRIPE;
ENTRY;
LASSERT(src_oa->o_valid & OBD_MD_FLID);
-
+ LASSERT(src_oa->o_valid & OBD_MD_FLGROUP);
+
if (set->set_oi->oi_md == NULL) {
- int stripe_cnt = lov_get_stripecnt(lov, 0);
+ int stripes_def = lov_get_stripecnt(lov, 0);
/* If the MDS file was truncated up to some size, stripe over
- * enough OSTs to allow the file to be created at that size.
+ * enough OSTs to allow the file to be created at that size.
* This may mean we use more than the default # of stripes. */
if (src_oa->o_valid & OBD_MD_FLSIZE) {
obd_size min_bavail = LUSTRE_STRIPE_MAXBYTES;
-
- /* Find a small number of stripes we can use
+
+ /* Find a small number of stripes we can use
(up to # of active osts). */
stripes = 1;
lov_getref(exp->exp_obd);
}
lov_putref(exp->exp_obd);
- if (stripes < stripe_cnt)
- stripes = stripe_cnt;
+ if (stripes < stripes_def)
+ stripes = stripes_def;
} else {
- stripes = stripe_cnt;
+ flag = LOV_USES_DEFAULT_STRIPE;
+ stripes = stripes_def;
}
rc = lov_alloc_memmd(&set->set_oi->oi_md, stripes,
LOV_MAGIC);
if (rc < 0)
GOTO(out_err, rc);
- rc = 0;
newea = 1;
+ rc = 0;
}
lsm = set->set_oi->oi_md;
lsm->lsm_object_id = src_oa->o_id;
+ lsm->lsm_object_gr = src_oa->o_gr;
+
if (!lsm->lsm_stripe_size)
lsm->lsm_stripe_size = lov->desc.ld_default_stripe_size;
if (!lsm->lsm_pattern) {
lsm->lsm_pattern = lov->desc.ld_pattern;
}
- stripes = alloc_idx_array(exp, lsm, newea, &idx_arr, &idx_cnt);
- LASSERT(stripes <= lsm->lsm_stripe_count);
+ stripes = alloc_idx_array(exp, lsm, newea, &idx_arr, &idx_cnt, flag);
if (stripes <= 0)
GOTO(out_err, rc = stripes ? stripes : -EIO);
+ LASSERTF(stripes <= lsm->lsm_stripe_count,"requested %d allocated %d\n",
+ lsm->lsm_stripe_count, stripes);
for (i = 0; i < stripes; i++) {
struct lov_request *req;
if (req->rq_oi.oi_md == NULL)
GOTO(out_err, rc = -ENOMEM);
- req->rq_oi.oi_oa = obdo_alloc();
+ OBDO_ALLOC(req->rq_oi.oi_oa);
if (req->rq_oi.oi_oa == NULL)
GOTO(out_err, rc = -ENOMEM);
CDEBUG(D_INODE, "stripe %d has size "LPU64"/"LPU64"\n",
i, req->rq_oi.oi_oa->o_size, src_oa->o_size);
}
-
}
LASSERT(set->set_count == stripes);
out_err:
if (newea && rc)
obd_free_memmd(exp, &set->set_oi->oi_md);
- free_idx_array(idx_arr, idx_cnt);
+ if (idx_arr)
+ free_idx_array(idx_arr, idx_cnt);
EXIT;
return rc;
}