continue;
list_add(&tgt_desc->ltd_kill, &kill);
- tgt_pool_remove(<d->ltd_tgt_pool,
- tgt_desc->ltd_index);
+ lu_tgt_pool_remove(<d->ltd_tgt_pool,
+ tgt_desc->ltd_index);
ltd_del_tgt(ltd, tgt_desc);
ltd->ltd_death_row--;
}
if (rc)
GOTO(out_del_tgt, rc);
- rc = tgt_pool_add(<d->ltd_tgt_pool, index,
+ rc = lu_tgt_pool_add(<d->ltd_tgt_pool, index,
ltd->ltd_lov_desc.ld_tgt_count);
if (rc) {
CERROR("%s: can't set up pool, failed with %d\n",
out_ltd:
down_write(<d->ltd_rw_sem);
mutex_lock(<d->ltd_mutex);
- tgt_pool_remove(<d->ltd_tgt_pool, index);
+ lu_tgt_pool_remove(<d->ltd_tgt_pool, index);
out_del_tgt:
ltd_del_tgt(ltd, tgt_desc);
out_mutex:
int lod_fill_mirrors(struct lod_object *lo)
{
+ struct lod_device *lod = lu2lod_dev(lo->ldo_obj.do_lu.lo_dev);
struct lod_layout_component *lod_comp;
+ bool found_preferred = false;
int mirror_idx = -1;
__u16 mirror_id = 0xffff;
- int i;
+ int i, pref;
ENTRY;
LASSERT(equi(!lo->ldo_is_composite, lo->ldo_mirror_count == 0));
RETURN(0);
lod_comp = &lo->ldo_comp_entries[0];
+
for (i = 0; i < lo->ldo_comp_cnt; i++, lod_comp++) {
int stale = !!(lod_comp->llc_flags & LCME_FL_STALE);
int preferred = !!(lod_comp->llc_flags & LCME_FL_PREF_WR);
+ int j;
+
+ pref = 0;
+ /* calculate component preference over all used OSTs */
+ for (j = 0; j < lod_comp->llc_stripes_allocated; j++) {
+ int idx = lod_comp->llc_ost_indices[j];
+ struct obd_statfs *osfs = &OST_TGT(lod,idx)->ltd_statfs;
+
+ if (osfs->os_state & OS_STATFS_NONROT)
+ pref++;
+ }
if (mirror_id_of(lod_comp->llc_id) == mirror_id) {
lo->ldo_mirrors[mirror_idx].lme_stale |= stale;
- lo->ldo_mirrors[mirror_idx].lme_primary |= preferred;
+ lo->ldo_mirrors[mirror_idx].lme_prefer |= preferred;
+ lo->ldo_mirrors[mirror_idx].lme_preference += pref;
lo->ldo_mirrors[mirror_idx].lme_end = i;
continue;
}
+ if (mirror_idx >= 0 && preferred &&
+ !lo->ldo_mirrors[mirror_idx].lme_stale)
+ found_preferred = true;
+
/* new mirror */
++mirror_idx;
if (mirror_idx >= lo->ldo_mirror_count)
lo->ldo_mirrors[mirror_idx].lme_id = mirror_id;
lo->ldo_mirrors[mirror_idx].lme_stale = stale;
- lo->ldo_mirrors[mirror_idx].lme_primary = preferred;
+ lo->ldo_mirrors[mirror_idx].lme_prefer = preferred;
+ lo->ldo_mirrors[mirror_idx].lme_preference = pref;
lo->ldo_mirrors[mirror_idx].lme_start = i;
lo->ldo_mirrors[mirror_idx].lme_end = i;
}
if (mirror_idx != lo->ldo_mirror_count - 1)
RETURN(-EINVAL);
+ if (!found_preferred && mirror_idx > 0) {
+ int best = -1;
+
+ /*
+ * if no explicitly preferred mirror was found, then pick
+ * the mirror with the highest number of non-rotational OSTs
+ */
+ pref = -1;
+ for (i = 0; i <= mirror_idx; i++) {
+ if (lo->ldo_mirrors[i].lme_stale)
+ continue;
+ if (lo->ldo_mirrors[i].lme_preference > pref) {
+ pref = lo->ldo_mirrors[i].lme_preference;
+ best = i;
+ }
+ }
+
+ LASSERT(best >= 0);
+ lo->ldo_mirrors[best].lme_prefer = 1;
+ }
+
RETURN(0);
}
RETURN(-E2BIG);
objs = &v3->lmm_objects[0];
}
+ lod = lu2lod_dev(lo->ldo_obj.do_lu.lo_dev);
stripe_count = lod_comp_entry_stripe_count(lo, comp_idx, is_dir);
if (stripe_count == 0 && !is_dir &&
!(lod_comp->llc_pattern & LOV_PATTERN_F_RELEASED) &&
- !(lod_comp->llc_pattern & LOV_PATTERN_MDT))
+ !(lod_comp->llc_pattern & LOV_PATTERN_MDT)) {
+ /* Try again if all active targets are disconnected.
+ * This can happen while the MDS is doing failover. */
+ if (!lod->lod_ost_active_count &&
+ lod->lod_ost_count)
+ RETURN(-EAGAIN);
RETURN(-E2BIG);
+ }
if (!is_dir && lo->ldo_is_composite)
lod_comp_shrink_stripe_count(lod_comp, &stripe_count);
GOTO(done, rc = 0);
/* generate ost_idx of this component stripe */
- lod = lu2lod_dev(lo->ldo_obj.do_lu.lo_dev);
for (i = 0; i < stripe_count; i++) {
struct dt_object *object;
__u32 ost_idx = (__u32)-1UL;
* \retval 0 if the index is present
* \retval -EINVAL if not
*/
-static int validate_lod_and_idx(struct lod_device *md, __u32 idx)
+int validate_lod_and_idx(struct lod_device *md, __u32 idx)
{
if (unlikely(idx >= md->lod_ost_descs.ltd_tgts_size ||
!test_bit(idx, md->lod_ost_bitmap))) {
magic != LOV_MAGIC_SEL)
GOTO(out, rc = -EINVAL);
- if (lo->ldo_is_foreign)
- lod_free_foreign_lov(lo);
- else
- lod_free_comp_entries(lo);
+ lod_striping_free_nolock(env, lo);
if (magic == LOV_MAGIC_COMP_V1 || magic == LOV_MAGIC_SEL) {
comp_v1 = (struct lov_comp_md_v1 *)lmm;
ext = &comp_v1->lcm_entries[i].lcme_extent;
lod_comp->llc_extent.e_start =
le64_to_cpu(ext->e_start);
+ if (lod_comp->llc_extent.e_start &
+ (LOV_MIN_STRIPE_SIZE - 1)) {
+ CDEBUG(D_LAYOUT,
+ "extent start %llu is not a multiple of min size %u\n",
+ lod_comp->llc_extent.e_start,
+ LOV_MIN_STRIPE_SIZE);
+ GOTO(out, rc = -EINVAL);
+ }
+
lod_comp->llc_extent.e_end = le64_to_cpu(ext->e_end);
+ if (lod_comp->llc_extent.e_end != LUSTRE_EOF &&
+ lod_comp->llc_extent.e_end &
+ (LOV_MIN_STRIPE_SIZE - 1)) {
+ CDEBUG(D_LAYOUT,
+ "extent end %llu is not a multiple of min size %u\n",
+ lod_comp->llc_extent.e_end,
+ LOV_MIN_STRIPE_SIZE);
+ GOTO(out, rc = -EINVAL);
+ }
+
lod_comp->llc_flags =
le32_to_cpu(comp_v1->lcm_entries[i].lcme_flags);
if (lod_comp->llc_flags & LCME_FL_NOSYNC)
ENTRY;
mutex_lock(&lo->ldo_layout_mutex);
- lod_striping_free_nolock(env, lo);
rc = lod_parse_striping(env, lo, buf);
mutex_unlock(&lo->ldo_layout_mutex);
struct lov_comp_md_entry_v1 *comp_entry_v1(struct lov_comp_md_v1 *comp, int i)
{
LASSERTF((le32_to_cpu(comp->lcm_magic) & ~LOV_MAGIC_DEFINED) ==
- LOV_USER_MAGIC_COMP_V1, "Wrong magic %x\n",
+ LOV_USER_MAGIC_COMP_V1 ||
+ (le32_to_cpu(comp->lcm_magic) & ~LOV_MAGIC_DEFINED) ==
+ LOV_USER_MAGIC_SEL, "Wrong magic %x\n",
le32_to_cpu(comp->lcm_magic));
LASSERTF(i >= 0 && i < le16_to_cpu(comp->lcm_entry_count),
"bad index %d, max = %d\n",
RETURN(-EINVAL);
}
- if (magic != LOV_USER_MAGIC_V1 &&
- magic != LOV_USER_MAGIC_V3 &&
- magic != LOV_USER_MAGIC_SPECIFIC &&
- magic != LOV_USER_MAGIC_COMP_V1) {
+ switch (magic) {
+ case LOV_USER_MAGIC_FOREIGN:
+ RETURN(0);
+ case LOV_USER_MAGIC_V1:
+ case LOV_USER_MAGIC_V3:
+ case LOV_USER_MAGIC_SPECIFIC:
+ RETURN(lod_verify_v1v3(d, buf, is_from_disk));
+ case LOV_USER_MAGIC_COMP_V1:
+ case LOV_USER_MAGIC_SEL:
+ break;
+ default:
CDEBUG(D_LAYOUT, "bad userland LOV MAGIC: %#x\n",
le32_to_cpu(lum->lmm_magic));
RETURN(-EINVAL);
}
- if (magic != LOV_USER_MAGIC_COMP_V1)
- RETURN(lod_verify_v1v3(d, buf, is_from_disk));
-
/* magic == LOV_USER_MAGIC_COMP_V1 */
comp_v1 = buf->lb_buf;
if (buf->lb_len < le32_to_cpu(comp_v1->lcm_size)) {
for_each_comp_entry_v1(comp_v1, ent) {
ext = &ent->lcme_extent;
- if (le64_to_cpu(ext->e_start) > le64_to_cpu(ext->e_end)) {
+ if (le64_to_cpu(ext->e_start) > le64_to_cpu(ext->e_end) ||
+ le64_to_cpu(ext->e_start) & (LOV_MIN_STRIPE_SIZE - 1) ||
+ (le64_to_cpu(ext->e_end) != LUSTRE_EOF &&
+ le64_to_cpu(ext->e_end) & (LOV_MIN_STRIPE_SIZE - 1))) {
CDEBUG(D_LAYOUT, "invalid extent "DEXT"\n",
le64_to_cpu(ext->e_start),
le64_to_cpu(ext->e_end));
else if (rc)
RETURN(rc);
+ if (le16_to_cpu(lum->lmm_stripe_count) == 1)
+ lum->lmm_stripe_count = 0;
/* Any stripe count is forbidden on DoM component */
- if (lum->lmm_stripe_count) {
+ if (lum->lmm_stripe_count > 0) {
CDEBUG(D_LAYOUT,
"invalid DoM layout stripe count %u, must be 0\n",
le16_to_cpu(lum->lmm_stripe_count));
if (rc)
RETURN(rc);
- if (prev_end == LUSTRE_EOF)
+ if (prev_end == LUSTRE_EOF || ext->e_start == prev_end)
continue;
/* extent end must be aligned with the stripe_size */
INIT_LIST_HEAD(&lod->lod_pool_list);
lod->lod_pool_count = 0;
- rc = tgt_pool_init(&lod->lod_mdt_descs.ltd_tgt_pool, 0);
+ rc = lu_tgt_pool_init(&lod->lod_mdt_descs.ltd_tgt_pool, 0);
if (rc)
GOTO(out_hash, rc);
- rc = tgt_pool_init(&lod->lod_mdt_descs.ltd_qos.lq_rr.lqr_pool, 0);
+ rc = lu_tgt_pool_init(&lod->lod_mdt_descs.ltd_qos.lq_rr.lqr_pool, 0);
if (rc)
GOTO(out_mdt_pool, rc);
- rc = tgt_pool_init(&lod->lod_ost_descs.ltd_tgt_pool, 0);
+ rc = lu_tgt_pool_init(&lod->lod_ost_descs.ltd_tgt_pool, 0);
if (rc)
GOTO(out_mdt_rr_pool, rc);
- rc = tgt_pool_init(&lod->lod_ost_descs.ltd_qos.lq_rr.lqr_pool, 0);
+ rc = lu_tgt_pool_init(&lod->lod_ost_descs.ltd_qos.lq_rr.lqr_pool, 0);
if (rc)
GOTO(out_ost_pool, rc);
RETURN(0);
out_ost_pool:
- tgt_pool_free(&lod->lod_ost_descs.ltd_tgt_pool);
+ lu_tgt_pool_free(&lod->lod_ost_descs.ltd_tgt_pool);
out_mdt_rr_pool:
- tgt_pool_free(&lod->lod_mdt_descs.ltd_qos.lq_rr.lqr_pool);
+ lu_tgt_pool_free(&lod->lod_mdt_descs.ltd_qos.lq_rr.lqr_pool);
out_mdt_pool:
- tgt_pool_free(&lod->lod_mdt_descs.ltd_tgt_pool);
+ lu_tgt_pool_free(&lod->lod_mdt_descs.ltd_tgt_pool);
out_hash:
lod_pool_hash_destroy(&lod->lod_pools_hash_body);
}
lod_pool_hash_destroy(&lod->lod_pools_hash_body);
- tgt_pool_free(&lod->lod_ost_descs.ltd_qos.lq_rr.lqr_pool);
- tgt_pool_free(&lod->lod_ost_descs.ltd_tgt_pool);
- tgt_pool_free(&lod->lod_mdt_descs.ltd_qos.lq_rr.lqr_pool);
- tgt_pool_free(&lod->lod_mdt_descs.ltd_tgt_pool);
+ lu_tgt_pool_free(&lod->lod_ost_descs.ltd_qos.lq_rr.lqr_pool);
+ lu_tgt_pool_free(&lod->lod_ost_descs.ltd_tgt_pool);
+ lu_tgt_pool_free(&lod->lod_mdt_descs.ltd_qos.lq_rr.lqr_pool);
+ lu_tgt_pool_free(&lod->lod_mdt_descs.ltd_tgt_pool);
RETURN(0);
}