CDEBUG(level,"stripe_size %u, stripe_count %u\n",
le32_to_cpu(lmm->lmm_stripe_size),
le32_to_cpu(lmm->lmm_stripe_count));
- CDEBUG(level,"pool_name "POOLNAMEF"\n", lmm->lmm_pool_name);
+ CDEBUG(level,"pool_name "LOV_POOLNAMEF"\n", lmm->lmm_pool_name);
if (le32_to_cpu(lmm->lmm_stripe_count) <= LOV_V1_INSANE_STRIPE_COUNT) {
for (i = 0, lod = lmm->lmm_objects;
struct lov_obd *lov = &obd->u.lov;
struct lov_mds_md_v1 *lmmv1;
struct lov_mds_md_v3 *lmmv3;
- int stripe_count = lov->desc.ld_tgt_count;
+ int stripe_count;
struct lov_ost_data_v1 *lmm_objects;
int lmm_size, lmm_magic;
int i;
} else {
stripe_count = lsm->lsm_stripe_count;
}
- } else if (lmmp && *lmmp) {
- lmm_magic = le32_to_cpu((*lmmp)->lmm_magic);
} else {
- /* lsm == NULL and lmmp == NULL */
- lmm_magic = LOV_MAGIC;
+ /* No need to allocate more than LOV_MAX_STRIPE_COUNT.
+ * This is still somewhat inaccurate, since ld_tgt_count now
+ * represents the max index; we should rely on the actual number
+ * of OSTs instead. */
+ stripe_count = min((__u32)LOV_MAX_STRIPE_COUNT,
+ lov->desc.ld_tgt_count);
+
+ if (lmmp && *lmmp)
+ lmm_magic = le32_to_cpu((*lmmp)->lmm_magic);
+ else
+ /* lsm == NULL and lmmp == NULL */
+ lmm_magic = LOV_MAGIC;
}
if ((lmm_magic != LOV_MAGIC_V1) &&
lmmv1->lmm_stripe_count = cpu_to_le32(stripe_count);
lmmv1->lmm_pattern = cpu_to_le32(lsm->lsm_pattern);
if (lsm->lsm_magic == LOV_MAGIC_V3) {
- strncpy(lmmv3->lmm_pool_name, lsm->lsm_pool_name, MAXPOOLNAME);
+ strncpy(lmmv3->lmm_pool_name, lsm->lsm_pool_name,
+ LOV_MAXPOOLNAME);
lmm_objects = lmmv3->lmm_objects;
} else {
lmm_objects = lmmv1->lmm_objects;
int rc;
ENTRY;
- rc = copy_from_user(&lumv3, lump, sizeof(struct lov_user_md_v1));
- if (rc)
+ if (copy_from_user(&lumv3, lump, sizeof(struct lov_user_md_v1)))
RETURN(-EFAULT);
lmm_magic = lumv1->lmm_magic;
lustre_swab_lov_user_md_v1(lumv1);
lmm_magic = LOV_USER_MAGIC_V1;
} else if (lmm_magic == LOV_USER_MAGIC_V3) {
- rc = copy_from_user(&lumv3, lump, sizeof(lumv3));
- if (rc)
+ if (copy_from_user(&lumv3, lump, sizeof(lumv3)))
RETURN(-EFAULT);
} else if (lmm_magic == __swab32(LOV_USER_MAGIC_V3)) {
- rc = copy_from_user(&lumv3, lump, sizeof(lumv3));
- if (rc)
+ if (copy_from_user(&lumv3, lump, sizeof(lumv3)))
RETURN(-EFAULT);
lustre_swab_lov_user_md_v3(&lumv3);
lmm_magic = LOV_USER_MAGIC_V3;
(typeof(lumv3.lmm_stripe_offset))(-1)) {
rc = lov_check_index_in_pool(lumv3.lmm_stripe_offset,
pool);
- if (rc < 0)
+ if (rc < 0) {
+ lov_pool_putref(pool);
RETURN(-EINVAL);
+ }
}
if (stripe_count > pool_tgt_count(pool))
stripe_count = pool_tgt_count(pool);
+
+ lov_pool_putref(pool);
}
if ((__u64)lumv1->lmm_stripe_size * stripe_count > ~0UL) {
(*lsmp)->lsm_stripe_size = lumv1->lmm_stripe_size;
if (lmm_magic == LOV_USER_MAGIC_V3)
strncpy((*lsmp)->lsm_pool_name, lumv3.lmm_pool_name,
- MAXPOOLNAME);
+ LOV_MAXPOOLNAME);
rc = 0;
}
/* we only need the header part from user space to get lmm_magic and
* lmm_stripe_count, (the header part is common to v1 and v3) */
lum_size = sizeof(struct lov_user_md_v1);
- rc = copy_from_user(&lum, lump, lum_size);
-
- if (rc)
+ if (copy_from_user(&lum, lump, lum_size))
rc = -EFAULT;
else if ((lum.lmm_magic != LOV_USER_MAGIC) &&
(lum.lmm_magic != LOV_USER_MAGIC_V3))
CLASSERT(sizeof lum.lmm_objects[0] ==
sizeof lmmk->lmm_objects[0]);
+ if ((cpu_to_le32(LOV_MAGIC) != LOV_MAGIC) &&
+ (lmmk->lmm_magic == cpu_to_le32(LOV_MAGIC)))
+ lustre_swab_lov_mds_md(lmmk);
/* User wasn't expecting this many OST entries */
if (lum.lmm_stripe_count == 0) {
- if (copy_to_user(lump, lmmk, lum_size))
+ copy_lov_mds2user(&lum, lmmk);
+ if (copy_to_user(lump, &lum, lum_size))
rc = -EFAULT;
} else if (lum.lmm_stripe_count < lmmk->lmm_stripe_count) {
rc = -EOVERFLOW;
- } else if (copy_to_user(lump, lmmk, lmm_size))
- rc = -EFAULT;
+ } else {
+ copy_lov_mds2user(&lum, lmmk);
+ if (copy_to_user(lump, &lum, lmm_size))
+ rc = -EFAULT;
+ }
obd_free_diskmd(exp, &lmmk);
}