le32_to_cpu(lmmj->lmmj_extent_count));
}
+/* Dump a V3 (pool-aware) on-disk LOV EA to the debug log at @level.
+ * All multi-byte fields in *lmm are little-endian on disk, hence the
+ * le32/le64_to_cpu() conversions before printing. */
+void lov_dump_lmm_v3(int level, struct lov_mds_md_v3 *lmm)
+{
+        struct lov_ost_data_v1 *ods;
+        int idx;
+
+        CDEBUG(level, "objid "LPX64", magic 0x%08x, pattern %#x\n",
+               le64_to_cpu(lmm->lmm_object_id), le32_to_cpu(lmm->lmm_magic),
+               le32_to_cpu(lmm->lmm_pattern));
+        CDEBUG(level, "stripe_size %u, stripe_count %u\n",
+               le32_to_cpu(lmm->lmm_stripe_size),
+               le32_to_cpu(lmm->lmm_stripe_count));
+        CDEBUG(level, "pool_name "LOV_POOLNAMEF"\n", lmm->lmm_pool_name);
+
+        /* Refuse to walk an implausible object array (corrupt EA). */
+        if (le32_to_cpu(lmm->lmm_stripe_count) > LOV_V1_INSANE_STRIPE_COUNT) {
+                CDEBUG(level, "bad stripe_count %u > max_stripe_count %u\n",
+                       le32_to_cpu(lmm->lmm_stripe_count),
+                       LOV_V1_INSANE_STRIPE_COUNT);
+                return;
+        }
+
+        ods = lmm->lmm_objects;
+        for (idx = 0; idx < (int)le32_to_cpu(lmm->lmm_stripe_count); idx++) {
+                CDEBUG(level,
+                       "stripe %u idx %u subobj "LPX64"/"LPX64"\n",
+                       idx, le32_to_cpu(ods[idx].l_ost_idx),
+                       le64_to_cpu(ods[idx].l_object_gr),
+                       le64_to_cpu(ods[idx].l_object_id));
+        }
+}
+
+/* Dump an on-disk LOV EA of any supported version (V1/JOIN/V3) to the
+ * debug log at @level, dispatching on the wire-format magic. */
+void lov_dump_lmm(int level, void *lmm)
+{
+        int magic;
+
+        /* lmm_magic is little-endian on disk; convert before comparing
+         * against the host-order LOV_MAGIC_* constants (matches every
+         * other magic check in this file). */
+        magic = le32_to_cpu(((struct lov_mds_md_v1 *)lmm)->lmm_magic);
+        switch (magic) {
+        case LOV_MAGIC_V1:
+                lov_dump_lmm_v1(level, (struct lov_mds_md_v1 *)lmm);
+                break;
+        case LOV_MAGIC_JOIN:
+                lov_dump_lmm_join(level, (struct lov_mds_md_join *)lmm);
+                break;
+        case LOV_MAGIC_V3:
+                lov_dump_lmm_v3(level, (struct lov_mds_md_v3 *)lmm);
+                break;
+        default:
+                CERROR("Cannot recognize lmm_magic %x\n", magic);
+                break;
+        }
+}
+
#define LMM_ASSERT(test) \
do { \
if (!(test)) lov_dump_lmm(D_ERROR, lmm); \
{
struct obd_device *obd = class_exp2obd(exp);
struct lov_obd *lov = &obd->u.lov;
- struct lov_mds_md *lmm;
- int stripe_count = lov->desc.ld_tgt_count;
- int lmm_size;
+ struct lov_mds_md_v1 *lmmv1;
+ struct lov_mds_md_v3 *lmmv3;
+ int stripe_count;
+ struct lov_ost_data_v1 *lmm_objects;
+ int lmm_size, lmm_magic;
int i;
ENTRY;
if (lsm) {
- if (lsm->lsm_magic != LOV_MAGIC) {
- CERROR("bad mem LOV MAGIC: 0x%08X != 0x%08X\n",
- lsm->lsm_magic, LOV_MAGIC);
- RETURN(-EINVAL);
- }
+ lmm_magic = lsm->lsm_magic;
+
/* If we are just sizing the EA, limit the stripe count
* to the actual number of OSTs in this filesystem. */
if (!lmmp) {
- stripe_count = lov_get_stripecnt(lov, lsm->lsm_stripe_count);
+ stripe_count = lov_get_stripecnt(lov,
+ lsm->lsm_stripe_count);
lsm->lsm_stripe_count = stripe_count;
} else {
stripe_count = lsm->lsm_stripe_count;
}
+ } else {
+ /* No needs to allocated more than LOV_MAX_STRIPE_COUNT.
+ * Anyway, this is pretty inaccurate since ld_tgt_count now
+ * represents max index and we should rely on the actual number
+ * of OSTs instead */
+ stripe_count = min((__u32)LOV_MAX_STRIPE_COUNT,
+ lov->desc.ld_tgt_count);
+
+ if (lmmp && *lmmp)
+ lmm_magic = le32_to_cpu((*lmmp)->lmm_magic);
+ else
+ /* lsm == NULL and lmmp == NULL */
+ lmm_magic = LOV_MAGIC;
+ }
+
+ if ((lmm_magic != LOV_MAGIC_V1) &&
+ (lmm_magic != LOV_MAGIC_V3)) {
+ CERROR("bad mem LOV MAGIC: 0x%08X != 0x%08X nor 0x%08X\n",
+ lmm_magic, LOV_MAGIC_V1, LOV_MAGIC_V3);
+ RETURN(-EINVAL);
+
}
/* XXX LOV STACKING call into osc for sizes */
- lmm_size = lov_mds_md_size(stripe_count);
+ lmm_size = lov_mds_md_size(stripe_count, lmm_magic);
if (!lmmp)
RETURN(lmm_size);
if (*lmmp && !lsm) {
stripe_count = le32_to_cpu((*lmmp)->lmm_stripe_count);
- OBD_FREE(*lmmp, lov_mds_md_size(stripe_count));
+ lmm_size = lov_mds_md_size(stripe_count, lmm_magic);
+ OBD_FREE(*lmmp, lmm_size);
*lmmp = NULL;
RETURN(0);
}
RETURN(-ENOMEM);
}
- lmm = *lmmp;
- lmm->lmm_magic = cpu_to_le32(LOV_MAGIC); /* only write new format */
+ CDEBUG(D_INFO, "lov_packmd: LOV_MAGIC 0x%08X, lmm_size = %d \n",
+ lmm_magic, lmm_size);
+
+ lmmv1 = *lmmp;
+ lmmv3 = (struct lov_mds_md_v3 *)*lmmp;
+ if (lmm_magic == LOV_MAGIC_V3)
+ lmmv3->lmm_magic = cpu_to_le32(LOV_MAGIC_V3);
+ else
+ lmmv1->lmm_magic = cpu_to_le32(LOV_MAGIC_V1);
if (!lsm)
RETURN(lmm_size);
- lmm->lmm_object_id = cpu_to_le64(lsm->lsm_object_id);
- lmm->lmm_object_gr = cpu_to_le64(lsm->lsm_object_gr);
- lmm->lmm_stripe_size = cpu_to_le32(lsm->lsm_stripe_size);
- lmm->lmm_stripe_count = cpu_to_le32(stripe_count);
- lmm->lmm_pattern = cpu_to_le32(lsm->lsm_pattern);
+ /* lmmv1 and lmmv3 point to the same struct and have the
+ * same first fields
+ */
+ lmmv1->lmm_object_id = cpu_to_le64(lsm->lsm_object_id);
+ lmmv1->lmm_object_gr = cpu_to_le64(lsm->lsm_object_gr);
+ lmmv1->lmm_stripe_size = cpu_to_le32(lsm->lsm_stripe_size);
+ lmmv1->lmm_stripe_count = cpu_to_le32(stripe_count);
+ lmmv1->lmm_pattern = cpu_to_le32(lsm->lsm_pattern);
+ if (lsm->lsm_magic == LOV_MAGIC_V3) {
+ strncpy(lmmv3->lmm_pool_name, lsm->lsm_pool_name,
+ LOV_MAXPOOLNAME);
+ lmm_objects = lmmv3->lmm_objects;
+ } else {
+ lmm_objects = lmmv1->lmm_objects;
+ }
for (i = 0; i < stripe_count; i++) {
struct lov_oinfo *loi = lsm->lsm_oinfo[i];
/* XXX LOV STACKING call down to osc_packmd() to do packing */
LASSERTF(loi->loi_id, "lmm_oid "LPU64" stripe %u/%u idx %u\n",
- lmm->lmm_object_id, i, stripe_count, loi->loi_ost_idx);
- lmm->lmm_objects[i].l_object_id = cpu_to_le64(loi->loi_id);
- lmm->lmm_objects[i].l_object_gr = cpu_to_le64(loi->loi_gr);
- lmm->lmm_objects[i].l_ost_gen = cpu_to_le32(loi->loi_ost_gen);
- lmm->lmm_objects[i].l_ost_idx = cpu_to_le32(loi->loi_ost_idx);
+ lmmv1->lmm_object_id, i, stripe_count, loi->loi_ost_idx);
+ lmm_objects[i].l_object_id = cpu_to_le64(loi->loi_id);
+ lmm_objects[i].l_object_gr = cpu_to_le64(loi->loi_gr);
+ lmm_objects[i].l_ost_gen = cpu_to_le32(loi->loi_ost_gen);
+ lmm_objects[i].l_ost_idx = cpu_to_le32(loi->loi_ost_idx);
}
RETURN(lmm_size);
int rc;
if (lsm_op_find(le32_to_cpu(*(__u32 *)lmm)) == NULL) {
- CERROR("bad disk LOV MAGIC: 0x%08X; dumping V1 LMM:\n",
- le32_to_cpu(*(__u32 *)lmm));
- lov_dump_lmm_v1(D_WARNING, lmm);
+ char *buffer;
+ int sz;
+
+ CERROR("bad disk LOV MAGIC: 0x%08X; dumping LMM (size=%d):\n",
+ le32_to_cpu(*(__u32 *)lmm), lmm_bytes);
+ sz = lmm_bytes * 2 + 1;
+ OBD_ALLOC(buffer, sz);
+ if (buffer != NULL) {
+ int i;
+
+ for (i = 0; i < lmm_bytes; i++)
+ sprintf(buffer+2*i, "%.2X", ((char *)lmm)[i]);
+ buffer[sz] = '\0';
+ CERROR("%s\n", buffer);
+ OBD_FREE(buffer, sz);
+ }
return -EINVAL;
}
rc = lsm_op_find(le32_to_cpu(*(__u32 *)lmm))->lsm_lmm_verify(lmm,
(*lsmp)->lsm_stripe_count = stripe_count;
(*lsmp)->lsm_maxbytes = LUSTRE_STRIPE_MAXBYTES * stripe_count;
(*lsmp)->lsm_pattern = pattern;
+ (*lsmp)->lsm_pool_name[0] = '\0';
(*lsmp)->lsm_oinfo[0]->loi_ost_idx = ~0;
for (i = 0; i < stripe_count; i++)
{
struct obd_device *obd = class_exp2obd(exp);
struct lov_obd *lov = &obd->u.lov;
- struct lov_user_md lum;
+ struct lov_user_md_v3 lumv3;
+ struct lov_user_md_v1 *lumv1 = (struct lov_user_md_v1 *)&lumv3;
+ int lmm_magic;
int stripe_count;
int rc;
ENTRY;
- rc = copy_from_user(&lum, lump, sizeof(lum));
+ rc = copy_from_user(&lumv3, lump, sizeof(struct lov_user_md_v1));
if (rc)
RETURN(-EFAULT);
- if (lum.lmm_magic != LOV_USER_MAGIC) {
- if (lum.lmm_magic == __swab32(LOV_USER_MAGIC)) {
- lustre_swab_lov_user_md(&lum);
- } else {
- CDEBUG(D_IOCTL, "bad userland LOV MAGIC:"
- " %#08x != %#08x\n",
- lum.lmm_magic, LOV_USER_MAGIC);
- RETURN(-EINVAL);
- }
+ lmm_magic = lumv1->lmm_magic;
+
+ if (lmm_magic == __swab32(LOV_USER_MAGIC_V1)) {
+ lustre_swab_lov_user_md_v1(lumv1);
+ lmm_magic = LOV_USER_MAGIC_V1;
+ } else if (lmm_magic == LOV_USER_MAGIC_V3) {
+ rc = copy_from_user(&lumv3, lump, sizeof(lumv3));
+ if (rc)
+ RETURN(-EFAULT);
+ } else if (lmm_magic == __swab32(LOV_USER_MAGIC_V3)) {
+ rc = copy_from_user(&lumv3, lump, sizeof(lumv3));
+ if (rc)
+ RETURN(-EFAULT);
+ lustre_swab_lov_user_md_v3(&lumv3);
+ lmm_magic = LOV_USER_MAGIC_V3;
+ } else if (lmm_magic != LOV_USER_MAGIC_V1) {
+ CDEBUG(D_IOCTL,
+ "bad userland LOV MAGIC: %#08x != %#08x nor %#08x\n",
+ lmm_magic, LOV_USER_MAGIC_V1, LOV_USER_MAGIC_V3);
+ RETURN(-EINVAL);
}
- if (lum.lmm_pattern == 0) {
- lum.lmm_pattern = lov->desc.ld_pattern ?
+ /* in the rest of the tests, as *lumv1 and lumv3 have the same
+ * fields, we use lumv1 to avoid code duplication */
+
+ if (lumv1->lmm_pattern == 0) {
+ lumv1->lmm_pattern = lov->desc.ld_pattern ?
lov->desc.ld_pattern : LOV_PATTERN_RAID0;
}
- if (lum.lmm_pattern != LOV_PATTERN_RAID0) {
+ if (lumv1->lmm_pattern != LOV_PATTERN_RAID0) {
CDEBUG(D_IOCTL, "bad userland stripe pattern: %#x\n",
- lum.lmm_pattern);
+ lumv1->lmm_pattern);
RETURN(-EINVAL);
}
/* 64kB is the largest common page size we see (ia64), and matches the
* check in lfs */
- if (lum.lmm_stripe_size & (LOV_MIN_STRIPE_SIZE - 1)) {
+ if (lumv1->lmm_stripe_size & (LOV_MIN_STRIPE_SIZE - 1)) {
CDEBUG(D_IOCTL, "stripe size %u not multiple of %u, fixing\n",
- lum.lmm_stripe_size, LOV_MIN_STRIPE_SIZE);
- lum.lmm_stripe_size = LOV_MIN_STRIPE_SIZE;
+ lumv1->lmm_stripe_size, LOV_MIN_STRIPE_SIZE);
+ lumv1->lmm_stripe_size = LOV_MIN_STRIPE_SIZE;
}
- if ((lum.lmm_stripe_offset >= lov->desc.ld_tgt_count) &&
- (lum.lmm_stripe_offset != (typeof(lum.lmm_stripe_offset))(-1))) {
+ if ((lumv1->lmm_stripe_offset >= lov->desc.ld_tgt_count) &&
+ (lumv1->lmm_stripe_offset !=
+ (typeof(lumv1->lmm_stripe_offset))(-1))) {
CDEBUG(D_IOCTL, "stripe offset %u > number of OSTs %u\n",
- lum.lmm_stripe_offset, lov->desc.ld_tgt_count);
+ lumv1->lmm_stripe_offset, lov->desc.ld_tgt_count);
RETURN(-EINVAL);
}
- stripe_count = lov_get_stripecnt(lov, lum.lmm_stripe_count);
+ stripe_count = lov_get_stripecnt(lov, lumv1->lmm_stripe_count);
+
+ if (lmm_magic == LOV_USER_MAGIC_V3) {
+ struct pool_desc *pool;
+
+ pool = lov_find_pool(lov, lumv3.lmm_pool_name);
+ if (pool == NULL)
+ RETURN(-EINVAL);
+
+ if (lumv3.lmm_stripe_offset !=
+ (typeof(lumv3.lmm_stripe_offset))(-1)) {
+ rc = lov_check_index_in_pool(lumv3.lmm_stripe_offset,
+ pool);
+ if (rc < 0)
+ RETURN(-EINVAL);
+ }
+
+ if (stripe_count > pool_tgt_count(pool))
+ stripe_count = pool_tgt_count(pool);
+ }
- if ((__u64)lum.lmm_stripe_size * stripe_count > ~0UL) {
+ if ((__u64)lumv1->lmm_stripe_size * stripe_count > ~0UL) {
CDEBUG(D_IOCTL, "stripe width %ux%i exeeds %lu bytes\n",
- lum.lmm_stripe_size, (int)lum.lmm_stripe_count, ~0UL);
+ lumv1->lmm_stripe_size, (int)lumv1->lmm_stripe_count,
+ ~0UL);
RETURN(-EINVAL);
}
- rc = lov_alloc_memmd(lsmp, stripe_count, lum.lmm_pattern, LOV_MAGIC);
+ rc = lov_alloc_memmd(lsmp, stripe_count, lumv1->lmm_pattern, lmm_magic);
if (rc >= 0) {
- (*lsmp)->lsm_oinfo[0]->loi_ost_idx = lum.lmm_stripe_offset;
- (*lsmp)->lsm_stripe_size = lum.lmm_stripe_size;
+ (*lsmp)->lsm_oinfo[0]->loi_ost_idx = lumv1->lmm_stripe_offset;
+ (*lsmp)->lsm_stripe_size = lumv1->lmm_stripe_size;
+ if (lmm_magic == LOV_USER_MAGIC_V3)
+ strncpy((*lsmp)->lsm_pool_name, lumv3.lmm_pool_name,
+ LOV_MAXPOOLNAME);
rc = 0;
}
- RETURN(0);
+ RETURN(rc);
}
/* Configure object striping information on a new file.
struct obd_export *oexp;
struct lov_obd *lov = &exp->exp_obd->u.lov;
obd_id last_id = 0;
+ struct lov_user_ost_data_v1 *lmm_objects;
ENTRY;
+
+ if (lump->lmm_magic == LOV_USER_MAGIC_V3)
+ lmm_objects = ((struct lov_user_md_v3 *)lump)->lmm_objects;
+ else
+ lmm_objects = lump->lmm_objects;
+
for (i = 0; i < lump->lmm_stripe_count; i++) {
__u32 len = sizeof(last_id);
- oexp = lov->lov_tgts[lump->lmm_objects[i].l_ost_idx]->ltd_exp;
+ oexp = lov->lov_tgts[lmm_objects[i].l_ost_idx]->ltd_exp;
rc = obd_get_info(oexp, sizeof(KEY_LAST_ID), KEY_LAST_ID,
&len, &last_id, NULL);
if (rc)
RETURN(rc);
- if (lump->lmm_objects[i].l_object_id > last_id) {
+ if (lmm_objects[i].l_object_id > last_id) {
CERROR("Setting EA for object > than last id on "
"ost idx %d "LPD64" > "LPD64" \n",
- lump->lmm_objects[i].l_ost_idx,
- lump->lmm_objects[i].l_object_id, last_id);
+ lmm_objects[i].l_ost_idx,
+ lmm_objects[i].l_object_id, last_id);
RETURN(-EINVAL);
}
}
for (i = 0; i < lump->lmm_stripe_count; i++) {
(*lsmp)->lsm_oinfo[i]->loi_ost_idx =
- lump->lmm_objects[i].l_ost_idx;
- (*lsmp)->lsm_oinfo[i]->loi_id = lump->lmm_objects[i].l_object_id;
- (*lsmp)->lsm_oinfo[i]->loi_gr = lump->lmm_objects[i].l_object_gr;
+ lmm_objects[i].l_ost_idx;
+ (*lsmp)->lsm_oinfo[i]->loi_id = lmm_objects[i].l_object_id;
+ (*lsmp)->lsm_oinfo[i]->loi_gr = lmm_objects[i].l_object_gr;
}
RETURN(0);
}
/*
* XXX huge struct allocated on stack.
*/
- struct lov_user_md lum;
+ /* we use lov_user_md_v3 because it is larger than lov_user_md_v1 */
+ struct lov_user_md_v3 lum;
struct lov_mds_md *lmmk = NULL;
int rc, lmm_size;
+ int lum_size;
mm_segment_t seg;
ENTRY;
*/
seg = get_fs();
set_fs(KERNEL_DS);
- rc = copy_from_user(&lum, lump, sizeof(lum));
+
+ /* we only need the header part from user space to get lmm_magic and
+ * lmm_stripe_count, (the header part is common to v1 and v3) */
+ lum_size = sizeof(struct lov_user_md_v1);
+ rc = copy_from_user(&lum, lump, lum_size);
+
if (rc)
rc = -EFAULT;
- else if (lum.lmm_magic != LOV_USER_MAGIC)
+ else if ((lum.lmm_magic != LOV_USER_MAGIC) &&
+ (lum.lmm_magic != LOV_USER_MAGIC_V3))
rc = -EINVAL;
else {
+ /* if v3 we just have to update the lum_size */
+ if (lum.lmm_magic == LOV_USER_MAGIC_V3)
+ lum_size = sizeof(struct lov_user_md_v3);
+
rc = lov_packmd(exp, &lmmk, lsm);
if (rc < 0)
RETURN(rc);
rc = 0;
/* FIXME: Bug 1185 - copy fields properly when structs change */
- CLASSERT(sizeof lum == sizeof *lmmk);
+ /* struct lov_user_md_v3 and struct lov_mds_md_v3 must be the same */
+ CLASSERT(sizeof(lum) == sizeof(struct lov_mds_md_v3));
CLASSERT(sizeof lum.lmm_objects[0] ==
sizeof lmmk->lmm_objects[0]);
/* User wasn't expecting this many OST entries */
if (lum.lmm_stripe_count == 0) {
- if (copy_to_user(lump, lmmk, sizeof lum))
+ if (copy_to_user(lump, lmmk, lum_size))
rc = -EFAULT;
} else if (lum.lmm_stripe_count < lmmk->lmm_stripe_count) {
rc = -EOVERFLOW;
- } else if (copy_to_user(lump, lmmk, sizeof lum))
+ } else if (copy_to_user(lump, lmmk, lmm_size))
rc = -EFAULT;
obd_free_diskmd(exp, &lmmk);