* Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
*
- * Copyright (c) 2011, 2012, Intel Corporation.
+ * Copyright (c) 2011, 2013, Intel Corporation.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
#include "lov_internal.h"
-static void lov_dump_lmm_common(int level, void *lmmp)
+void lov_dump_lmm_common(int level, void *lmmp)
{
struct lov_mds_md *lmm = lmmp;
struct ost_id oi;
- ostid_le_to_cpu(&lmm->lmm_oi, &oi);
+ lmm_oi_le_to_cpu(&oi, &lmm->lmm_oi);
CDEBUG(level, "objid "DOSTID", magic 0x%08x, pattern %#x\n",
POSTID(&oi), le32_to_cpu(lmm->lmm_magic),
le32_to_cpu(lmm->lmm_pattern));
if (stripe_count > LOV_V1_INSANE_STRIPE_COUNT) {
CDEBUG(level, "bad stripe_count %u > max_stripe_count %u\n",
stripe_count, LOV_V1_INSANE_STRIPE_COUNT);
+ return;
}
for (i = 0; i < stripe_count; ++i, ++lod) {
void lov_dump_lmm(int level, void *lmm)
{
- int magic;
-
- magic = ((struct lov_mds_md_v1 *)(lmm))->lmm_magic;
- switch (magic) {
- case LOV_MAGIC_V1:
- return lov_dump_lmm_v1(level, (struct lov_mds_md_v1 *)(lmm));
- case LOV_MAGIC_V3:
- return lov_dump_lmm_v3(level, (struct lov_mds_md_v3 *)(lmm));
- default:
- CERROR("Cannot recognize lmm_magic %x", magic);
- }
- return;
+ int magic;
+
+ /* lmm_magic is stored little-endian on disk/wire; convert before
+ * comparing against the host-order LOV_MAGIC_* constants */
+ magic = le32_to_cpu(((struct lov_mds_md *)lmm)->lmm_magic);
+ switch (magic) {
+ case LOV_MAGIC_V1:
+ lov_dump_lmm_v1(level, (struct lov_mds_md_v1 *)lmm);
+ break;
+ case LOV_MAGIC_V3:
+ lov_dump_lmm_v3(level, (struct lov_mds_md_v3 *)lmm);
+ break;
+ default:
+ CDEBUG(level, "unrecognized lmm_magic %x, assuming %x\n",
+ magic, LOV_MAGIC_V1);
+ /* best effort: dump only the header fields shared by all
+ * layout versions instead of bailing out entirely */
+ lov_dump_lmm_common(level, lmm);
+ break;
+ }
}
-#define LMM_ASSERT(test) \
-do { \
- if (!(test)) lov_dump_lmm(D_ERROR, lmm); \
- LASSERT(test); /* so we know what assertion failed */ \
-} while(0)
-
/* Pack LOV object metadata for disk storage. It is packed in LE byte
* order and is opaque to the networking layer.
*
if (lsm) {
/* If we are just sizing the EA, limit the stripe count
* to the actual number of OSTs in this filesystem. */
- if (!lmmp) {
- stripe_count = lov_get_stripecnt(lov, lmm_magic,
- lsm->lsm_stripe_count);
- lsm->lsm_stripe_count = stripe_count;
- } else {
- stripe_count = lsm->lsm_stripe_count;
- }
+ if (!lmmp) {
+ stripe_count = lov_get_stripecnt(lov, lmm_magic,
+ lsm->lsm_stripe_count);
+ lsm->lsm_stripe_count = stripe_count;
+ } else if (!lsm_is_released(lsm)) {
+ stripe_count = lsm->lsm_stripe_count;
+ } else {
+ stripe_count = 0;
+ }
} else {
/* No need to allocate more than maximum supported stripes.
* Anyway, this is pretty inaccurate since ld_tgt_count now
/* lmmv1 and lmmv3 point to the same struct and have the
* same first fields
*/
- ostid_cpu_to_le(&lsm->lsm_oi, &lmmv1->lmm_oi);
+ lmm_oi_cpu_to_le(&lmmv1->lmm_oi, &lsm->lsm_oi);
lmmv1->lmm_stripe_size = cpu_to_le32(lsm->lsm_stripe_size);
lmmv1->lmm_stripe_count = cpu_to_le16(stripe_count);
lmmv1->lmm_pattern = cpu_to_le32(lsm->lsm_pattern);
for (i = 0; i < stripe_count; i++) {
struct lov_oinfo *loi = lsm->lsm_oinfo[i];
/* XXX LOV STACKING call down to osc_packmd() to do packing */
- LASSERTF(loi->loi_id != 0, "lmm_oi "DOSTID" stripe %u/%u"
- " idx %u\n", POSTID(&lmmv1->lmm_oi), i, stripe_count,
- loi->loi_ost_idx);
+ LASSERTF(ostid_id(&loi->loi_oi) != 0, "lmm_oi "DOSTID
+ " stripe %u/%u idx %u\n", POSTID(&lmmv1->lmm_oi),
+ i, stripe_count, loi->loi_ost_idx);
ostid_cpu_to_le(&loi->loi_oi, &lmm_objects[i].l_ost_oi);
lmm_objects[i].l_ost_gen = cpu_to_le32(loi->loi_ost_gen);
lmm_objects[i].l_ost_idx = cpu_to_le32(loi->loi_ost_idx);
(*lsmp)->lsm_pattern = pattern;
(*lsmp)->lsm_pool_name[0] = '\0';
(*lsmp)->lsm_layout_gen = 0;
- (*lsmp)->lsm_oinfo[0]->loi_ost_idx = ~0;
+ if (stripe_count > 0)
+ (*lsmp)->lsm_oinfo[0]->loi_ost_idx = ~0;
for (i = 0; i < stripe_count; i++)
loi_init((*lsmp)->lsm_oinfo[i]);
int rc = 0, lsm_size;
__u16 stripe_count;
__u32 magic;
+ __u32 pattern;
ENTRY;
/* If passed an MDS struct use values from there, otherwise defaults */
RETURN(0);
}
- lsm_size = lov_alloc_memmd(lsmp, stripe_count, LOV_PATTERN_RAID0,
- magic);
+ pattern = le32_to_cpu(lmm->lmm_pattern);
+ lsm_size = lov_alloc_memmd(lsmp, stripe_count, pattern, magic);
if (lsm_size < 0)
RETURN(lsm_size);
lov->desc.ld_pattern : LOV_PATTERN_RAID0;
}
- if (lumv1->lmm_pattern != LOV_PATTERN_RAID0) {
+ if (lov_pattern(lumv1->lmm_pattern) != LOV_PATTERN_RAID0) {
CDEBUG(D_IOCTL, "bad userland stripe pattern: %#x\n",
lumv1->lmm_pattern);
RETURN(-EINVAL);
}
}
+ if (lumv1->lmm_pattern & LOV_PATTERN_F_RELEASED)
+ stripe_count = 0;
+
rc = lov_alloc_memmd(lsmp, stripe_count, lumv1->lmm_pattern, lmm_magic);
if (rc >= 0) {
&len, &last_id, NULL);
if (rc)
RETURN(rc);
- if (lmm_objects[i].l_object_id > last_id) {
- CERROR("Setting EA for object > than last id on "
- "ost idx %d "LPD64" > "LPD64" \n",
- lmm_objects[i].l_ost_idx,
- lmm_objects[i].l_object_id, last_id);
- RETURN(-EINVAL);
- }
+ if (ostid_id(&lmm_objects[i].l_ost_oi) > last_id) {
+ CERROR("Setting EA for object greater than last id on"
+ " ost idx %d "DOSTID" > "LPD64"\n",
+ lmm_objects[i].l_ost_idx,
+ POSTID(&lmm_objects[i].l_ost_oi), last_id);
+ RETURN(-EINVAL);
+ }
}
rc = lov_setstripe(exp, 0, lsmp, lump);
for (i = 0; i < lump->lmm_stripe_count; i++) {
(*lsmp)->lsm_oinfo[i]->loi_ost_idx =
lmm_objects[i].l_ost_idx;
- (*lsmp)->lsm_oinfo[i]->loi_id = lmm_objects[i].l_object_id;
- (*lsmp)->lsm_oinfo[i]->loi_seq = lmm_objects[i].l_object_seq;
+ (*lsmp)->lsm_oinfo[i]->loi_oi = lmm_objects[i].l_ost_oi;
}
RETURN(0);
}
struct lov_mds_md *lmmk = NULL;
int rc, lmm_size;
int lum_size;
- mm_segment_t seg;
ENTRY;
if (!lsm)
RETURN(-ENODATA);
- /*
- * "Switch to kernel segment" to allow copying from kernel space by
- * copy_{to,from}_user().
- */
- seg = get_fs();
- set_fs(KERNEL_DS);
-
/* we only need the header part from user space to get lmm_magic and
* lmm_stripe_count, (the header part is common to v1 and v3) */
lum_size = sizeof(struct lov_user_md_v1);
- if (cfs_copy_from_user(&lum, lump, lum_size))
+ if (copy_from_user(&lum, lump, lum_size))
GOTO(out_set, rc = -EFAULT);
else if ((lum.lmm_magic != LOV_USER_MAGIC) &&
(lum.lmm_magic != LOV_USER_MAGIC_V3))
(lum.lmm_stripe_count < lsm->lsm_stripe_count)) {
/* Return right size of stripe to user */
lum.lmm_stripe_count = lsm->lsm_stripe_count;
- rc = cfs_copy_to_user(lump, &lum, lum_size);
+ rc = copy_to_user(lump, &lum, lum_size);
GOTO(out_set, rc = -EOVERFLOW);
}
rc = lov_packmd(exp, &lmmk, lsm);
if (lum.lmm_magic == LOV_USER_MAGIC) {
/* User request for v1, we need skip lmm_pool_name */
if (lmmk->lmm_magic == LOV_MAGIC_V3) {
- memmove((char*)(&lmmk->lmm_stripe_count) +
- sizeof(lmmk->lmm_stripe_count),
- ((struct lov_mds_md_v3*)lmmk)->lmm_objects,
+ memmove(((struct lov_mds_md_v1 *)lmmk)->lmm_objects,
+ ((struct lov_mds_md_v3 *)lmmk)->lmm_objects,
lmmk->lmm_stripe_count *
sizeof(struct lov_ost_data_v1));
lmm_size -= LOV_MAXPOOLNAME;
lum.lmm_layout_gen = lmmk->lmm_layout_gen;
((struct lov_user_md *)lmmk)->lmm_layout_gen = lum.lmm_layout_gen;
((struct lov_user_md *)lmmk)->lmm_stripe_count = lum.lmm_stripe_count;
- if (cfs_copy_to_user(lump, lmmk, lmm_size))
+ if (copy_to_user(lump, lmmk, lmm_size))
rc = -EFAULT;
obd_free_diskmd(exp, &lmmk);
out_set:
- set_fs(seg);
RETURN(rc);
}