/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
* vim:expandtab:shiftwidth=8:tabstop=8:
*
- * Copyright (C) 2002, 2003 Cluster File Systems, Inc.
- * Author: Andreas Dilger <adilger@clusterfs.com>
+ * GPL HEADER START
*
- * This file is part of Lustre, http://www.lustre.org.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
- * Lustre is free software; you can redistribute it and/or
- * modify it under the terms of version 2 of the GNU General Public
- * License as published by the Free Software Foundation.
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
*
- * Lustre is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
*
- * You should have received a copy of the GNU General Public License
- * along with Lustre; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright 2008 Sun Microsystems, Inc. All rights reserved
+ * Use is subject to license terms.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * lustre/lov/lov_pack.c
*
* (Un)packing of OST/MDS requests
*
+ * Author: Andreas Dilger <adilger@clusterfs.com>
*/
-#define DEBUG_SUBSYSTEM S_LLITE
+#define DEBUG_SUBSYSTEM S_LOV
#ifndef __KERNEL__
#include <liblustre.h>
#endif
-#include <linux/lustre_net.h>
-#include <linux/obd.h>
-#include <linux/obd_lov.h>
-#include <linux/obd_class.h>
-#include <linux/obd_support.h>
+#include <lustre_net.h>
+#include <obd.h>
+#include <obd_lov.h>
+#include <obd_class.h>
+#include <obd_support.h>
+#include <lustre/lustre_user.h>
+
+#include "lov_internal.h"
+
+/* Dump the header fields shared by the v1 and v3 LOV EA formats (both
+ * begin with the same struct lov_mds_md layout).  Fields are stored
+ * little-endian on disk, hence the le*_to_cpu() conversions. */
+static void lov_dump_lmm_common(int level, void *lmmp)
+{
+ struct lov_mds_md *lmm = lmmp;
+
+ CDEBUG(level, "objid "LPX64", magic 0x%08x, pattern %#x\n",
+ le64_to_cpu(lmm->lmm_object_id),
+ le32_to_cpu(lmm->lmm_magic),
+ le32_to_cpu(lmm->lmm_pattern));
+ CDEBUG(level,"stripe_size %u, stripe_count %u\n",
+ le32_to_cpu(lmm->lmm_stripe_size),
+ le32_to_cpu(lmm->lmm_stripe_count));
+}
+
+/* Dump the per-stripe object entries of an LOV EA.  An insane
+ * stripe_count is logged but the dump still proceeds with the given
+ * count, so callers should pass a value read from a verified EA. */
+static void lov_dump_lmm_objects(int level, struct lov_ost_data *lod,
+ int stripe_count)
+{
+ int i;
+
+ if (stripe_count > LOV_V1_INSANE_STRIPE_COUNT) {
+ CDEBUG(level, "bad stripe_count %u > max_stripe_count %u\n",
+ stripe_count, LOV_V1_INSANE_STRIPE_COUNT);
+ }
+
+ for (i = 0; i < stripe_count; ++i, ++lod) {
+ CDEBUG(level, "stripe %u idx %u subobj "LPX64"/"LPX64"\n", i,
+ le32_to_cpu(lod->l_ost_idx),
+ le64_to_cpu(lod->l_object_gr),
+ le64_to_cpu(lod->l_object_id));
+ }
+}
+
+/* Dump a v1 LOV EA: common header followed by the stripe objects. */
+void lov_dump_lmm_v1(int level, struct lov_mds_md_v1 *lmm)
+{
+ lov_dump_lmm_common(level, lmm);
+ lov_dump_lmm_objects(level, lmm->lmm_objects,
+ le32_to_cpu(lmm->lmm_stripe_count));
+}
-void lov_dump_lmm(int level, struct lov_mds_md *lmm)
+/* Dump a v3 LOV EA: common header, pool name, then stripe objects. */
+void lov_dump_lmm_v3(int level, struct lov_mds_md_v3 *lmm)
{
- struct lov_object_id *loi;
- int idx;
-
- CDEBUG(level, "objid "LPX64", magic %#08x, ost_count %u\n",
- lmm->lmm_object_id, lmm->lmm_magic, lmm->lmm_ost_count);
- CDEBUG(level,"stripe_size %u, stripe_count %u, stripe_offset %u\n",
- lmm->lmm_stripe_size, lmm->lmm_stripe_count,
- lmm->lmm_stripe_offset);
- for (idx = 0, loi = lmm->lmm_objects; idx < lmm->lmm_ost_count;
- idx++, loi++)
- CDEBUG(level, "ost idx %u subobj "LPX64"\n", idx,
- loi->l_object_id);
+ lov_dump_lmm_common(level, lmm);
+ CDEBUG(level,"pool_name "LOV_POOLNAMEF"\n", lmm->lmm_pool_name);
+ lov_dump_lmm_objects(level, lmm->lmm_objects,
+ le32_to_cpu(lmm->lmm_stripe_count));
+}
+
+/* Dump an LOV EA of either format, dispatching on its magic.  The EA is
+ * little-endian on disk (the dump helpers above all use le*_to_cpu()),
+ * so convert the magic before comparing against the host-endian
+ * LOV_MAGIC_* constants; the unconverted compare failed on big-endian
+ * hosts.  Unknown magics are reported, not dumped. */
+void lov_dump_lmm(int level, void *lmm)
+{
+ int magic;
+
+ magic = le32_to_cpu(((struct lov_mds_md_v1 *)(lmm))->lmm_magic);
+ switch (magic) {
+ case LOV_MAGIC_V1:
+ return lov_dump_lmm_v1(level, (struct lov_mds_md_v1 *)(lmm));
+ case LOV_MAGIC_V3:
+ return lov_dump_lmm_v3(level, (struct lov_mds_md_v3 *)(lmm));
+ default:
+ /* add the newline every other message in this file has */
+ CERROR("Cannot recognize lmm_magic %x\n", magic);
+ }
+ return;
}
#define LMM_ASSERT(test) \
* LOVs properly. For now lov_mds_md_size() just assumes one obd_id
* per stripe.
*/
+/*
+ * Pack the striping info from an in-memory lov_stripe_md into the
+ * little-endian on-disk/wire lov_mds_md (v1 or v3) format.
+ *
+ * Calling modes (matching the old obd packmd convention):
+ *   lsm == NULL, lmmp == NULL : return the default EA size only.
+ *   lsm == NULL, *lmmp != NULL: free a previously packed EA.
+ *   lsm != NULL, lmmp == NULL : size the EA for this lsm (also clamps
+ *                               lsm->lsm_stripe_count to what fits).
+ *   lsm != NULL, lmmp != NULL : pack lsm into *lmmp.
+ * NOTE(review): the allocation path for *lmmp == NULL lies in context
+ * lines elided from this hunk (the stray RETURN(-ENOMEM) below) —
+ * confirm against the full file.
+ */
-int lov_packmd(struct lustre_handle *conn, struct lov_mds_md **lmmp,
+int lov_packmd(struct obd_export *exp, struct lov_mds_md **lmmp,
struct lov_stripe_md *lsm)
{
- struct obd_device *obd = class_conn2obd(conn);
+ struct obd_device *obd = class_exp2obd(exp);
struct lov_obd *lov = &obd->u.lov;
- struct lov_oinfo *loi;
- struct lov_mds_md *lmm;
- int ost_count = lov->desc.ld_tgt_count;
- int stripe_count = ost_count;
- int lmm_size;
+ struct lov_mds_md_v1 *lmmv1;
+ struct lov_mds_md_v3 *lmmv3;
+ int stripe_count;
+ struct lov_ost_data_v1 *lmm_objects;
+ int lmm_size, lmm_magic;
int i;
ENTRY;
if (lsm) {
- int i, max = 0;
- if (lsm->lsm_magic != LOV_MAGIC) {
- CERROR("bad mem LOV MAGIC: %#010x != %#010x\n",
- lsm->lsm_magic, LOV_MAGIC);
- RETURN(-EINVAL);
+ lmm_magic = lsm->lsm_magic;
+
+ /* If we are just sizing the EA, limit the stripe count
+ * to the actual number of OSTs in this filesystem. */
+ if (!lmmp) {
+ stripe_count = lov_get_stripecnt(lov,
+ lsm->lsm_stripe_count);
+ lsm->lsm_stripe_count = stripe_count;
+ } else {
+ stripe_count = lsm->lsm_stripe_count;
}
- stripe_count = lsm->lsm_stripe_count;
+ } else {
+ /* No needs to allocated more than LOV_MAX_STRIPE_COUNT.
+ * Anyway, this is pretty inaccurate since ld_tgt_count now
+ * represents max index and we should rely on the actual number
+ * of OSTs instead */
+ stripe_count = min((__u32)LOV_MAX_STRIPE_COUNT,
+ lov->desc.ld_tgt_count);
+
+ if (lmmp && *lmmp)
+ lmm_magic = le32_to_cpu((*lmmp)->lmm_magic);
+ else
+ /* lsm == NULL and lmmp == NULL */
+ lmm_magic = LOV_MAGIC;
+ }
+
+ /* only the two known EA formats may be packed or freed */
+ if ((lmm_magic != LOV_MAGIC_V1) &&
+ (lmm_magic != LOV_MAGIC_V3)) {
+ CERROR("bad mem LOV MAGIC: 0x%08X != 0x%08X nor 0x%08X\n",
+ lmm_magic, LOV_MAGIC_V1, LOV_MAGIC_V3);
+ RETURN(-EINVAL);
- for (i = 0,loi = lsm->lsm_oinfo; i < stripe_count; i++,loi++) {
- if (loi->loi_ost_idx > max)
- max = loi->loi_ost_idx;
- }
- ost_count = max + 1;
}
/* XXX LOV STACKING call into osc for sizes */
- lmm_size = lov_mds_md_size(ost_count);
+ lmm_size = lov_mds_md_size(stripe_count, lmm_magic);
if (!lmmp)
RETURN(lmm_size);
if (*lmmp && !lsm) {
- ost_count = le32_to_cpu ((*lmmp)->lmm_ost_count);
- OBD_FREE(*lmmp, lov_mds_md_size(ost_count));
+ /* free path: recompute the size the EA was allocated with */
+ stripe_count = le32_to_cpu((*lmmp)->lmm_stripe_count);
+ lmm_size = lov_mds_md_size(stripe_count, lmm_magic);
+ OBD_FREE(*lmmp, lmm_size);
*lmmp = NULL;
RETURN(0);
}
RETURN(-ENOMEM);
}
- lmm = *lmmp;
- lmm->lmm_magic = cpu_to_le32 (LOV_MAGIC);
- lmm->lmm_ost_count = cpu_to_le16 (ost_count);
+ CDEBUG(D_INFO, "lov_packmd: LOV_MAGIC 0x%08X, lmm_size = %d \n",
+ lmm_magic, lmm_size);
+
+ lmmv1 = *lmmp;
+ lmmv3 = (struct lov_mds_md_v3 *)*lmmp;
+ if (lmm_magic == LOV_MAGIC_V3)
+ lmmv3->lmm_magic = cpu_to_le32(LOV_MAGIC_V3);
+ else
+ lmmv1->lmm_magic = cpu_to_le32(LOV_MAGIC_V1);
if (!lsm)
RETURN(lmm_size);
- lmm->lmm_object_id = cpu_to_le64 (lsm->lsm_object_id);
- lmm->lmm_stripe_count = cpu_to_le16 (stripe_count);
- lmm->lmm_stripe_size = cpu_to_le32 (lsm->lsm_stripe_size);
- lmm->lmm_stripe_offset = cpu_to_le32 (lsm->lsm_stripe_offset);
-
- /* Only fill in the object ids which we are actually using.
- * Assumes lmm_objects is otherwise zero-filled. */
- for (i = 0, loi = lsm->lsm_oinfo; i < stripe_count; i++, loi++) {
- /* XXX call down to osc_packmd() to do the packing */
- LASSERT (loi->loi_id);
- lmm->lmm_objects[loi->loi_ost_idx].l_object_id =
- cpu_to_le64 (loi->loi_id);
+ /* lmmv1 and lmmv3 point to the same struct and have the
+ * same first fields
+ */
+ lmmv1->lmm_object_id = cpu_to_le64(lsm->lsm_object_id);
+ lmmv1->lmm_object_gr = cpu_to_le64(lsm->lsm_object_gr);
+ lmmv1->lmm_stripe_size = cpu_to_le32(lsm->lsm_stripe_size);
+ lmmv1->lmm_stripe_count = cpu_to_le32(stripe_count);
+ lmmv1->lmm_pattern = cpu_to_le32(lsm->lsm_pattern);
+ if (lsm->lsm_magic == LOV_MAGIC_V3) {
+ strncpy(lmmv3->lmm_pool_name, lsm->lsm_pool_name,
+ LOV_MAXPOOLNAME);
+ lmm_objects = lmmv3->lmm_objects;
+ } else {
+ lmm_objects = lmmv1->lmm_objects;
+ }
+
+ /* pack one lov_ost_data per stripe, all little-endian */
+ for (i = 0; i < stripe_count; i++) {
+ struct lov_oinfo *loi = lsm->lsm_oinfo[i];
+
+ /* XXX LOV STACKING call down to osc_packmd() to do packing */
+ LASSERTF(loi->loi_id, "lmm_oid "LPU64" stripe %u/%u idx %u\n",
+ lmmv1->lmm_object_id, i, stripe_count, loi->loi_ost_idx);
+ lmm_objects[i].l_object_id = cpu_to_le64(loi->loi_id);
+ lmm_objects[i].l_object_gr = cpu_to_le64(loi->loi_gr);
+ lmm_objects[i].l_ost_gen = cpu_to_le32(loi->loi_ost_gen);
+ lmm_objects[i].l_ost_idx = cpu_to_le32(loi->loi_ost_idx);
}
RETURN(lmm_size);
}
-static int lov_get_stripecnt(struct lov_obd *lov, int stripe_count)
+/* Find the max stripecount we should use */
+/* Clamp order: 0 -> filesystem default; then cap at the active target
+ * count; then force at least 1; finally cap at LOV_MAX_STRIPE_COUNT. */
+int lov_get_stripecnt(struct lov_obd *lov, __u32 stripe_count)
{
if (!stripe_count)
stripe_count = lov->desc.ld_default_stripe_count;
- if (!stripe_count || stripe_count > lov->desc.ld_active_tgt_count)
+ if (stripe_count > lov->desc.ld_active_tgt_count)
stripe_count = lov->desc.ld_active_tgt_count;
+ /* NOTE(review): if ld_active_tgt_count is 0 this still returns 1 */
+ if (!stripe_count)
+ stripe_count = 1;
+ /* for now, we limit the stripe count directly, when bug 4424 is
+ * fixed this needs to be somewhat dynamic based on whether ext3
+ * can handle larger EA sizes. */
+ if (stripe_count > LOV_MAX_STRIPE_COUNT)
+ stripe_count = LOV_MAX_STRIPE_COUNT;
return stripe_count;
}
+
+/* Validate an on-disk LOV EA before unpacking: look up the handler for
+ * its (little-endian) magic and let it verify size and stripe count.
+ * On an unknown magic the raw EA is hex-dumped to the console to aid
+ * debugging, and -EINVAL is returned.
+ *
+ * Fixes in the dump path:
+ *  - bytes must be printed via unsigned char: with a signed plain char,
+ *    values >= 0x80 sign-extend and "%.2X" emits 8 hex digits,
+ *    overflowing the lmm_bytes * 2 + 1 buffer;
+ *  - the explicit terminator belongs at buffer[sz - 1]; buffer[sz] is
+ *    one byte past the allocation. */
+static int lov_verify_lmm(void *lmm, int lmm_bytes, int *stripe_count)
+{
+ int rc;
+
+ if (lsm_op_find(le32_to_cpu(*(__u32 *)lmm)) == NULL) {
+ char *buffer;
+ int sz;
+
+ CERROR("bad disk LOV MAGIC: 0x%08X; dumping LMM (size=%d):\n",
+ le32_to_cpu(*(__u32 *)lmm), lmm_bytes);
+ sz = lmm_bytes * 2 + 1;
+ OBD_ALLOC(buffer, sz);
+ if (buffer != NULL) {
+ int i;
+
+ for (i = 0; i < lmm_bytes; i++)
+ sprintf(buffer+2*i, "%.2X",
+ ((unsigned char *)lmm)[i]);
+ buffer[sz - 1] = '\0';
+ CERROR("%s\n", buffer);
+ OBD_FREE(buffer, sz);
+ }
+ return -EINVAL;
+ }
+ rc = lsm_op_find(le32_to_cpu(*(__u32 *)lmm))->lsm_lmm_verify(lmm,
+ lmm_bytes, stripe_count);
+ return rc;
+}
+
+/* Allocate and initialise an in-memory stripe MD with room for
+ * stripe_count stripes.  Returns the lsm size on success (so callers
+ * can free it later), or -ENOMEM on allocation failure. */
+int lov_alloc_memmd(struct lov_stripe_md **lsmp, int stripe_count,
+ int pattern, int magic)
+{
+ int i, lsm_size;
+ ENTRY;
+
+ CDEBUG(D_INFO, "alloc lsm, stripe_count %d\n", stripe_count);
+
+ *lsmp = lsm_alloc_plain(stripe_count, &lsm_size);
+ if (!*lsmp) {
+ CERROR("can't allocate lsmp stripe_count %d\n", stripe_count);
+ RETURN(-ENOMEM);
+ }
+
+ cfs_spin_lock_init(&(*lsmp)->lsm_lock);
+ (*lsmp)->lsm_magic = magic;
+ (*lsmp)->lsm_stripe_count = stripe_count;
+ (*lsmp)->lsm_maxbytes = LUSTRE_STRIPE_MAXBYTES * stripe_count;
+ (*lsmp)->lsm_pattern = pattern;
+ (*lsmp)->lsm_pool_name[0] = '\0';
+ /* NOTE(review): this ~0 sentinel is written before the loi_init()
+ * loop below runs over all stripes including index 0 — confirm
+ * loi_init() preserves loi_ost_idx, otherwise the sentinel is lost */
+ (*lsmp)->lsm_oinfo[0]->loi_ost_idx = ~0;
+
+ for (i = 0; i < stripe_count; i++)
+ loi_init((*lsmp)->lsm_oinfo[i]);
+
+ RETURN(lsm_size);
+}
+
+/* Free an in-memory stripe MD via its magic's handler and clear the
+ * caller's pointer so it cannot be reused after the free. */
+void lov_free_memmd(struct lov_stripe_md **lsmp)
+{
+ struct lov_stripe_md *lsm = *lsmp;
+
+ LASSERT(lsm_op_find(lsm->lsm_magic) != NULL);
+ lsm_op_find(lsm->lsm_magic)->lsm_free(lsm);
+
+ *lsmp = NULL;
+}
+
+
/* Unpack LOV object metadata from disk storage. It is packed in LE byte
* order and is opaque to the networking layer.
*/
+/* Calling modes mirror lov_packmd():
+ *   lmm != NULL : verify and unpack into a freshly allocated *lsmp
+ *                 (the lsm is freed again if the unpack step fails);
+ *   lmm == NULL, *lsmp != NULL : free the existing lsm;
+ *   lmm == NULL, *lsmp == NULL : allocate an empty default-sized lsm.
+ * Returns the lsm size on success or a negative errno. */
-int lov_unpackmd(struct lustre_handle *conn, struct lov_stripe_md **lsmp,
+int lov_unpackmd(struct obd_export *exp, struct lov_stripe_md **lsmp,
struct lov_mds_md *lmm, int lmm_bytes)
{
- struct obd_device *obd = class_conn2obd(conn);
+ struct obd_device *obd = class_exp2obd(exp);
struct lov_obd *lov = &obd->u.lov;
- struct lov_stripe_md *lsm;
- struct lov_oinfo *loi;
- int ost_count = 0;
- int ost_offset = 0;
- int stripe_count;
- int lsm_size;
- int i;
+ int rc = 0, stripe_count, lsm_size;
+ __u32 magic;
ENTRY;
+ /* If passed an MDS struct use values from there, otherwise defaults */
if (lmm) {
- if (lmm_bytes < sizeof (*lmm)) {
- CERROR("lov_mds_md too small: %d, need %d\n",
- lmm_bytes, (int)sizeof(*lmm));
- RETURN(-EINVAL);
- }
- if (le32_to_cpu (lmm->lmm_magic) != LOV_MAGIC) {
- CERROR("bad disk LOV MAGIC: %#08x != %#08x\n",
- le32_to_cpu (lmm->lmm_magic), LOV_MAGIC);
- RETURN(-EINVAL);
- }
-
- ost_count = le16_to_cpu (lmm->lmm_ost_count);
- stripe_count = le16_to_cpu (lmm->lmm_stripe_count);
-
- if (ost_count == 0 || stripe_count == 0) {
- CERROR ("zero ost %d or stripe %d count\n",
- ost_count, stripe_count);
- RETURN (-EINVAL);
- }
-
- if (lmm_bytes < lov_mds_md_size (ost_count)) {
- CERROR ("lov_mds_md too small: %d, need %d\n",
- lmm_bytes, lov_mds_md_size (ost_count));
- RETURN (-EINVAL);
- }
- } else
+ /* magic/size/stripe-count validation is delegated per-format */
+ rc = lov_verify_lmm(lmm, lmm_bytes, &stripe_count);
+ if (rc)
+ RETURN(rc);
+ magic = le32_to_cpu(lmm->lmm_magic);
+ } else {
stripe_count = lov_get_stripecnt(lov, 0);
+ magic = LOV_MAGIC;
+ }
- /* XXX LOV STACKING call into osc for sizes */
- lsm_size = lov_stripe_md_size(stripe_count);
-
- if (!lsmp)
- RETURN(lsm_size);
-
+ /* If we aren't passed an lsmp struct, we just want the size */
+ if (!lsmp) {
+ /* XXX LOV STACKING call into osc for sizes */
+ LBUG();
+ RETURN(lov_stripe_md_size(stripe_count));
+ }
+ /* If we are passed an allocated struct but nothing to unpack, free */
if (*lsmp && !lmm) {
- stripe_count = (*lsmp)->lsm_stripe_count;
- OBD_FREE(*lsmp, lov_stripe_md_size(stripe_count));
- *lsmp = NULL;
+ lov_free_memmd(lsmp);
RETURN(0);
}
- if (!*lsmp) {
- OBD_ALLOC(*lsmp, lsm_size);
- if (!*lsmp)
- RETURN(-ENOMEM);
- }
-
- lsm = *lsmp;
- lsm->lsm_magic = LOV_MAGIC;
- lsm->lsm_stripe_count = stripe_count;
- lsm->lsm_maxbytes = LUSTRE_STRIPE_MAXBYTES * stripe_count;
+ lsm_size = lov_alloc_memmd(lsmp, stripe_count, LOV_PATTERN_RAID0,
+ magic);
+ if (lsm_size < 0)
+ RETURN(lsm_size);
+ /* If we are passed a pointer but nothing to unpack, we only alloc */
if (!lmm)
RETURN(lsm_size);
- lsm->lsm_object_id = le64_to_cpu (lmm->lmm_object_id);
- lsm->lsm_stripe_size = le32_to_cpu (lmm->lmm_stripe_size);
- ost_offset = lsm->lsm_stripe_offset = le32_to_cpu (lmm->lmm_stripe_offset);
-
- LMM_ASSERT(lsm->lsm_object_id);
- LMM_ASSERT(ost_count);
-
- for (i = 0, loi = lsm->lsm_oinfo; i < ost_count; i++, ost_offset++) {
- ost_offset %= ost_count;
-
- if (!lmm->lmm_objects[ost_offset].l_object_id)
- continue;
-
- LMM_ASSERT(loi - lsm->lsm_oinfo < stripe_count);
- /* XXX LOV STACKING call down to osc_unpackmd() */
- loi->loi_id =
- le64_to_cpu (lmm->lmm_objects[ost_offset].l_object_id);
- loi->loi_ost_idx = ost_offset;
- loi->loi_dirty_ot = &loi->loi_dirty_ot_inline;
- ot_init(loi->loi_dirty_ot);
- loi++;
+ /* per-format unpack; on failure release the lsm we just allocated */
+ LASSERT(lsm_op_find(magic) != NULL);
+ rc = lsm_op_find(magic)->lsm_unpackmd(lov, *lsmp, lmm);
+ if (rc) {
+ lov_free_memmd(lsmp);
+ RETURN(rc);
}
- LMM_ASSERT(loi - lsm->lsm_oinfo > 0);
- LMM_ASSERT(loi - lsm->lsm_oinfo == stripe_count);
RETURN(lsm_size);
}
-/* Configure object striping information on a new file.
- *
- * @lmmu is a pointer to a user struct with one or more of the fields set to
- * indicate the application preference: lmm_stripe_count, lmm_stripe_size,
- * lmm_stripe_offset, and lmm_stripe_pattern. lmm_magic must be LOV_MAGIC.
- * @lsmp is a pointer to an in-core stripe MD that needs to be filled in.
- */
-int lov_setstripe(struct lustre_handle *conn, struct lov_stripe_md **lsmp,
- struct lov_mds_md *lmmu)
+/* Validate a userspace lov_user_md (v1 or v3, either byte order),
+ * normalise pattern/size/offset/count, and allocate an lsm reflecting
+ * the request.  Returns 0 on success, negative errno on bad input. */
+static int __lov_setstripe(struct obd_export *exp, int max_lmm_size,
+ struct lov_stripe_md **lsmp,
+ struct lov_user_md *lump)
{
- struct obd_device *obd = class_conn2obd(conn);
+ struct obd_device *obd = class_exp2obd(exp);
struct lov_obd *lov = &obd->u.lov;
- struct lov_mds_md lmm;
- struct lov_stripe_md *lsm;
+ char buffer[sizeof(struct lov_user_md_v3)];
+ struct lov_user_md_v3 *lumv3 = (struct lov_user_md_v3 *)&buffer[0];
+ struct lov_user_md_v1 *lumv1 = (struct lov_user_md_v1 *)&buffer[0];
+ int lmm_magic;
int stripe_count;
int rc;
ENTRY;
- rc = copy_from_user(&lmm, lmmu, sizeof(lmm));
- if (rc)
+ /* first copy only the v1-sized header; it is common to v1 and v3 */
+ if (cfs_copy_from_user(lumv3, lump, sizeof(struct lov_user_md_v1)))
RETURN(-EFAULT);
- /* Bug 1185 FIXME: struct lov_mds_md is little-endian everywhere else */
+ lmm_magic = lumv1->lmm_magic;
+
+ /* accept both byte orders; swab in place when needed */
+ if (lmm_magic == __swab32(LOV_USER_MAGIC_V1)) {
+ lustre_swab_lov_user_md_v1(lumv1);
+ lmm_magic = LOV_USER_MAGIC_V1;
+ } else if (lmm_magic == LOV_USER_MAGIC_V3) {
+ if (cfs_copy_from_user(lumv3, lump, sizeof(*lumv3)))
+ RETURN(-EFAULT);
+ } else if (lmm_magic == __swab32(LOV_USER_MAGIC_V3)) {
+ if (cfs_copy_from_user(lumv3, lump, sizeof(*lumv3)))
+ RETURN(-EFAULT);
+ lustre_swab_lov_user_md_v3(lumv3);
+ lmm_magic = LOV_USER_MAGIC_V3;
+ } else if (lmm_magic != LOV_USER_MAGIC_V1) {
+ CDEBUG(D_IOCTL,
+ "bad userland LOV MAGIC: %#08x != %#08x nor %#08x\n",
+ lmm_magic, LOV_USER_MAGIC_V1, LOV_USER_MAGIC_V3);
+ RETURN(-EINVAL);
+ }
- if (lmm.lmm_magic != LOV_MAGIC) {
- CERROR("bad userland LOV MAGIC: %#08x != %#08x\n",
- lmm.lmm_magic, LOV_MAGIC);
- RETURN(-EINVAL);
+ /* in the rest of the tests, as *lumv1 and lumv3 have the same
+ * fields, we use lumv1 to avoid code duplication */
+
+ if (lumv1->lmm_pattern == 0) {
+ lumv1->lmm_pattern = lov->desc.ld_pattern ?
+ lov->desc.ld_pattern : LOV_PATTERN_RAID0;
}
-#if 0 /* the stripe_count/offset is "advisory", and it gets fixed later */
- if (lmm.lmm_stripe_count > lov->desc.ld_tgt_count &&
- lmm.lmm_stripe_count != 0xffffffff) {
- CERROR("stripe count %u more than OST count %d\n",
- lmm.lmm_stripe_count, lov->desc.ld_tgt_count);
+
+ if (lumv1->lmm_pattern != LOV_PATTERN_RAID0) {
+ CDEBUG(D_IOCTL, "bad userland stripe pattern: %#x\n",
+ lumv1->lmm_pattern);
RETURN(-EINVAL);
}
- if (lmm.lmm_stripe_offset >= lov->desc.ld_tgt_count &&
- lmm.lmm_stripe_offset != 0xffffffff) {
- CERROR("stripe offset %u more than max OST index %d\n",
- lmm.lmm_stripe_offset, lov->desc.ld_tgt_count);
- RETURN(-EINVAL);
+
+ /* 64kB is the largest common page size we see (ia64), and matches the
+ * check in lfs */
+ if (lumv1->lmm_stripe_size & (LOV_MIN_STRIPE_SIZE - 1)) {
+ CDEBUG(D_IOCTL, "stripe size %u not multiple of %u, fixing\n",
+ lumv1->lmm_stripe_size, LOV_MIN_STRIPE_SIZE);
+ lumv1->lmm_stripe_size = LOV_MIN_STRIPE_SIZE;
}
-#endif
- if (lmm.lmm_stripe_size & (PAGE_SIZE - 1)) {
- CERROR("stripe size %u not multiple of %lu\n",
- lmm.lmm_stripe_size, PAGE_SIZE);
+
+ /* -1 means "any OST"; anything else must be a valid index */
+ if ((lumv1->lmm_stripe_offset >= lov->desc.ld_tgt_count) &&
+ (lumv1->lmm_stripe_offset !=
+ (typeof(lumv1->lmm_stripe_offset))(-1))) {
+ CDEBUG(D_IOCTL, "stripe offset %u > number of OSTs %u\n",
+ lumv1->lmm_stripe_offset, lov->desc.ld_tgt_count);
RETURN(-EINVAL);
}
- stripe_count = lov_get_stripecnt(lov, lmm.lmm_stripe_count);
+ stripe_count = lov_get_stripecnt(lov, lumv1->lmm_stripe_count);
+
+ /* cap the stripe count so the packed EA fits in max_lmm_size */
+ if (max_lmm_size) {
+ int max_stripes = (max_lmm_size -
+ lov_mds_md_size(0, lmm_magic)) /
+ sizeof(struct lov_ost_data_v1);
+ if (unlikely(max_stripes < stripe_count)) {
+ CDEBUG(D_IOCTL, "stripe count reset from %d to %d\n",
+ stripe_count, max_stripes);
+ stripe_count = max_stripes;
+ }
+ }
- if ((__u64)lmm.lmm_stripe_size * stripe_count > ~0UL) {
- CERROR("stripe width %ux%u > %lu on 32-bit system\n",
- lmm.lmm_stripe_size, (int)lmm.lmm_stripe_count, ~0UL);
- RETURN(-EINVAL);
+ if (lmm_magic == LOV_USER_MAGIC_V3) {
+ struct pool_desc *pool;
+
+ pool = lov_find_pool(lov, lumv3->lmm_pool_name);
+ if (pool != NULL) {
+ /* a requested start OST must belong to the pool */
+ if (lumv3->lmm_stripe_offset !=
+ (typeof(lumv3->lmm_stripe_offset))(-1)) {
+ rc = lov_check_index_in_pool(
+ lumv3->lmm_stripe_offset, pool);
+ if (rc < 0) {
+ lov_pool_putref(pool);
+ RETURN(-EINVAL);
+ }
+ }
+
+ if (stripe_count > pool_tgt_count(pool))
+ stripe_count = pool_tgt_count(pool);
+
+ lov_pool_putref(pool);
+ }
}
- /* XXX LOV STACKING call into osc for sizes */
- OBD_ALLOC(lsm, lov_stripe_md_size(stripe_count));
- if (!lsm)
- RETURN(-ENOMEM);
+ rc = lov_alloc_memmd(lsmp, stripe_count, lumv1->lmm_pattern, lmm_magic);
- lsm->lsm_magic = LOV_MAGIC;
- lsm->lsm_stripe_count = stripe_count;
- lsm->lsm_stripe_offset = lmm.lmm_stripe_offset;
- lsm->lsm_stripe_size = lmm.lmm_stripe_size;
- lsm->lsm_maxbytes = LUSTRE_STRIPE_MAXBYTES * stripe_count;
+ if (rc >= 0) {
+ (*lsmp)->lsm_oinfo[0]->loi_ost_idx = lumv1->lmm_stripe_offset;
+ (*lsmp)->lsm_stripe_size = lumv1->lmm_stripe_size;
+ /* NOTE(review): strncpy does not NUL-terminate a
+ * LOV_MAXPOOLNAME-long name — confirm lsm_pool_name has
+ * room for (or readers tolerate) an unterminated name */
+ if (lmm_magic == LOV_USER_MAGIC_V3)
+ strncpy((*lsmp)->lsm_pool_name, lumv3->lmm_pool_name,
+ LOV_MAXPOOLNAME);
+ rc = 0;
+ }
- *lsmp = lsm;
+ RETURN(rc);
+}
+/* Configure object striping information on a new file.
+ *
+ * @lmmu is a pointer to a user struct with one or more of the fields set to
+ * indicate the application preference: lmm_stripe_count, lmm_stripe_size,
+ * lmm_stripe_offset, and lmm_stripe_pattern. lmm_magic must be LOV_MAGIC.
+ * @lsmp is a pointer to an in-core stripe MD that needs to be filled in.
+ */
+/* Wrapper switching to KERNEL_DS so __lov_setstripe's copy_from_user
+ * calls also work when lump points into kernel memory; the previous
+ * segment is always restored before returning. */
+int lov_setstripe(struct obd_export *exp, int max_lmm_size,
+ struct lov_stripe_md **lsmp, struct lov_user_md *lump)
+{
+ int rc;
+ mm_segment_t seg;
+
+ seg = get_fs();
+ set_fs(KERNEL_DS);
+
+ rc = __lov_setstripe(exp, max_lmm_size, lsmp, lump);
+ set_fs(seg);
+ RETURN(rc);
+}
+/* Set striping with explicit per-stripe objects: verify each requested
+ * object id does not exceed its OST's last allocated id, then build the
+ * lsm via lov_setstripe() and overwrite its stripes with the supplied
+ * idx/id/gr triples. */
+int lov_setea(struct obd_export *exp, struct lov_stripe_md **lsmp,
+ struct lov_user_md *lump)
+{
+ int i;
+ int rc;
+ struct obd_export *oexp;
+ struct lov_obd *lov = &exp->exp_obd->u.lov;
+ obd_id last_id = 0;
+ struct lov_user_ost_data_v1 *lmm_objects;
+
+ ENTRY;
+
+ if (lump->lmm_magic == LOV_USER_MAGIC_V3)
+ lmm_objects = ((struct lov_user_md_v3 *)lump)->lmm_objects;
+ else
+ lmm_objects = lump->lmm_objects;
+
+ for (i = 0; i < lump->lmm_stripe_count; i++) {
+ __u32 len = sizeof(last_id);
+ /* NOTE(review): l_ost_idx comes from the caller and indexes
+ * lov->lov_tgts with no visible bounds or NULL check here —
+ * confirm callers validate it before this point */
+ oexp = lov->lov_tgts[lmm_objects[i].l_ost_idx]->ltd_exp;
+ rc = obd_get_info(oexp, sizeof(KEY_LAST_ID), KEY_LAST_ID,
+ &len, &last_id, NULL);
+ if (rc)
+ RETURN(rc);
+ if (lmm_objects[i].l_object_id > last_id) {
+ CERROR("Setting EA for object > than last id on "
+ "ost idx %d "LPD64" > "LPD64" \n",
+ lmm_objects[i].l_ost_idx,
+ lmm_objects[i].l_object_id, last_id);
+ RETURN(-EINVAL);
+ }
+ }
+
+ rc = lov_setstripe(exp, 0, lsmp, lump);
+ if (rc)
+ RETURN(rc);
+
+ for (i = 0; i < lump->lmm_stripe_count; i++) {
+ (*lsmp)->lsm_oinfo[i]->loi_ost_idx =
+ lmm_objects[i].l_ost_idx;
+ (*lsmp)->lsm_oinfo[i]->loi_id = lmm_objects[i].l_object_id;
+ (*lsmp)->lsm_oinfo[i]->loi_gr = lmm_objects[i].l_object_gr;
+ }
+ RETURN(0);
+}
+
+
/* Retrieve object striping information.
*
- * @lmmu is a pointer to an in-core struct with lmm_ost_count indicating
+ * @lump is a pointer to an in-core struct with lmm_ost_count indicating
* the maximum number of OST indices which will fit in the user buffer.
- * lmm_magic must be LOV_MAGIC.
+ * lmm_magic must be LOV_USER_MAGIC.
*/
+/* NOTE(review): the comment above still says lmm_ost_count; the code
+ * below reads lum.lmm_stripe_count as the caller's capacity — confirm
+ * and update the comment in the full file. */
-int lov_getstripe(struct lustre_handle *conn, struct lov_stripe_md *lsm,
- struct lov_mds_md *lmmu)
+int lov_getstripe(struct obd_export *exp, struct lov_stripe_md *lsm,
+ struct lov_user_md *lump)
{
- struct lov_mds_md lmm, *lmmk = NULL;
+ /*
+ * XXX huge struct allocated on stack.
+ */
+ /* we use lov_user_md_v3 because it is larger than lov_user_md_v1 */
+ struct lov_user_md_v3 lum;
+ struct lov_mds_md *lmmk = NULL;
int rc, lmm_size;
+ int lum_size;
+ mm_segment_t seg;
ENTRY;
if (!lsm)
RETURN(-ENODATA);
- rc = copy_from_user(&lmm, lmmu, sizeof(lmm));
- if (rc)
- RETURN(-EFAULT);
-
- if (lmm.lmm_magic != LOV_MAGIC)
- RETURN(-EINVAL);
-
- rc = lov_packmd(conn, &lmmk, lsm);
+ /*
+ * "Switch to kernel segment" to allow copying from kernel space by
+ * copy_{to,from}_user().
+ */
+ seg = get_fs();
+ set_fs(KERNEL_DS);
+
+ /* we only need the header part from user space to get lmm_magic and
+ * lmm_stripe_count, (the header part is common to v1 and v3) */
+ lum_size = sizeof(struct lov_user_md_v1);
+ if (cfs_copy_from_user(&lum, lump, lum_size))
+ GOTO(out_set, rc = -EFAULT);
+ else if ((lum.lmm_magic != LOV_USER_MAGIC) &&
+ (lum.lmm_magic != LOV_USER_MAGIC_V3))
+ GOTO(out_set, rc = -EINVAL);
+
+ /* caller's buffer too small: report the needed count, then fail */
+ if (lum.lmm_stripe_count &&
+ (lum.lmm_stripe_count < lsm->lsm_stripe_count)) {
+ /* Return right size of stripe to user */
+ lum.lmm_stripe_count = lsm->lsm_stripe_count;
+ rc = cfs_copy_to_user(lump, &lum, lum_size);
+ GOTO(out_set, rc = -EOVERFLOW);
+ }
+ rc = lov_packmd(exp, &lmmk, lsm);
if (rc < 0)
- RETURN(rc);
- /* Bug 1185 FIXME: convert lmmk to big-endian before copy to userspace */
+ GOTO(out_set, rc);
lmm_size = rc;
rc = 0;
+ /* FIXME: Bug 1185 - copy fields properly when structs change */
+ /* struct lov_user_md_v3 and struct lov_mds_md_v3 must be the same */
+ CLASSERT(sizeof(lum) == sizeof(struct lov_mds_md_v3));
+ CLASSERT(sizeof lum.lmm_objects[0] == sizeof lmmk->lmm_objects[0]);
+
+ /* on big-endian hosts swab the packed (little-endian) EA back */
+ if ((cpu_to_le32(LOV_MAGIC) != LOV_MAGIC) &&
+ ((lmmk->lmm_magic == cpu_to_le32(LOV_MAGIC_V1)) ||
+ (lmmk->lmm_magic == cpu_to_le32(LOV_MAGIC_V3)))) {
+ lustre_swab_lov_mds_md(lmmk);
+ lustre_swab_lov_user_md_objects(
+ (struct lov_user_ost_data*)lmmk->lmm_objects,
+ lmmk->lmm_stripe_count);
+ }
+ if (lum.lmm_magic == LOV_USER_MAGIC) {
+ /* User request for v1, we need skip lmm_pool_name */
+ if (lmmk->lmm_magic == LOV_MAGIC_V3) {
+ memmove((char*)(&lmmk->lmm_stripe_count) +
+ sizeof(lmmk->lmm_stripe_count),
+ ((struct lov_mds_md_v3*)lmmk)->lmm_objects,
+ lmmk->lmm_stripe_count *
+ sizeof(struct lov_ost_data_v1));
+ lmm_size -= LOV_MAXPOOLNAME;
+ }
+ } else {
+ /* if v3 we just have to update the lum_size */
+ lum_size = sizeof(struct lov_user_md_v3);
+ }
+
/* User wasn't expecting this many OST entries */
- if (lmm.lmm_ost_count < lmmk->lmm_ost_count)
- rc = -EOVERFLOW;
- else if (copy_to_user(lmmu, lmmk, lmm_size))
+ if (lum.lmm_stripe_count == 0)
+ lmm_size = lum_size;
+ else if (lum.lmm_stripe_count < lmmk->lmm_stripe_count)
+ GOTO(out_set, rc = -EOVERFLOW);
+ /*
+ * Have a difference between lov_mds_md & lov_user_md.
+ * So we have to re-order the data before copy to user.
+ */
+ lum.lmm_stripe_count = lmmk->lmm_stripe_count;
+ ((struct lov_user_md*)lmmk)->lmm_stripe_offset = 0;
+ ((struct lov_user_md*)lmmk)->lmm_stripe_count = lum.lmm_stripe_count;
+ if (cfs_copy_to_user(lump, lmmk, lmm_size))
rc = -EFAULT;
- obd_free_diskmd (conn, &lmmk);
-
+ obd_free_diskmd(exp, &lmmk);
+out_set:
+ set_fs(seg);
RETURN(rc);
}