* Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
*
- * Copyright (c) 2011, 2015, Intel Corporation.
+ * Copyright (c) 2011, 2017, Intel Corporation.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
#define DEBUG_SUBSYSTEM S_LOV
-#include <lustre/lustre_idl.h>
-#include <lustre/lustre_user.h>
-
#include <lustre_net.h>
#include <lustre_swab.h>
#include <obd.h>
void lov_dump_lmm_common(int level, void *lmmp)
{
struct lov_mds_md *lmm = lmmp;
- struct ost_id oi;
+ struct ost_id oi;
lmm_oi_le_to_cpu(&oi, &lmm->lmm_oi);
- CDEBUG(level, "objid "DOSTID", magic 0x%08x, pattern %#x\n",
- POSTID(&oi), le32_to_cpu(lmm->lmm_magic),
- le32_to_cpu(lmm->lmm_pattern));
- CDEBUG(level, "stripe_size %u, stripe_count %u, layout_gen %u\n",
- le32_to_cpu(lmm->lmm_stripe_size),
- le16_to_cpu(lmm->lmm_stripe_count),
- le16_to_cpu(lmm->lmm_layout_gen));
+ CDEBUG_LIMIT(level, "objid "DOSTID", magic 0x%08x, pattern %#x\n",
+ POSTID(&oi), le32_to_cpu(lmm->lmm_magic),
+ le32_to_cpu(lmm->lmm_pattern));
+ CDEBUG_LIMIT(level, "stripe_size %u, stripe_count %u, layout_gen %u\n",
+ le32_to_cpu(lmm->lmm_stripe_size),
+ le16_to_cpu(lmm->lmm_stripe_count),
+ le16_to_cpu(lmm->lmm_layout_gen));
}
static void lov_dump_lmm_objects(int level, struct lov_ost_data *lod,
- int stripe_count)
+ int stripe_count)
{
int i;
if (stripe_count > LOV_V1_INSANE_STRIPE_COUNT) {
- CDEBUG(level, "bad stripe_count %u > max_stripe_count %u\n",
- stripe_count, LOV_V1_INSANE_STRIPE_COUNT);
+ CDEBUG_LIMIT(level,
+ "bad stripe_count %u > max_stripe_count %u\n",
+ stripe_count, LOV_V1_INSANE_STRIPE_COUNT);
return;
}
for (i = 0; i < stripe_count; ++i, ++lod) {
- struct ost_id oi;
+ struct ost_id oi;
ostid_le_to_cpu(&lod->l_ost_oi, &oi);
- CDEBUG(level, "stripe %u idx %u subobj "DOSTID"\n", i,
- le32_to_cpu(lod->l_ost_idx), POSTID(&oi));
+ CDEBUG_LIMIT(level, "stripe %u idx %u subobj "DOSTID"\n", i,
+ le32_to_cpu(lod->l_ost_idx), POSTID(&oi));
}
}
/* Dump a LOV_MAGIC_V1 layout: common header followed by stripe objects. */
void lov_dump_lmm_v1(int level, struct lov_mds_md_v1 *lmm)
{
	lov_dump_lmm_common(level, lmm);
	lov_dump_lmm_objects(level, lmm->lmm_objects,
			     le16_to_cpu(lmm->lmm_stripe_count));
}
/*
 * Dump a LOV_MAGIC_V3 layout: common header, pool name, then stripe
 * objects (v3 differs from v1 only by the embedded pool name).
 */
void lov_dump_lmm_v3(int level, struct lov_mds_md_v3 *lmm)
{
	lov_dump_lmm_common(level, lmm);
	CDEBUG_LIMIT(level, "pool_name "LOV_POOLNAMEF"\n", lmm->lmm_pool_name);
	lov_dump_lmm_objects(level, lmm->lmm_objects,
			     le16_to_cpu(lmm->lmm_stripe_count));
}
void lov_dump_lmm(int level, void *lmm)
lov_dump_lmm_v3(level, (struct lov_mds_md_v3 *)lmm);
break;
default:
- CDEBUG(level, "unrecognized lmm_magic %x, assuming %x\n",
- magic, LOV_MAGIC_V1);
+ CDEBUG_LIMIT(level, "unrecognized lmm_magic %x, assuming %x\n",
+ magic, LOV_MAGIC_V1);
lov_dump_lmm_common(level, lmm);
break;
}
struct lov_ost_data_v1 *lmm_objects;
size_t lmm_size;
unsigned int i;
+
ENTRY;
lmm_size = lov_mds_md_size(lsm->lsm_entries[0]->lsme_stripe_count,
if (buf_size < lmm_size)
RETURN(-ERANGE);
- /* lmmv1 and lmmv3 point to the same struct and have the
+ /*
+ * lmmv1 and lmmv3 point to the same struct and have the
* same first fields
*/
lmmv1->lmm_magic = cpu_to_le32(lsm->lsm_magic);
lmmv1->lmm_layout_gen = cpu_to_le16(lsm->lsm_layout_gen);
if (lsm->lsm_magic == LOV_MAGIC_V3) {
- CLASSERT(sizeof(lsm->lsm_entries[0]->lsme_pool_name) ==
- sizeof(lmmv3->lmm_pool_name));
+ BUILD_BUG_ON(sizeof(lsm->lsm_entries[0]->lsme_pool_name) !=
+ sizeof(lmmv3->lmm_pool_name));
strlcpy(lmmv3->lmm_pool_name,
lsm->lsm_entries[0]->lsme_pool_name,
sizeof(lmmv3->lmm_pool_name));
RETURN(lmm_size);
}
+ssize_t lov_lsm_pack_foreign(const struct lov_stripe_md *lsm, void *buf,
+ size_t buf_size)
+{
+ struct lov_foreign_md *lfm = buf;
+ size_t lfm_size;
+
+ lfm_size = lsm->lsm_foreign_size;
+
+ if (buf_size == 0)
+ RETURN(lfm_size);
+
+ if (buf_size < lfm_size)
+ RETURN(-ERANGE);
+
+ /* full foreign LOV is already avail in its cache
+ * no need to translate format fields to little-endian
+ */
+ memcpy(lfm, lsm_foreign(lsm), lsm->lsm_foreign_size);
+
+ RETURN(lfm_size);
+}
+
ssize_t lov_lsm_pack(const struct lov_stripe_md *lsm, void *buf,
size_t buf_size)
{
unsigned int offset;
unsigned int size;
unsigned int i;
+
ENTRY;
if (lsm->lsm_magic == LOV_MAGIC_V1 || lsm->lsm_magic == LOV_MAGIC_V3)
return lov_lsm_pack_v1v3(lsm, buf, buf_size);
+ if (lsm->lsm_magic == LOV_MAGIC_FOREIGN)
+ return lov_lsm_pack_foreign(lsm, buf, buf_size);
+
lmm_size = lov_comp_md_size(lsm);
if (buf_size == 0)
RETURN(lmm_size);
lcmv1->lcm_magic = cpu_to_le32(lsm->lsm_magic);
lcmv1->lcm_size = cpu_to_le32(lmm_size);
lcmv1->lcm_layout_gen = cpu_to_le32(lsm->lsm_layout_gen);
+ lcmv1->lcm_flags = cpu_to_le16(lsm->lsm_flags);
+ lcmv1->lcm_mirror_count = cpu_to_le16(lsm->lsm_mirror_count);
lcmv1->lcm_entry_count = cpu_to_le16(lsm->lsm_entry_count);
offset = sizeof(*lcmv1) + sizeof(*lcme) * lsm->lsm_entry_count;
for (entry = 0; entry < lsm->lsm_entry_count; entry++) {
struct lov_stripe_md_entry *lsme;
struct lov_mds_md *lmm;
- __u16 stripecnt;
+ __u16 stripe_count;
lsme = lsm->lsm_entries[entry];
lcme = &lcmv1->lcm_entries[entry];
lcme->lcme_id = cpu_to_le32(lsme->lsme_id);
lcme->lcme_flags = cpu_to_le32(lsme->lsme_flags);
+ if (lsme->lsme_flags & LCME_FL_NOSYNC)
+ lcme->lcme_timestamp =
+ cpu_to_le64(lsme->lsme_timestamp);
lcme->lcme_extent.e_start =
cpu_to_le64(lsme->lsme_extent.e_start);
lcme->lcme_extent.e_end =
if (lsme_inited(lsme) &&
!(lsme->lsme_pattern & LOV_PATTERN_F_RELEASED))
- stripecnt = lsme->lsme_stripe_count;
+ stripe_count = lsme->lsme_stripe_count;
else
- stripecnt = 0;
+ stripe_count = 0;
- for (i = 0; i < stripecnt; i++) {
+ for (i = 0; i < stripe_count; i++) {
struct lov_oinfo *loi = lsme->lsme_oinfo[i];
ostid_cpu_to_le(&loi->loi_oi, &lmm_objects[i].l_ost_oi);
cpu_to_le32(loi->loi_ost_idx);
}
- size = lov_mds_md_size(stripecnt, lsme->lsme_magic);
+ size = lov_mds_md_size(stripe_count, lsme->lsme_magic);
lcme->lcme_size = cpu_to_le32(size);
offset += size;
} /* for each layout component */
}
/* Find the max stripecount we should use */
-__u16 lov_get_stripecnt(struct lov_obd *lov, __u32 magic, __u16 stripe_count)
+__u16 lov_get_stripe_count(struct lov_obd *lov, __u32 magic, __u16 stripe_count)
{
- __u32 max_stripes = LOV_MAX_STRIPE_COUNT_OLD;
-
- if (!stripe_count)
- stripe_count = lov->desc.ld_default_stripe_count;
- if (stripe_count > lov->desc.ld_active_tgt_count)
- stripe_count = lov->desc.ld_active_tgt_count;
- if (!stripe_count)
- stripe_count = 1;
-
- /* stripe count is based on whether ldiskfs can handle
- * larger EA sizes */
- if (lov->lov_ocd.ocd_connect_flags & OBD_CONNECT_MAX_EASIZE &&
- lov->lov_ocd.ocd_max_easize)
+ __u32 max_stripes = LOV_MAX_STRIPE_COUNT_OLD;
+
+ if (!stripe_count)
+ stripe_count = lov->desc.ld_default_stripe_count;
+ if (stripe_count > lov->desc.ld_active_tgt_count)
+ stripe_count = lov->desc.ld_active_tgt_count;
+ if (!stripe_count)
+ stripe_count = 1;
+
+ /*
+ * stripe count is based on whether ldiskfs can handle
+ * larger EA sizes
+ */
+ if (lov->lov_ocd.ocd_connect_flags & OBD_CONNECT_MAX_EASIZE &&
+ lov->lov_ocd.ocd_max_easize)
max_stripes = lov_mds_md_max_stripe_count(
lov->lov_ocd.ocd_max_easize, magic);
- if (stripe_count > max_stripes)
- stripe_count = max_stripes;
+ if (stripe_count > max_stripes)
+ stripe_count = max_stripes;
- return stripe_count;
+ return stripe_count;
}
int lov_free_memmd(struct lov_stripe_md **lsmp)
return refc;
}
-/* Unpack LOV object metadata from disk storage. It is packed in LE byte
+/*
+ * Unpack LOV object metadata from disk storage. It is packed in LE byte
* order and is opaque to the networking layer.
*/
struct lov_stripe_md *lov_unpackmd(struct lov_obd *lov, void *buf,
const struct lsm_operations *op;
struct lov_stripe_md *lsm;
u32 magic;
+
ENTRY;
if (buf_size < sizeof(magic))
magic = le32_to_cpu(*(u32 *)buf);
op = lsm_op_find(magic);
- if (op == NULL)
+ if (!op)
RETURN(ERR_PTR(-EINVAL));
- lsm = lsm_op_find(magic)->lsm_unpackmd(lov, buf, buf_size);
+ lsm = op->lsm_unpackmd(lov, buf, buf_size);
RETURN(lsm);
}
-/* Retrieve object striping information.
+/*
+ * Retrieve object striping information.
*
* @lump is a pointer to an in-core struct with lmm_ost_count indicating
* the maximum number of OST indices which will fit in the user buffer.
* lmm_magic must be LOV_USER_MAGIC.
+ *
+ * If @size > 0, User specified limited buffer size, usually the buffer is from
+ * ll_lov_setstripe(), and the buffer can only hold basic layout template info.
*/
-int lov_getstripe(struct lov_object *obj, struct lov_stripe_md *lsm,
- struct lov_user_md __user *lump)
+int lov_getstripe(const struct lu_env *env, struct lov_object *obj,
+ struct lov_stripe_md *lsm, struct lov_user_md __user *lump,
+ size_t size)
{
/* we use lov_user_md_v3 because it is larger than lov_user_md_v1 */
- struct lov_mds_md *lmmk;
- size_t lmmk_size;
- ssize_t lmm_size;
- int rc = 0;
+ struct lov_mds_md *lmmk, *lmm;
+ struct lov_foreign_md *lfm;
+ struct lov_user_md_v1 lum;
+ size_t lmmk_size, lum_size = 0;
+ ssize_t lmm_size;
+ static bool printed;
+ int rc = 0;
+
ENTRY;
if (lsm->lsm_magic != LOV_MAGIC_V1 && lsm->lsm_magic != LOV_MAGIC_V3 &&
- lsm->lsm_magic != LOV_MAGIC_COMP_V1) {
+ lsm->lsm_magic != LOV_MAGIC_COMP_V1 &&
+ lsm->lsm_magic != LOV_MAGIC_FOREIGN) {
CERROR("bad LSM MAGIC: 0x%08X != 0x%08X nor 0x%08X\n",
lsm->lsm_magic, LOV_MAGIC_V1, LOV_MAGIC_V3);
GOTO(out, rc = -EIO);
}
+ if (!printed) {
+ LCONSOLE_WARN("%s: using old ioctl(LL_IOC_LOV_GETSTRIPE) on "
+ DFID", use llapi_layout_get_by_path()\n",
+ current->comm,
+ PFID(&obj->lo_cl.co_lu.lo_header->loh_fid));
+ printed = true;
+ }
+
lmmk_size = lov_comp_md_size(lsm);
OBD_ALLOC_LARGE(lmmk, lmmk_size);
- if (lmmk == NULL)
+ if (!lmmk)
GOTO(out, rc = -ENOMEM);
lmm_size = lov_lsm_pack(lsm, lmmk, lmmk_size);
} else if (lmmk->lmm_magic == cpu_to_le32(LOV_MAGIC_COMP_V1)) {
lustre_swab_lov_comp_md_v1(
(struct lov_comp_md_v1 *)lmmk);
+ } else if (lmmk->lmm_magic == cpu_to_le32(LOV_MAGIC_FOREIGN)) {
+ lfm = (struct lov_foreign_md *)lmmk;
+ __swab32s(&lfm->lfm_magic);
+ __swab32s(&lfm->lfm_length);
+ __swab32s(&lfm->lfm_type);
+ __swab32s(&lfm->lfm_flags);
}
}
- if (copy_to_user(lump, lmmk, lmmk_size))
+ /*
+	 * Legacy application passes limited buffer, we need to figure out
+ * the user buffer size by the passed in lmm_stripe_count.
+ */
+ if (lsm->lsm_magic != LOV_MAGIC_FOREIGN)
+ if (copy_from_user(&lum, lump, sizeof(struct lov_user_md_v1)))
+ GOTO(out_free, rc = -EFAULT);
+
+ if (lum.lmm_magic == LOV_USER_MAGIC_V1 ||
+ lum.lmm_magic == LOV_USER_MAGIC_V3)
+ lum_size = lov_user_md_size(lum.lmm_stripe_count,
+ lum.lmm_magic);
+
+ if (lum_size != 0) {
+ struct lov_mds_md *comp_md = lmmk;
+
+ /*
+ * Legacy app (ADIO for instance) treats the layout as V1/V3
+ * blindly, we'd return a reasonable V1/V3 for them.
+ */
+ if (lmmk->lmm_magic == LOV_MAGIC_COMP_V1) {
+ struct lov_comp_md_v1 *comp_v1;
+ struct cl_object *cl_obj;
+ struct cl_attr attr;
+ int i;
+
+ attr.cat_size = 0;
+ cl_obj = cl_object_top(&obj->lo_cl);
+ cl_object_attr_lock(cl_obj);
+ cl_object_attr_get(env, cl_obj, &attr);
+ cl_object_attr_unlock(cl_obj);
+
+ /*
+ * return the last instantiated component if file size
+ * is non-zero, otherwise, return the last component.
+ */
+ comp_v1 = (struct lov_comp_md_v1 *)lmmk;
+ i = attr.cat_size == 0 ? comp_v1->lcm_entry_count : 0;
+ for (; i < comp_v1->lcm_entry_count; i++) {
+ if (!(comp_v1->lcm_entries[i].lcme_flags &
+ LCME_FL_INIT))
+ break;
+ }
+ if (i > 0)
+ i--;
+ comp_md = (struct lov_mds_md *)((char *)comp_v1 +
+ comp_v1->lcm_entries[i].lcme_offset);
+ lum_size = comp_v1->lcm_entries[i].lcme_size;
+ }
+
+ lmm = comp_md;
+ lmm_size = min(lum_size, lmmk_size);
+ } else {
+ lmm = lmmk;
+ lmm_size = lmmk_size;
+ }
+ /**
+ * User specified limited buffer size, usually the buffer is
+ * from ll_lov_setstripe(), and the buffer can only hold basic
+ * layout template info.
+ */
+ if (size == 0 || size > lmm_size)
+ size = lmm_size;
+ if (copy_to_user(lump, lmm, size))
GOTO(out_free, rc = -EFAULT);
out_free: