1 // SPDX-License-Identifier: GPL-2.0
4 * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
5 * Use is subject to license terms.
7 * Copyright (c) 2011, 2017, Intel Corporation.
11 * This file is part of Lustre, http://www.lustre.org/
13 * (Un)packing of OST/MDS requests
15 * Author: Andreas Dilger <adilger@clusterfs.com>
18 #define DEBUG_SUBSYSTEM S_LOV
20 #include <lustre_net.h>
21 #include <lustre_swab.h>
23 #include <obd_class.h>
24 #include <obd_support.h>
26 #include "lov_cl_internal.h"
27 #include "lov_internal.h"
/*
 * Log the header fields shared by all LOV striping layouts at debug
 * @level: object id, magic, pattern, stripe size/count and layout
 * generation. On-disk fields are little-endian and are byte-swapped
 * before printing.
 * NOTE(review): several interior lines (local 'oi' declaration,
 * braces) are elided from this listing.
 */
29 void lov_dump_lmm_common(int level, void *lmmp)
31 struct lov_mds_md *lmm = lmmp;
/* Convert the little-endian object id before formatting it. */
34 lmm_oi_le_to_cpu(&oi, &lmm->lmm_oi);
35 CDEBUG_LIMIT(level, "objid "DOSTID", magic 0x%08x, pattern %#x\n",
36 POSTID(&oi), le32_to_cpu(lmm->lmm_magic),
37 le32_to_cpu(lmm->lmm_pattern));
38 CDEBUG_LIMIT(level, "stripe_size %u, stripe_count %u, layout_gen %u\n",
39 le32_to_cpu(lmm->lmm_stripe_size),
40 le16_to_cpu(lmm->lmm_stripe_count),
41 le16_to_cpu(lmm->lmm_layout_gen));
/*
 * Log one line per stripe object (stripe index, OST index, sub-object
 * id) at debug @level. Refuses to walk an on-disk object array whose
 * stripe_count is beyond the sane V1 limit.
 */
44 static void lov_dump_lmm_objects(int level, struct lov_ost_data *lod,
/*
 * NOTE(review): the bound checked is LOV_V1_INSANE_STRIPE_COUNT but the
 * value printed below is LOV_V1_INSANE_STRIPE_INDEX — these should
 * probably be the same macro; verify against upstream.
 */
49 if (stripe_count > LOV_V1_INSANE_STRIPE_COUNT) {
51 "bad stripe_count %u > max_stripe_count %u\n",
52 stripe_count, LOV_V1_INSANE_STRIPE_INDEX);
/* Dump each stripe; entries are little-endian on disk. */
56 for (i = 0; i < stripe_count; ++i, ++lod) {
59 ostid_le_to_cpu(&lod->l_ost_oi, &oi);
60 CDEBUG_LIMIT(level, "stripe %u idx %u subobj "DOSTID"\n", i,
61 le32_to_cpu(lod->l_ost_idx), POSTID(&oi));
/*
 * Dump a complete LOV v1 layout: the common header followed by the
 * per-stripe object array.
 */
65 void lov_dump_lmm_v1(int level, struct lov_mds_md_v1 *lmm)
67 lov_dump_lmm_common(level, lmm);
68 lov_dump_lmm_objects(level, lmm->lmm_objects,
69 le16_to_cpu(lmm->lmm_stripe_count));
73 * Pack LOV striping metadata for disk storage format (in little
76 * This follows the getxattr() conventions. If \a buf_size is zero
77 * then return the size needed. If \a buf_size is too small then
78 * return -ERANGE. Otherwise return the size of the result.
80 static ssize_t lov_lsm_pack_v1v3(const struct lov_stripe_md *lsm, void *buf,
/* V1 and V3 aliases of the same output buffer (V3 = V1 + pool name). */
83 struct lov_mds_md_v1 *lmmv1 = buf;
84 struct lov_mds_md_v3 *lmmv3 = buf;
85 struct lov_ost_data_v1 *lmm_objects;
/* Size the output from entry 0; plain (non-composite) layouts have one entry. */
91 lmm_size = lov_mds_md_size(lsm->lsm_entries[0]->lsme_stripe_count,
96 if (buf_size < lmm_size)
100 * lmmv1 and lmmv3 point to the same struct and have the
/* Fill the common header, converting everything to little-endian. */
103 lmmv1->lmm_magic = cpu_to_le32(lsm->lsm_magic);
104 lmm_oi_cpu_to_le(&lmmv1->lmm_oi, &lsm->lsm_oi);
105 lmmv1->lmm_stripe_size = cpu_to_le32(
106 lsm->lsm_entries[0]->lsme_stripe_size);
107 lmmv1->lmm_stripe_count = cpu_to_le16(
108 lsm->lsm_entries[0]->lsme_stripe_count);
109 lmmv1->lmm_pattern = cpu_to_le32(lsm->lsm_entries[0]->lsme_pattern);
110 lmmv1->lmm_layout_gen = cpu_to_le16(lsm->lsm_layout_gen);
/* V3 additionally carries the OST pool name before the object array. */
112 if (lsm->lsm_magic == LOV_MAGIC_V3) {
113 BUILD_BUG_ON(sizeof(lsm->lsm_entries[0]->lsme_pool_name) !=
114 sizeof(lmmv3->lmm_pool_name));
115 strscpy(lmmv3->lmm_pool_name,
116 lsm->lsm_entries[0]->lsme_pool_name,
117 sizeof(lmmv3->lmm_pool_name));
118 lmm_objects = lmmv3->lmm_objects;
120 lmm_objects = lmmv1->lmm_objects;
/* Released (HSM-archived) files keep no per-stripe objects. */
123 if (lsm->lsm_is_released)
/* Copy each stripe's object id/generation/OST index, little-endian. */
126 for (i = 0; i < lsm->lsm_entries[0]->lsme_stripe_count; i++) {
127 struct lov_oinfo *loi = lsm->lsm_entries[0]->lsme_oinfo[i];
129 ostid_cpu_to_le(&loi->loi_oi, &lmm_objects[i].l_ost_oi);
130 lmm_objects[i].l_ost_gen = cpu_to_le32(loi->loi_ost_gen);
131 lmm_objects[i].l_ost_idx = cpu_to_le32(loi->loi_ost_idx);
/*
 * Pack a foreign (non-striped, externally managed) layout into @buf.
 * The cached foreign blob is already in its on-disk (little-endian)
 * form, so this is a straight copy. On a too-small buffer, copies as
 * much as fits (useful to fetch just the header) before returning.
 */
137 static ssize_t lov_lsm_pack_foreign(const struct lov_stripe_md *lsm, void *buf,
140 struct lov_foreign_md *lfm = buf;
143 lfm_size = lsm->lsm_foreign_size;
148 /* if buffer too small return ERANGE but copy the size the
149 * caller has requested anyway. This may be useful to get
150 * only the header without the need to alloc the full size
152 if (buf_size < lfm_size) {
153 memcpy(lfm, lsm_foreign(lsm), buf_size);
157 /* full foreign LOV is already avail in its cache
158 * no need to translate format fields to little-endian
160 memcpy(lfm, lsm_foreign(lsm), lsm->lsm_foreign_size);
/*
 * Pack one foreign layout component entry into @lmm in little-endian
 * on-disk form. Returns the number of bytes written
 * (lov_foreign_md_size() of the entry's length).
 */
165 static unsigned int lov_lsme_pack_foreign(struct lov_stripe_md_entry *lsme,
168 struct lov_foreign_md *lfm = (struct lov_foreign_md *)lmm;
170 lfm->lfm_magic = cpu_to_le32(lsme->lsme_magic);
171 lfm->lfm_length = cpu_to_le32(lsme->lsme_length);
172 lfm->lfm_type = cpu_to_le32(lsme->lsme_type);
173 lfm->lfm_flags = cpu_to_le32(lsme->lsme_foreign_flags);
175 /* TODO: support for foreign layout other than HSM, i.e. DAOS. */
/* HSM foreign types carry an extra payload that needs LE conversion. */
176 if (lov_hsm_type_supported(lsme->lsme_type))
177 lov_foreign_hsm_to_le(lfm, &lsme->lsme_hsm);
179 return lov_foreign_md_size(lsme->lsme_length);
/*
 * Pack one plain (V1/V3) component entry of a composite layout into
 * @lmm in little-endian on-disk form. Per-stripe objects are written
 * only for initialized, non-released entries. Returns the number of
 * bytes written (lov_mds_md_size() for the stripes emitted).
 */
182 static unsigned int lov_lsme_pack_v1v3(struct lov_stripe_md_entry *lsme,
183 struct lov_mds_md *lmm)
185 struct lov_ost_data_v1 *lmm_objects;
/* Common V1/V3 header; lmm_oi is meaningless inside a composite layout. */
189 lmm->lmm_magic = cpu_to_le32(lsme->lsme_magic);
190 /* lmm->lmm_oi not set */
191 lmm->lmm_pattern = cpu_to_le32(lsme->lsme_pattern);
192 lmm->lmm_stripe_size = cpu_to_le32(lsme->lsme_stripe_size);
193 lmm->lmm_stripe_count = cpu_to_le16(lsme->lsme_stripe_count);
194 lmm->lmm_layout_gen = cpu_to_le16(lsme->lsme_layout_gen);
/* V3 additionally carries the OST pool name before the object array. */
196 if (lsme->lsme_magic == LOV_MAGIC_V3) {
197 struct lov_mds_md_v3 *lmmv3 = (struct lov_mds_md_v3 *)lmm;
199 strscpy(lmmv3->lmm_pool_name, lsme->lsme_pool_name,
200 sizeof(lmmv3->lmm_pool_name));
201 lmm_objects = lmmv3->lmm_objects;
203 lmm_objects = ((struct lov_mds_md_v1 *)lmm)->lmm_objects;
/* Only initialized, non-released entries have stripe objects to emit. */
206 if (lsme_inited(lsme) && !(lsme->lsme_pattern & LOV_PATTERN_F_RELEASED))
207 stripe_count = lsme->lsme_stripe_count;
211 for (i = 0; i < stripe_count; i++) {
212 struct lov_oinfo *loi = lsme->lsme_oinfo[i];
214 ostid_cpu_to_le(&loi->loi_oi, &lmm_objects[i].l_ost_oi);
215 lmm_objects[i].l_ost_gen = cpu_to_le32(loi->loi_ost_gen);
216 lmm_objects[i].l_ost_idx = cpu_to_le32(loi->loi_ost_idx);
219 return lov_mds_md_size(stripe_count, lsme->lsme_magic);
/*
 * Pack an in-core lov_stripe_md into its on-disk (little-endian)
 * xattr form in @buf. Plain V1/V3 and foreign layouts are delegated
 * to their dedicated packers; composite (LOV_MAGIC_COMP_V1) layouts
 * get a lov_comp_md_v1 header plus one packed blob per component.
 * Follows getxattr() conventions for sizing (see lov_lsm_pack_v1v3).
 */
222 ssize_t lov_lsm_pack(const struct lov_stripe_md *lsm, void *buf,
225 struct lov_comp_md_v1 *lcmv1 = buf;
226 struct lov_comp_md_entry_v1 *lcme;
/* Non-composite layouts are handled by the format-specific packers. */
234 if (lsm->lsm_magic == LOV_MAGIC_V1 || lsm->lsm_magic == LOV_MAGIC_V3)
235 return lov_lsm_pack_v1v3(lsm, buf, buf_size);
237 if (lsm->lsm_magic == LOV_MAGIC_FOREIGN)
238 return lov_lsm_pack_foreign(lsm, buf, buf_size);
240 lmm_size = lov_comp_md_size(lsm);
244 if (buf_size < lmm_size)
/* Composite header, all fields little-endian. */
247 lcmv1->lcm_magic = cpu_to_le32(lsm->lsm_magic);
248 lcmv1->lcm_size = cpu_to_le32(lmm_size);
249 lcmv1->lcm_layout_gen = cpu_to_le32(lsm->lsm_layout_gen);
250 lcmv1->lcm_flags = cpu_to_le16(lsm->lsm_flags);
251 lcmv1->lcm_mirror_count = cpu_to_le16(lsm->lsm_mirror_count);
252 lcmv1->lcm_entry_count = cpu_to_le16(lsm->lsm_entry_count);
/* Component blobs start right after the header and the entry table. */
254 offset = sizeof(*lcmv1) + sizeof(*lcme) * lsm->lsm_entry_count;
256 for (entry = 0; entry < lsm->lsm_entry_count; entry++) {
257 struct lov_stripe_md_entry *lsme;
258 struct lov_mds_md *lmm;
260 lsme = lsm->lsm_entries[entry];
261 lcme = &lcmv1->lcm_entries[entry];
/* Fill this component's table entry: id, flags, extent, offset. */
263 lcme->lcme_id = cpu_to_le32(lsme->lsme_id);
264 lcme->lcme_flags = cpu_to_le32(lsme->lsme_flags);
/* Timestamp is only meaningful for NOSYNC (stale mirror) components. */
265 if (lsme->lsme_flags & LCME_FL_NOSYNC)
266 lcme->lcme_timestamp =
267 cpu_to_le64(lsme->lsme_timestamp);
268 lcme->lcme_extent.e_start =
269 cpu_to_le64(lsme->lsme_extent.e_start);
270 lcme->lcme_extent.e_end =
271 cpu_to_le64(lsme->lsme_extent.e_end);
272 lcme->lcme_offset = cpu_to_le32(offset);
/* Pack the component body at its offset; packers return bytes used. */
274 lmm = (struct lov_mds_md *)((char *)lcmv1 + offset);
275 if (lsme->lsme_magic == LOV_MAGIC_FOREIGN)
276 size = lov_lsme_pack_foreign(lsme, lmm);
278 size = lov_lsme_pack_v1v3(lsme, lmm);
279 lcme->lcme_size = cpu_to_le32(size);
281 } /* for each layout component */
/*
 * Find the max stripecount we should use.
 *
 * Clamp the requested @stripe_count to what the configuration allows:
 * fall back to the filesystem default, never exceed the number of
 * active OSTs, and never exceed what the MDS can store in one EA
 * (derived from ocd_max_easize when the server advertises
 * OBD_CONNECT_MAX_EASIZE, else the old LOV_MAX_STRIPE_COUNT_OLD cap).
 */
287 __u16 lov_get_stripe_count(struct lov_obd *lov, __u32 magic, __u16 stripe_count)
289 struct lu_tgt_descs *ltd = &lov->lov_ost_descs;
290 u32 max_stripes = LOV_MAX_STRIPE_COUNT_OLD;
293 stripe_count = ltd->ltd_lov_desc.ld_default_stripe_count;
294 if (stripe_count > ltd->ltd_lov_desc.ld_active_tgt_count)
295 stripe_count = ltd->ltd_lov_desc.ld_active_tgt_count;
300 * stripe count is based on whether ldiskfs can handle
/* Server-advertised EA size limit overrides the old default cap. */
303 if (lov->lov_ocd.ocd_connect_flags & OBD_CONNECT_MAX_EASIZE &&
304 lov->lov_ocd.ocd_max_easize)
305 max_stripes = lov_mds_md_max_stripe_count(
306 lov->lov_ocd.ocd_max_easize, magic);
308 if (stripe_count > max_stripes)
309 stripe_count = max_stripes;
/*
 * Drop one reference on *@lsmp; lsm_free() runs when the refcount
 * reaches zero.
 */
314 void lov_free_memmd(struct lov_stripe_md **lsmp)
316 struct lov_stripe_md *lsm = *lsmp;
319 kref_put(&lsm->lsm_refc, lsm_free);
323 * Unpack LOV object metadata from disk storage. It is packed in LE byte
324 * order and is opaque to the networking layer.
/*
 * Dispatches on the little-endian magic at the start of @buf to the
 * matching lsm_operations unpacker. Returns ERR_PTR(-EINVAL) for a
 * buffer too short to hold the magic or an unrecognized magic.
 */
326 struct lov_stripe_md *lov_unpackmd(struct lov_obd *lov, void *buf,
329 const struct lsm_operations *op;
330 struct lov_stripe_md *lsm;
335 if (buf_size < sizeof(magic))
336 RETURN(ERR_PTR(-EINVAL));
338 magic = le32_to_cpu(*(u32 *)buf);
339 op = lsm_op_find(magic);
341 RETURN(ERR_PTR(-EINVAL));
343 lsm = op->lsm_unpackmd(lov, buf, buf_size);
349 * Retrieve object striping information.
351 * @lump is a pointer to an in-core struct with lmm_ost_count indicating
352 * the maximum number of OST indices which will fit in the user buffer.
353 * lmm_magic must be LOV_USER_MAGIC.
355 * If @size > 0, User specified limited buffer size, usually the buffer is from
356 * ll_lov_setstripe(), and the buffer can only hold basic layout template info.
358 int lov_getstripe(const struct lu_env *env, struct lov_object *obj,
359 struct lov_stripe_md *lsm, struct lov_user_md __user *lump,
362 /* we use lov_user_md_v3 because it is larger than lov_user_md_v1 */
363 struct lov_mds_md *lmmk, *lmm;
364 struct lov_foreign_md *lfm;
365 struct lov_user_md_v1 lum;
366 size_t lmmk_size, lum_size = 0;
372 if (lsm->lsm_magic != LOV_MAGIC_V1 && lsm->lsm_magic != LOV_MAGIC_V3 &&
373 lsm->lsm_magic != LOV_MAGIC_COMP_V1 &&
374 lsm->lsm_magic != LOV_MAGIC_FOREIGN) {
375 CERROR("bad LSM MAGIC: 0x%08X != 0x%08X nor 0x%08X\n",
376 lsm->lsm_magic, LOV_MAGIC_V1, LOV_MAGIC_V3);
377 GOTO(out, rc = -EIO);
380 lmmk_size = lov_comp_md_size(lsm);
382 OBD_ALLOC_LARGE(lmmk, lmmk_size);
384 GOTO(out, rc = -ENOMEM);
386 lmm_size = lov_lsm_pack(lsm, lmmk, lmmk_size);
388 GOTO(out_free, rc = lmm_size);
390 if (cpu_to_le32(LOV_MAGIC) != LOV_MAGIC) {
391 if (lmmk->lmm_magic == cpu_to_le32(LOV_MAGIC_V1) ||
392 lmmk->lmm_magic == cpu_to_le32(LOV_MAGIC_V3)) {
393 lustre_swab_lov_mds_md(lmmk);
394 lustre_swab_lov_user_md_objects(
395 (struct lov_user_ost_data *)lmmk->lmm_objects,
396 lmmk->lmm_stripe_count);
397 } else if (lmmk->lmm_magic == cpu_to_le32(LOV_MAGIC_COMP_V1)) {
398 lustre_swab_lov_comp_md_v1(
399 (struct lov_comp_md_v1 *)lmmk);
400 } else if (lmmk->lmm_magic == cpu_to_le32(LOV_MAGIC_FOREIGN)) {
401 lfm = (struct lov_foreign_md *)lmmk;
402 __swab32s(&lfm->lfm_magic);
403 __swab32s(&lfm->lfm_length);
404 __swab32s(&lfm->lfm_type);
405 __swab32s(&lfm->lfm_flags);
410 * Legacy application passes limited buffer, we need to figure out
411 * the user buffer size by the passed in lmm_stripe_count.
413 if (lsm->lsm_magic != LOV_MAGIC_FOREIGN)
414 if (copy_from_user(&lum, lump, sizeof(struct lov_user_md_v1)))
415 GOTO(out_free, rc = -EFAULT);
417 if (lum.lmm_magic == LOV_USER_MAGIC_V1 ||
418 lum.lmm_magic == LOV_USER_MAGIC_V3)
419 lum_size = lov_user_md_size(lum.lmm_stripe_count,
423 struct lov_mds_md *comp_md = lmmk;
426 * Legacy app (ADIO for instance) treats the layout as V1/V3
427 * blindly, we'd return a reasonable V1/V3 for them.
429 if (lmmk->lmm_magic == LOV_MAGIC_COMP_V1) {
430 struct lov_comp_md_v1 *comp_v1;
431 struct cl_object *cl_obj;
436 cl_obj = cl_object_top(&obj->lo_cl);
437 cl_object_attr_lock(cl_obj);
438 cl_object_attr_get(env, cl_obj, &attr);
439 cl_object_attr_unlock(cl_obj);
442 * return the last instantiated component if file size
443 * is non-zero, otherwise, return the last component.
445 comp_v1 = (struct lov_comp_md_v1 *)lmmk;
446 i = attr.cat_size == 0 ? comp_v1->lcm_entry_count : 0;
447 for (; i < comp_v1->lcm_entry_count; i++) {
448 if (!(comp_v1->lcm_entries[i].lcme_flags &
454 comp_md = (struct lov_mds_md *)((char *)comp_v1 +
455 comp_v1->lcm_entries[i].lcme_offset);
456 lum_size = comp_v1->lcm_entries[i].lcme_size;
460 lmm_size = min(lum_size, lmmk_size);
463 lmm_size = lmmk_size;
467 * User specified limited buffer size, usually the buffer is
468 * from ll_lov_setstripe(), and the buffer can only hold basic
469 * layout template info.
471 if (size == 0 || size > lmm_size)
473 if (copy_to_user(lump, lmm, size))
474 GOTO(out_free, rc = -EFAULT);
477 OBD_FREE_LARGE(lmmk, lmmk_size);