4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.gnu.org/licenses/gpl-2.0.html
23 * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Use is subject to license terms.
26 * Copyright (c) 2011, 2017, Intel Corporation.
29 * This file is part of Lustre, http://www.lustre.org/
30 * Lustre is a trademark of Sun Microsystems, Inc.
32 * lustre/lov/lov_pack.c
34 * (Un)packing of OST/MDS requests
36 * Author: Andreas Dilger <adilger@clusterfs.com>
39 #define DEBUG_SUBSYSTEM S_LOV
41 #include <lustre_net.h>
42 #include <lustre_swab.h>
44 #include <obd_class.h>
45 #include <obd_support.h>
47 #include "lov_cl_internal.h"
48 #include "lov_internal.h"
/*
 * Dump the header fields shared by all LOV on-disk layout variants
 * (object id, magic, pattern, stripe size/count, layout generation)
 * at the given debug level.
 *
 * The lmmp buffer is in little-endian disk order; every field is
 * converted with le*_to_cpu()/lmm_oi_le_to_cpu() before printing.
 *
 * NOTE(review): extraction looks lossy here — the local 'oi'
 * declaration and the closing brace are not visible in this chunk.
 */
50 void lov_dump_lmm_common(int level, void *lmmp)
52 struct lov_mds_md *lmm = lmmp;
55 lmm_oi_le_to_cpu(&oi, &lmm->lmm_oi);
56 CDEBUG_LIMIT(level, "objid "DOSTID", magic 0x%08x, pattern %#x\n",
57 POSTID(&oi), le32_to_cpu(lmm->lmm_magic),
58 le32_to_cpu(lmm->lmm_pattern));
59 CDEBUG_LIMIT(level, "stripe_size %u, stripe_count %u, layout_gen %u\n",
60 le32_to_cpu(lmm->lmm_stripe_size),
61 le16_to_cpu(lmm->lmm_stripe_count),
62 le16_to_cpu(lmm->lmm_layout_gen));
/*
 * Dump the per-stripe OST object descriptors that follow a LOV header:
 * for each stripe, its index, target OST index, and sub-object id.
 *
 * A stripe_count above LOV_V1_INSANE_STRIPE_COUNT is treated as
 * corrupt and reported instead of iterated (guards against walking
 * off the end of a bogus buffer).
 *
 * NOTE(review): lossy extraction — the stripe_count parameter line,
 * the loop locals ('i', 'oi'), and the braces around the sanity check
 * are not visible in this chunk.
 */
65 static void lov_dump_lmm_objects(int level, struct lov_ost_data *lod,
70 if (stripe_count > LOV_V1_INSANE_STRIPE_COUNT) {
72 "bad stripe_count %u > max_stripe_count %u\n",
73 stripe_count, LOV_V1_INSANE_STRIPE_COUNT);
77 for (i = 0; i < stripe_count; ++i, ++lod) {
80 ostid_le_to_cpu(&lod->l_ost_oi, &oi);
81 CDEBUG_LIMIT(level, "stripe %u idx %u subobj "DOSTID"\n", i,
82 le32_to_cpu(lod->l_ost_idx), POSTID(&oi));
/*
 * Dump a complete LOV v1 layout: the common header followed by the
 * per-stripe object array. stripe_count is read from the
 * little-endian on-disk header before being passed down.
 */
86 void lov_dump_lmm_v1(int level, struct lov_mds_md_v1 *lmm)
88 lov_dump_lmm_common(level, lmm);
89 lov_dump_lmm_objects(level, lmm->lmm_objects,
90 le16_to_cpu(lmm->lmm_stripe_count));
94 * Pack LOV striping metadata for disk storage format (in little
97 * This follows the getxattr() conventions. If \a buf_size is zero
98 * then return the size needed. If \a buf_size is too small then
99 * return -ERANGE. Otherwise return the size of the result.
/*
 * Pack a plain (non-composite) V1/V3 striped layout from the in-core
 * lsm into the caller's buffer in little-endian disk order.
 *
 * V1 and V3 headers share a common prefix, so lmmv1/lmmv3 alias the
 * same buffer; only the V3 path additionally copies the pool name and
 * uses the V3 object-array offset.
 *
 * NOTE(review): lossy extraction — the declarations of lmm_size/i,
 * the "buf_size == 0" size-query return, the -ERANGE return, the
 * 'else' line before the V1 lmm_objects assignment, the released-file
 * early return target, and the final return are not visible here.
 */
101 static ssize_t lov_lsm_pack_v1v3(const struct lov_stripe_md *lsm, void *buf,
104 struct lov_mds_md_v1 *lmmv1 = buf;
105 struct lov_mds_md_v3 *lmmv3 = buf;
106 struct lov_ost_data_v1 *lmm_objects;
112 lmm_size = lov_mds_md_size(lsm->lsm_entries[0]->lsme_stripe_count,
117 if (buf_size < lmm_size)
121 * lmmv1 and lmmv3 point to the same struct and have the
124 lmmv1->lmm_magic = cpu_to_le32(lsm->lsm_magic);
125 lmm_oi_cpu_to_le(&lmmv1->lmm_oi, &lsm->lsm_oi);
126 lmmv1->lmm_stripe_size = cpu_to_le32(
127 lsm->lsm_entries[0]->lsme_stripe_size);
128 lmmv1->lmm_stripe_count = cpu_to_le16(
129 lsm->lsm_entries[0]->lsme_stripe_count);
130 lmmv1->lmm_pattern = cpu_to_le32(lsm->lsm_entries[0]->lsme_pattern);
131 lmmv1->lmm_layout_gen = cpu_to_le16(lsm->lsm_layout_gen);
/* V3 layouts carry a pool name; compile-time check the in-core and
 * on-disk fields stay the same size before the bounded copy. */
133 if (lsm->lsm_magic == LOV_MAGIC_V3) {
134 BUILD_BUG_ON(sizeof(lsm->lsm_entries[0]->lsme_pool_name) !=
135 sizeof(lmmv3->lmm_pool_name));
136 strlcpy(lmmv3->lmm_pool_name,
137 lsm->lsm_entries[0]->lsme_pool_name,
138 sizeof(lmmv3->lmm_pool_name));
139 lmm_objects = lmmv3->lmm_objects;
141 lmm_objects = lmmv1->lmm_objects;
/* Released files have no instantiated OST objects to pack. */
144 if (lsm->lsm_is_released)
/* Convert each stripe's object id/generation/OST index to LE. */
147 for (i = 0; i < lsm->lsm_entries[0]->lsme_stripe_count; i++) {
148 struct lov_oinfo *loi = lsm->lsm_entries[0]->lsme_oinfo[i];
150 ostid_cpu_to_le(&loi->loi_oi, &lmm_objects[i].l_ost_oi);
151 lmm_objects[i].l_ost_gen = cpu_to_le32(loi->loi_ost_gen);
152 lmm_objects[i].l_ost_idx = cpu_to_le32(loi->loi_ost_idx);
/*
 * Pack a foreign (opaque, non-striped) layout: the cached on-disk
 * image is copied out verbatim, since it is already stored in
 * little-endian disk format.
 *
 * NOTE(review): lossy extraction — the lfm_size declaration, the
 * size-query / -ERANGE returns, and the final return of lfm_size are
 * not visible in this chunk.
 */
158 static ssize_t lov_lsm_pack_foreign(const struct lov_stripe_md *lsm, void *buf,
161 struct lov_foreign_md *lfm = buf;
164 lfm_size = lsm->lsm_foreign_size;
169 if (buf_size < lfm_size)
172 /* full foreign LOV is already avail in its cache
173 * no need to translate format fields to little-endian
175 memcpy(lfm, lsm_foreign(lsm), lsm->lsm_foreign_size);
/*
 * Pack any in-core layout into its on-disk (little-endian) form.
 *
 * Plain V1/V3 and foreign layouts are delegated to their dedicated
 * packers; everything else is treated as a composite (PFL) layout:
 * a lov_comp_md_v1 header, an entry table, then one V1/V3 blob per
 * component at its recorded offset.
 *
 * NOTE(review): lossy extraction — declarations (lmm_size, offset,
 * entry, i, size, stripe_count), the size-query and -ERANGE returns,
 * the 'else' branch assigning V1 lmm_objects, the "stripe_count = 0"
 * default for uninitialized/released components, and the final
 * return are not visible in this chunk.
 */
180 ssize_t lov_lsm_pack(const struct lov_stripe_md *lsm, void *buf,
183 struct lov_comp_md_v1 *lcmv1 = buf;
184 struct lov_comp_md_entry_v1 *lcme;
185 struct lov_ost_data_v1 *lmm_objects;
/* Non-composite layouts are handled by the simpler packers. */
194 if (lsm->lsm_magic == LOV_MAGIC_V1 || lsm->lsm_magic == LOV_MAGIC_V3)
195 return lov_lsm_pack_v1v3(lsm, buf, buf_size);
197 if (lsm->lsm_magic == LOV_MAGIC_FOREIGN)
198 return lov_lsm_pack_foreign(lsm, buf, buf_size);
200 lmm_size = lov_comp_md_size(lsm);
204 if (buf_size < lmm_size)
/* Composite header, all fields converted to little-endian. */
207 lcmv1->lcm_magic = cpu_to_le32(lsm->lsm_magic);
208 lcmv1->lcm_size = cpu_to_le32(lmm_size);
209 lcmv1->lcm_layout_gen = cpu_to_le32(lsm->lsm_layout_gen);
210 lcmv1->lcm_flags = cpu_to_le16(lsm->lsm_flags);
211 lcmv1->lcm_mirror_count = cpu_to_le16(lsm->lsm_mirror_count);
212 lcmv1->lcm_entry_count = cpu_to_le16(lsm->lsm_entry_count);
/* Component blobs start right after the fixed header + entry table. */
214 offset = sizeof(*lcmv1) + sizeof(*lcme) * lsm->lsm_entry_count;
216 for (entry = 0; entry < lsm->lsm_entry_count; entry++) {
217 struct lov_stripe_md_entry *lsme;
218 struct lov_mds_md *lmm;
221 lsme = lsm->lsm_entries[entry];
222 lcme = &lcmv1->lcm_entries[entry];
224 lcme->lcme_id = cpu_to_le32(lsme->lsme_id);
225 lcme->lcme_flags = cpu_to_le32(lsme->lsme_flags);
/* The timestamp field is only meaningful for NOSYNC components. */
226 if (lsme->lsme_flags & LCME_FL_NOSYNC)
227 lcme->lcme_timestamp =
228 cpu_to_le64(lsme->lsme_timestamp);
229 lcme->lcme_extent.e_start =
230 cpu_to_le64(lsme->lsme_extent.e_start);
231 lcme->lcme_extent.e_end =
232 cpu_to_le64(lsme->lsme_extent.e_end);
233 lcme->lcme_offset = cpu_to_le32(offset);
/* Pack this component's V1/V3 sub-layout at its offset. */
235 lmm = (struct lov_mds_md *)((char *)lcmv1 + offset);
236 lmm->lmm_magic = cpu_to_le32(lsme->lsme_magic);
237 /* lmm->lmm_oi not set */
238 lmm->lmm_pattern = cpu_to_le32(lsme->lsme_pattern);
239 lmm->lmm_stripe_size = cpu_to_le32(lsme->lsme_stripe_size);
240 lmm->lmm_stripe_count = cpu_to_le16(lsme->lsme_stripe_count);
241 lmm->lmm_layout_gen = cpu_to_le16(lsme->lsme_layout_gen);
243 if (lsme->lsme_magic == LOV_MAGIC_V3) {
244 struct lov_mds_md_v3 *lmmv3 =
245 (struct lov_mds_md_v3 *)lmm;
247 strlcpy(lmmv3->lmm_pool_name, lsme->lsme_pool_name,
248 sizeof(lmmv3->lmm_pool_name));
249 lmm_objects = lmmv3->lmm_objects;
252 ((struct lov_mds_md_v1 *)lmm)->lmm_objects;
/* Only initialized, non-released components carry OST objects. */
255 if (lsme_inited(lsme) &&
256 !(lsme->lsme_pattern & LOV_PATTERN_F_RELEASED))
257 stripe_count = lsme->lsme_stripe_count;
261 for (i = 0; i < stripe_count; i++) {
262 struct lov_oinfo *loi = lsme->lsme_oinfo[i];
264 ostid_cpu_to_le(&loi->loi_oi, &lmm_objects[i].l_ost_oi);
265 lmm_objects[i].l_ost_gen =
266 cpu_to_le32(loi->loi_ost_gen);
267 lmm_objects[i].l_ost_idx =
268 cpu_to_le32(loi->loi_ost_idx);
/* NOTE(review): presumably 'offset += size' follows here to advance
 * past this component — line not visible; confirm against upstream. */
271 size = lov_mds_md_size(stripe_count, lsme->lsme_magic);
272 lcme->lcme_size = cpu_to_le32(size);
274 } /* for each layout component */
279 /* Find the max stripecount we should use */
/*
 * Clamp a requested stripe count to what the configuration allows:
 * 0 means "use the filesystem default"; the result never exceeds the
 * number of active OST targets, nor the count that fits in the MDS's
 * maximum EA size (when the MDS advertised OBD_CONNECT_MAX_EASIZE),
 * falling back to the legacy LOV_MAX_STRIPE_COUNT_OLD limit otherwise.
 *
 * NOTE(review): lossy extraction — the "stripe_count == 0" condition
 * before the default assignment and the final return are not visible
 * in this chunk.
 */
280 __u16 lov_get_stripe_count(struct lov_obd *lov, __u32 magic, __u16 stripe_count)
282 __u32 max_stripes = LOV_MAX_STRIPE_COUNT_OLD;
285 stripe_count = lov->desc.ld_default_stripe_count;
286 if (stripe_count > lov->desc.ld_active_tgt_count)
287 stripe_count = lov->desc.ld_active_tgt_count;
292 * stripe count is based on whether ldiskfs can handle
/* Prefer the server-advertised EA-size limit when available. */
295 if (lov->lov_ocd.ocd_connect_flags & OBD_CONNECT_MAX_EASIZE &&
296 lov->lov_ocd.ocd_max_easize)
297 max_stripes = lov_mds_md_max_stripe_count(
298 lov->lov_ocd.ocd_max_easize, magic);
300 if (stripe_count > max_stripes)
301 stripe_count = max_stripes;
/*
 * Drop one reference on *lsmp and clear the caller's pointer.
 *
 * NOTE(review): lossy extraction — only the refcount decrement is
 * visible; the "*lsmp = NULL", the free-on-last-reference path, and
 * the return of the remaining refcount are not shown in this chunk.
 */
306 int lov_free_memmd(struct lov_stripe_md **lsmp)
308 struct lov_stripe_md *lsm = *lsmp;
312 refc = atomic_dec_return(&lsm->lsm_refc);
321 * Unpack LOV object metadata from disk storage. It is packed in LE byte
322 * order and is opaque to the networking layer.
/*
 * Build an in-core lov_stripe_md from an on-disk layout blob: read the
 * leading 32-bit magic (little-endian), look up the matching unpack
 * operations, and dispatch. Returns ERR_PTR(-EINVAL) for a buffer too
 * short to hold the magic or an unrecognized magic.
 *
 * NOTE(review): lossy extraction — the buf_size parameter line, the
 * "if (!op)" condition before the second EINVAL return, and the final
 * RETURN(lsm) are not visible in this chunk.
 */
324 struct lov_stripe_md *lov_unpackmd(struct lov_obd *lov, void *buf,
327 const struct lsm_operations *op;
328 struct lov_stripe_md *lsm;
333 if (buf_size < sizeof(magic))
334 RETURN(ERR_PTR(-EINVAL));
336 magic = le32_to_cpu(*(u32 *)buf);
337 op = lsm_op_find(magic);
339 RETURN(ERR_PTR(-EINVAL));
341 lsm = op->lsm_unpackmd(lov, buf, buf_size);
347 * Retrieve object striping information.
349 * @lump is a pointer to an in-core struct with lmm_ost_count indicating
350 * the maximum number of OST indices which will fit in the user buffer.
351 * lmm_magic must be LOV_USER_MAGIC.
353 * If @size > 0, User specified limited buffer size, usually the buffer is from
354 * ll_lov_setstripe(), and the buffer can only hold basic layout template info.
/*
 * ioctl(LL_IOC_LOV_GETSTRIPE) backend: pack the object's layout into a
 * kernel buffer, byte-swap it into host order on big-endian machines,
 * then copy as much as fits (or as the legacy caller asked for) to the
 * user buffer. Deprecated in favor of llapi_layout_get_by_path().
 *
 * NOTE(review): lossy extraction throughout — locals (lmm_size, rc,
 * i, attr), several GOTO/condition lines, the out_free/out labels,
 * and the final return are not visible in this chunk.
 */
356 int lov_getstripe(const struct lu_env *env, struct lov_object *obj,
357 struct lov_stripe_md *lsm, struct lov_user_md __user *lump,
360 /* we use lov_user_md_v3 because it is larger than lov_user_md_v1 */
361 struct lov_mds_md *lmmk, *lmm;
362 struct lov_foreign_md *lfm;
363 struct lov_user_md_v1 lum;
364 size_t lmmk_size, lum_size = 0;
/* Only the four known in-core layout magics are serviceable. */
371 if (lsm->lsm_magic != LOV_MAGIC_V1 && lsm->lsm_magic != LOV_MAGIC_V3 &&
372 lsm->lsm_magic != LOV_MAGIC_COMP_V1 &&
373 lsm->lsm_magic != LOV_MAGIC_FOREIGN) {
374 CERROR("bad LSM MAGIC: 0x%08X != 0x%08X nor 0x%08X\n",
375 lsm->lsm_magic, LOV_MAGIC_V1, LOV_MAGIC_V3)
380 LCONSOLE_WARN("%s: using old ioctl(LL_IOC_LOV_GETSTRIPE) on "
381 DFID", use llapi_layout_get_by_path()\n",
383 PFID(&obj->lo_cl.co_lu.lo_header->loh_fid));
/* Pack the layout into a kernel scratch buffer first. */
387 lmmk_size = lov_comp_md_size(lsm);
389 OBD_ALLOC_LARGE(lmmk, lmmk_size);
391 GOTO(out, rc = -ENOMEM);
393 lmm_size = lov_lsm_pack(lsm, lmmk, lmmk_size);
395 GOTO(out_free, rc = lmm_size);
/* On big-endian hosts the packed (LE) image must be swabbed into
 * host order before fields are interpreted below. */
397 if (cpu_to_le32(LOV_MAGIC) != LOV_MAGIC) {
398 if (lmmk->lmm_magic == cpu_to_le32(LOV_MAGIC_V1) ||
399 lmmk->lmm_magic == cpu_to_le32(LOV_MAGIC_V3)) {
400 lustre_swab_lov_mds_md(lmmk);
401 lustre_swab_lov_user_md_objects(
402 (struct lov_user_ost_data *)lmmk->lmm_objects,
403 lmmk->lmm_stripe_count);
404 } else if (lmmk->lmm_magic == cpu_to_le32(LOV_MAGIC_COMP_V1)) {
405 lustre_swab_lov_comp_md_v1(
406 (struct lov_comp_md_v1 *)lmmk);
407 } else if (lmmk->lmm_magic == cpu_to_le32(LOV_MAGIC_FOREIGN)) {
408 lfm = (struct lov_foreign_md *)lmmk;
409 __swab32s(&lfm->lfm_magic);
410 __swab32s(&lfm->lfm_length);
411 __swab32s(&lfm->lfm_type);
412 __swab32s(&lfm->lfm_flags);
417 * Legacy appication passes limited buffer, we need to figure out
418 * the user buffer size by the passed in lmm_stripe_count.
420 if (lsm->lsm_magic != LOV_MAGIC_FOREIGN)
421 if (copy_from_user(&lum, lump, sizeof(struct lov_user_md_v1)))
422 GOTO(out_free, rc = -EFAULT);
424 if (lum.lmm_magic == LOV_USER_MAGIC_V1 ||
425 lum.lmm_magic == LOV_USER_MAGIC_V3)
426 lum_size = lov_user_md_size(lum.lmm_stripe_count,
430 struct lov_mds_md *comp_md = lmmk;
433 * Legacy app (ADIO for instance) treats the layout as V1/V3
434 * blindly, we'd return a reasonable V1/V3 for them.
436 if (lmmk->lmm_magic == LOV_MAGIC_COMP_V1) {
437 struct lov_comp_md_v1 *comp_v1;
438 struct cl_object *cl_obj;
/* Need the file size to decide which component to expose. */
443 cl_obj = cl_object_top(&obj->lo_cl);
444 cl_object_attr_lock(cl_obj);
445 cl_object_attr_get(env, cl_obj, &attr);
446 cl_object_attr_unlock(cl_obj);
449 * return the last instantiated component if file size
450 * is non-zero, otherwise, return the last component.
452 comp_v1 = (struct lov_comp_md_v1 *)lmmk;
453 i = attr.cat_size == 0 ? comp_v1->lcm_entry_count : 0;
454 for (; i < comp_v1->lcm_entry_count; i++) {
455 if (!(comp_v1->lcm_entries[i].lcme_flags &
461 comp_md = (struct lov_mds_md *)((char *)comp_v1 +
462 comp_v1->lcm_entries[i].lcme_offset);
463 lum_size = comp_v1->lcm_entries[i].lcme_size;
/* Never copy out more than the packed layout actually holds. */
467 lmm_size = min(lum_size, lmmk_size);
470 lmm_size = lmmk_size;
473 * User specified limited buffer size, usually the buffer is
474 * from ll_lov_setstripe(), and the buffer can only hold basic
475 * layout template info.
477 if (size == 0 || size > lmm_size)
479 if (copy_to_user(lump, lmm, size))
480 GOTO(out_free, rc = -EFAULT);
483 OBD_FREE_LARGE(lmmk, lmmk_size);