/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2017, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 *
 * lustre/lov/lov_pack.c
 *
 * (Un)packing of OST/MDS requests
 *
 * Author: Andreas Dilger <adilger@clusterfs.com>
 */
#define DEBUG_SUBSYSTEM S_LOV

#include <lustre_net.h>
#include <lustre_swab.h>
#include <obd.h>
#include <obd_class.h>
#include <obd_support.h>

#include "lov_cl_internal.h"
#include "lov_internal.h"
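/*
 * Debug helpers: dump a LOV EA in its on-disk (little-endian) form,
 * converting each field to CPU byte order before printing.
 */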
void lov_dump_lmm_common(int level, void *lmmp)
{
	struct lov_mds_md *lmm = lmmp;
	struct ost_id oi;

	lmm_oi_le_to_cpu(&oi, &lmm->lmm_oi);
	CDEBUG_LIMIT(level, "objid "DOSTID", magic 0x%08x, pattern %#x\n",
		     POSTID(&oi), le32_to_cpu(lmm->lmm_magic),
		     le32_to_cpu(lmm->lmm_pattern));
	CDEBUG_LIMIT(level, "stripe_size %u, stripe_count %u, layout_gen %u\n",
		     le32_to_cpu(lmm->lmm_stripe_size),
		     le16_to_cpu(lmm->lmm_stripe_count),
		     le16_to_cpu(lmm->lmm_layout_gen));
}
static void lov_dump_lmm_objects(int level, struct lov_ost_data *lod,
				 int stripe_count)
{
	int i;

	if (stripe_count > LOV_V1_INSANE_STRIPE_COUNT) {
		CDEBUG_LIMIT(level,
			     "bad stripe_count %u > max_stripe_count %u\n",
			     stripe_count, LOV_V1_INSANE_STRIPE_COUNT);
		return;
	}

	for (i = 0; i < stripe_count; ++i, ++lod) {
		struct ost_id oi;

		ostid_le_to_cpu(&lod->l_ost_oi, &oi);
		CDEBUG_LIMIT(level, "stripe %u idx %u subobj "DOSTID"\n", i,
			     le32_to_cpu(lod->l_ost_idx), POSTID(&oi));
	}
}
void lov_dump_lmm_v1(int level, struct lov_mds_md_v1 *lmm)
{
	lov_dump_lmm_common(level, lmm);
	lov_dump_lmm_objects(level, lmm->lmm_objects,
			     le16_to_cpu(lmm->lmm_stripe_count));
}
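/*
 * Usage sketch (illustrative only; nothing in this file calls it this
 * way): given a little-endian V1 EA, e.g. one produced by lov_lsm_pack()
 * below, the helpers above print it under a chosen debug mask:
 *
 *	struct lov_mds_md_v1 *lmm = buf;
 *
 *	lov_dump_lmm_v1(D_LAYOUT, lmm);
 */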
/**
 * Pack LOV striping metadata for disk storage format (in little
 * endian byte order).
 *
 * This follows the getxattr() conventions. If \a buf_size is zero
 * then return the size needed. If \a buf_size is too small then
 * return -ERANGE. Otherwise return the size of the result.
 */
static ssize_t lov_lsm_pack_v1v3(const struct lov_stripe_md *lsm, void *buf,
				 size_t buf_size)
{
	struct lov_mds_md_v1 *lmmv1 = buf;
	struct lov_mds_md_v3 *lmmv3 = buf;
	struct lov_ost_data_v1 *lmm_objects;
	size_t lmm_size;
	unsigned int i;

	ENTRY;

	lmm_size = lov_mds_md_size(lsm->lsm_entries[0]->lsme_stripe_count,
				   lsm->lsm_magic);
	if (buf_size == 0)
		RETURN(lmm_size);

	if (buf_size < lmm_size)
		RETURN(-ERANGE);

	/*
	 * lmmv1 and lmmv3 point to the same struct and have the
	 * same first fields
	 */
	lmmv1->lmm_magic = cpu_to_le32(lsm->lsm_magic);
	lmm_oi_cpu_to_le(&lmmv1->lmm_oi, &lsm->lsm_oi);
	lmmv1->lmm_stripe_size = cpu_to_le32(
				lsm->lsm_entries[0]->lsme_stripe_size);
	lmmv1->lmm_stripe_count = cpu_to_le16(
				lsm->lsm_entries[0]->lsme_stripe_count);
	lmmv1->lmm_pattern = cpu_to_le32(lsm->lsm_entries[0]->lsme_pattern);
	lmmv1->lmm_layout_gen = cpu_to_le16(lsm->lsm_layout_gen);

	if (lsm->lsm_magic == LOV_MAGIC_V3) {
		BUILD_BUG_ON(sizeof(lsm->lsm_entries[0]->lsme_pool_name) !=
			     sizeof(lmmv3->lmm_pool_name));
		strlcpy(lmmv3->lmm_pool_name,
			lsm->lsm_entries[0]->lsme_pool_name,
			sizeof(lmmv3->lmm_pool_name));
		lmm_objects = lmmv3->lmm_objects;
	} else {
		lmm_objects = lmmv1->lmm_objects;
	}

	if (lsm->lsm_is_released)
		RETURN(lmm_size);

	for (i = 0; i < lsm->lsm_entries[0]->lsme_stripe_count; i++) {
		struct lov_oinfo *loi = lsm->lsm_entries[0]->lsme_oinfo[i];

		ostid_cpu_to_le(&loi->loi_oi, &lmm_objects[i].l_ost_oi);
		lmm_objects[i].l_ost_gen = cpu_to_le32(loi->loi_ost_gen);
		lmm_objects[i].l_ost_idx = cpu_to_le32(loi->loi_ost_idx);
	}

	RETURN(lmm_size);
}
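/*
 * For reference (a sketch of the arithmetic, not a definition): the size
 * returned by lov_mds_md_size() above is the fixed V1/V3 header plus one
 * per-stripe object entry, roughly
 *
 *	sizeof(struct lov_mds_md_v1 or _v3) +
 *		stripe_count * sizeof(struct lov_ost_data_v1)
 */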
static ssize_t lov_lsm_pack_foreign(const struct lov_stripe_md *lsm, void *buf,
				    size_t buf_size)
{
	struct lov_foreign_md *lfm = buf;
	size_t lfm_size;

	lfm_size = lsm->lsm_foreign_size;

	if (buf_size == 0)
		RETURN(lfm_size);

	/* if the buffer is too small, return -ERANGE but copy the number of
	 * bytes the caller requested anyway. This may be useful to get only
	 * the header without the need to allocate the full size
	 */
	if (buf_size < lfm_size) {
		memcpy(lfm, lsm_foreign(lsm), buf_size);
		RETURN(-ERANGE);
	}

	/* the full foreign LOV is already available in its cache;
	 * no need to translate format fields to little-endian
	 */
	memcpy(lfm, lsm_foreign(lsm), lsm->lsm_foreign_size);

	RETURN(lfm_size);
}
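/*
 * Usage sketch (illustrative, not called from this file): since the
 * short-buffer path above still copies buf_size bytes before returning
 * -ERANGE, a caller can fetch just the fixed header to learn the foreign
 * layout's type and length before allocating the full buffer:
 *
 *	struct lov_foreign_md hdr;
 *	ssize_t rc = lov_lsm_pack(lsm, &hdr, sizeof(hdr));
 *
 *	if (rc == -ERANGE)
 *		... hdr.lfm_type and hdr.lfm_length are now usable ...
 */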
ssize_t lov_lsm_pack(const struct lov_stripe_md *lsm, void *buf,
		     size_t buf_size)
{
	struct lov_comp_md_v1 *lcmv1 = buf;
	struct lov_comp_md_entry_v1 *lcme;
	struct lov_ost_data_v1 *lmm_objects;
	size_t lmm_size;
	unsigned int entry;
	unsigned int offset;
	unsigned int size;
	unsigned int i;

	ENTRY;

	if (lsm->lsm_magic == LOV_MAGIC_V1 || lsm->lsm_magic == LOV_MAGIC_V3)
		return lov_lsm_pack_v1v3(lsm, buf, buf_size);

	if (lsm->lsm_magic == LOV_MAGIC_FOREIGN)
		return lov_lsm_pack_foreign(lsm, buf, buf_size);

	lmm_size = lov_comp_md_size(lsm);
	if (buf_size == 0)
		RETURN(lmm_size);

	if (buf_size < lmm_size)
		RETURN(-ERANGE);

	lcmv1->lcm_magic = cpu_to_le32(lsm->lsm_magic);
	lcmv1->lcm_size = cpu_to_le32(lmm_size);
	lcmv1->lcm_layout_gen = cpu_to_le32(lsm->lsm_layout_gen);
	lcmv1->lcm_flags = cpu_to_le16(lsm->lsm_flags);
	lcmv1->lcm_mirror_count = cpu_to_le16(lsm->lsm_mirror_count);
	lcmv1->lcm_entry_count = cpu_to_le16(lsm->lsm_entry_count);

	offset = sizeof(*lcmv1) + sizeof(*lcme) * lsm->lsm_entry_count;

	for (entry = 0; entry < lsm->lsm_entry_count; entry++) {
		struct lov_stripe_md_entry *lsme;
		struct lov_mds_md *lmm;
		__u16 stripe_count;

		lsme = lsm->lsm_entries[entry];
		lcme = &lcmv1->lcm_entries[entry];

		lcme->lcme_id = cpu_to_le32(lsme->lsme_id);
		lcme->lcme_flags = cpu_to_le32(lsme->lsme_flags);
		if (lsme->lsme_flags & LCME_FL_NOSYNC)
			lcme->lcme_timestamp =
				cpu_to_le64(lsme->lsme_timestamp);
		lcme->lcme_extent.e_start =
			cpu_to_le64(lsme->lsme_extent.e_start);
		lcme->lcme_extent.e_end =
			cpu_to_le64(lsme->lsme_extent.e_end);
		lcme->lcme_offset = cpu_to_le32(offset);

		lmm = (struct lov_mds_md *)((char *)lcmv1 + offset);
		lmm->lmm_magic = cpu_to_le32(lsme->lsme_magic);
		/* lmm->lmm_oi not set */
		lmm->lmm_pattern = cpu_to_le32(lsme->lsme_pattern);
		lmm->lmm_stripe_size = cpu_to_le32(lsme->lsme_stripe_size);
		lmm->lmm_stripe_count = cpu_to_le16(lsme->lsme_stripe_count);
		lmm->lmm_layout_gen = cpu_to_le16(lsme->lsme_layout_gen);

		if (lsme->lsme_magic == LOV_MAGIC_V3) {
			struct lov_mds_md_v3 *lmmv3 =
						(struct lov_mds_md_v3 *)lmm;

			strlcpy(lmmv3->lmm_pool_name, lsme->lsme_pool_name,
				sizeof(lmmv3->lmm_pool_name));
			lmm_objects = lmmv3->lmm_objects;
		} else {
			lmm_objects =
				((struct lov_mds_md_v1 *)lmm)->lmm_objects;
		}

		if (lsme_inited(lsme) &&
		    !(lsme->lsme_pattern & LOV_PATTERN_F_RELEASED))
			stripe_count = lsme->lsme_stripe_count;
		else
			stripe_count = 0;

		for (i = 0; i < stripe_count; i++) {
			struct lov_oinfo *loi = lsme->lsme_oinfo[i];

			ostid_cpu_to_le(&loi->loi_oi, &lmm_objects[i].l_ost_oi);
			lmm_objects[i].l_ost_gen =
				cpu_to_le32(loi->loi_ost_gen);
			lmm_objects[i].l_ost_idx =
				cpu_to_le32(loi->loi_ost_idx);
		}

		size = lov_mds_md_size(stripe_count, lsme->lsme_magic);
		lcme->lcme_size = cpu_to_le32(size);
		offset += size;
	} /* for each layout component */

	RETURN(lmm_size);
}
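/*
 * Usage sketch (illustrative): callers follow the getxattr()-style
 * two-step convention noted above lov_lsm_pack_v1v3(), probing for the
 * required size first and then packing into an exact-size buffer:
 *
 *	ssize_t lmm_size = lov_lsm_pack(lsm, NULL, 0);
 *
 *	if (lmm_size > 0) {
 *		OBD_ALLOC_LARGE(buf, lmm_size);
 *		if (buf)
 *			lmm_size = lov_lsm_pack(lsm, buf, lmm_size);
 *	}
 *
 * lov_getstripe() below does essentially this, sizing the buffer with
 * lov_comp_md_size() instead of a NULL probe.
 */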
/* Find the maximum stripe count we should use */
__u16 lov_get_stripe_count(struct lov_obd *lov, __u32 magic, __u16 stripe_count)
{
	__u32 max_stripes = LOV_MAX_STRIPE_COUNT_OLD;

	if (!stripe_count)
		stripe_count = lov->desc.ld_default_stripe_count;
	if (stripe_count > lov->desc.ld_active_tgt_count)
		stripe_count = lov->desc.ld_active_tgt_count;
	if (!stripe_count)
		stripe_count = 1;

	/*
	 * stripe count is based on whether ldiskfs can handle
	 * larger EA sizes
	 */
	if (lov->lov_ocd.ocd_connect_flags & OBD_CONNECT_MAX_EASIZE &&
	    lov->lov_ocd.ocd_max_easize)
		max_stripes = lov_mds_md_max_stripe_count(
			lov->lov_ocd.ocd_max_easize, magic);

	if (stripe_count > max_stripes)
		stripe_count = max_stripes;

	return stripe_count;
}
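/*
 * Worked example (numbers illustrative; the exact formula lives in
 * lov_mds_md_max_stripe_count()): the limit is roughly the EA bytes left
 * after the fixed header, divided by the per-stripe object size:
 *
 *	(ocd_max_easize - sizeof(struct lov_mds_md_v1)) /
 *		sizeof(struct lov_ost_data_v1)
 *
 * e.g. about (4096 - 32) / 24 = 169 stripes for a 4 KiB EA, which is why
 * the requested stripe_count is clamped here rather than trusted.
 */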
int lov_free_memmd(struct lov_stripe_md **lsmp)
{
	struct lov_stripe_md *lsm = *lsmp;
	int refc;

	*lsmp = NULL;
	refc = atomic_dec_return(&lsm->lsm_refc);
	LASSERT(refc >= 0);
	if (refc == 0)
		lsm_free(lsm);

	return refc;
}
/*
 * Unpack LOV object metadata from disk storage. It is packed in LE byte
 * order and is opaque to the networking layer.
 */
struct lov_stripe_md *lov_unpackmd(struct lov_obd *lov, void *buf,
				   size_t buf_size)
{
	const struct lsm_operations *op;
	struct lov_stripe_md *lsm;
	u32 magic;

	ENTRY;

	if (buf_size < sizeof(magic))
		RETURN(ERR_PTR(-EINVAL));

	magic = le32_to_cpu(*(u32 *)buf);
	op = lsm_op_find(magic);
	if (!op)
		RETURN(ERR_PTR(-EINVAL));

	lsm = op->lsm_unpackmd(lov, buf, buf_size);

	RETURN(lsm);
}
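/*
 * Usage sketch (illustrative): the returned lsm is refcounted, so a
 * successful unpack pairs with lov_free_memmd() above rather than a
 * direct free:
 *
 *	struct lov_stripe_md *lsm = lov_unpackmd(lov, buf, buf_size);
 *
 *	if (!IS_ERR(lsm)) {
 *		... use lsm ...
 *		lov_free_memmd(&lsm);
 *	}
 */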
/**
 * Retrieve object striping information.
 *
 * @lump is a pointer to an in-core struct with lmm_ost_count indicating
 * the maximum number of OST indices which will fit in the user buffer.
 * lmm_magic must be LOV_USER_MAGIC.
 *
 * If @size > 0, the user passed a limited buffer, usually one from
 * ll_lov_setstripe() that can only hold basic layout template info.
 */
int lov_getstripe(const struct lu_env *env, struct lov_object *obj,
		  struct lov_stripe_md *lsm, struct lov_user_md __user *lump,
		  size_t size)
{
	/* we use lov_user_md_v3 because it is larger than lov_user_md_v1 */
	struct lov_mds_md *lmmk, *lmm;
	struct lov_foreign_md *lfm;
	/* zeroed so a skipped copy_from_user() (FOREIGN case below) cannot
	 * leave a stale lmm_magic on the stack
	 */
	struct lov_user_md_v1 lum = { .lmm_magic = 0 };
	size_t lmmk_size, lum_size = 0;
	ssize_t lmm_size;
	int rc = 0;

	ENTRY;

	if (lsm->lsm_magic != LOV_MAGIC_V1 && lsm->lsm_magic != LOV_MAGIC_V3 &&
	    lsm->lsm_magic != LOV_MAGIC_COMP_V1 &&
	    lsm->lsm_magic != LOV_MAGIC_FOREIGN) {
		CERROR("bad LSM MAGIC: 0x%08X != 0x%08X nor 0x%08X\n",
		       lsm->lsm_magic, LOV_MAGIC_V1, LOV_MAGIC_V3);
		GOTO(out, rc = -EIO);
	}

	lmmk_size = lov_comp_md_size(lsm);

	OBD_ALLOC_LARGE(lmmk, lmmk_size);
	if (!lmmk)
		GOTO(out, rc = -ENOMEM);

	lmm_size = lov_lsm_pack(lsm, lmmk, lmmk_size);
	if (lmm_size < 0)
		GOTO(out_free, rc = lmm_size);
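	/*
	 * lov_lsm_pack() wrote little-endian data; on a big-endian host
	 * (where cpu_to_le32() is not a no-op) swab it back to CPU byte
	 * order before it is copied to userspace.
	 */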
	if (cpu_to_le32(LOV_MAGIC) != LOV_MAGIC) {
		if (lmmk->lmm_magic == cpu_to_le32(LOV_MAGIC_V1) ||
		    lmmk->lmm_magic == cpu_to_le32(LOV_MAGIC_V3)) {
			lustre_swab_lov_mds_md(lmmk);
			lustre_swab_lov_user_md_objects(
				(struct lov_user_ost_data *)lmmk->lmm_objects,
				lmmk->lmm_stripe_count);
		} else if (lmmk->lmm_magic == cpu_to_le32(LOV_MAGIC_COMP_V1)) {
			lustre_swab_lov_comp_md_v1(
				(struct lov_comp_md_v1 *)lmmk);
		} else if (lmmk->lmm_magic == cpu_to_le32(LOV_MAGIC_FOREIGN)) {
			lfm = (struct lov_foreign_md *)lmmk;
			__swab32s(&lfm->lfm_magic);
			__swab32s(&lfm->lfm_length);
			__swab32s(&lfm->lfm_type);
			__swab32s(&lfm->lfm_flags);
		}
	}
	/*
	 * A legacy application passes a limited buffer; figure out
	 * the user buffer size from the passed-in lmm_stripe_count.
	 */
	if (lsm->lsm_magic != LOV_MAGIC_FOREIGN)
		if (copy_from_user(&lum, lump, sizeof(struct lov_user_md_v1)))
			GOTO(out_free, rc = -EFAULT);

	if (lum.lmm_magic == LOV_USER_MAGIC_V1 ||
	    lum.lmm_magic == LOV_USER_MAGIC_V3)
		lum_size = lov_user_md_size(lum.lmm_stripe_count,
					    lum.lmm_magic);
	if (lum_size != 0) {
		struct lov_mds_md *comp_md = lmmk;

		/*
		 * Legacy apps (ADIO for instance) treat the layout as V1/V3
		 * blindly; return a reasonable V1/V3 for them.
		 */
		if (lmmk->lmm_magic == LOV_MAGIC_COMP_V1) {
			struct lov_comp_md_v1 *comp_v1;
			struct cl_object *cl_obj;
			struct cl_attr attr;
			int i;

			attr.cat_size = 0;
			cl_obj = cl_object_top(&obj->lo_cl);
			cl_object_attr_lock(cl_obj);
			cl_object_attr_get(env, cl_obj, &attr);
			cl_object_attr_unlock(cl_obj);

			/*
			 * return the last instantiated component if file size
			 * is non-zero, otherwise, return the last component.
			 */
			comp_v1 = (struct lov_comp_md_v1 *)lmmk;
			i = attr.cat_size == 0 ? comp_v1->lcm_entry_count : 0;
			for (; i < comp_v1->lcm_entry_count; i++) {
				if (!(comp_v1->lcm_entries[i].lcme_flags &
				      LCME_FL_INIT))
					break;
			}
			if (i > 0)
				i--;
			comp_md = (struct lov_mds_md *)((char *)comp_v1 +
				  comp_v1->lcm_entries[i].lcme_offset);
			lum_size = comp_v1->lcm_entries[i].lcme_size;
		}

		lmm = comp_md;
		lmm_size = min(lum_size, lmmk_size);
	} else {
		lmm = lmmk;
		lmm_size = lmmk_size;
	}

	/*
	 * The user specified a limited buffer size; usually the buffer is
	 * from ll_lov_setstripe(), and the buffer can only hold basic
	 * layout template info.
	 */
	if (size == 0 || size > lmm_size)
		size = lmm_size;

	if (copy_to_user(lump, lmm, size))
		GOTO(out_free, rc = -EFAULT);

out_free:
	OBD_FREE_LARGE(lmmk, lmmk_size);
out:
	RETURN(rc);
}