1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
4 * Copyright (C) 2002, 2003 Cluster File Systems, Inc.
5 * Author: Andreas Dilger <adilger@clusterfs.com>
7 * This file is part of Lustre, http://www.lustre.org.
9 * Lustre is free software; you can redistribute it and/or
10 * modify it under the terms of version 2 of the GNU General Public
11 * License as published by the Free Software Foundation.
13 * Lustre is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
18 * You should have received a copy of the GNU General Public License
19 * along with Lustre; if not, write to the Free Software
20 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
22 * (Un)packing of OST/MDS requests
26 #define DEBUG_SUBSYSTEM S_LOV
28 #include <liblustre.h>
31 #include <linux/lustre_net.h>
32 #include <linux/obd.h>
33 #include <linux/obd_lov.h>
34 #include <linux/obd_class.h>
35 #include <linux/obd_support.h>
36 #include <linux/lustre_user.h>
38 #include "lov_internal.h"
/* Dump the fields of a v0 on-disk LOV EA (lov_mds_md_v0) to the Lustre
 * debug log at debug mask 'level'.  On-disk values are little-endian,
 * hence the le*_to_cpu() swabs before printing.
 * NOTE(review): the embedded line numbering is discontinuous here (41, 43,
 * 48, 53, 55, 58-61 absent) — braces, the loop's skip statement and the
 * function close are not visible in this listing. */
40 void lov_dump_lmm_v0(int level, struct lov_mds_md_v0 *lmm)
42 int i, num_ost, stripe, idx;
/* Starting OST slot comes from lmm_stripe_offset; slots are then walked
 * consecutively from there. */
44 num_ost = le32_to_cpu(lmm->lmm_ost_count);
45 idx = le32_to_cpu(lmm->lmm_stripe_offset);
46 CDEBUG(level, "objid "LPX64", magic 0x%08X, ost_count %u\n",
47 le64_to_cpu(lmm->lmm_object_id), le32_to_cpu(lmm->lmm_magic),
49 CDEBUG(level,"stripe_size %u, stripe_count %u, stripe_offset %u\n",
50 le32_to_cpu(lmm->lmm_stripe_size),
51 le32_to_cpu(lmm->lmm_stripe_count), idx);
/* One log line per populated OST slot; a zero object id marks an empty
 * slot (its skip path — presumably 'continue' — is on a missing line).
 * 'stripe' is apparently incremented only for populated slots, on a line
 * not visible here; 'num_ost' is computed above but the loop bound swabs
 * lmm_ost_count again — TODO confirm against the full file. */
52 for (i = stripe = 0; i < le32_to_cpu(lmm->lmm_ost_count); i++, idx++) {
54 if (lmm->lmm_objects[idx].l_object_id == 0)
56 CDEBUG(level, "stripe %u idx %u subobj "LPX64"\n", stripe, idx,
57 le64_to_cpu(lmm->lmm_objects[idx].l_object_id));
/* Dump a v1 on-disk LOV EA (lov_mds_md_v1): header fields followed by one
 * line per stripe (stripe number, OST index, object group/id).  Values are
 * stored little-endian on disk, hence the swabs.
 * NOTE(review): declaration of loop counter 'i' (original line 65) and the
 * opening/closing braces are not visible in this listing. */
62 void lov_dump_lmm_v1(int level, struct lov_mds_md_v1 *lmm)
64 struct lov_ost_data_v1 *lod;
67 CDEBUG(level, "objid "LPX64", magic 0x%08X, pattern %#X\n",
68 le64_to_cpu(lmm->lmm_object_id), le32_to_cpu(lmm->lmm_magic),
69 le32_to_cpu(lmm->lmm_pattern));
70 CDEBUG(level,"stripe_size %u, stripe_count %u\n",
71 le32_to_cpu(lmm->lmm_stripe_size),
72 le32_to_cpu(lmm->lmm_stripe_count));
/* Unlike v0, v1 stripes are dense: exactly lmm_stripe_count entries, each
 * carrying its own OST index, so no empty-slot skipping is needed. */
73 for (i = 0, lod = lmm->lmm_objects;
74 i < le32_to_cpu(lmm->lmm_stripe_count); i++, lod++)
75 CDEBUG(level, "stripe %u idx %u subobj "LPX64"/"LPX64"\n",
76 i, le32_to_cpu(lod->l_ost_idx),
77 le64_to_cpu(lod->l_object_gr),
78 le64_to_cpu(lod->l_object_id));
/* Assert 'test'; on failure, first dump the 'lmm' in the caller's scope at
 * D_ERROR so the offending EA is in the log before LASSERT fires.  Relies
 * on a variable named 'lmm' being visible at the expansion site.
 * NOTE(review): the do { ... } while (0) wrapper lines (original 82, 85-86)
 * are not visible in this listing. */
81 #define LMM_ASSERT(test) \
83 if (!(test)) lov_dump_lmm(D_ERROR, lmm); \
84 LASSERT(test); /* so we know what assertion failed */ \
87 /* Pack LOV object metadata for disk storage. It is packed in LE byte
88 * order and is opaque to the networking layer.
90 * XXX In the future, this will be enhanced to get the EA size from the
91 * underlying OSC device(s) to get their EA sizes so we can stack
92 * LOVs properly. For now lov_mds_md_size() just assumes one obd_id
/* Pack an in-core lov_stripe_md into an on-disk/wire lov_mds_md buffer in
 * little-endian byte order.  If *lmmp is NULL a buffer of the needed size
 * is allocated via OBD_ALLOC; the caller owns it afterwards.
 * NOTE(review): this listing is missing the original lines holding the
 * 'lmm_size'/'i' declarations, the ENTRY/RETURN macros, the error returns
 * after the magic check, the "size-only"/"free" branch conditions around
 * lines 119-128, and the allocation-failure check after OBD_ALLOC —
 * comments below describe only what the visible lines show. */
95 int lov_packmd(struct obd_export *exp, struct lov_mds_md **lmmp,
96 struct lov_stripe_md *lsm)
98 struct obd_device *obd = class_exp2obd(exp);
99 struct lov_obd *lov = &obd->u.lov;
100 struct lov_oinfo *loi;
101 struct lov_mds_md *lmm;
/* Default to striping over every configured target; overridden below when
 * a valid lsm supplies its own stripe count. */
102 int stripe_count = lov->desc.ld_tgt_count;
/* Reject an in-core lsm whose magic is not the current LOV_MAGIC (the
 * RETURN on failure is on a missing line). */
108 if (lsm->lsm_magic != LOV_MAGIC) {
109 CERROR("bad mem LOV MAGIC: 0x%08X != 0x%08X\n",
110 lsm->lsm_magic, LOV_MAGIC);
113 stripe_count = lsm->lsm_stripe_count;
116 /* XXX LOV STACKING call into osc for sizes */
117 lmm_size = lov_mds_md_size(stripe_count);
/* Free path: a previously packed *lmmp is released using the stripe count
 * stored inside it, so the freed size matches the allocated size.  The
 * branch condition selecting this path is on a missing line. */
123 stripe_count = le32_to_cpu((*lmmp)->lmm_stripe_count);
124 OBD_FREE(*lmmp, lov_mds_md_size(stripe_count));
130 OBD_ALLOC(*lmmp, lmm_size);
/* All on-disk fields are written cpu_to_le*; readers swab them back. */
136 lmm->lmm_magic = cpu_to_le32(LOV_MAGIC); /* only write new format */
141 lmm->lmm_object_id = cpu_to_le64(lsm->lsm_object_id);
142 lmm->lmm_object_gr = cpu_to_le64(lsm->lsm_object_gr);
143 lmm->lmm_stripe_size = cpu_to_le32(lsm->lsm_stripe_size);
144 lmm->lmm_stripe_count = cpu_to_le32(stripe_count);
145 lmm->lmm_pattern = cpu_to_le32(lsm->lsm_pattern);
/* Copy each stripe's object id/group and OST index/generation; a zero
 * loi_id would indicate an unset stripe, hence the LASSERT. */
147 for (i = 0, loi = lsm->lsm_oinfo; i < stripe_count; i++, loi++) {
148 /* XXX LOV STACKING call down to osc_packmd() to do packing */
149 LASSERT(loi->loi_id);
150 lmm->lmm_objects[i].l_object_id = cpu_to_le64(loi->loi_id);
151 lmm->lmm_objects[i].l_object_gr = cpu_to_le64(loi->loi_gr);
152 lmm->lmm_objects[i].l_ost_gen = cpu_to_le32(loi->loi_ost_gen);
153 lmm->lmm_objects[i].l_ost_idx = cpu_to_le32(loi->loi_ost_idx);
/* Resolve an effective stripe count from a caller request: fall back to
 * the LOV's default when unset, and clamp to the number of active targets
 * when the result is zero or too large.
 * NOTE(review): the guard that selects the default (original line ~161,
 * presumably 'if (!stripe_count)') and the final return are on lines not
 * visible in this listing. */
159 int lov_get_stripecnt(struct lov_obd *lov, int stripe_count)
162 stripe_count = lov->desc.ld_default_stripe_count;
163 if (!stripe_count || stripe_count > lov->desc.ld_active_tgt_count)
164 stripe_count = lov->desc.ld_active_tgt_count;
/* Sanity-check a v0 on-disk LOV EA before unpacking: buffer size, stripe
 * count, object id, stripe offset and stripe size.  On success the
 * (CPU-endian) stripe count is returned through *stripe_count.
 * NOTE(review): every failure RETURN (-EINVAL per usual Lustre style) and
 * the success return are on lines missing from this listing. */
169 static int lov_verify_lmm_v0(struct lov_mds_md_v0 *lmm, int lmm_bytes,
/* Buffer must at least cover the fixed header before any field is read. */
172 if (lmm_bytes < sizeof(*lmm)) {
173 CERROR("lov_mds_md too small: %d, need at least %d\n",
174 lmm_bytes, (int)sizeof(*lmm));
/* NOTE(review): swabbed with le16_to_cpu here while the dump routine uses
 * le32_to_cpu on the same field — confirm lmm_stripe_count's width in the
 * v0 struct definition. */
178 *stripe_count = le16_to_cpu(lmm->lmm_stripe_count);
180 if (*stripe_count == 0 ||
181 *stripe_count > le32_to_cpu(lmm->lmm_ost_count)) {
182 CERROR("bad stripe count %d\n", *stripe_count);
183 lov_dump_lmm_v0(D_WARNING, lmm);
/* The buffer must also cover the variable-length object array.
 * NOTE(review): the check uses lov_mds_md_v0_size() but the error message
 * prints lov_mds_md_size() — the reported "need" may not match the size
 * actually tested; worth unifying when the full file is in view. */
187 if (lmm_bytes < lov_mds_md_v0_size(*stripe_count)) {
188 CERROR("LOV EA too small: %d, need %d\n",
189 lmm_bytes, lov_mds_md_size(*stripe_count));
190 lov_dump_lmm_v0(D_WARNING, lmm);
194 if (lmm->lmm_object_id == 0) {
195 CERROR("zero object id\n");
196 lov_dump_lmm_v0(D_WARNING, lmm);
/* First-stripe offset must fall within the OST table. */
200 if (le32_to_cpu(lmm->lmm_stripe_offset) >
201 le32_to_cpu(lmm->lmm_ost_count)) {
202 CERROR("stripe offset %d more than number of OSTs %d\n",
203 le32_to_cpu(lmm->lmm_stripe_offset),
204 le32_to_cpu(lmm->lmm_ost_count));
205 lov_dump_lmm_v0(D_WARNING, lmm);
209 if (lmm->lmm_stripe_size == 0) {
210 CERROR("zero stripe size\n");
211 lov_dump_lmm_v0(D_WARNING, lmm);
/* Sanity-check a v1 on-disk LOV EA before unpacking: buffer size, magic,
 * stripe count, object id, striping pattern and stripe size.  On success
 * the CPU-endian stripe count is returned through *stripe_count.
 * NOTE(review): as with the v0 variant, the RETURN statements after each
 * error block and at the end are on lines missing from this listing. */
218 static int lov_verify_lmm_v1(struct lov_mds_md_v1 *lmm, int lmm_bytes,
221 if (lmm_bytes < sizeof(*lmm)) {
222 CERROR("lov_mds_md too small: %d, need at least %d\n",
223 lmm_bytes, (int)sizeof(*lmm));
/* Compare the magic in disk byte order — le32_to_cpu(LOV_MAGIC_V1) swabs
 * the constant on big-endian hosts so the raw field can be tested. */
227 if (lmm->lmm_magic != le32_to_cpu(LOV_MAGIC_V1)) {
228 CERROR("bad disk LOV MAGIC: 0x%08X\n",
229 le32_to_cpu(*(__u32 *)lmm));
233 *stripe_count = le32_to_cpu(lmm->lmm_stripe_count);
235 if (*stripe_count == 0) {
236 CERROR("bad stripe count %d\n", *stripe_count);
237 lov_dump_lmm_v1(D_WARNING, lmm);
/* Buffer must also cover the dense per-stripe object array. */
241 if (lmm_bytes < lov_mds_md_size(*stripe_count)) {
242 CERROR("LOV EA too small: %d, need %d\n",
243 lmm_bytes, lov_mds_md_size(*stripe_count));
244 lov_dump_lmm_v1(D_WARNING, lmm);
248 if (lmm->lmm_object_id == 0) {
249 CERROR("zero object id\n");
250 lov_dump_lmm_v1(D_WARNING, lmm);
/* Only RAID0 striping is supported by this code path. */
254 if (lmm->lmm_pattern != cpu_to_le32(LOV_PATTERN_RAID0)) {
255 CERROR("bad striping pattern\n");
256 lov_dump_lmm_v1(D_WARNING, lmm);
/* Reject zero stripe size, and a total stripe width that would overflow
 * an unsigned long (i.e. the address space on 32-bit hosts). */
260 if (lmm->lmm_stripe_size == 0 ||
261 (__u64)le32_to_cpu(lmm->lmm_stripe_size) * *stripe_count > ~0UL) {
262 CERROR("bad stripe size %u\n",
263 le32_to_cpu(lmm->lmm_stripe_size));
264 lov_dump_lmm_v1(D_WARNING, lmm);
/* Dispatch EA verification on the leading magic word: v1 and v0 formats
 * are handled by their respective checkers; anything else is rejected.
 * NOTE(review): the 'case LOV_MAGIC_V1:' / 'case LOV_MAGIC_V0:' /
 * 'default:' labels and the final error return (original lines 274, 276,
 * 278, 281-282) are missing from this listing — the calls below sit under
 * those labels in the full file. */
271 static int lov_verify_lmm(void *lmm, int lmm_bytes, int *stripe_count)
273 switch (le32_to_cpu(*(__u32 *)lmm)) {
275 return lov_verify_lmm_v1(lmm, lmm_bytes, stripe_count);
277 return lov_verify_lmm_v0(lmm, lmm_bytes, stripe_count);
279 CERROR("bad disk LOV MAGIC: 0x%08X\n",
280 le32_to_cpu(*(__u32 *)lmm));
/* Allocate and initialise an in-core stripe MD (*lsmp) for 'stripe_count'
 * stripes with the given pattern.  Marks the first stripe's OST index as
 * unset (~0) so callers/later code can detect "no explicit offset".
 * NOTE(review): the allocation-failure check after OBD_ALLOC, the body of
 * the per-stripe init loop (original lines ~302-303), and the return of
 * lsm_size are on lines missing from this listing. */
285 int lov_alloc_memmd(struct lov_stripe_md **lsmp, int stripe_count, int pattern)
287 int lsm_size = lov_stripe_md_size(stripe_count);
288 struct lov_oinfo *loi;
291 OBD_ALLOC(*lsmp, lsm_size);
295 (*lsmp)->lsm_magic = LOV_MAGIC;
296 (*lsmp)->lsm_stripe_count = stripe_count;
/* Maximum file size scales with the number of stripes. */
297 (*lsmp)->lsm_maxbytes = LUSTRE_STRIPE_MAXBYTES * stripe_count;
298 (*lsmp)->lsm_pattern = pattern;
299 (*lsmp)->lsm_oinfo[0].loi_ost_idx = ~0;
301 for (i = 0, loi = (*lsmp)->lsm_oinfo; i < stripe_count; i++, loi++)
/* Release an in-core stripe MD allocated by lov_alloc_memmd(), sizing the
 * free from the stripe count stored in the MD itself.
 * NOTE(review): the full function presumably also clears *lsmp (a line is
 * missing after the OBD_FREE) — confirm against the complete file. */
307 void lov_free_memmd(struct lov_stripe_md **lsmp)
309 OBD_FREE(*lsmp, lov_stripe_md_size((*lsmp)->lsm_stripe_count));
/* Unpack a v0 on-disk LOV EA into an in-core stripe MD.  v0 stores a
 * sparse per-OST array starting at lmm_stripe_offset; populated slots
 * (non-zero object id) become consecutive lsm_oinfo entries.
 * NOTE(review): the skip statement for empty slots, the left-hand side of
 * the loi_id assignment (original line ~332), the 'loi++' advance, and the
 * return statements are on lines missing from this listing. */
313 int lov_unpackmd_v0(struct lov_obd *lov, struct lov_stripe_md *lsm,
314 struct lov_mds_md_v0 *lmm)
316 struct lov_oinfo *loi;
317 int i, ost_offset, ost_count;
319 lsm->lsm_object_id = le64_to_cpu(lmm->lmm_object_id);
320 /* lsm->lsm_object_gr = 0; implicit */
321 lsm->lsm_stripe_size = le32_to_cpu(lmm->lmm_stripe_size);
/* v0 EAs carry no pattern field; RAID0 is the only format they encode. */
322 lsm->lsm_pattern = LOV_PATTERN_RAID0;
323 ost_offset = le32_to_cpu(lmm->lmm_stripe_offset);
324 ost_count = le16_to_cpu(lmm->lmm_ost_count);
/* Walk all ost_count slots, wrapping ost_offset modulo ost_count so the
 * scan starts at the stripe offset and covers the whole table once. */
326 for (i = 0, loi = lsm->lsm_oinfo; i < ost_count; i++, ost_offset++) {
327 ost_offset %= ost_count;
329 if (!lmm->lmm_objects[ost_offset].l_object_id)
333 le64_to_cpu(lmm->lmm_objects[ost_offset].l_object_id);
334 /* loi->loi_gr = 0; implicit */
335 loi->loi_ost_idx = ost_offset;
336 /* loi->loi_ost_gen = 0; implicit */
/* Cross-check: the number of populated slots found must equal the stripe
 * count the verifier extracted earlier; otherwise the EA is corrupt. */
340 if (loi - lsm->lsm_oinfo != lsm->lsm_stripe_count) {
341 CERROR("missing objects in lmm struct\n");
342 lov_dump_lmm_v0(D_WARNING, lmm);
/* Unpack a v1 on-disk LOV EA into an in-core stripe MD.  v1 stores a
 * dense stripe array with explicit OST index/generation per stripe.
 * NOTE(review): 'loi' is never advanced in the visible loop lines —
 * presumably 'loi++' sits on a missing line near the end of the loop body
 * (original ~370-371); also note the OST-index bound check below uses '>'
 * rather than '>=', which looks off-by-one — confirm ld_tgt_count
 * semantics before changing. */
349 int lov_unpackmd_v1(struct lov_obd *lov, struct lov_stripe_md *lsm,
350 struct lov_mds_md_v1 *lmm)
352 struct lov_oinfo *loi;
355 lsm->lsm_object_id = le64_to_cpu(lmm->lmm_object_id);
356 lsm->lsm_object_gr = le64_to_cpu(lmm->lmm_object_gr);
357 lsm->lsm_stripe_size = le32_to_cpu(lmm->lmm_stripe_size);
358 lsm->lsm_pattern = le32_to_cpu(lmm->lmm_pattern);
360 for (i = 0, loi = lsm->lsm_oinfo; i < lsm->lsm_stripe_count; i++) {
361 /* XXX LOV STACKING call down to osc_unpackmd() */
362 loi->loi_id = le64_to_cpu(lmm->lmm_objects[i].l_object_id);
363 loi->loi_gr = le64_to_cpu(lmm->lmm_objects[i].l_object_gr);
364 loi->loi_ost_idx = le32_to_cpu(lmm->lmm_objects[i].l_ost_idx);
365 loi->loi_ost_gen = le32_to_cpu(lmm->lmm_objects[i].l_ost_gen);
/* Reject an EA referencing an OST beyond this LOV's configured targets. */
366 if (loi->loi_ost_idx > lov->desc.ld_tgt_count) {
367 CERROR("OST index %d more than OST count %d\n",
368 loi->loi_ost_idx, lov->desc.ld_tgt_count);
369 lov_dump_lmm_v1(D_WARNING, lmm);
378 /* Unpack LOV object metadata from disk storage. It is packed in LE byte
379 * order and is opaque to the networking layer.
/* Top-level unpack entry point: validate an on-disk EA (if supplied),
 * allocate/free the in-core stripe MD as requested, and dispatch to the
 * v0/v1 unpackers on the EA's magic.
 * NOTE(review): this listing is missing the guard conditions that select
 * each branch — 'if (lmm) { ... }', 'if (!lsmp)', 'if (!lmm)' returns, the
 * case labels in the switch, and the final RETURN — so the comments below
 * describe the visible actions only. */
381 int lov_unpackmd(struct obd_export *exp, struct lov_stripe_md **lsmp,
382 struct lov_mds_md *lmm, int lmm_bytes)
384 struct obd_device *obd = class_exp2obd(exp);
385 struct lov_obd *lov = &obd->u.lov;
386 int rc = 0, stripe_count, lsm_size;
389 /* If passed an MDS struct use values from there, otherwise defaults */
391 rc = lov_verify_lmm(lmm, lmm_bytes, &stripe_count);
/* No EA supplied: use the LOV's default/active-target stripe count. */
395 stripe_count = lov_get_stripecnt(lov, 0);
398 /* If we aren't passed an lsmp struct, we just want the size */
400 /* XXX LOV STACKING call into osc for sizes */
401 RETURN(lov_stripe_md_size(stripe_count));
403 /* If we are passed an allocated struct but nothing to unpack, free */
405 lov_free_memmd(lsmp);
409 lsm_size = lov_alloc_memmd(lsmp, stripe_count, LOV_PATTERN_RAID0);
413 /* If we are passed a pointer but nothing to unpack, we only alloc */
/* Dispatch on the EA's (little-endian) magic; the v0 case also handles
 * the MDS's byte-swapped/old formats via the (void *) cast. */
417 switch (le32_to_cpu(lmm->lmm_magic)) {
419 rc = lov_unpackmd_v1(lov, *lsmp, lmm);
422 rc = lov_unpackmd_v0(lov, *lsmp, (void *)lmm);
/* On unpack failure the freshly allocated MD is torn down again. */
427 lov_free_memmd(lsmp);
434 /* Configure object striping information on a new file.
436 * @lmmu is a pointer to a user struct with one or more of the fields set to
437 * indicate the application preference: lmm_stripe_count, lmm_stripe_size,
438 * lmm_stripe_offset, and lmm_stripe_pattern. lmm_magic must be LOV_MAGIC.
439 * @lsmp is a pointer to an in-core stripe MD that needs to be filled in.
/* Configure striping for a new file from a userspace lov_user_md: copy it
 * in, validate magic/pattern/stripe-size/offset, resolve the stripe count,
 * then allocate an in-core stripe MD reflecting the request.
 * NOTE(review): the error returns after each validation block (-EFAULT /
 * -EINVAL per the CDEBUG texts), the declarations of 'rc'/'stripe_count',
 * and the ENTRY/RETURN macros are on lines missing from this listing. */
441 int lov_setstripe(struct obd_export *exp, struct lov_stripe_md **lsmp,
442 struct lov_user_md *lump)
444 struct obd_device *obd = class_exp2obd(exp);
445 struct lov_obd *lov = &obd->u.lov;
446 struct lov_user_md lum;
/* Snapshot the user struct into kernel space before validating it, so a
 * racing userspace writer cannot change fields after the checks. */
451 rc = copy_from_user(&lum, lump, sizeof(lum));
455 if (lum.lmm_magic != LOV_USER_MAGIC) {
456 CDEBUG(D_IOCTL, "bad userland LOV MAGIC: %#08x != %#08x\n",
457 lum.lmm_magic, LOV_USER_MAGIC);
/* Pattern 0 means "default": the LOV's configured pattern, else RAID0. */
461 if (lum.lmm_pattern == 0) {
462 lum.lmm_pattern = lov->desc.ld_pattern ?
463 lov->desc.ld_pattern : LOV_PATTERN_RAID0;
466 if (lum.lmm_pattern != LOV_PATTERN_RAID0) {
467 CDEBUG(D_IOCTL, "bad userland stripe pattern: %#x\n",
/* Stripe size must be page-aligned (bitmask test against PAGE_SIZE-1). */
472 if (lum.lmm_stripe_size & (PAGE_SIZE - 1)) {
473 CDEBUG(D_IOCTL, "stripe size %u not multiple of %lu\n",
474 lum.lmm_stripe_size, PAGE_SIZE);
/* Offset must be a valid active-OST index, or the all-ones sentinel
 * meaning "let the LOV choose". */
478 if ((lum.lmm_stripe_offset >= lov->desc.ld_active_tgt_count) &&
479 (lum.lmm_stripe_offset != (typeof(lum.lmm_stripe_offset))(-1))) {
480 CDEBUG(D_IOCTL, "stripe offset %u > number of active OSTs %u\n",
481 lum.lmm_stripe_offset, lov->desc.ld_active_tgt_count);
484 stripe_count = lov_get_stripecnt(lov, lum.lmm_stripe_count);
/* Total stripe width must fit in unsigned long (32-bit address space). */
486 if ((__u64)lum.lmm_stripe_size * stripe_count > ~0UL) {
487 CDEBUG(D_IOCTL, "stripe width %ux%u > %lu on 32-bit system\n",
488 lum.lmm_stripe_size, (int)lum.lmm_stripe_count, ~0UL);
492 rc = lov_alloc_memmd(lsmp, stripe_count, lum.lmm_pattern);
/* Record the user's requested first-OST index and stripe size in the MD. */
497 (*lsmp)->lsm_oinfo[0].loi_ost_idx = lum.lmm_stripe_offset;
498 (*lsmp)->lsm_stripe_size = lum.lmm_stripe_size;
503 /* Retrieve object striping information.
505 * @lump is a pointer to an in-core struct with lmm_ost_count indicating
506 * the maximum number of OST indices which will fit in the user buffer.
507 * lmm_magic must be LOV_USER_MAGIC.
/* Return a file's striping information to userspace: read the user's
 * lov_user_md header to learn the buffer capacity, pack the in-core lsm
 * into wire format, and copy it out if it fits.
 * NOTE(review): the declarations of 'rc'/'lmm_size', the assignment of
 * lmm_size (presumably from lov_packmd's return), the early returns, and
 * the error codes for the too-small/copy-failure branches are on lines
 * missing from this listing. */
509 int lov_getstripe(struct obd_export *exp, struct lov_stripe_md *lsm,
510 struct lov_user_md *lump)
512 struct lov_user_md lum;
513 struct lov_mds_md *lmmk = NULL;
/* Fetch the caller's header first to learn lmm_stripe_count capacity. */
520 rc = copy_from_user(&lum, lump, sizeof(lum));
524 if (lum.lmm_magic != LOV_USER_MAGIC)
/* Pack into a freshly allocated kernel-side wire buffer (lmmk). */
527 rc = lov_packmd(exp, &lmmk, lsm);
533 /* FIXME: Bug 1185 - copy fields properly when structs change */
/* These asserts pin the assumption that the user-visible struct and the
 * wire struct are layout-identical, so a raw copy_to_user is valid. */
534 LASSERT(sizeof(lum) == sizeof(*lmmk));
535 LASSERT(sizeof(lum.lmm_objects[0]) == sizeof(lmmk->lmm_objects[0]));
537 /* User wasn't expecting this many OST entries */
538 if (lum.lmm_stripe_count < lmmk->lmm_stripe_count)
540 else if (copy_to_user(lump, lmmk, lmm_size))
/* Free the packed wire buffer regardless of the copy-out outcome. */
543 obd_free_diskmd(exp, &lmmk);