/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2017, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 *
 * lustre/lov/lov_pack.c
 *
 * (Un)packing of OST/MDS requests
 *
 * Author: Andreas Dilger <adilger@clusterfs.com>
 */

#define DEBUG_SUBSYSTEM S_LOV

#include <lustre_net.h>
#include <lustre_swab.h>
#include <obd.h>
#include <obd_class.h>
#include <obd_support.h>

#include "lov_cl_internal.h"
#include "lov_internal.h"

void lov_dump_lmm_common(int level, void *lmmp)
{
        struct lov_mds_md *lmm = lmmp;
        struct ost_id oi;

        lmm_oi_le_to_cpu(&oi, &lmm->lmm_oi);
        CDEBUG_LIMIT(level, "objid "DOSTID", magic 0x%08x, pattern %#x\n",
                     POSTID(&oi), le32_to_cpu(lmm->lmm_magic),
                     le32_to_cpu(lmm->lmm_pattern));
        CDEBUG_LIMIT(level, "stripe_size %u, stripe_count %u, layout_gen %u\n",
                     le32_to_cpu(lmm->lmm_stripe_size),
                     le16_to_cpu(lmm->lmm_stripe_count),
                     le16_to_cpu(lmm->lmm_layout_gen));
}

static void lov_dump_lmm_objects(int level, struct lov_ost_data *lod,
                                 int stripe_count)
{
        int i;

        if (stripe_count > LOV_V1_INSANE_STRIPE_COUNT) {
                CDEBUG_LIMIT(level,
                             "bad stripe_count %u > max_stripe_count %u\n",
                             stripe_count, LOV_V1_INSANE_STRIPE_COUNT);
                return;
        }

        for (i = 0; i < stripe_count; ++i, ++lod) {
                struct ost_id oi;

                ostid_le_to_cpu(&lod->l_ost_oi, &oi);
                CDEBUG_LIMIT(level, "stripe %u idx %u subobj "DOSTID"\n", i,
                             le32_to_cpu(lod->l_ost_idx), POSTID(&oi));
        }
}

void lov_dump_lmm_v1(int level, struct lov_mds_md_v1 *lmm)
{
        lov_dump_lmm_common(level, lmm);
        lov_dump_lmm_objects(level, lmm->lmm_objects,
                             le16_to_cpu(lmm->lmm_stripe_count));
}

/**
 * Pack LOV striping metadata for disk storage format (in little
 * endian byte order).
 *
 * This follows the getxattr() conventions. If \a buf_size is zero
 * then return the size needed. If \a buf_size is too small then
 * return -ERANGE. Otherwise return the size of the result.
 */
static ssize_t lov_lsm_pack_v1v3(const struct lov_stripe_md *lsm, void *buf,
                                 size_t buf_size)
{
        struct lov_mds_md_v1 *lmmv1 = buf;
        struct lov_mds_md_v3 *lmmv3 = buf;
        struct lov_ost_data_v1 *lmm_objects;
        size_t lmm_size;
        unsigned int i;

        ENTRY;

        lmm_size = lov_mds_md_size(lsm->lsm_entries[0]->lsme_stripe_count,
                                   lsm->lsm_magic);
        if (buf_size == 0)
                RETURN(lmm_size);

        if (buf_size < lmm_size)
                RETURN(-ERANGE);

        /*
         * lmmv1 and lmmv3 point to the same struct and have the
         * same first fields
         */
        lmmv1->lmm_magic = cpu_to_le32(lsm->lsm_magic);
        lmm_oi_cpu_to_le(&lmmv1->lmm_oi, &lsm->lsm_oi);
        lmmv1->lmm_stripe_size = cpu_to_le32(
                                lsm->lsm_entries[0]->lsme_stripe_size);
        lmmv1->lmm_stripe_count = cpu_to_le16(
                                lsm->lsm_entries[0]->lsme_stripe_count);
        lmmv1->lmm_pattern = cpu_to_le32(lsm->lsm_entries[0]->lsme_pattern);
        lmmv1->lmm_layout_gen = cpu_to_le16(lsm->lsm_layout_gen);

        if (lsm->lsm_magic == LOV_MAGIC_V3) {
                BUILD_BUG_ON(sizeof(lsm->lsm_entries[0]->lsme_pool_name) !=
                                    sizeof(lmmv3->lmm_pool_name));
                strlcpy(lmmv3->lmm_pool_name,
                        lsm->lsm_entries[0]->lsme_pool_name,
                        sizeof(lmmv3->lmm_pool_name));
                lmm_objects = lmmv3->lmm_objects;
        } else {
                lmm_objects = lmmv1->lmm_objects;
        }

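        /*
         * Released files carry no OST objects, so skip packing the
         * per-stripe object array.
         */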
        if (lsm->lsm_is_released)
                RETURN(lmm_size);

        for (i = 0; i < lsm->lsm_entries[0]->lsme_stripe_count; i++) {
                struct lov_oinfo *loi = lsm->lsm_entries[0]->lsme_oinfo[i];

                ostid_cpu_to_le(&loi->loi_oi, &lmm_objects[i].l_ost_oi);
                lmm_objects[i].l_ost_gen = cpu_to_le32(loi->loi_ost_gen);
                lmm_objects[i].l_ost_idx = cpu_to_le32(loi->loi_ost_idx);
        }

        RETURN(lmm_size);
}
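
/*
 * A minimal caller sketch of the getxattr()-style convention described
 * above (illustrative only; "ea_buf" and "ea_size" are hypothetical names,
 * not part of this file): first probe the required size with a zero
 * buf_size, then allocate and pack for real.
 *
 *	ssize_t ea_size = lov_lsm_pack_v1v3(lsm, NULL, 0);
 *	void *ea_buf;
 *
 *	OBD_ALLOC_LARGE(ea_buf, ea_size);
 *	if (ea_buf != NULL)
 *		ea_size = lov_lsm_pack_v1v3(lsm, ea_buf, ea_size);
 */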

static ssize_t lov_lsm_pack_foreign(const struct lov_stripe_md *lsm, void *buf,
                                    size_t buf_size)
{
        struct lov_foreign_md *lfm = buf;
        size_t lfm_size;

        lfm_size = lsm->lsm_foreign_size;

        if (buf_size == 0)
                RETURN(lfm_size);

        /* If the buffer is too small, return -ERANGE but still copy as
         * many bytes as the caller requested. This can be useful to fetch
         * only the header without having to allocate the full size.
         */
        if (buf_size < lfm_size) {
                memcpy(lfm, lsm_foreign(lsm), buf_size);
                RETURN(-ERANGE);
        }

        /* The full foreign LOV is already available in its cache;
         * no need to translate format fields to little-endian.
         */
        memcpy(lfm, lsm_foreign(lsm), lsm->lsm_foreign_size);

        RETURN(lfm_size);
}
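
/*
 * Because a short buffer is still filled before -ERANGE is returned, a
 * caller can read just the fixed lov_foreign_md header to discover the
 * full length. A minimal sketch, assuming hypothetical locals "hdr" and
 * "full_size" (the cached fields are little-endian, as noted above):
 *
 *	struct lov_foreign_md hdr;
 *
 *	if (lov_lsm_pack_foreign(lsm, &hdr, sizeof(hdr)) == -ERANGE)
 *		full_size = lov_foreign_md_size(le32_to_cpu(hdr.lfm_length));
 */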

unsigned int lov_lsme_pack_foreign(struct lov_stripe_md_entry *lsme, void *lmm)
{
        struct lov_foreign_md *lfm = (struct lov_foreign_md *)lmm;

        lfm->lfm_magic = cpu_to_le32(lsme->lsme_magic);
        lfm->lfm_length = cpu_to_le32(lsme->lsme_length);
        lfm->lfm_type = cpu_to_le32(lsme->lsme_type);
        lfm->lfm_flags = cpu_to_le32(lsme->lsme_foreign_flags);

        /* TODO: support for foreign layout other than HSM, i.e. DAOS. */
        if (lov_hsm_type_supported(lsme->lsme_type))
                lov_foreign_hsm_to_le(lfm, &lsme->lsme_hsm);

        return lov_foreign_md_size(lsme->lsme_length);
}

unsigned int lov_lsme_pack_v1v3(struct lov_stripe_md_entry *lsme,
                                struct lov_mds_md *lmm)
{
        struct lov_ost_data_v1 *lmm_objects;
        __u16 stripe_count;
        unsigned int i;

        lmm->lmm_magic = cpu_to_le32(lsme->lsme_magic);
        /* lmm->lmm_oi not set */
        lmm->lmm_pattern = cpu_to_le32(lsme->lsme_pattern);
        lmm->lmm_stripe_size = cpu_to_le32(lsme->lsme_stripe_size);
        lmm->lmm_stripe_count = cpu_to_le16(lsme->lsme_stripe_count);
        lmm->lmm_layout_gen = cpu_to_le16(lsme->lsme_layout_gen);

        if (lsme->lsme_magic == LOV_MAGIC_V3) {
                struct lov_mds_md_v3 *lmmv3 = (struct lov_mds_md_v3 *)lmm;

                strlcpy(lmmv3->lmm_pool_name, lsme->lsme_pool_name,
                        sizeof(lmmv3->lmm_pool_name));
                lmm_objects = lmmv3->lmm_objects;
        } else {
                lmm_objects = ((struct lov_mds_md_v1 *)lmm)->lmm_objects;
        }

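        /*
         * Only components that are instantiated and not released carry
         * OST objects; for the rest, pack the header alone.
         */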
        if (lsme_inited(lsme) && !(lsme->lsme_pattern & LOV_PATTERN_F_RELEASED))
                stripe_count = lsme->lsme_stripe_count;
        else
                stripe_count = 0;

        for (i = 0; i < stripe_count; i++) {
                struct lov_oinfo *loi = lsme->lsme_oinfo[i];

                ostid_cpu_to_le(&loi->loi_oi, &lmm_objects[i].l_ost_oi);
                lmm_objects[i].l_ost_gen = cpu_to_le32(loi->loi_ost_gen);
                lmm_objects[i].l_ost_idx = cpu_to_le32(loi->loi_ost_idx);
        }

        return lov_mds_md_size(stripe_count, lsme->lsme_magic);
}

ssize_t lov_lsm_pack(const struct lov_stripe_md *lsm, void *buf,
                     size_t buf_size)
{
        struct lov_comp_md_v1 *lcmv1 = buf;
        struct lov_comp_md_entry_v1 *lcme;
        size_t lmm_size;
        unsigned int entry;
        unsigned int offset;
        unsigned int size;

        ENTRY;

        if (lsm->lsm_magic == LOV_MAGIC_V1 || lsm->lsm_magic == LOV_MAGIC_V3)
                return lov_lsm_pack_v1v3(lsm, buf, buf_size);

        if (lsm->lsm_magic == LOV_MAGIC_FOREIGN)
                return lov_lsm_pack_foreign(lsm, buf, buf_size);

        lmm_size = lov_comp_md_size(lsm);
        if (buf_size == 0)
                RETURN(lmm_size);

        if (buf_size < lmm_size)
                RETURN(-ERANGE);

        lcmv1->lcm_magic = cpu_to_le32(lsm->lsm_magic);
        lcmv1->lcm_size = cpu_to_le32(lmm_size);
        lcmv1->lcm_layout_gen = cpu_to_le32(lsm->lsm_layout_gen);
        lcmv1->lcm_flags = cpu_to_le16(lsm->lsm_flags);
        lcmv1->lcm_mirror_count = cpu_to_le16(lsm->lsm_mirror_count);
        lcmv1->lcm_entry_count = cpu_to_le16(lsm->lsm_entry_count);

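        /*
         * The packed component blobs start right after the lov_comp_md_v1
         * header and the lcm_entries[] descriptor array; each entry's
         * lcme_offset/lcme_size below point into that region.
         */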
        offset = sizeof(*lcmv1) + sizeof(*lcme) * lsm->lsm_entry_count;

        for (entry = 0; entry < lsm->lsm_entry_count; entry++) {
                struct lov_stripe_md_entry *lsme;
                struct lov_mds_md *lmm;

                lsme = lsm->lsm_entries[entry];
                lcme = &lcmv1->lcm_entries[entry];

                lcme->lcme_id = cpu_to_le32(lsme->lsme_id);
                lcme->lcme_flags = cpu_to_le32(lsme->lsme_flags);
                if (lsme->lsme_flags & LCME_FL_NOSYNC)
                        lcme->lcme_timestamp =
                                cpu_to_le64(lsme->lsme_timestamp);
                lcme->lcme_extent.e_start =
                        cpu_to_le64(lsme->lsme_extent.e_start);
                lcme->lcme_extent.e_end =
                        cpu_to_le64(lsme->lsme_extent.e_end);
                lcme->lcme_offset = cpu_to_le32(offset);

                lmm = (struct lov_mds_md *)((char *)lcmv1 + offset);
                if (lsme->lsme_magic == LOV_MAGIC_FOREIGN)
                        size = lov_lsme_pack_foreign(lsme, lmm);
                else
                        size = lov_lsme_pack_v1v3(lsme, lmm);
                lcme->lcme_size = cpu_to_le32(size);
                offset += size;
        } /* for each layout component */

        RETURN(lmm_size);
}

/* Find the maximum stripe count we should use */
__u16 lov_get_stripe_count(struct lov_obd *lov, __u32 magic, __u16 stripe_count)
{
        __u32 max_stripes = LOV_MAX_STRIPE_COUNT_OLD;

        if (!stripe_count)
                stripe_count = lov->desc.ld_default_stripe_count;
        if (stripe_count > lov->desc.ld_active_tgt_count)
                stripe_count = lov->desc.ld_active_tgt_count;
        if (!stripe_count)
                stripe_count = 1;

        /*
         * stripe count is based on whether ldiskfs can handle
         * larger EA sizes
         */
        if (lov->lov_ocd.ocd_connect_flags & OBD_CONNECT_MAX_EASIZE &&
            lov->lov_ocd.ocd_max_easize)
                max_stripes = lov_mds_md_max_stripe_count(
                        lov->lov_ocd.ocd_max_easize, magic);

        if (stripe_count > max_stripes)
                stripe_count = max_stripes;

        return stripe_count;
}
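
/*
 * In short: the requested (or default) stripe count is clamped first to
 * the number of active OSTs and then to how many stripe objects fit in
 * the largest layout EA the MDT can store. As a rough worked example
 * (assuming ~24 bytes per lov_ost_data_v1 entry and ignoring the
 * lov_mds_md header): an ocd_max_easize of 64KiB allows a few thousand
 * stripes, so the EA size rather than LOV_MAX_STRIPE_COUNT_OLD becomes
 * the effective limit.
 */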

int lov_free_memmd(struct lov_stripe_md **lsmp)
{
        struct lov_stripe_md *lsm = *lsmp;
        int refc;

        *lsmp = NULL;
        refc = atomic_dec_return(&lsm->lsm_refc);
        LASSERT(refc >= 0);
        if (refc == 0)
                lsm_free(lsm);

        return refc;
}

/*
 * Unpack LOV object metadata from disk storage.  It is packed in LE byte
 * order and is opaque to the networking layer.
 */
struct lov_stripe_md *lov_unpackmd(struct lov_obd *lov, void *buf,
                                   size_t buf_size)
{
        const struct lsm_operations *op;
        struct lov_stripe_md *lsm;
        u32 magic;

        ENTRY;

        if (buf_size < sizeof(magic))
                RETURN(ERR_PTR(-EINVAL));

        magic = le32_to_cpu(*(u32 *)buf);
        op = lsm_op_find(magic);
        if (!op)
                RETURN(ERR_PTR(-EINVAL));

        lsm = op->lsm_unpackmd(lov, buf, buf_size);

        RETURN(lsm);
}
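
/*
 * lov_unpackmd() and lov_lsm_pack() are pack/unpack counterparts for the
 * same little-endian layout blob. A minimal round-trip sketch (illustrative
 * only; error handling trimmed, "lmm_buf" and "lmm_len" are hypothetical):
 *
 *	struct lov_stripe_md *lsm = lov_unpackmd(lov, lmm_buf, lmm_len);
 *
 *	if (!IS_ERR(lsm)) {
 *		lov_lsm_pack(lsm, lmm_buf, lmm_len);
 *		lov_free_memmd(&lsm);
 *	}
 */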

/*
 * Retrieve object striping information.
 *
 * @lump is a pointer to an in-core struct with lmm_ost_count indicating
 * the maximum number of OST indices which will fit in the user buffer.
 * lmm_magic must be LOV_USER_MAGIC.
 *
 * If @size > 0, the user specified a limited buffer size; such a buffer
 * usually comes from ll_lov_setstripe() and can only hold the basic
 * layout template info.
 */
int lov_getstripe(const struct lu_env *env, struct lov_object *obj,
                  struct lov_stripe_md *lsm, struct lov_user_md __user *lump,
                  size_t size)
{
        /* we use lov_user_md_v3 because it is larger than lov_user_md_v1 */
        struct lov_mds_md *lmmk, *lmm;
        struct lov_foreign_md *lfm;
        struct lov_user_md_v1 lum;
        size_t lmmk_size, lum_size = 0;
        ssize_t lmm_size;
        int rc = 0;

        ENTRY;

        if (lsm->lsm_magic != LOV_MAGIC_V1 && lsm->lsm_magic != LOV_MAGIC_V3 &&
            lsm->lsm_magic != LOV_MAGIC_COMP_V1 &&
            lsm->lsm_magic != LOV_MAGIC_FOREIGN) {
                CERROR("bad LSM MAGIC: 0x%08X != 0x%08X nor 0x%08X\n",
                       lsm->lsm_magic, LOV_MAGIC_V1, LOV_MAGIC_V3);
                GOTO(out, rc = -EIO);
        }

        lmmk_size = lov_comp_md_size(lsm);

        OBD_ALLOC_LARGE(lmmk, lmmk_size);
        if (!lmmk)
                GOTO(out, rc = -ENOMEM);

        lmm_size = lov_lsm_pack(lsm, lmmk, lmmk_size);
        if (lmm_size < 0)
                GOTO(out_free, rc = lmm_size);

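        /*
         * lov_lsm_pack() produced a little-endian layout; on a big-endian
         * host (where cpu_to_le32() is not the identity) swab it to CPU
         * byte order before the fields are examined below.
         */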
        if (cpu_to_le32(LOV_MAGIC) != LOV_MAGIC) {
                if (lmmk->lmm_magic == cpu_to_le32(LOV_MAGIC_V1) ||
                    lmmk->lmm_magic == cpu_to_le32(LOV_MAGIC_V3)) {
                        lustre_swab_lov_mds_md(lmmk);
                        lustre_swab_lov_user_md_objects(
                                (struct lov_user_ost_data *)lmmk->lmm_objects,
                                lmmk->lmm_stripe_count);
                } else if (lmmk->lmm_magic == cpu_to_le32(LOV_MAGIC_COMP_V1)) {
                        lustre_swab_lov_comp_md_v1(
                                        (struct lov_comp_md_v1 *)lmmk);
                } else if (lmmk->lmm_magic == cpu_to_le32(LOV_MAGIC_FOREIGN)) {
                        lfm = (struct lov_foreign_md *)lmmk;
                        __swab32s(&lfm->lfm_magic);
                        __swab32s(&lfm->lfm_length);
                        __swab32s(&lfm->lfm_type);
                        __swab32s(&lfm->lfm_flags);
                }
        }

        /*
         * A legacy application passes a limited buffer; figure out the
         * user buffer size from the passed-in lmm_stripe_count.
         */
        if (lsm->lsm_magic != LOV_MAGIC_FOREIGN)
                if (copy_from_user(&lum, lump, sizeof(struct lov_user_md_v1)))
                        GOTO(out_free, rc = -EFAULT);

        if (lum.lmm_magic == LOV_USER_MAGIC_V1 ||
            lum.lmm_magic == LOV_USER_MAGIC_V3)
                lum_size = lov_user_md_size(lum.lmm_stripe_count,
                                            lum.lmm_magic);

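        /*
         * For composite, foreign or other user magics, lum_size stays 0
         * and the full packed layout is copied out below.
         */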
        if (lum_size != 0) {
                struct lov_mds_md *comp_md = lmmk;

                /*
                 * A legacy app (ADIO for instance) treats the layout
                 * blindly as V1/V3, so return a reasonable V1/V3 layout
                 * for it.
                 */
                if (lmmk->lmm_magic == LOV_MAGIC_COMP_V1) {
                        struct lov_comp_md_v1 *comp_v1;
                        struct cl_object *cl_obj;
                        struct cl_attr attr;
                        int i;

                        attr.cat_size = 0;
                        cl_obj = cl_object_top(&obj->lo_cl);
                        cl_object_attr_lock(cl_obj);
                        cl_object_attr_get(env, cl_obj, &attr);
                        cl_object_attr_unlock(cl_obj);

                        /*
                         * Return the last instantiated component if the
                         * file size is non-zero; otherwise return the last
                         * component.
                         */
                        comp_v1 = (struct lov_comp_md_v1 *)lmmk;
                        i = attr.cat_size == 0 ? comp_v1->lcm_entry_count : 0;
                        for (; i < comp_v1->lcm_entry_count; i++) {
                                if (!(comp_v1->lcm_entries[i].lcme_flags &
                                                LCME_FL_INIT))
                                        break;
                        }
                        if (i > 0)
                                i--;
                        comp_md = (struct lov_mds_md *)((char *)comp_v1 +
                                        comp_v1->lcm_entries[i].lcme_offset);
                        lum_size = comp_v1->lcm_entries[i].lcme_size;
                }

                lmm = comp_md;
                lmm_size = min(lum_size, lmmk_size);
        } else {
                lmm = lmmk;
                lmm_size = lmmk_size;
        }

        /*
         * The user specified a limited buffer size; such a buffer usually
         * comes from ll_lov_setstripe() and can only hold the basic layout
         * template info.
         */
        if (size == 0 || size > lmm_size)
                size = lmm_size;
        if (copy_to_user(lump, lmm, size))
                GOTO(out_free, rc = -EFAULT);

out_free:
        OBD_FREE_LARGE(lmmk, lmmk_size);
out:
        RETURN(rc);
}
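
/*
 * Note: lov_getstripe() is typically reached from the LL_IOC_LOV_GETSTRIPE
 * ioctl path in the llite layer, but nothing here depends on that caller.
 */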