/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2017, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 *
 * lustre/lov/lov_pack.c
 *
 * (Un)packing of OST/MDS requests
 *
 * Author: Andreas Dilger <adilger@clusterfs.com>
 */

#define DEBUG_SUBSYSTEM S_LOV

#include <lustre_net.h>
#include <lustre_swab.h>
#include <obd.h>
#include <obd_class.h>
#include <obd_support.h>

#include "lov_cl_internal.h"
#include "lov_internal.h"

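/*
 * Dump the fields shared by all plain LOV layouts (object id, magic,
 * pattern, stripe size/count and layout generation) at the given debug
 * level.  The on-disk values are little-endian, so they are converted
 * to CPU byte order before printing.
 */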
void lov_dump_lmm_common(int level, void *lmmp)
{
        struct lov_mds_md *lmm = lmmp;
        struct ost_id oi;

        lmm_oi_le_to_cpu(&oi, &lmm->lmm_oi);
        CDEBUG_LIMIT(level, "objid "DOSTID", magic 0x%08x, pattern %#x\n",
                     POSTID(&oi), le32_to_cpu(lmm->lmm_magic),
                     le32_to_cpu(lmm->lmm_pattern));
        CDEBUG_LIMIT(level, "stripe_size %u, stripe_count %u, layout_gen %u\n",
                     le32_to_cpu(lmm->lmm_stripe_size),
                     le16_to_cpu(lmm->lmm_stripe_count),
                     le16_to_cpu(lmm->lmm_layout_gen));
}

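/*
 * Dump the per-stripe OST object descriptors of a plain layout.  Refuse
 * to walk the array if the stripe count exceeds
 * LOV_V1_INSANE_STRIPE_COUNT, which indicates a bad layout.
 */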
static void lov_dump_lmm_objects(int level, struct lov_ost_data *lod,
                                 int stripe_count)
{
        int i;

        if (stripe_count > LOV_V1_INSANE_STRIPE_COUNT) {
                CDEBUG_LIMIT(level,
                             "bad stripe_count %u > max_stripe_count %u\n",
                             stripe_count, LOV_V1_INSANE_STRIPE_COUNT);
                return;
        }

        for (i = 0; i < stripe_count; ++i, ++lod) {
                struct ost_id oi;

                ostid_le_to_cpu(&lod->l_ost_oi, &oi);
                CDEBUG_LIMIT(level, "stripe %u idx %u subobj "DOSTID"\n", i,
                             le32_to_cpu(lod->l_ost_idx), POSTID(&oi));
        }
}

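/* Dump a complete LOV_MAGIC_V1 layout: the common header plus all objects. */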
void lov_dump_lmm_v1(int level, struct lov_mds_md_v1 *lmm)
{
        lov_dump_lmm_common(level, lmm);
        lov_dump_lmm_objects(level, lmm->lmm_objects,
                             le16_to_cpu(lmm->lmm_stripe_count));
}

/**
 * Pack LOV striping metadata for disk storage format (in little
 * endian byte order).
 *
 * This follows the getxattr() conventions. If \a buf_size is zero
 * then return the size needed. If \a buf_size is too small then
 * return -ERANGE. Otherwise return the size of the result.
 */
static ssize_t lov_lsm_pack_v1v3(const struct lov_stripe_md *lsm, void *buf,
                                 size_t buf_size)
{
        struct lov_mds_md_v1 *lmmv1 = buf;
        struct lov_mds_md_v3 *lmmv3 = buf;
        struct lov_ost_data_v1 *lmm_objects;
        size_t lmm_size;
        unsigned int i;

        ENTRY;

        lmm_size = lov_mds_md_size(lsm->lsm_entries[0]->lsme_stripe_count,
                                   lsm->lsm_magic);
        if (buf_size == 0)
                RETURN(lmm_size);

        if (buf_size < lmm_size)
                RETURN(-ERANGE);

        /*
         * lmmv1 and lmmv3 point to the same struct and have the
         * same first fields
         */
        lmmv1->lmm_magic = cpu_to_le32(lsm->lsm_magic);
        lmm_oi_cpu_to_le(&lmmv1->lmm_oi, &lsm->lsm_oi);
        lmmv1->lmm_stripe_size = cpu_to_le32(
                                lsm->lsm_entries[0]->lsme_stripe_size);
        lmmv1->lmm_stripe_count = cpu_to_le16(
                                lsm->lsm_entries[0]->lsme_stripe_count);
        lmmv1->lmm_pattern = cpu_to_le32(lsm->lsm_entries[0]->lsme_pattern);
        lmmv1->lmm_layout_gen = cpu_to_le16(lsm->lsm_layout_gen);

        if (lsm->lsm_magic == LOV_MAGIC_V3) {
                BUILD_BUG_ON(sizeof(lsm->lsm_entries[0]->lsme_pool_name) !=
                                    sizeof(lmmv3->lmm_pool_name));
                strscpy(lmmv3->lmm_pool_name,
                        lsm->lsm_entries[0]->lsme_pool_name,
                        sizeof(lmmv3->lmm_pool_name));
                lmm_objects = lmmv3->lmm_objects;
        } else {
                lmm_objects = lmmv1->lmm_objects;
        }

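        /* a released file has no OST objects to pack */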
        if (lsm->lsm_is_released)
                RETURN(lmm_size);

        for (i = 0; i < lsm->lsm_entries[0]->lsme_stripe_count; i++) {
                struct lov_oinfo *loi = lsm->lsm_entries[0]->lsme_oinfo[i];

                ostid_cpu_to_le(&loi->loi_oi, &lmm_objects[i].l_ost_oi);
                lmm_objects[i].l_ost_gen = cpu_to_le32(loi->loi_ost_gen);
                lmm_objects[i].l_ost_idx = cpu_to_le32(loi->loi_ost_idx);
        }

        RETURN(lmm_size);
}

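/*
 * Pack a foreign (free-format) layout.  The cached copy is already in its
 * on-disk little-endian form, so it is copied out verbatim.  As with
 * lov_lsm_pack_v1v3(), a zero buf_size returns the required size, and a
 * short buffer receives as many bytes as it can hold along with -ERANGE.
 */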
static ssize_t lov_lsm_pack_foreign(const struct lov_stripe_md *lsm, void *buf,
                                    size_t buf_size)
{
        struct lov_foreign_md *lfm = buf;
        size_t lfm_size;

        lfm_size = lsm->lsm_foreign_size;

        if (buf_size == 0)
                RETURN(lfm_size);

        /* if the buffer is too small, return -ERANGE but still copy as many
         * bytes as the caller asked for.  This can be used to fetch just the
         * header without allocating the full size.
         */
        if (buf_size < lfm_size) {
                memcpy(lfm, lsm_foreign(lsm), buf_size);
                RETURN(-ERANGE);
        }

        /* the full foreign LOV is already available in its cache;
         * no need to translate the format fields to little-endian
         */
        memcpy(lfm, lsm_foreign(lsm), lsm->lsm_foreign_size);

        RETURN(lfm_size);
}

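/*
 * Pack a single foreign layout component of a composite file into
 * little-endian format.  Returns the number of bytes packed.
 */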
static unsigned int lov_lsme_pack_foreign(struct lov_stripe_md_entry *lsme,
                                          void *lmm)
{
        struct lov_foreign_md *lfm = (struct lov_foreign_md *)lmm;

        lfm->lfm_magic = cpu_to_le32(lsme->lsme_magic);
        lfm->lfm_length = cpu_to_le32(lsme->lsme_length);
        lfm->lfm_type = cpu_to_le32(lsme->lsme_type);
        lfm->lfm_flags = cpu_to_le32(lsme->lsme_foreign_flags);

        /* TODO: support for foreign layout other than HSM, i.e. DAOS. */
        if (lov_hsm_type_supported(lsme->lsme_type))
                lov_foreign_hsm_to_le(lfm, &lsme->lsme_hsm);

        return lov_foreign_md_size(lsme->lsme_length);
}

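/*
 * Pack a plain V1/V3 component of a composite layout into little-endian
 * format.  OST objects are only emitted for instantiated, non-released
 * components.  Returns the number of bytes packed.
 */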
static unsigned int lov_lsme_pack_v1v3(struct lov_stripe_md_entry *lsme,
                                       struct lov_mds_md *lmm)
{
        struct lov_ost_data_v1 *lmm_objects;
        __u16 stripe_count;
        unsigned int i;

        lmm->lmm_magic = cpu_to_le32(lsme->lsme_magic);
        /* lmm->lmm_oi not set */
        lmm->lmm_pattern = cpu_to_le32(lsme->lsme_pattern);
        lmm->lmm_stripe_size = cpu_to_le32(lsme->lsme_stripe_size);
        lmm->lmm_stripe_count = cpu_to_le16(lsme->lsme_stripe_count);
        lmm->lmm_layout_gen = cpu_to_le16(lsme->lsme_layout_gen);

        if (lsme->lsme_magic == LOV_MAGIC_V3) {
                struct lov_mds_md_v3 *lmmv3 = (struct lov_mds_md_v3 *)lmm;

                strscpy(lmmv3->lmm_pool_name, lsme->lsme_pool_name,
                        sizeof(lmmv3->lmm_pool_name));
                lmm_objects = lmmv3->lmm_objects;
        } else {
                lmm_objects = ((struct lov_mds_md_v1 *)lmm)->lmm_objects;
        }

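        /* only instantiated, non-released components carry OST objects */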
        if (lsme_inited(lsme) && !(lsme->lsme_pattern & LOV_PATTERN_F_RELEASED))
                stripe_count = lsme->lsme_stripe_count;
        else
                stripe_count = 0;

        for (i = 0; i < stripe_count; i++) {
                struct lov_oinfo *loi = lsme->lsme_oinfo[i];

                ostid_cpu_to_le(&loi->loi_oi, &lmm_objects[i].l_ost_oi);
                lmm_objects[i].l_ost_gen = cpu_to_le32(loi->loi_ost_gen);
                lmm_objects[i].l_ost_idx = cpu_to_le32(loi->loi_ost_idx);
        }

        return lov_mds_md_size(stripe_count, lsme->lsme_magic);
}

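/*
 * Pack an in-memory layout into its little-endian on-disk/wire format.
 * Plain V1/V3 and foreign layouts are handed to the helpers above; a
 * composite (PFL) layout is packed as a lov_comp_md_v1 header and entry
 * table followed by the packed components.
 *
 * This follows the same getxattr()-style size convention as the helpers,
 * so a caller could probe the required size first, roughly (sketch only,
 * not a required calling convention):
 *
 *        size = lov_lsm_pack(lsm, NULL, 0);
 *        OBD_ALLOC_LARGE(buf, size);
 *        lov_lsm_pack(lsm, buf, size);
 */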
ssize_t lov_lsm_pack(const struct lov_stripe_md *lsm, void *buf,
                     size_t buf_size)
{
        struct lov_comp_md_v1 *lcmv1 = buf;
        struct lov_comp_md_entry_v1 *lcme;
        size_t lmm_size;
        unsigned int entry;
        unsigned int offset;
        unsigned int size;

        ENTRY;

        if (lsm->lsm_magic == LOV_MAGIC_V1 || lsm->lsm_magic == LOV_MAGIC_V3)
                return lov_lsm_pack_v1v3(lsm, buf, buf_size);

        if (lsm->lsm_magic == LOV_MAGIC_FOREIGN)
                return lov_lsm_pack_foreign(lsm, buf, buf_size);

        lmm_size = lov_comp_md_size(lsm);
        if (buf_size == 0)
                RETURN(lmm_size);

        if (buf_size < lmm_size)
                RETURN(-ERANGE);

        lcmv1->lcm_magic = cpu_to_le32(lsm->lsm_magic);
        lcmv1->lcm_size = cpu_to_le32(lmm_size);
        lcmv1->lcm_layout_gen = cpu_to_le32(lsm->lsm_layout_gen);
        lcmv1->lcm_flags = cpu_to_le16(lsm->lsm_flags);
        lcmv1->lcm_mirror_count = cpu_to_le16(lsm->lsm_mirror_count);
        lcmv1->lcm_entry_count = cpu_to_le16(lsm->lsm_entry_count);

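        /* component bodies are packed back-to-back right after the header
         * and the entry descriptor array
         */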
        offset = sizeof(*lcmv1) + sizeof(*lcme) * lsm->lsm_entry_count;

        for (entry = 0; entry < lsm->lsm_entry_count; entry++) {
                struct lov_stripe_md_entry *lsme;
                struct lov_mds_md *lmm;

                lsme = lsm->lsm_entries[entry];
                lcme = &lcmv1->lcm_entries[entry];

                lcme->lcme_id = cpu_to_le32(lsme->lsme_id);
                lcme->lcme_flags = cpu_to_le32(lsme->lsme_flags);
                if (lsme->lsme_flags & LCME_FL_NOSYNC)
                        lcme->lcme_timestamp =
                                cpu_to_le64(lsme->lsme_timestamp);
                lcme->lcme_extent.e_start =
                        cpu_to_le64(lsme->lsme_extent.e_start);
                lcme->lcme_extent.e_end =
                        cpu_to_le64(lsme->lsme_extent.e_end);
                lcme->lcme_offset = cpu_to_le32(offset);

                lmm = (struct lov_mds_md *)((char *)lcmv1 + offset);
                if (lsme->lsme_magic == LOV_MAGIC_FOREIGN)
                        size = lov_lsme_pack_foreign(lsme, lmm);
                else
                        size = lov_lsme_pack_v1v3(lsme, lmm);
                lcme->lcme_size = cpu_to_le32(size);
                offset += size;
        } /* for each layout component */

        RETURN(lmm_size);
}

/*
 * Return the stripe count to use: fall back to the default (at least 1)
 * if none was requested, and clamp it to the number of active targets
 * and to the largest count that fits in the maximum EA size.
 */
__u16 lov_get_stripe_count(struct lov_obd *lov, __u32 magic, __u16 stripe_count)
{
        __u32 max_stripes = LOV_MAX_STRIPE_COUNT_OLD;

        if (!stripe_count)
                stripe_count = lov->desc.ld_default_stripe_count;
        if (stripe_count > lov->desc.ld_active_tgt_count)
                stripe_count = lov->desc.ld_active_tgt_count;
        if (!stripe_count)
                stripe_count = 1;

        /*
         * the maximum stripe count is based on whether ldiskfs can handle
         * larger EA sizes
         */
        if (lov->lov_ocd.ocd_connect_flags & OBD_CONNECT_MAX_EASIZE &&
            lov->lov_ocd.ocd_max_easize)
                max_stripes = lov_mds_md_max_stripe_count(
                        lov->lov_ocd.ocd_max_easize, magic);

        if (stripe_count > max_stripes)
                stripe_count = max_stripes;

        return stripe_count;
}

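/*
 * Drop one reference on *lsmp and clear the caller's pointer; the stripe
 * metadata is freed when the last reference goes away.  Returns the
 * remaining reference count.
 */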
int lov_free_memmd(struct lov_stripe_md **lsmp)
{
        struct lov_stripe_md *lsm = *lsmp;
        int refc;

        *lsmp = NULL;
        refc = atomic_dec_return(&lsm->lsm_refc);
        LASSERT(refc >= 0);
        if (refc == 0)
                lsm_free(lsm);

        return refc;
}

/*
 * Unpack LOV object metadata from disk storage.  It is packed in LE byte
 * order and is opaque to the networking layer.
 */
struct lov_stripe_md *lov_unpackmd(struct lov_obd *lov, void *buf,
                                   size_t buf_size)
{
        const struct lsm_operations *op;
        struct lov_stripe_md *lsm;
        u32 magic;

        ENTRY;

        if (buf_size < sizeof(magic))
                RETURN(ERR_PTR(-EINVAL));

        magic = le32_to_cpu(*(u32 *)buf);
        op = lsm_op_find(magic);
        if (!op)
                RETURN(ERR_PTR(-EINVAL));

        lsm = op->lsm_unpackmd(lov, buf, buf_size);

        RETURN(lsm);
}

/*
 * Retrieve object striping information.
 *
 * @lump is a pointer to an in-core struct with lmm_ost_count indicating
 * the maximum number of OST indices which will fit in the user buffer.
 * lmm_magic must be LOV_USER_MAGIC.
 *
 * If @size > 0 the user passed a limited buffer, usually from
 * ll_lov_setstripe(), which can only hold basic layout template info.
 */
int lov_getstripe(const struct lu_env *env, struct lov_object *obj,
                  struct lov_stripe_md *lsm, struct lov_user_md __user *lump,
                  size_t size)
{
        /* we use lov_user_md_v3 because it is larger than lov_user_md_v1 */
        struct lov_mds_md *lmmk, *lmm;
        struct lov_foreign_md *lfm;
        struct lov_user_md_v1 lum;
        size_t lmmk_size, lum_size = 0;
        ssize_t lmm_size;
        int rc = 0;

        ENTRY;

        if (lsm->lsm_magic != LOV_MAGIC_V1 && lsm->lsm_magic != LOV_MAGIC_V3 &&
            lsm->lsm_magic != LOV_MAGIC_COMP_V1 &&
            lsm->lsm_magic != LOV_MAGIC_FOREIGN) {
                CERROR("bad LSM MAGIC: 0x%08X != 0x%08X nor 0x%08X\n",
                       lsm->lsm_magic, LOV_MAGIC_V1, LOV_MAGIC_V3);
                GOTO(out, rc = -EIO);
        }

        lmmk_size = lov_comp_md_size(lsm);

        OBD_ALLOC_LARGE(lmmk, lmmk_size);
        if (!lmmk)
                GOTO(out, rc = -ENOMEM);

        lmm_size = lov_lsm_pack(lsm, lmmk, lmmk_size);
        if (lmm_size < 0)
                GOTO(out_free, rc = lmm_size);

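        /* lov_lsm_pack() produces little-endian data; on a big-endian host
         * swab it to CPU byte order before returning it to userspace
         */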
        if (cpu_to_le32(LOV_MAGIC) != LOV_MAGIC) {
                if (lmmk->lmm_magic == cpu_to_le32(LOV_MAGIC_V1) ||
                    lmmk->lmm_magic == cpu_to_le32(LOV_MAGIC_V3)) {
                        lustre_swab_lov_mds_md(lmmk);
                        lustre_swab_lov_user_md_objects(
                                (struct lov_user_ost_data *)lmmk->lmm_objects,
                                lmmk->lmm_stripe_count);
                } else if (lmmk->lmm_magic == cpu_to_le32(LOV_MAGIC_COMP_V1)) {
                        lustre_swab_lov_comp_md_v1(
                                        (struct lov_comp_md_v1 *)lmmk);
                } else if (lmmk->lmm_magic == cpu_to_le32(LOV_MAGIC_FOREIGN)) {
                        lfm = (struct lov_foreign_md *)lmmk;
                        __swab32s(&lfm->lfm_magic);
                        __swab32s(&lfm->lfm_length);
                        __swab32s(&lfm->lfm_type);
                        __swab32s(&lfm->lfm_flags);
                }
        }

        /*
         * A legacy application passes a limited buffer; figure out the user
         * buffer size from the passed-in lmm_stripe_count.
         */
        if (lsm->lsm_magic != LOV_MAGIC_FOREIGN)
                if (copy_from_user(&lum, lump, sizeof(struct lov_user_md_v1)))
                        GOTO(out_free, rc = -EFAULT);

        if (lum.lmm_magic == LOV_USER_MAGIC_V1 ||
            lum.lmm_magic == LOV_USER_MAGIC_V3)
                lum_size = lov_user_md_size(lum.lmm_stripe_count,
                                            lum.lmm_magic);

        if (lum_size != 0) {
                struct lov_mds_md *comp_md = lmmk;

                /*
                 * Legacy apps (ADIO for instance) blindly treat the layout as
                 * V1/V3, so return a reasonable V1/V3 layout for them.
                 */
                if (lmmk->lmm_magic == LOV_MAGIC_COMP_V1) {
                        struct lov_comp_md_v1 *comp_v1;
                        struct cl_object *cl_obj;
                        struct cl_attr attr;
                        int i;

                        attr.cat_size = 0;
                        cl_obj = cl_object_top(&obj->lo_cl);
                        cl_object_attr_lock(cl_obj);
                        cl_object_attr_get(env, cl_obj, &attr);
                        cl_object_attr_unlock(cl_obj);

                        /*
                         * return the last instantiated component if file size
                         * is non-zero, otherwise, return the last component.
                         */
                        comp_v1 = (struct lov_comp_md_v1 *)lmmk;
                        i = attr.cat_size == 0 ? comp_v1->lcm_entry_count : 0;
                        for (; i < comp_v1->lcm_entry_count; i++) {
                                if (!(comp_v1->lcm_entries[i].lcme_flags &
                                                LCME_FL_INIT))
                                        break;
                        }
                        if (i > 0)
                                i--;
                        comp_md = (struct lov_mds_md *)((char *)comp_v1 +
                                        comp_v1->lcm_entries[i].lcme_offset);
                        lum_size = comp_v1->lcm_entries[i].lcme_size;
                }

                lmm = comp_md;
                lmm_size = min(lum_size, lmmk_size);
        } else {
                lmm = lmmk;
                lmm_size = lmmk_size;
        }

        /**
         * The user specified a limited buffer size, usually from
         * ll_lov_setstripe(); such a buffer can only hold basic
         * layout template info.
         */
        if (size == 0 || size > lmm_size)
                size = lmm_size;
        if (copy_to_user(lump, lmm, size))
                GOTO(out_free, rc = -EFAULT);

out_free:
        OBD_FREE_LARGE(lmmk, lmmk_size);
out:
        RETURN(rc);
}