Whamcloud - gitweb
LU-13440 lmv: add default LMV inherit depth
[fs/lustre-release.git] / lustre / include / lustre_lmv.h
1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9
10  * This program is distributed in the hope that it will be useful,
11  * but WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
13  * GNU General Public License version 2 for more details.  A copy is
14  * included in the COPYING file that accompanied this code.
15
16  * You should have received a copy of the GNU General Public License
17  * along with this program; if not, write to the Free Software
18  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19  *
20  * GPL HEADER END
21  */
22 /*
23  * Copyright (c) 2014, 2016, Intel Corporation.
24  */
25 /*
26  * lustre/include/lustre_lmv.h
27  *
28  * Lustre LMV structures and functions.
29  *
30  * Author: Di Wang <di.wang@intel.com>
31  */
32
33 #ifndef _LUSTRE_LMV_H
34 #define _LUSTRE_LMV_H
35 #include <uapi/linux/lustre/lustre_idl.h>
36
/* In-memory per-stripe information for a striped directory. */
struct lmv_oinfo {
	struct lu_fid	lmo_fid;	/* FID of this stripe's object */
	u32		lmo_mds;	/* index of the MDT holding the stripe */
	struct inode	*lmo_root;	/* stripe inode; NOTE(review): presumably
					 * NULL until instantiated — confirm */
};
42
43 struct lmv_stripe_md {
44         __u32   lsm_md_magic;
45         __u32   lsm_md_stripe_count;
46         __u32   lsm_md_master_mdt_index;
47         __u32   lsm_md_hash_type;
48         __u8    lsm_md_max_inherit;
49         __u8    lsm_md_max_inherit_rr;
50         __u32   lsm_md_layout_version;
51         __u32   lsm_md_migrate_offset;
52         __u32   lsm_md_migrate_hash;
53         __u32   lsm_md_default_count;
54         __u32   lsm_md_default_index;
55         char    lsm_md_pool_name[LOV_MAXPOOLNAME + 1];
56         struct lmv_oinfo lsm_md_oinfo[0];
57 };
58
59 static inline bool lmv_dir_striped(const struct lmv_stripe_md *lsm)
60 {
61         return lsm && lsm->lsm_md_magic == LMV_MAGIC;
62 }
63
64 static inline bool lmv_dir_foreign(const struct lmv_stripe_md *lsm)
65 {
66         return lsm && lsm->lsm_md_magic == LMV_MAGIC_FOREIGN;
67 }
68
69 static inline bool lmv_dir_layout_changing(const struct lmv_stripe_md *lsm)
70 {
71         return lmv_dir_striped(lsm) &&
72                lmv_hash_is_layout_changing(lsm->lsm_md_hash_type);
73 }
74
75 static inline bool lmv_dir_bad_hash(const struct lmv_stripe_md *lsm)
76 {
77         if (!lmv_dir_striped(lsm))
78                 return false;
79
80         if (lsm->lsm_md_hash_type & LMV_HASH_FLAG_BAD_TYPE)
81                 return true;
82
83         return !lmv_is_known_hash_type(lsm->lsm_md_hash_type);
84 }
85
/* Compare two in-memory LMV layouts for equality.
 *
 * Scalar layout fields and the pool name are compared first; for striped
 * directories the per-stripe FIDs must match as well (lmo_mds/lmo_root
 * are not compared).
 *
 * NOTE(review): lsm_md_max_inherit, lsm_md_max_inherit_rr and the
 * lsm_md_default_* fields are not compared here — confirm that is
 * intentional (i.e. they do not affect layout identity).
 *
 * Assumes both layouts have the same stripe count when striped (the
 * count is compared before the FID loop).
 */
static inline bool
lsm_md_eq(const struct lmv_stripe_md *lsm1, const struct lmv_stripe_md *lsm2)
{
	__u32 idx;

	if (lsm1->lsm_md_magic != lsm2->lsm_md_magic ||
	    lsm1->lsm_md_stripe_count != lsm2->lsm_md_stripe_count ||
	    lsm1->lsm_md_master_mdt_index !=
				lsm2->lsm_md_master_mdt_index ||
	    lsm1->lsm_md_hash_type != lsm2->lsm_md_hash_type ||
	    lsm1->lsm_md_layout_version !=
				lsm2->lsm_md_layout_version ||
	    lsm1->lsm_md_migrate_offset !=
				lsm2->lsm_md_migrate_offset ||
	    lsm1->lsm_md_migrate_hash !=
				lsm2->lsm_md_migrate_hash ||
	    strncmp(lsm1->lsm_md_pool_name, lsm2->lsm_md_pool_name,
		    sizeof(lsm1->lsm_md_pool_name)) != 0)
		return false;

	if (lmv_dir_striped(lsm1)) {
		for (idx = 0; idx < lsm1->lsm_md_stripe_count; idx++) {
			if (!lu_fid_eq(&lsm1->lsm_md_oinfo[idx].lmo_fid,
				       &lsm2->lsm_md_oinfo[idx].lmo_fid))
				return false;
		}
	}

	return true;
}
116
/* Dump an in-memory LMV layout (and, when striped, every stripe FID)
 * to the debug log under mask @mask.
 */
static inline void lsm_md_dump(int mask, const struct lmv_stripe_md *lsm)
{
	int i;

	/* If lsm_md_magic == LMV_MAGIC_FOREIGN pool_name may not be a null
	 * terminated string so only print LOV_MAXPOOLNAME bytes.
	 */
	CDEBUG(mask,
	       "magic %#x stripe count %d master mdt %d hash type %#x max inherit %hhu version %d migrate offset %d migrate hash %#x pool %.*s\n",
	       lsm->lsm_md_magic, lsm->lsm_md_stripe_count,
	       lsm->lsm_md_master_mdt_index, lsm->lsm_md_hash_type,
	       lsm->lsm_md_max_inherit, lsm->lsm_md_layout_version,
	       lsm->lsm_md_migrate_offset, lsm->lsm_md_migrate_hash,
	       LOV_MAXPOOLNAME, lsm->lsm_md_pool_name);

	/* per-stripe FIDs only exist for a striped layout */
	if (!lmv_dir_striped(lsm))
		return;

	for (i = 0; i < lsm->lsm_md_stripe_count; i++)
		CDEBUG(mask, "stripe[%d] "DFID"\n",
		       i, PFID(&lsm->lsm_md_oinfo[i].lmo_fid));
}
139
140 union lmv_mds_md;
141
142 void lmv_free_memmd(struct lmv_stripe_md *lsm);
143
144 static inline void lmv1_le_to_cpu(struct lmv_mds_md_v1 *lmv_dst,
145                                   const struct lmv_mds_md_v1 *lmv_src)
146 {
147         __u32 i;
148
149         lmv_dst->lmv_magic = le32_to_cpu(lmv_src->lmv_magic);
150         lmv_dst->lmv_stripe_count = le32_to_cpu(lmv_src->lmv_stripe_count);
151         lmv_dst->lmv_master_mdt_index =
152                                 le32_to_cpu(lmv_src->lmv_master_mdt_index);
153         lmv_dst->lmv_hash_type = le32_to_cpu(lmv_src->lmv_hash_type);
154         lmv_dst->lmv_layout_version = le32_to_cpu(lmv_src->lmv_layout_version);
155         if (lmv_src->lmv_stripe_count > LMV_MAX_STRIPE_COUNT)
156                 return;
157         for (i = 0; i < lmv_src->lmv_stripe_count; i++)
158                 fid_le_to_cpu(&lmv_dst->lmv_stripe_fids[i],
159                               &lmv_src->lmv_stripe_fids[i]);
160 }
161
162 static inline void lmv_le_to_cpu(union lmv_mds_md *lmv_dst,
163                                  const union lmv_mds_md *lmv_src)
164 {
165         switch (le32_to_cpu(lmv_src->lmv_magic)) {
166         case LMV_MAGIC_V1:
167                 lmv1_le_to_cpu(&lmv_dst->lmv_md_v1, &lmv_src->lmv_md_v1);
168                 break;
169         default:
170                 break;
171         }
172 }
173
/* This hash is only for testing purpose */
static inline unsigned int
lmv_hash_all_chars(unsigned int count, const char *name, int namelen)
{
	const unsigned char *bytes = (const unsigned char *)name;
	unsigned int sum = 0;
	int i;

	/* sum of all byte values, reduced modulo the stripe count;
	 * addition order does not affect the (wrapping) unsigned sum
	 */
	for (i = 0; i < namelen; i++)
		sum += bytes[i];

	return sum % count;
}
188
/* Map @name onto a stripe index in [0, count) using the 64-bit FNV-1a
 * hash.  do_div() is the kernel 64/32 division helper: it divides
 * @hash in place and returns the remainder, which is the stripe index.
 */
static inline unsigned int
lmv_hash_fnv1a(unsigned int count, const char *name, int namelen)
{
	__u64 hash;

	hash = lustre_hash_fnv_1a_64(name, namelen);

	return do_div(hash, count);
}
198
199 /*
200  * Robert Jenkins' function for mixing 32-bit values
201  * http://burtleburtle.net/bob/hash/evahash.html
202  * a, b = random bits, c = input and output
203  *
204  * Mixing inputs to generate an evenly distributed hash.
205  */
206 #define crush_hashmix(a, b, c)                          \
207 do {                                                    \
208         a = a - b;  a = a - c;  a = a ^ (c >> 13);      \
209         b = b - c;  b = b - a;  b = b ^ (a << 8);       \
210         c = c - a;  c = c - b;  c = c ^ (b >> 13);      \
211         a = a - b;  a = a - c;  a = a ^ (c >> 12);      \
212         b = b - c;  b = b - a;  b = b ^ (a << 16);      \
213         c = c - a;  c = c - b;  c = c ^ (b >> 5);       \
214         a = a - b;  a = a - c;  a = a ^ (c >> 3);       \
215         b = b - c;  b = b - a;  b = b ^ (a << 10);      \
216         c = c - a;  c = c - b;  c = c ^ (b >> 15);      \
217 } while (0)
218
219 #define crush_hash_seed 1315423911
220
221 static inline __u32 crush_hash(__u32 a, __u32 b)
222 {
223         __u32 hash = crush_hash_seed ^ a ^ b;
224         __u32 x = 231232;
225         __u32 y = 1232;
226
227         crush_hashmix(a, b, hash);
228         crush_hashmix(x, a, hash);
229         crush_hashmix(b, y, hash);
230
231         return hash;
232 }
233
/* refer to https://github.com/ceph/ceph/blob/master/src/crush/hash.c and
 * https://www.ssrc.ucsc.edu/Papers/weil-sc06.pdf for details of CRUSH
 * algorithm.
 *
 * Map @name to a stripe index in [0, count): the name is first hashed
 * to one of LMV_CRUSH_PG_COUNT placement groups, then the PG is mapped
 * to a stripe by straw selection (highest crush_hash(pg, stripe) wins).
 */
static inline unsigned int
lmv_hash_crush(unsigned int count, const char *name, int namelen)
{
	unsigned long long straw;
	unsigned long long highest_straw = 0;
	unsigned int pg_id;
	unsigned int idx = 0;
	int i;

	/* put temp and backup file on the same MDT where target is located.
	 * temporary file naming rule:
	 * 1. rsync: .<target>.XXXXXX
	 * 2. dstripe: <target>.XXXXXX
	 */
	if (lu_name_is_temp_file(name, namelen, true, 6)) {
		/* strip the leading '.' and the ".XXXXXX" suffix so the
		 * remaining <target> hashes like the target itself
		 */
		name++;
		namelen -= 8;
	} else if (lu_name_is_temp_file(name, namelen, false, 8)) {
		/* strip the ".XXXXXXXX" suffix */
		namelen -= 9;
	} else if (lu_name_is_backup_file(name, namelen, &i)) {
		/* strip the backup suffix of length 'i' */
		LASSERT(i < namelen);
		namelen -= i;
	}

	pg_id = lmv_hash_fnv1a(LMV_CRUSH_PG_COUNT, name, namelen);

	/* distribute PG among all stripes pseudo-randomly, so they are almost
	 * evenly distributed, and when stripe count changes, only (delta /
	 * total) sub files need to be moved, herein 'delta' is added or removed
	 * stripe count, 'total' is total stripe count before change for
	 * removal, or count after change for addition.
	 */
	for (i = 0; i < count; i++) {
		straw = crush_hash(pg_id, i);
		if (straw > highest_straw) {
			highest_straw = straw;
			idx = i;
		}
	}
	LASSERT(idx < count);

	return idx;
}
281
/* directory layout may change in three ways:
 * 1. directory migration, in its LMV source stripes are appended after
 *    target stripes, \a migrate_hash is source hash type, \a migrate_offset is
 *    target stripe count,
 * 2. directory split, \a migrate_hash is hash type before split,
 *    \a migrate_offset is stripe count before split.
 * 3. directory merge, \a migrate_hash is hash type after merge,
 *    \a migrate_offset is stripe count after merge.
 */
/* Map a file name to its stripe index for the given layout.
 *
 * \param hash_type       current hash type (may carry layout-change flags)
 * \param stripe_count    current total stripe count
 * \param migrate_hash    see the layout-change cases above
 * \param migrate_offset  see the layout-change cases above
 * \param name            file name (not NUL-terminated)
 * \param namelen         length of \a name, must be > 0
 * \param new_layout      true to resolve against the new (target) layout,
 *                        false for the old (source) layout
 *
 * \retval stripe index on success
 * \retval -EBADFD if the effective hash type is unknown
 */
static inline int
__lmv_name_to_stripe_index(__u32 hash_type, __u32 stripe_count,
			   __u32 migrate_hash, __u32 migrate_offset,
			   const char *name, int namelen, bool new_layout)
{
	__u32 saved_hash = hash_type;
	__u32 saved_count = stripe_count;
	int stripe_index = 0;

	LASSERT(namelen > 0);
	LASSERT(stripe_count > 0);

	/* pick the effective hash type and stripe count for the requested
	 * (old or new) side of the layout change, per the cases above
	 */
	if (lmv_hash_is_splitting(hash_type)) {
		if (!new_layout) {
			hash_type = migrate_hash;
			stripe_count = migrate_offset;
		}
	} else if (lmv_hash_is_merging(hash_type)) {
		if (new_layout) {
			hash_type = migrate_hash;
			stripe_count = migrate_offset;
		}
	} else if (lmv_hash_is_migrating(hash_type)) {
		if (new_layout) {
			stripe_count = migrate_offset;
		} else {
			/* source stripes follow the target stripes */
			hash_type = migrate_hash;
			stripe_count -= migrate_offset;
		}
	}

	/* with a single stripe everything maps to index 0 */
	if (stripe_count > 1) {
		switch (hash_type & LMV_HASH_TYPE_MASK) {
		case LMV_HASH_TYPE_ALL_CHARS:
			stripe_index = lmv_hash_all_chars(stripe_count, name,
							  namelen);
			break;
		case LMV_HASH_TYPE_FNV_1A_64:
			stripe_index = lmv_hash_fnv1a(stripe_count, name,
						      namelen);
			break;
		case LMV_HASH_TYPE_CRUSH:
			stripe_index = lmv_hash_crush(stripe_count, name,
						      namelen);
			break;
		default:
			return -EBADFD;
		}
	}

	LASSERT(stripe_index < stripe_count);

	/* old side of a migration: shift past the target stripes that sit
	 * at the front of the combined stripe array
	 */
	if (!new_layout && lmv_hash_is_migrating(saved_hash))
		stripe_index += migrate_offset;

	LASSERT(stripe_index < saved_count);

	CDEBUG(D_INFO, "name %.*s hash=%#x/%#x idx=%d/%u/%u under %s layout\n",
	       namelen, name, saved_hash, migrate_hash, stripe_index,
	       saved_count, migrate_offset, new_layout ? "new" : "old");

	return stripe_index;
}
354
355 static inline int lmv_name_to_stripe_index(struct lmv_mds_md_v1 *lmv,
356                                            const char *name, int namelen)
357 {
358         if (lmv->lmv_magic == LMV_MAGIC_V1)
359                 return __lmv_name_to_stripe_index(lmv->lmv_hash_type,
360                                                   lmv->lmv_stripe_count,
361                                                   lmv->lmv_migrate_hash,
362                                                   lmv->lmv_migrate_offset,
363                                                   name, namelen, true);
364
365         if (lmv->lmv_magic == cpu_to_le32(LMV_MAGIC_V1))
366                 return __lmv_name_to_stripe_index(
367                                         le32_to_cpu(lmv->lmv_hash_type),
368                                         le32_to_cpu(lmv->lmv_stripe_count),
369                                         le32_to_cpu(lmv->lmv_migrate_hash),
370                                         le32_to_cpu(lmv->lmv_migrate_offset),
371                                         name, namelen, true);
372
373         return -EINVAL;
374 }
375
376 static inline int lmv_name_to_stripe_index_old(struct lmv_mds_md_v1 *lmv,
377                                                const char *name, int namelen)
378 {
379         if (lmv->lmv_magic == LMV_MAGIC_V1 ||
380             lmv->lmv_magic == LMV_MAGIC_STRIPE)
381                 return __lmv_name_to_stripe_index(lmv->lmv_hash_type,
382                                                   lmv->lmv_stripe_count,
383                                                   lmv->lmv_migrate_hash,
384                                                   lmv->lmv_migrate_offset,
385                                                   name, namelen, false);
386
387         if (lmv->lmv_magic == cpu_to_le32(LMV_MAGIC_V1) ||
388             lmv->lmv_magic == cpu_to_le32(LMV_MAGIC_STRIPE))
389                 return __lmv_name_to_stripe_index(
390                                         le32_to_cpu(lmv->lmv_hash_type),
391                                         le32_to_cpu(lmv->lmv_stripe_count),
392                                         le32_to_cpu(lmv->lmv_migrate_hash),
393                                         le32_to_cpu(lmv->lmv_migrate_offset),
394                                         name, namelen, false);
395
396         return -EINVAL;
397 }
398
399 static inline bool lmv_user_magic_supported(__u32 lum_magic)
400 {
401         return lum_magic == LMV_USER_MAGIC ||
402                lum_magic == LMV_USER_MAGIC_SPECIFIC ||
403                lum_magic == LMV_MAGIC_FOREIGN;
404 }
405
406 /* master LMV is sane */
407 static inline bool lmv_is_sane(const struct lmv_mds_md_v1 *lmv)
408 {
409         if (!lmv)
410                 return false;
411
412         if (le32_to_cpu(lmv->lmv_magic) != LMV_MAGIC_V1)
413                 goto insane;
414
415         if (le32_to_cpu(lmv->lmv_stripe_count) == 0)
416                 goto insane;
417
418         if (!lmv_is_known_hash_type(le32_to_cpu(lmv->lmv_hash_type)))
419                 goto insane;
420
421         return true;
422 insane:
423         LMV_DEBUG(D_ERROR, lmv, "insane");
424         return false;
425 }
426
427 /* LMV can be either master or stripe LMV */
428 static inline bool lmv_is_sane2(const struct lmv_mds_md_v1 *lmv)
429 {
430         if (!lmv)
431                 return false;
432
433         if (le32_to_cpu(lmv->lmv_magic) != LMV_MAGIC_V1 &&
434             le32_to_cpu(lmv->lmv_magic) != LMV_MAGIC_STRIPE)
435                 goto insane;
436
437         if (le32_to_cpu(lmv->lmv_stripe_count) == 0)
438                 goto insane;
439
440         if (!lmv_is_known_hash_type(le32_to_cpu(lmv->lmv_hash_type)))
441                 goto insane;
442
443         return true;
444 insane:
445         LMV_DEBUG(D_ERROR, lmv, "insane");
446         return false;
447 }
448
449 static inline bool lmv_is_splitting(const struct lmv_mds_md_v1 *lmv)
450 {
451         if (!lmv_is_sane2(lmv))
452                 return false;
453
454         return lmv_hash_is_splitting(cpu_to_le32(lmv->lmv_hash_type));
455 }
456
457 static inline bool lmv_is_merging(const struct lmv_mds_md_v1 *lmv)
458 {
459         if (!lmv_is_sane2(lmv))
460                 return false;
461
462         return lmv_hash_is_merging(cpu_to_le32(lmv->lmv_hash_type));
463 }
464
465 static inline bool lmv_is_migrating(const struct lmv_mds_md_v1 *lmv)
466 {
467         if (!lmv_is_sane(lmv))
468                 return false;
469
470         return lmv_hash_is_migrating(cpu_to_le32(lmv->lmv_hash_type));
471 }
472
473 static inline bool lmv_is_restriping(const struct lmv_mds_md_v1 *lmv)
474 {
475         if (!lmv_is_sane2(lmv))
476                 return false;
477
478         return lmv_hash_is_splitting(cpu_to_le32(lmv->lmv_hash_type)) ||
479                lmv_hash_is_merging(cpu_to_le32(lmv->lmv_hash_type));
480 }
481
482 static inline bool lmv_is_layout_changing(const struct lmv_mds_md_v1 *lmv)
483 {
484         if (!lmv_is_sane2(lmv))
485                 return false;
486
487         return lmv_hash_is_splitting(cpu_to_le32(lmv->lmv_hash_type)) ||
488                lmv_hash_is_merging(cpu_to_le32(lmv->lmv_hash_type)) ||
489                lmv_hash_is_migrating(cpu_to_le32(lmv->lmv_hash_type));
490 }
491
492 #endif