/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License version 2 for more details.  A copy is
 * included in the COPYING file that accompanied this code.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2014, 2016, Intel Corporation.
 */
/*
 * lustre/include/lustre_lmv.h
 *
 * Lustre LMV structures and functions.
 *
 * Author: Di Wang <di.wang@intel.com>
 */

#ifndef _LUSTRE_LMV_H
#define _LUSTRE_LMV_H
#include <uapi/linux/lustre/lustre_idl.h>

struct lmv_oinfo {
        struct lu_fid   lmo_fid;
        u32             lmo_mds;
        struct inode    *lmo_root;
};

struct lmv_stripe_md {
        __u32   lsm_md_magic;
        __u32   lsm_md_stripe_count;
        __u32   lsm_md_master_mdt_index;
        __u32   lsm_md_hash_type;
        __u8    lsm_md_max_inherit;
        __u8    lsm_md_max_inherit_rr;
        __u32   lsm_md_layout_version;
        __u32   lsm_md_migrate_offset;
        __u32   lsm_md_migrate_hash;
        char    lsm_md_pool_name[LOV_MAXPOOLNAME + 1];
        struct lmv_oinfo lsm_md_oinfo[0];
};

struct lmv_stripe_object {
        atomic_t                        lso_refs;
        union {
                struct lmv_stripe_md    lso_lsm;
                struct lmv_foreign_md   lso_lfm;
        };
};

static inline bool lmv_dir_striped(const struct lmv_stripe_object *lso)
{
        return lso && lso->lso_lsm.lsm_md_magic == LMV_MAGIC;
}

static inline bool lmv_dir_foreign(const struct lmv_stripe_object *lso)
{
        return lso && lso->lso_lsm.lsm_md_magic == LMV_MAGIC_FOREIGN;
}

static inline bool lmv_dir_layout_changing(const struct lmv_stripe_object *lso)
{
        return lmv_dir_striped(lso) &&
               lmv_hash_is_layout_changing(lso->lso_lsm.lsm_md_hash_type);
}

static inline bool lmv_dir_bad_hash(const struct lmv_stripe_object *lso)
{
        if (!lmv_dir_striped(lso))
                return false;

        if (lso->lso_lsm.lsm_md_hash_type & LMV_HASH_FLAG_BAD_TYPE)
                return true;

        return !lmv_is_known_hash_type(lso->lso_lsm.lsm_md_hash_type);
}

/* the default LMV max_inherit decreases by one for each level of subdirectory */
static inline __u8 lmv_inherit_next(__u8 inherit)
{
        if (inherit == LMV_INHERIT_END || inherit == LMV_INHERIT_NONE)
                return LMV_INHERIT_NONE;

        if (inherit == LMV_INHERIT_UNLIMITED || inherit > LMV_INHERIT_MAX)
                return inherit;

        return inherit - 1;
}

static inline __u8 lmv_inherit_rr_next(__u8 inherit_rr)
{
        if (inherit_rr == LMV_INHERIT_RR_NONE ||
            inherit_rr == LMV_INHERIT_RR_UNLIMITED ||
            inherit_rr > LMV_INHERIT_RR_MAX)
                return inherit_rr;

        return inherit_rr - 1;
}

static inline bool lmv_is_inheritable(__u8 inherit)
{
        return inherit == LMV_INHERIT_UNLIMITED ||
               (inherit > LMV_INHERIT_END && inherit <= LMV_INHERIT_MAX);
}

static inline bool lsm_md_eq(const struct lmv_stripe_object *lso1,
                             const struct lmv_stripe_object *lso2)
{
        const struct lmv_stripe_md *lsm1 = &lso1->lso_lsm;
        const struct lmv_stripe_md *lsm2 = &lso2->lso_lsm;
        __u32 idx;

        if (lsm1->lsm_md_magic != lsm2->lsm_md_magic ||
            lsm1->lsm_md_stripe_count != lsm2->lsm_md_stripe_count ||
            lsm1->lsm_md_master_mdt_index !=
                                lsm2->lsm_md_master_mdt_index ||
            lsm1->lsm_md_hash_type != lsm2->lsm_md_hash_type ||
            lsm1->lsm_md_max_inherit != lsm2->lsm_md_max_inherit ||
            lsm1->lsm_md_max_inherit_rr != lsm2->lsm_md_max_inherit_rr ||
            lsm1->lsm_md_layout_version !=
                                lsm2->lsm_md_layout_version ||
            lsm1->lsm_md_migrate_offset !=
                                lsm2->lsm_md_migrate_offset ||
            lsm1->lsm_md_migrate_hash !=
                                lsm2->lsm_md_migrate_hash ||
            strncmp(lsm1->lsm_md_pool_name, lsm2->lsm_md_pool_name,
                    sizeof(lsm1->lsm_md_pool_name)) != 0)
                return false;

        if (lmv_dir_striped(lso1)) {
                for (idx = 0; idx < lsm1->lsm_md_stripe_count; idx++) {
                        if (!lu_fid_eq(&lsm1->lsm_md_oinfo[idx].lmo_fid,
                                       &lsm2->lsm_md_oinfo[idx].lmo_fid))
                                return false;
                }
        } else if (lsm1->lsm_md_magic == LMV_USER_MAGIC_SPECIFIC) {
                for (idx = 0; idx < lsm1->lsm_md_stripe_count; idx++) {
                        if (lsm1->lsm_md_oinfo[idx].lmo_mds !=
                            lsm2->lsm_md_oinfo[idx].lmo_mds)
                                return false;
                }
        }

        return true;
}

static inline void
lmv_stripe_object_dump(int mask, const struct lmv_stripe_object *lsmo)
{
        const struct lmv_stripe_md *lsm = &lsmo->lso_lsm;
        int i;

        CDEBUG(mask,
               "dump LMV: refs %u magic=%#x count=%u index=%u hash=%s:%#x max_inherit=%hhu max_inherit_rr=%hhu version=%u migrate_offset=%u migrate_hash=%s:%x pool=%.*s\n",
               atomic_read(&lsmo->lso_refs), lsm->lsm_md_magic,
               lsm->lsm_md_stripe_count, lsm->lsm_md_master_mdt_index,
               lmv_is_known_hash_type(lsm->lsm_md_hash_type) ?
                mdt_hash_name[lsm->lsm_md_hash_type & LMV_HASH_TYPE_MASK] :
                "invalid", lsm->lsm_md_hash_type,
               lsm->lsm_md_max_inherit, lsm->lsm_md_max_inherit_rr,
               lsm->lsm_md_layout_version, lsm->lsm_md_migrate_offset,
               lmv_is_known_hash_type(lsm->lsm_md_migrate_hash) ?
                mdt_hash_name[lsm->lsm_md_migrate_hash & LMV_HASH_TYPE_MASK] :
                "invalid", lsm->lsm_md_migrate_hash,
               LOV_MAXPOOLNAME, lsm->lsm_md_pool_name);

        if (!lmv_dir_striped(lsmo))
                return;

        for (i = 0; i < lsm->lsm_md_stripe_count; i++)
                CDEBUG_LIMIT(mask, "stripe[%d] "DFID"\n",
                             i, PFID(&lsm->lsm_md_oinfo[i].lmo_fid));
}

static inline bool
lmv_object_inherited(const struct lmv_stripe_object *plsm,
                     const struct lmv_stripe_object *clsm)
{
        return plsm && clsm &&
               plsm->lso_lsm.lsm_md_magic ==
                        clsm->lso_lsm.lsm_md_magic &&
               plsm->lso_lsm.lsm_md_stripe_count ==
                        clsm->lso_lsm.lsm_md_stripe_count &&
               plsm->lso_lsm.lsm_md_master_mdt_index ==
                        clsm->lso_lsm.lsm_md_master_mdt_index &&
               plsm->lso_lsm.lsm_md_hash_type ==
                        clsm->lso_lsm.lsm_md_hash_type &&
               lmv_inherit_next(plsm->lso_lsm.lsm_md_max_inherit) ==
                        clsm->lso_lsm.lsm_md_max_inherit &&
               lmv_inherit_rr_next(plsm->lso_lsm.lsm_md_max_inherit_rr) ==
                        clsm->lso_lsm.lsm_md_max_inherit_rr;
}

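/*
 * Usage sketch (hypothetical caller, illustrative only): a default LMV
 * propagates one directory level at a time, with lsm_md_max_inherit
 * counting down by one per level until it reaches LMV_INHERIT_END or
 * LMV_INHERIT_NONE, at which point lmv_inherit_next() returns
 * LMV_INHERIT_NONE and inheritance stops.
 *
 *        __u8 next = lmv_inherit_next(parent_lso->lso_lsm.lsm_md_max_inherit);
 *        bool ok = lmv_object_inherited(parent_lso, child_lso);
 *        // true only if the child's max_inherit equals 'next', its
 *        // max_inherit_rr equals lmv_inherit_rr_next() of the parent's
 *        // value, and magic, stripe count, master MDT index and hash type
 *        // all match the parent's
 */
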
union lmv_mds_md;

struct lmv_stripe_object *lmv_stripe_object_alloc(__u32 magic,
                                                  const union lmv_mds_md *lmm,
                                                  size_t lmm_size);

void lmv_stripe_object_put(struct lmv_stripe_object **lsm_obj);

struct lmv_stripe_object *
        lmv_stripe_object_get(struct lmv_stripe_object *lsm_obj);

static inline void lmv1_le_to_cpu(struct lmv_mds_md_v1 *lmv_dst,
                                  const struct lmv_mds_md_v1 *lmv_src)
{
        __u32 i;

        lmv_dst->lmv_magic = le32_to_cpu(lmv_src->lmv_magic);
        lmv_dst->lmv_stripe_count = le32_to_cpu(lmv_src->lmv_stripe_count);
        lmv_dst->lmv_master_mdt_index =
                                le32_to_cpu(lmv_src->lmv_master_mdt_index);
        lmv_dst->lmv_hash_type = le32_to_cpu(lmv_src->lmv_hash_type);
        lmv_dst->lmv_layout_version = le32_to_cpu(lmv_src->lmv_layout_version);
        if (lmv_dst->lmv_stripe_count > LMV_MAX_STRIPE_COUNT)
                return;
        for (i = 0; i < lmv_dst->lmv_stripe_count; i++)
                fid_le_to_cpu(&lmv_dst->lmv_stripe_fids[i],
                              &lmv_src->lmv_stripe_fids[i]);
}

static inline void lmv_le_to_cpu(union lmv_mds_md *lmv_dst,
                                 const union lmv_mds_md *lmv_src)
{
        switch (le32_to_cpu(lmv_src->lmv_magic)) {
        case LMV_MAGIC_V1:
                lmv1_le_to_cpu(&lmv_dst->lmv_md_v1, &lmv_src->lmv_md_v1);
                break;
        default:
                break;
        }
}

/* this hash is for testing purposes only */
static inline unsigned int
lmv_hash_all_chars(unsigned int count, const char *name, int namelen)
{
        unsigned int c = 0;
        const unsigned char *p = (const unsigned char *)name;

        while (--namelen >= 0)
                c += p[namelen];

        c = c % count;

        return c;
}

static inline unsigned int
lmv_hash_fnv1a(unsigned int count, const char *name, int namelen)
{
        __u64 hash;

        hash = lustre_hash_fnv_1a_64(name, namelen);

        return do_div(hash, count);
}

/*
 * Robert Jenkins' function for mixing 32-bit values
 * http://burtleburtle.net/bob/hash/evahash.html
 * a, b = random bits, c = input and output
 *
 * Mixing inputs to generate an evenly distributed hash.
 */
#define crush_hashmix(a, b, c)                          \
do {                                                    \
        a = a - b;  a = a - c;  a = a ^ (c >> 13);      \
        b = b - c;  b = b - a;  b = b ^ (a << 8);       \
        c = c - a;  c = c - b;  c = c ^ (b >> 13);      \
        a = a - b;  a = a - c;  a = a ^ (c >> 12);      \
        b = b - c;  b = b - a;  b = b ^ (a << 16);      \
        c = c - a;  c = c - b;  c = c ^ (b >> 5);       \
        a = a - b;  a = a - c;  a = a ^ (c >> 3);       \
        b = b - c;  b = b - a;  b = b ^ (a << 10);      \
        c = c - a;  c = c - b;  c = c ^ (b >> 15);      \
} while (0)

#define crush_hash_seed 1315423911

static inline __u32 crush_hash(__u32 a, __u32 b)
{
        __u32 hash = crush_hash_seed ^ a ^ b;
        __u32 x = 231232;
        __u32 y = 1232;

        crush_hashmix(a, b, hash);
        crush_hashmix(x, a, hash);
        crush_hashmix(b, y, hash);

        return hash;
}

/* refer to https://github.com/ceph/ceph/blob/master/src/crush/hash.c and
 * https://www.ssrc.ucsc.edu/Papers/weil-sc06.pdf for details of the CRUSH
 * algorithm.
 */
static inline unsigned int
lmv_hash_crush(unsigned int count, const char *name, int namelen, bool crush2)
{
        unsigned long long straw;
        unsigned long long highest_straw = 0;
        unsigned int pg_id;
        unsigned int idx = 0;
        int i;

        /* put temp and backup files on the same MDT where the target is
         * located. temporary file naming rules:
         * 1. rsync: .<target>.XXXXXX
         * 2. dstripe: <target>.XXXXXXXX
         */
        if (lu_name_is_temp_file(name, namelen, true, 6, crush2)) {
                name++;
                namelen -= 8;
        } else if (lu_name_is_temp_file(name, namelen, false, 8, crush2)) {
                namelen -= 9;
        } else if (lu_name_is_backup_file(name, namelen, &i)) {
                LASSERT(i < namelen);
                namelen -= i;
        }

        pg_id = lmv_hash_fnv1a(LMV_CRUSH_PG_COUNT, name, namelen);

        /* distribute PGs among all stripes pseudo-randomly, so they are
         * almost evenly distributed, and when the stripe count changes, only
         * (delta / total) sub-files need to be moved, where 'delta' is the
         * number of stripes added or removed, and 'total' is the total stripe
         * count before the change for removal, or after the change for
         * addition.
         */
        for (i = 0; i < count; i++) {
                straw = crush_hash(pg_id, i);
                if (straw > highest_straw) {
                        highest_straw = straw;
                        idx = i;
                }
        }
        LASSERT(idx < count);

        return idx;
}

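/*
 * Usage sketch (hypothetical name and stripe count, illustrative only):
 * each name hashes to a placement group, and every stripe draws a "straw"
 * from crush_hash(pg_id, stripe); the highest straw wins. Because a straw
 * depends only on (pg_id, stripe), growing a directory from 4 to 5 stripes
 * remaps only the roughly 1/5 of names whose straw for the new stripe is
 * now the highest.
 *
 *        unsigned int idx = lmv_hash_crush(4, "data.bin", 8, true);
 *        // idx is stable for a given name until the stripe count changes
 */
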
/* directory layout may change in three ways:
 * 1. directory migration: in its LMV, source stripes are appended after
 *    target stripes, \a migrate_hash is the source hash type, and
 *    \a migrate_offset is the target stripe count.
 * 2. directory split: \a migrate_hash is the hash type before the split,
 *    \a migrate_offset is the stripe count before the split.
 * 3. directory merge: \a migrate_hash is the hash type after the merge,
 *    \a migrate_offset is the stripe count after the merge.
 */
static inline int
__lmv_name_to_stripe_index(__u32 hash_type, __u32 stripe_count,
                           __u32 migrate_hash, __u32 migrate_offset,
                           const char *name, int namelen, bool new_layout)
{
        __u32 saved_hash = hash_type;
        __u32 saved_count = stripe_count;
        int stripe_index = 0;

        LASSERT(namelen > 0);
        LASSERT(stripe_count > 0);

        if (lmv_hash_is_splitting(hash_type)) {
                if (!new_layout) {
                        hash_type = migrate_hash;
                        stripe_count = migrate_offset;
                }
        } else if (lmv_hash_is_merging(hash_type)) {
                if (new_layout) {
                        hash_type = migrate_hash;
                        stripe_count = migrate_offset;
                }
        } else if (lmv_hash_is_migrating(hash_type)) {
                if (new_layout) {
                        stripe_count = migrate_offset;
                } else {
                        hash_type = migrate_hash;
                        stripe_count -= migrate_offset;
                }
        }

        if (stripe_count > 1) {
                switch (hash_type & LMV_HASH_TYPE_MASK) {
                case LMV_HASH_TYPE_ALL_CHARS:
                        stripe_index = lmv_hash_all_chars(stripe_count, name,
                                                          namelen);
                        break;
                case LMV_HASH_TYPE_FNV_1A_64:
                        stripe_index = lmv_hash_fnv1a(stripe_count, name,
                                                      namelen);
                        break;
                case LMV_HASH_TYPE_CRUSH:
                        stripe_index = lmv_hash_crush(stripe_count, name,
                                                      namelen, false);
                        break;
                case LMV_HASH_TYPE_CRUSH2:
                        stripe_index = lmv_hash_crush(stripe_count, name,
                                                      namelen, true);
                        break;
                default:
                        return -EBADFD;
                }
        }

        LASSERT(stripe_index < stripe_count);

        if (!new_layout && lmv_hash_is_migrating(saved_hash))
                stripe_index += migrate_offset;

        LASSERT(stripe_index < saved_count);

        CDEBUG(D_INFO, "name %.*s hash=%#x/%#x idx=%d/%u/%u under %s layout\n",
               namelen, name, saved_hash, migrate_hash, stripe_index,
               saved_count, migrate_offset, new_layout ? "new" : "old");

        return stripe_index;
}

static inline int lmv_name_to_stripe_index(struct lmv_mds_md_v1 *lmv,
                                           const char *name, int namelen)
{
        if (lmv->lmv_magic == LMV_MAGIC_V1 ||
            lmv->lmv_magic == LMV_MAGIC_STRIPE)
                return __lmv_name_to_stripe_index(lmv->lmv_hash_type,
                                                  lmv->lmv_stripe_count,
                                                  lmv->lmv_migrate_hash,
                                                  lmv->lmv_migrate_offset,
                                                  name, namelen, true);

        if (lmv->lmv_magic == cpu_to_le32(LMV_MAGIC_V1) ||
            lmv->lmv_magic == cpu_to_le32(LMV_MAGIC_STRIPE))
                return __lmv_name_to_stripe_index(
                                        le32_to_cpu(lmv->lmv_hash_type),
                                        le32_to_cpu(lmv->lmv_stripe_count),
                                        le32_to_cpu(lmv->lmv_migrate_hash),
                                        le32_to_cpu(lmv->lmv_migrate_offset),
                                        name, namelen, true);

        return -EINVAL;
}

static inline int lmv_name_to_stripe_index_old(struct lmv_mds_md_v1 *lmv,
                                               const char *name, int namelen)
{
        if (lmv->lmv_magic == LMV_MAGIC_V1 ||
            lmv->lmv_magic == LMV_MAGIC_STRIPE)
                return __lmv_name_to_stripe_index(lmv->lmv_hash_type,
                                                  lmv->lmv_stripe_count,
                                                  lmv->lmv_migrate_hash,
                                                  lmv->lmv_migrate_offset,
                                                  name, namelen, false);

        if (lmv->lmv_magic == cpu_to_le32(LMV_MAGIC_V1) ||
            lmv->lmv_magic == cpu_to_le32(LMV_MAGIC_STRIPE))
                return __lmv_name_to_stripe_index(
                                        le32_to_cpu(lmv->lmv_hash_type),
                                        le32_to_cpu(lmv->lmv_stripe_count),
                                        le32_to_cpu(lmv->lmv_migrate_hash),
                                        le32_to_cpu(lmv->lmv_migrate_offset),
                                        name, namelen, false);

        return -EINVAL;
}

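/*
 * Usage sketch (hypothetical field values, CPU byte order, illustrative
 * only): while a directory is migrating, an entry may live under either
 * layout, so callers can resolve a name against both the new and the old
 * layout. Here the target layout is 4 CRUSH2 stripes and the source layout
 * is 2 FNV-1a stripes appended after them:
 *
 *        struct lmv_mds_md_v1 lmv = {
 *                .lmv_magic = LMV_MAGIC_V1,
 *                .lmv_stripe_count = 6,
 *                .lmv_hash_type = LMV_HASH_TYPE_CRUSH2 |
 *                                 LMV_HASH_FLAG_MIGRATION,
 *                .lmv_migrate_offset = 4,
 *                .lmv_migrate_hash = LMV_HASH_TYPE_FNV_1A_64,
 *        };
 *        int new_idx = lmv_name_to_stripe_index(&lmv, "foo", 3);
 *        int old_idx = lmv_name_to_stripe_index_old(&lmv, "foo", 3);
 *        // new_idx falls in [0, 4), old_idx in [4, 6); -EINVAL on bad magic
 */
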
static inline bool lmv_user_magic_supported(__u32 lum_magic)
{
        return lum_magic == LMV_USER_MAGIC ||
               lum_magic == LMV_USER_MAGIC_SPECIFIC ||
               lum_magic == LMV_MAGIC_FOREIGN;
}

#define LMV_DEBUG(mask, lmv, msg)                                             \
        CDEBUG_LIMIT(mask,                                                    \
               "%s LMV: magic=%#x count=%u index=%u hash=%s:%#x version=%u migrate_offset=%u migrate_hash=%s:%x pool=%.*s\n",\
               msg, (lmv)->lmv_magic, (lmv)->lmv_stripe_count,                \
               (lmv)->lmv_master_mdt_index,                                   \
               lmv_is_known_hash_type((lmv)->lmv_hash_type) ?                 \
                mdt_hash_name[(lmv)->lmv_hash_type & LMV_HASH_TYPE_MASK] :    \
                "invalid", (lmv)->lmv_hash_type,                              \
               (lmv)->lmv_layout_version, (lmv)->lmv_migrate_offset,          \
               lmv_is_known_hash_type((lmv)->lmv_migrate_hash) ?              \
                mdt_hash_name[(lmv)->lmv_migrate_hash & LMV_HASH_TYPE_MASK] : \
                "invalid", (lmv)->lmv_migrate_hash,                           \
               LOV_MAXPOOLNAME, (lmv)->lmv_pool_name)

/* master LMV is sane */
static inline bool lmv_is_sane(const struct lmv_mds_md_v1 *lmv)
{
        if (!lmv)
                return false;

        if (le32_to_cpu(lmv->lmv_magic) != LMV_MAGIC_V1)
                goto insane;

        if (le32_to_cpu(lmv->lmv_stripe_count) == 0)
                goto insane;

        if (!lmv_is_sane_hash_type(le32_to_cpu(lmv->lmv_hash_type)))
                goto insane;

        return true;
insane:
        LMV_DEBUG(D_ERROR, lmv, "unknown layout");
        return false;
}

/* LMV can be either master or stripe LMV */
static inline bool lmv_is_sane2(const struct lmv_mds_md_v1 *lmv)
{
        if (!lmv)
                return false;

        if (le32_to_cpu(lmv->lmv_magic) != LMV_MAGIC_V1 &&
            le32_to_cpu(lmv->lmv_magic) != LMV_MAGIC_STRIPE)
                goto insane;

        if (le32_to_cpu(lmv->lmv_stripe_count) == 0)
                goto insane;

        if (!lmv_is_sane_hash_type(le32_to_cpu(lmv->lmv_hash_type)))
                goto insane;

        return true;
insane:
        LMV_DEBUG(D_ERROR, lmv, "unknown layout");
        return false;
}

static inline bool lmv_is_splitting(const struct lmv_mds_md_v1 *lmv)
{
        if (!lmv_is_sane2(lmv))
                return false;

        return lmv_hash_is_splitting(cpu_to_le32(lmv->lmv_hash_type));
}

static inline bool lmv_is_merging(const struct lmv_mds_md_v1 *lmv)
{
        if (!lmv_is_sane2(lmv))
                return false;

        return lmv_hash_is_merging(cpu_to_le32(lmv->lmv_hash_type));
}

static inline bool lmv_is_migrating(const struct lmv_mds_md_v1 *lmv)
{
        if (!lmv_is_sane(lmv))
                return false;

        return lmv_hash_is_migrating(cpu_to_le32(lmv->lmv_hash_type));
}

static inline bool lmv_is_restriping(const struct lmv_mds_md_v1 *lmv)
{
        if (!lmv_is_sane2(lmv))
                return false;

        return lmv_hash_is_splitting(cpu_to_le32(lmv->lmv_hash_type)) ||
               lmv_hash_is_merging(cpu_to_le32(lmv->lmv_hash_type));
}

static inline bool lmv_is_layout_changing(const struct lmv_mds_md_v1 *lmv)
{
        if (!lmv_is_sane2(lmv))
                return false;

        return lmv_hash_is_splitting(cpu_to_le32(lmv->lmv_hash_type)) ||
               lmv_hash_is_merging(cpu_to_le32(lmv->lmv_hash_type)) ||
               lmv_hash_is_migrating(cpu_to_le32(lmv->lmv_hash_type));
}

static inline bool lmv_is_fixed(const struct lmv_mds_md_v1 *lmv)
{
        return cpu_to_le32(lmv->lmv_hash_type) & LMV_HASH_FLAG_FIXED;
}

#endif