4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License version 2 for more details. A copy is
14 * included in the COPYING file that accompanied this code.
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.gnu.org/licenses/gpl-2.0.html
23 * Copyright 2009 Sun Microsystems, Inc. All rights reserved
24 * Use is subject to license terms.
26 * Copyright (c) 2012, 2017, Intel Corporation.
29 * lustre/lod/lod_lov.c
31 * A set of helpers to maintain Logical Object Volume (LOV)
32 * Extended Attribute (EA) and known OST targets
34 * Author: Alex Zhuravlev <alexey.zhuravlev@intel.com>
37 #define DEBUG_SUBSYSTEM S_MDS
39 #include <obd_class.h>
40 #include <lustre_lfsck.h>
41 #include <lustre_lmv.h>
42 #include <lustre_swab.h>
44 #include "lod_internal.h"
47 * Increase reference count on the target table.
49 * Increase the reference count on the target table to prevent racing with
50 * target addition/deletion. Any function that expects the table to remain
51 * stationary must take a ref.
53 * \param[in] ltd target table (lod_ost_descs or lod_mdt_descs)
55 void lod_getref(struct lod_tgt_descs *ltd)
57 down_read(&ltd->ltd_rw_sem);
58 mutex_lock(&ltd->ltd_mutex);
60 mutex_unlock(&ltd->ltd_mutex);
64 * Decrease reference count on the target table.
66 * Companion of lod_getref() to release a reference on the target table.
67 * If this is the last reference and the OST entry was scheduled for deletion,
68 * the descriptor is removed from the table.
70 * \param[in] lod LOD device from which we release a reference
71 * \param[in] ltd target table (lod_ost_descs or lod_mdt_descs)
73 void lod_putref(struct lod_device *lod, struct lod_tgt_descs *ltd)
75 mutex_lock(&ltd->ltd_mutex);
77 if (ltd->ltd_refcount == 0 && ltd->ltd_death_row) {
78 struct lod_tgt_desc *tgt_desc, *tmp;
79 struct list_head kill;
82 CDEBUG(D_CONFIG, "destroying %d ltd desc\n",
85 INIT_LIST_HEAD(&kill);
87 cfs_foreach_bit(ltd->ltd_tgt_bitmap, idx) {
88 tgt_desc = LTD_TGT(ltd, idx);
91 if (!tgt_desc->ltd_reap)
94 list_add(&tgt_desc->ltd_kill, &kill);
95 LTD_TGT(ltd, idx) = NULL;
96 /* FIXME: only support OST pool for now */
97 if (ltd == &lod->lod_ost_descs) {
98 lod_ost_pool_remove(&lod->lod_pool_info, idx);
99 if (tgt_desc->ltd_active)
100 lod->lod_desc.ld_active_tgt_count--;
103 cfs_bitmap_clear(ltd->ltd_tgt_bitmap, idx);
104 ltd->ltd_death_row--;
106 mutex_unlock(&ltd->ltd_mutex);
107 up_read(&ltd->ltd_rw_sem);
109 list_for_each_entry_safe(tgt_desc, tmp, &kill, ltd_kill) {
111 list_del(&tgt_desc->ltd_kill);
112 if (ltd == &lod->lod_ost_descs) {
113 /* remove from QoS structures */
114 rc = qos_del_tgt(lod, tgt_desc);
116 CERROR("%s: qos_del_tgt(%s) failed:"
118 lod2obd(lod)->obd_name,
119 obd_uuid2str(&tgt_desc->ltd_uuid),
122 rc = obd_disconnect(tgt_desc->ltd_exp);
124 CERROR("%s: failed to disconnect %s: rc = %d\n",
125 lod2obd(lod)->obd_name,
126 obd_uuid2str(&tgt_desc->ltd_uuid), rc);
127 OBD_FREE_PTR(tgt_desc);
130 mutex_unlock(&ltd->ltd_mutex);
131 up_read(&ltd->ltd_rw_sem);
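/*
 * A minimal usage sketch of the getref/putref pattern above (assuming a
 * caller with "struct lod_device *lod" and "int idx" in scope), modeled on
 * the iteration loops used elsewhere in this file:
 *
 *	lod_getref(&lod->lod_ost_descs);
 *	cfs_foreach_bit(lod->lod_ost_descs.ltd_tgt_bitmap, idx) {
 *		struct lod_tgt_desc *tgt = LTD_TGT(&lod->lod_ost_descs, idx);
 *
 *		... inspect tgt; the table cannot change under us ...
 *	}
 *	lod_putref(lod, &lod->lod_ost_descs);
 */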
136 * Expand size of target table.
138 * When the target table is full, we have to extend the table. To do so,
139 * we allocate new memory with some reserve, move data from the old table
140 * to the new one and release memory consumed by the old table.
141 * Notice we take ltd_rw_sem exclusively to ensure atomic switch.
143 * \param[in] ltd target table
144 * \param[in] newsize new size of the table
146 * \retval 0 on success
147 * \retval -ENOMEM if reallocation failed
149 static int ltd_bitmap_resize(struct lod_tgt_descs *ltd, __u32 newsize)
151 struct cfs_bitmap *new_bitmap, *old_bitmap = NULL;
155 /* take the table's rw_sem for write: relocating the array requires
156 * exclusive access */
158 down_write(&ltd->ltd_rw_sem);
159 if (newsize <= ltd->ltd_tgts_size)
160 /* someone else has already resized the array */
163 /* allocate new bitmap */
164 new_bitmap = CFS_ALLOCATE_BITMAP(newsize);
166 GOTO(out, rc = -ENOMEM);
168 if (ltd->ltd_tgts_size > 0) {
169 /* the bitmap already exists, we need
170 * to copy data from old one */
171 cfs_bitmap_copy(new_bitmap, ltd->ltd_tgt_bitmap);
172 old_bitmap = ltd->ltd_tgt_bitmap;
175 ltd->ltd_tgts_size = newsize;
176 ltd->ltd_tgt_bitmap = new_bitmap;
179 CFS_FREE_BITMAP(old_bitmap);
181 CDEBUG(D_CONFIG, "tgt size: %d\n", ltd->ltd_tgts_size);
185 up_write(&ltd->ltd_rw_sem);
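/*
 * A sketch of the intended calling pattern (see lod_add_device() below):
 * the caller must drop its table reference first, because ltd_bitmap_resize()
 * takes ltd_rw_sem for write, and the table size grows in powers of two:
 *
 *	newsize = max(ltd->ltd_tgts_size, (__u32)2);
 *	while (newsize < index + 1)
 *		newsize = newsize << 1;
 *
 *	lod_putref(lod, ltd);
 *	rc = ltd_bitmap_resize(ltd, newsize);
 */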
190 * Connect LOD to a new OSP and add it to the target table.
192 * Connect to the OSP device passed, initialize all the internal
193 * structures related to the device and add it to the target table.
195 * \param[in] env execution environment for this thread
196 * \param[in] lod LOD device to be connected to the new OSP
197 * \param[in] osp name of the OSP device to be added
198 * \param[in] index index of the new target
199 * \param[in] gen target's generation number
200 * \param[in] tgt_index OSP's group
201 * \param[in] type type of device (mdc or osc)
202 * \param[in] active state of OSP: 0 - inactive, 1 - active
204 * \retval 0 if added successfully
205 * \retval negative error number on failure
207 int lod_add_device(const struct lu_env *env, struct lod_device *lod,
208 char *osp, unsigned index, unsigned gen, int tgt_index,
209 char *type, int active)
211 struct obd_connect_data *data = NULL;
212 struct obd_export *exp = NULL;
213 struct obd_device *obd;
214 struct lu_device *lu_dev;
215 struct dt_device *dt_dev;
217 struct lod_tgt_desc *tgt_desc;
218 struct lod_tgt_descs *ltd;
219 struct lustre_cfg *lcfg;
220 struct obd_uuid obd_uuid;
225 CDEBUG(D_CONFIG, "osp:%s idx:%d gen:%d\n", osp, index, gen);
228 CERROR("request to add OBD %s with invalid generation: %d\n",
233 obd_str2uuid(&obd_uuid, osp);
235 obd = class_find_client_obd(&obd_uuid, LUSTRE_OSP_NAME,
236 &lod->lod_dt_dev.dd_lu_dev.ld_obd->obd_uuid);
238 CERROR("can't find %s device\n", osp);
242 LASSERT(obd->obd_lu_dev != NULL);
243 LASSERT(obd->obd_lu_dev->ld_site == lod->lod_dt_dev.dd_lu_dev.ld_site);
245 lu_dev = obd->obd_lu_dev;
246 dt_dev = lu2dt_dev(lu_dev);
250 GOTO(out_cleanup, rc = -ENOMEM);
252 data->ocd_connect_flags = OBD_CONNECT_INDEX | OBD_CONNECT_VERSION;
253 data->ocd_version = LUSTRE_VERSION_CODE;
254 data->ocd_index = index;
256 if (strcmp(LUSTRE_OSC_NAME, type) == 0) {
258 data->ocd_connect_flags |= OBD_CONNECT_AT |
261 #ifdef HAVE_LRU_RESIZE_SUPPORT
262 OBD_CONNECT_LRU_RESIZE |
265 OBD_CONNECT_REQPORTAL |
266 OBD_CONNECT_SKIP_ORPHAN |
268 OBD_CONNECT_LVB_TYPE |
269 OBD_CONNECT_VERSION |
270 OBD_CONNECT_PINGLESS |
272 OBD_CONNECT_BULK_MBITS;
274 data->ocd_group = tgt_index;
275 ltd = &lod->lod_ost_descs;
277 struct obd_import *imp = obd->u.cli.cl_import;
280 data->ocd_ibits_known = MDS_INODELOCK_UPDATE;
281 data->ocd_connect_flags |= OBD_CONNECT_ACL |
283 OBD_CONNECT_MDS_MDS |
288 OBD_CONNECT_BULK_MBITS;
289 spin_lock(&imp->imp_lock);
290 imp->imp_server_timeout = 1;
291 spin_unlock(&imp->imp_lock);
292 imp->imp_client->cli_request_portal = OUT_PORTAL;
293 CDEBUG(D_OTHER, "%s: Set 'mds' portal and timeout\n",
295 ltd = &lod->lod_mdt_descs;
298 rc = obd_connect(env, &exp, obd, &obd->obd_uuid, data, NULL);
301 CERROR("%s: cannot connect to next dev %s (%d)\n",
302 obd->obd_name, osp, rc);
303 GOTO(out_cleanup, rc);
306 /* Allocate ost descriptor and fill it */
307 OBD_ALLOC_PTR(tgt_desc);
309 GOTO(out_conn, rc = -ENOMEM);
311 tgt_desc->ltd_tgt = dt_dev;
312 tgt_desc->ltd_exp = exp;
313 tgt_desc->ltd_uuid = obd->u.cli.cl_target_uuid;
314 tgt_desc->ltd_gen = gen;
315 tgt_desc->ltd_index = index;
316 tgt_desc->ltd_active = active;
319 if (index >= ltd->ltd_tgts_size) {
320 /* we have to increase the size of the lod_osts array */
323 newsize = max(ltd->ltd_tgts_size, (__u32)2);
324 while (newsize < index + 1)
325 newsize = newsize << 1;
327 /* ltd_bitmap_resize() needs ltd_rw_sem for write,
328 * which we currently hold for read via the reference */
329 lod_putref(lod, ltd);
331 rc = ltd_bitmap_resize(ltd, newsize);
338 mutex_lock(&ltd->ltd_mutex);
340 if (cfs_bitmap_check(ltd->ltd_tgt_bitmap, index)) {
341 CERROR("%s: device %d is registered already\n", obd->obd_name,
343 GOTO(out_mutex, rc = -EEXIST);
346 if (ltd->ltd_tgt_idx[index / TGT_PTRS_PER_BLOCK] == NULL) {
347 OBD_ALLOC_PTR(ltd->ltd_tgt_idx[index / TGT_PTRS_PER_BLOCK]);
348 if (ltd->ltd_tgt_idx[index / TGT_PTRS_PER_BLOCK] == NULL) {
349 CERROR("can't allocate index to add %s\n",
351 GOTO(out_mutex, rc = -ENOMEM);
356 /* pool and qos are not supported for MDS stack yet */
357 rc = lod_ost_pool_add(&lod->lod_pool_info, index,
360 CERROR("%s: can't set up pool, failed with %d\n",
365 rc = qos_add_tgt(lod, tgt_desc);
367 CERROR("%s: qos_add_tgt failed with %d\n",
372 /* The new OST is now a full citizen */
373 if (index >= lod->lod_desc.ld_tgt_count)
374 lod->lod_desc.ld_tgt_count = index + 1;
376 lod->lod_desc.ld_active_tgt_count++;
379 LTD_TGT(ltd, index) = tgt_desc;
380 cfs_bitmap_set(ltd->ltd_tgt_bitmap, index);
382 mutex_unlock(&ltd->ltd_mutex);
383 lod_putref(lod, ltd);
385 if (lod->lod_recovery_completed)
386 lu_dev->ld_ops->ldo_recovery_complete(env, lu_dev);
388 if (!for_ost && lod->lod_initialized) {
389 rc = lod_sub_init_llog(env, lod, tgt_desc->ltd_tgt);
391 CERROR("%s: cannot start llog on %s:rc = %d\n",
392 lod2obd(lod)->obd_name, osp, rc);
397 rc = lfsck_add_target(env, lod->lod_child, dt_dev, exp, index, for_ost);
399 CERROR("Fail to add LFSCK target: name = %s, type = %s, "
400 "index = %u, rc = %d\n", osp, type, index, rc);
401 GOTO(out_fini_llog, rc);
405 lod_sub_fini_llog(env, tgt_desc->ltd_tgt,
406 tgt_desc->ltd_recovery_thread);
409 mutex_lock(&ltd->ltd_mutex);
411 if (!for_ost && LTD_TGT(ltd, index)->ltd_recovery_thread != NULL) {
412 struct ptlrpc_thread *thread;
414 thread = LTD_TGT(ltd, index)->ltd_recovery_thread;
415 OBD_FREE_PTR(thread);
418 cfs_bitmap_clear(ltd->ltd_tgt_bitmap, index);
419 LTD_TGT(ltd, index) = NULL;
421 lod_ost_pool_remove(&lod->lod_pool_info, index);
424 mutex_unlock(&ltd->ltd_mutex);
425 lod_putref(lod, ltd);
428 OBD_FREE_PTR(tgt_desc);
432 /* XXX OSP needs us to send down LCFG_CLEANUP because it uses
433 * objects from the MDT stack. See LU-7184. */
434 lcfg = &lod_env_info(env)->lti_lustre_cfg;
435 memset(lcfg, 0, sizeof(*lcfg));
436 lcfg->lcfg_version = LUSTRE_CFG_VERSION;
437 lcfg->lcfg_command = LCFG_CLEANUP;
438 lu_dev->ld_ops->ldo_process_config(env, lu_dev, lcfg);
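/*
 * A hypothetical call, for illustration only (the OSP device name, index and
 * generation below are made up; real values come from the configuration log):
 *
 *	rc = lod_add_device(env, lod, "lustre-OST0000-osc-MDT0000",
 *			    0, 1, 0, LUSTRE_OSC_NAME, 1);
 *
 * i.e. OST index 0, generation 1, group 0, an OSC-type target, active.
 */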
444 * Schedule target removal from the target table.
446 * Mark the device as dead. The device is not removed here because it may
447 * still be in use. The device will be removed in lod_putref() when the
448 * last reference is released.
450 * \param[in] env execution environment for this thread
451 * \param[in] lod LOD device the target table belongs to
452 * \param[in] ltd target table
453 * \param[in] idx index of the target
454 * \param[in] for_ost type of the target: 0 - MDT, 1 - OST
456 static void __lod_del_device(const struct lu_env *env, struct lod_device *lod,
457 struct lod_tgt_descs *ltd, unsigned idx,
460 LASSERT(LTD_TGT(ltd, idx));
462 lfsck_del_target(env, lod->lod_child, LTD_TGT(ltd, idx)->ltd_tgt,
465 if (!for_ost && LTD_TGT(ltd, idx)->ltd_recovery_thread != NULL) {
466 struct ptlrpc_thread *thread;
468 thread = LTD_TGT(ltd, idx)->ltd_recovery_thread;
469 OBD_FREE_PTR(thread);
472 if (LTD_TGT(ltd, idx)->ltd_reap == 0) {
473 LTD_TGT(ltd, idx)->ltd_reap = 1;
474 ltd->ltd_death_row++;
479 * Schedule removal of all the targets from the given target table.
481 * See more details in the description for __lod_del_device()
483 * \param[in] env execution environment for this thread
484 * \param[in] lod LOD device the target table belongs to
485 * \param[in] ltd target table
486 * \param[in] for_ost type of the target: MDT or OST
490 int lod_fini_tgt(const struct lu_env *env, struct lod_device *lod,
491 struct lod_tgt_descs *ltd, bool for_ost)
495 if (ltd->ltd_tgts_size <= 0)
498 mutex_lock(&ltd->ltd_mutex);
499 cfs_foreach_bit(ltd->ltd_tgt_bitmap, idx)
500 __lod_del_device(env, lod, ltd, idx, for_ost);
501 mutex_unlock(&ltd->ltd_mutex);
502 lod_putref(lod, ltd);
503 CFS_FREE_BITMAP(ltd->ltd_tgt_bitmap);
504 for (idx = 0; idx < TGT_PTRS; idx++) {
505 if (ltd->ltd_tgt_idx[idx])
506 OBD_FREE_PTR(ltd->ltd_tgt_idx[idx]);
508 ltd->ltd_tgts_size = 0;
513 * Remove device by name.
515 * Remove a device identified by \a osp from the target table. Since
516 * the device can still be in use, the real deletion happens in lod_putref().
518 * \param[in] env execution environment for this thread
519 * \param[in] lod LOD device the target table belongs to
520 * \param[in] ltd target table
521 * \param[in] osp name of OSP device to be removed
522 * \param[in] idx index of the target
523 * \param[in] gen generation number, not used currently
524 * \param[in] for_ost type of the target: 0 - MDT, 1 - OST
526 * \retval 0 if the device was scheduled for removal
527 * \retval -EINVAL if no device was found
529 int lod_del_device(const struct lu_env *env, struct lod_device *lod,
530 struct lod_tgt_descs *ltd, char *osp, unsigned idx,
531 unsigned gen, bool for_ost)
533 struct obd_device *obd;
535 struct obd_uuid uuid;
538 CDEBUG(D_CONFIG, "osp:%s idx:%d gen:%d\n", osp, idx, gen);
540 obd_str2uuid(&uuid, osp);
542 obd = class_find_client_obd(&uuid, LUSTRE_OSP_NAME,
543 &lod->lod_dt_dev.dd_lu_dev.ld_obd->obd_uuid);
545 CERROR("can't find %s device\n", osp);
550 CERROR("%s: request to remove OBD %s with invalid generation %d"
551 "\n", obd->obd_name, osp, gen);
555 obd_str2uuid(&uuid, osp);
558 mutex_lock(&ltd->ltd_mutex);
559 /* check that the index is allocated in the bitmap */
560 if (!cfs_bitmap_check(ltd->ltd_tgt_bitmap, idx) ||
561 !LTD_TGT(ltd, idx)) {
562 CERROR("%s: device %d is not set up\n", obd->obd_name, idx);
563 GOTO(out, rc = -EINVAL);
566 /* check that the UUID matches */
567 if (!obd_uuid_equals(&uuid, &LTD_TGT(ltd, idx)->ltd_uuid)) {
568 CERROR("%s: LOD target UUID %s at index %d does not match %s\n",
569 obd->obd_name, obd_uuid2str(&LTD_TGT(ltd,idx)->ltd_uuid),
571 GOTO(out, rc = -EINVAL);
574 __lod_del_device(env, lod, ltd, idx, for_ost);
577 mutex_unlock(&ltd->ltd_mutex);
578 lod_putref(lod, ltd);
583 * Resize per-thread storage to hold the specified size.
585 * A helper function to resize per-thread temporary storage. This storage
586 * is used to process LOV/LMV EAs and may be quite large. We do not want to
587 * allocate/release it every time, so instead we put it into the env and
588 * reallocate on demand. The memory is released when the corresponding thread
591 * \param[in] info LOD-specific storage in the environment
592 * \param[in] size new size to grow the buffer to
594 * \retval 0 on success, -ENOMEM if reallocation failed
596 int lod_ea_store_resize(struct lod_thread_info *info, size_t size)
598 __u32 round = size_roundup_power2(size);
601 lov_mds_md_size(LOV_MAX_STRIPE_COUNT, LOV_MAGIC_V3));
602 if (info->lti_ea_store) {
603 LASSERT(info->lti_ea_store_size);
604 LASSERT(info->lti_ea_store_size < round);
605 CDEBUG(D_INFO, "EA store size %d is not enough, need %d\n",
606 info->lti_ea_store_size, round);
607 OBD_FREE_LARGE(info->lti_ea_store, info->lti_ea_store_size);
608 info->lti_ea_store = NULL;
609 info->lti_ea_store_size = 0;
612 OBD_ALLOC_LARGE(info->lti_ea_store, round);
613 if (info->lti_ea_store == NULL)
615 info->lti_ea_store_size = round;
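/*
 * A sketch of the intended use, following the -ERANGE path of lod_get_ea()
 * below: query the EA size with an empty buffer, grow the store, and retry:
 *
 *	rc = dt_xattr_get(env, next, &LU_BUF_NULL, name);
 *	if (rc > 0) {
 *		rc = lod_ea_store_resize(info, rc);
 *		if (rc == 0)
 *			... retry dt_xattr_get() with info->lti_ea_store ...
 *	}
 */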
620 static void lod_free_comp_buffer(struct lod_layout_component *entries,
621 __u16 count, __u32 bufsize)
623 struct lod_layout_component *entry;
626 for (i = 0; i < count; i++) {
628 if (entry->llc_pool != NULL)
629 lod_set_pool(&entry->llc_pool, NULL);
630 if (entry->llc_ostlist.op_array)
631 OBD_FREE(entry->llc_ostlist.op_array,
632 entry->llc_ostlist.op_size);
633 LASSERT(entry->llc_stripe == NULL);
634 LASSERT(entry->llc_stripes_allocated == 0);
638 OBD_FREE_LARGE(entries, bufsize);
641 void lod_free_def_comp_entries(struct lod_default_striping *lds)
643 lod_free_comp_buffer(lds->lds_def_comp_entries,
644 lds->lds_def_comp_size_cnt,
646 sizeof(*lds->lds_def_comp_entries) *
647 lds->lds_def_comp_size_cnt));
648 lds->lds_def_comp_entries = NULL;
649 lds->lds_def_comp_cnt = 0;
650 lds->lds_def_striping_is_composite = 0;
651 lds->lds_def_comp_size_cnt = 0;
655 * Resize per-thread storage to hold default striping component entries
657 * A helper function to resize per-thread temporary storage. This storage
658 * is used to hold default LOV/LMV EAs and may be quite large. We do not want
659 * to allocate/release it every time, so instead we put it into the env and
660 * reallocate it on demand. The memory is released when the corresponding
661 * thread is finished.
663 * \param[in,out] lds default striping
664 * \param[in] count new component count to grow the buffer to
666 * \retval 0 on success, -ENOMEM if reallocation failed
668 int lod_def_striping_comp_resize(struct lod_default_striping *lds, __u16 count)
670 struct lod_layout_component *entries;
671 __u32 new = size_roundup_power2(sizeof(*lds->lds_def_comp_entries) *
673 __u32 old = size_roundup_power2(sizeof(*lds->lds_def_comp_entries) *
674 lds->lds_def_comp_size_cnt);
679 OBD_ALLOC_LARGE(entries, new);
683 if (lds->lds_def_comp_entries != NULL) {
684 CDEBUG(D_INFO, "default striping component size %d is not "
685 "enough, need %d\n", old, new);
686 lod_free_def_comp_entries(lds);
689 lds->lds_def_comp_entries = entries;
690 lds->lds_def_comp_size_cnt = count;
695 void lod_free_comp_entries(struct lod_object *lo)
697 if (lo->ldo_mirrors) {
698 OBD_FREE(lo->ldo_mirrors,
699 sizeof(*lo->ldo_mirrors) * lo->ldo_mirror_count);
700 lo->ldo_mirrors = NULL;
701 lo->ldo_mirror_count = 0;
703 lod_free_comp_buffer(lo->ldo_comp_entries,
705 sizeof(*lo->ldo_comp_entries) * lo->ldo_comp_cnt);
706 lo->ldo_comp_entries = NULL;
707 lo->ldo_comp_cnt = 0;
708 lo->ldo_is_composite = 0;
711 int lod_alloc_comp_entries(struct lod_object *lo,
712 int mirror_count, int comp_count)
714 LASSERT(comp_count != 0);
715 LASSERT(lo->ldo_comp_cnt == 0 && lo->ldo_comp_entries == NULL);
717 if (mirror_count > 0) {
718 OBD_ALLOC(lo->ldo_mirrors,
719 sizeof(*lo->ldo_mirrors) * mirror_count);
720 if (!lo->ldo_mirrors)
723 lo->ldo_mirror_count = mirror_count;
726 OBD_ALLOC_LARGE(lo->ldo_comp_entries,
727 sizeof(*lo->ldo_comp_entries) * comp_count);
728 if (lo->ldo_comp_entries == NULL) {
729 OBD_FREE(lo->ldo_mirrors,
730 sizeof(*lo->ldo_mirrors) * mirror_count);
731 lo->ldo_mirror_count = 0;
735 lo->ldo_comp_cnt = comp_count;
739 int lod_fill_mirrors(struct lod_object *lo)
741 struct lod_layout_component *lod_comp;
743 __u16 mirror_id = 0xffff;
747 LASSERT(equi(!lo->ldo_is_composite, lo->ldo_mirror_count == 0));
749 if (!lo->ldo_is_composite)
752 lod_comp = &lo->ldo_comp_entries[0];
753 for (i = 0; i < lo->ldo_comp_cnt; i++, lod_comp++) {
754 int stale = !!(lod_comp->llc_flags & LCME_FL_STALE);
755 int preferred = !!(lod_comp->llc_flags & LCME_FL_PREF_WR);
757 if (mirror_id_of(lod_comp->llc_id) == mirror_id) {
758 lo->ldo_mirrors[mirror_idx].lme_stale |= stale;
759 lo->ldo_mirrors[mirror_idx].lme_primary |= preferred;
760 lo->ldo_mirrors[mirror_idx].lme_end = i;
766 if (mirror_idx >= lo->ldo_mirror_count)
769 mirror_id = mirror_id_of(lod_comp->llc_id);
771 lo->ldo_mirrors[mirror_idx].lme_id = mirror_id;
772 lo->ldo_mirrors[mirror_idx].lme_stale = stale;
773 lo->ldo_mirrors[mirror_idx].lme_primary = preferred;
774 lo->ldo_mirrors[mirror_idx].lme_start = i;
775 lo->ldo_mirrors[mirror_idx].lme_end = i;
777 if (mirror_idx != lo->ldo_mirror_count - 1)
784 * Generate on-disk lov_mds_md structure for each layout component based on
785 * the information in lod_object->ldo_comp_entries[i].
787 * \param[in] env execution environment for this thread
788 * \param[in] lo LOD object
789 * \param[in] comp_idx index of ldo_comp_entries
790 * \param[in] lmm buffer to contain the on-disk lov_mds_md
791 * \param[in,out] lmm_size buffer size on input, lmm size on output
792 * \param[in] is_dir whether to generate the LOV EA for a directory or
793 * a file; for a directory the stripe info comes from the
794 * default stripe template, which is collected in lod_ah_init(),
795 * either from the parent object or the root object; for
796 * a file it comes from the \a lo object
798 * \retval 0 if the on-disk structure is created successfully
799 * \retval negative error number on failure
801 static int lod_gen_component_ea(const struct lu_env *env,
802 struct lod_object *lo, int comp_idx,
803 struct lov_mds_md *lmm, int *lmm_size,
806 struct lod_thread_info *info = lod_env_info(env);
807 const struct lu_fid *fid = lu_object_fid(&lo->ldo_obj.do_lu);
808 struct lod_device *lod;
809 struct lov_ost_data_v1 *objs;
810 struct lod_layout_component *lod_comp;
819 &lo->ldo_def_striping->lds_def_comp_entries[comp_idx];
821 lod_comp = &lo->ldo_comp_entries[comp_idx];
823 magic = lod_comp->llc_pool != NULL ? LOV_MAGIC_V3 : LOV_MAGIC_V1;
824 if (lod_comp->llc_pattern == 0) /* default striping */
825 lod_comp->llc_pattern = LOV_PATTERN_RAID0;
827 lmm->lmm_magic = cpu_to_le32(magic);
828 lmm->lmm_pattern = cpu_to_le32(lod_comp->llc_pattern);
829 fid_to_lmm_oi(fid, &lmm->lmm_oi);
830 if (OBD_FAIL_CHECK(OBD_FAIL_LFSCK_BAD_LMMOI))
831 lmm->lmm_oi.oi.oi_id++;
832 lmm_oi_cpu_to_le(&lmm->lmm_oi, &lmm->lmm_oi);
834 lmm->lmm_stripe_size = cpu_to_le32(lod_comp->llc_stripe_size);
835 lmm->lmm_stripe_count = cpu_to_le16(lod_comp->llc_stripe_count);
837 * for dir and uninstantiated component, lmm_layout_gen stores
838 * default stripe offset.
840 lmm->lmm_layout_gen =
841 (is_dir || !lod_comp_inited(lod_comp)) ?
842 cpu_to_le16(lod_comp->llc_stripe_offset) :
843 cpu_to_le16(lod_comp->llc_layout_gen);
845 if (magic == LOV_MAGIC_V1) {
846 objs = &lmm->lmm_objects[0];
848 struct lov_mds_md_v3 *v3 = (struct lov_mds_md_v3 *)lmm;
849 size_t cplen = strlcpy(v3->lmm_pool_name,
851 sizeof(v3->lmm_pool_name));
852 if (cplen >= sizeof(v3->lmm_pool_name))
854 objs = &v3->lmm_objects[0];
856 stripe_count = lod_comp_entry_stripe_count(lo, lod_comp, is_dir);
857 if (!is_dir && lo->ldo_is_composite)
858 lod_comp_shrink_stripe_count(lod_comp, &stripe_count);
860 if (is_dir || lod_comp->llc_pattern & LOV_PATTERN_F_RELEASED)
863 /* generate ost_idx of this component stripe */
864 lod = lu2lod_dev(lo->ldo_obj.do_lu.lo_dev);
865 for (i = 0; i < stripe_count; i++) {
866 struct dt_object *object;
867 __u32 ost_idx = (__u32)-1UL;
868 int type = LU_SEQ_RANGE_OST;
870 if (lod_comp->llc_stripe && lod_comp->llc_stripe[i]) {
871 object = lod_comp->llc_stripe[i];
872 /* instantiated component */
873 info->lti_fid = *lu_object_fid(&object->do_lu);
875 if (OBD_FAIL_CHECK(OBD_FAIL_LFSCK_MULTIPLE_REF) &&
877 if (cfs_fail_val == 0)
878 cfs_fail_val = info->lti_fid.f_oid;
880 info->lti_fid.f_oid = cfs_fail_val;
883 rc = fid_to_ostid(&info->lti_fid, &info->lti_ostid);
886 ostid_cpu_to_le(&info->lti_ostid, &objs[i].l_ost_oi);
887 objs[i].l_ost_gen = cpu_to_le32(0);
888 if (OBD_FAIL_CHECK(OBD_FAIL_MDS_FLD_LOOKUP))
891 rc = lod_fld_lookup(env, lod, &info->lti_fid,
894 CERROR("%s: Can not locate "DFID": rc = %d\n",
895 lod2obd(lod)->obd_name,
896 PFID(&info->lti_fid), rc);
899 } else if (lod_comp->llc_ostlist.op_array &&
900 lod_comp->llc_ostlist.op_count) {
901 /* user specified ost list */
902 ost_idx = lod_comp->llc_ostlist.op_array[i];
905 * for an uninstantiated component or one with no specified OST
906 * list, its l_ost_idx does not matter.
908 objs[i].l_ost_idx = cpu_to_le32(ost_idx);
911 if (lmm_size != NULL)
912 *lmm_size = lov_mds_md_size(stripe_count, magic);
917 * Generate on-disk lov_mds_md structure based on the information in
918 * the lod_object->ldo_comp_entries.
920 * \param[in] env execution environment for this thread
921 * \param[in] lo LOD object
922 * \param[in] lmm buffer to contain the on-disk lov_mds_md
923 * \param[in,out] lmm_size buffer size on input, lmm size on output
924 * \param[in] is_dir whether to generate the LOV EA for a directory or
925 * a file; for a directory the stripe info comes from the
926 * default stripe template, which is collected in lod_ah_init(),
927 * either from the parent object or the root object; for
928 * a file it comes from the \a lo object
930 * \retval 0 if the on-disk structure is created successfully
931 * \retval negative error number on failure
933 int lod_generate_lovea(const struct lu_env *env, struct lod_object *lo,
934 struct lov_mds_md *lmm, int *lmm_size, bool is_dir)
936 struct lov_comp_md_entry_v1 *lcme;
937 struct lov_comp_md_v1 *lcm;
938 struct lod_layout_component *comp_entries;
939 __u16 comp_cnt, mirror_cnt;
941 int i, rc = 0, offset;
945 comp_cnt = lo->ldo_def_striping->lds_def_comp_cnt;
946 mirror_cnt = lo->ldo_def_striping->lds_def_mirror_cnt;
947 comp_entries = lo->ldo_def_striping->lds_def_comp_entries;
949 lo->ldo_def_striping->lds_def_striping_is_composite;
951 comp_cnt = lo->ldo_comp_cnt;
952 mirror_cnt = lo->ldo_mirror_count;
953 comp_entries = lo->ldo_comp_entries;
954 is_composite = lo->ldo_is_composite;
957 LASSERT(lmm_size != NULL);
958 LASSERT(comp_cnt != 0 && comp_entries != NULL);
961 rc = lod_gen_component_ea(env, lo, 0, lmm, lmm_size, is_dir);
965 lcm = (struct lov_comp_md_v1 *)lmm;
966 memset(lcm, 0, sizeof(*lcm));
968 lcm->lcm_magic = cpu_to_le32(LOV_MAGIC_COMP_V1);
969 lcm->lcm_entry_count = cpu_to_le16(comp_cnt);
970 lcm->lcm_mirror_count = cpu_to_le16(mirror_cnt - 1);
971 lcm->lcm_flags = cpu_to_le16(lo->ldo_flr_state);
973 offset = sizeof(*lcm) + sizeof(*lcme) * comp_cnt;
974 LASSERT(offset % sizeof(__u64) == 0);
976 for (i = 0; i < comp_cnt; i++) {
977 struct lod_layout_component *lod_comp;
978 struct lov_mds_md *sub_md;
981 lod_comp = &comp_entries[i];
982 lcme = &lcm->lcm_entries[i];
984 LASSERT(ergo(!is_dir, lod_comp->llc_id != LCME_ID_INVAL));
985 lcme->lcme_id = cpu_to_le32(lod_comp->llc_id);
987 /* component could be uninstantiated */
988 lcme->lcme_flags = cpu_to_le32(lod_comp->llc_flags);
989 lcme->lcme_extent.e_start =
990 cpu_to_le64(lod_comp->llc_extent.e_start);
991 lcme->lcme_extent.e_end =
992 cpu_to_le64(lod_comp->llc_extent.e_end);
993 lcme->lcme_offset = cpu_to_le32(offset);
995 sub_md = (struct lov_mds_md *)((char *)lcm + offset);
996 rc = lod_gen_component_ea(env, lo, i, sub_md, &size, is_dir);
999 lcme->lcme_size = cpu_to_le32(size);
1001 LASSERTF((offset <= *lmm_size) && (offset % sizeof(__u64) == 0),
1002 "offset:%d lmm_size:%d\n", offset, *lmm_size);
1004 lcm->lcm_size = cpu_to_le32(offset);
1005 lcm->lcm_layout_gen = cpu_to_le32(is_dir ? 0 : lo->ldo_layout_gen);
1007 lustre_print_user_md(D_LAYOUT, (struct lov_user_md *)lmm,
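/*
 * Rough sketch of the resulting on-disk layout for the composite case
 * (offsets are relative to the start of the buffer):
 *
 *	+------------------------+  0
 *	| lov_comp_md_v1 (lcm)   |
 *	+------------------------+  sizeof(*lcm)
 *	| lcm_entries[comp_cnt]  |  lcme_offset/lcme_size point below
 *	+------------------------+  sizeof(*lcm) + comp_cnt * sizeof(*lcme)
 *	| lov_mds_md v1/v3 #0    |  filled by lod_gen_component_ea()
 *	| lov_mds_md v1/v3 #1    |
 *	| ...                    |
 *	+------------------------+  lcm_size
 */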
1018 * Fill lti_ea_store buffer in the environment with a value for the given
1019 * EA. The buffer is reallocated if the value doesn't fit.
1021 * \param[in,out] env execution environment for this thread
1022 * .lti_ea_store buffer is filled with EA's value
1023 * \param[in] lo LOD object
1024 * \param[in] name name of the EA
1026 * \retval > 0 if EA is fetched successfully
1027 * \retval 0 if EA is empty
1028 * \retval negative error number on failure
1030 int lod_get_ea(const struct lu_env *env, struct lod_object *lo,
1033 struct lod_thread_info *info = lod_env_info(env);
1034 struct dt_object *next = dt_object_child(&lo->ldo_obj);
1040 if (unlikely(info->lti_ea_store == NULL)) {
1041 /* just to enter the allocation block below */
1045 info->lti_buf.lb_buf = info->lti_ea_store;
1046 info->lti_buf.lb_len = info->lti_ea_store_size;
1047 rc = dt_xattr_get(env, next, &info->lti_buf, name);
1050 /* if object is not striped or inaccessible */
1051 if (rc == -ENODATA || rc == -ENOENT)
1054 if (rc == -ERANGE) {
1055 /* EA doesn't fit, reallocate new buffer */
1056 rc = dt_xattr_get(env, next, &LU_BUF_NULL, name);
1057 if (rc == -ENODATA || rc == -ENOENT)
1063 rc = lod_ea_store_resize(info, rc);
1073 * Verify the target index is present in the current configuration.
1075 * \param[in] md LOD device where the target table is stored
1076 * \param[in] idx target's index
1078 * \retval 0 if the index is present
1079 * \retval -EINVAL if not
1081 static int validate_lod_and_idx(struct lod_device *md, __u32 idx)
1083 if (unlikely(idx >= md->lod_ost_descs.ltd_tgts_size ||
1084 !cfs_bitmap_check(md->lod_ost_bitmap, idx))) {
1085 CERROR("%s: bad idx: %d of %d\n", lod2obd(md)->obd_name, idx,
1086 md->lod_ost_descs.ltd_tgts_size);
1090 if (unlikely(OST_TGT(md, idx) == NULL)) {
1091 CERROR("%s: bad lod_tgt_desc for idx: %d\n",
1092 lod2obd(md)->obd_name, idx);
1096 if (unlikely(OST_TGT(md, idx)->ltd_ost == NULL)) {
1097 CERROR("%s: invalid lod device, for idx: %d\n",
1098 lod2obd(md)->obd_name , idx);
1106 * Instantiate objects for stripes.
1108 * Allocate and initialize LU-objects representing the stripes. The number
1109 * of the stripes (ldo_stripe_count) must be initialized already. The caller
1110 * must ensure nobody else is calling the function on the object at the same
1111 * time. FLDB service must be running to be able to map a FID to the targets
1112 * and find appropriate device representing that target.
1114 * \param[in] env execution environment for this thread
1115 * \param[in,out] lo LOD object
1116 * \param[in] objs an array of IDs to create the objects from
1117 * \param[in] comp_idx index of ldo_comp_entries
1119 * \retval 0 if the objects are instantiated successfully
1120 * \retval negative error number on failure
1122 int lod_initialize_objects(const struct lu_env *env, struct lod_object *lo,
1123 struct lov_ost_data_v1 *objs, int comp_idx)
1125 struct lod_layout_component *lod_comp;
1126 struct lod_thread_info *info = lod_env_info(env);
1127 struct lod_device *md;
1128 struct lu_object *o, *n;
1129 struct lu_device *nd;
1130 struct dt_object **stripe = NULL;
1131 __u32 *ost_indices = NULL;
1137 LASSERT(lo != NULL);
1138 md = lu2lod_dev(lo->ldo_obj.do_lu.lo_dev);
1140 LASSERT(lo->ldo_comp_cnt != 0 && lo->ldo_comp_entries != NULL);
1141 lod_comp = &lo->ldo_comp_entries[comp_idx];
1143 LASSERT(lod_comp->llc_stripe == NULL);
1144 LASSERT(lod_comp->llc_stripe_count > 0);
1145 LASSERT(lod_comp->llc_stripe_size > 0);
1147 stripe_len = lod_comp->llc_stripe_count;
1148 OBD_ALLOC(stripe, sizeof(stripe[0]) * stripe_len);
1151 OBD_ALLOC(ost_indices, sizeof(*ost_indices) * stripe_len);
1153 GOTO(out, rc = -ENOMEM);
1155 for (i = 0; i < lod_comp->llc_stripe_count; i++) {
1156 if (unlikely(lovea_slot_is_dummy(&objs[i])))
1159 ostid_le_to_cpu(&objs[i].l_ost_oi, &info->lti_ostid);
1160 idx = le32_to_cpu(objs[i].l_ost_idx);
1161 rc = ostid_to_fid(&info->lti_fid, &info->lti_ostid, idx);
1164 LASSERTF(fid_is_sane(&info->lti_fid), ""DFID" insane!\n",
1165 PFID(&info->lti_fid));
1166 lod_getref(&md->lod_ost_descs);
1168 rc = validate_lod_and_idx(md, idx);
1169 if (unlikely(rc != 0)) {
1170 lod_putref(md, &md->lod_ost_descs);
1174 nd = &OST_TGT(md,idx)->ltd_ost->dd_lu_dev;
1175 lod_putref(md, &md->lod_ost_descs);
1177 /* In the function below, .hs_keycmp resolves to
1178 * lu_obj_hop_keycmp() */
1179 /* coverity[overrun-buffer-val] */
1180 o = lu_object_find_at(env, nd, &info->lti_fid, NULL);
1182 GOTO(out, rc = PTR_ERR(o));
1184 n = lu_object_locate(o->lo_header, nd->ld_type);
1187 stripe[i] = container_of(n, struct dt_object, do_lu);
1188 ost_indices[i] = idx;
1193 for (i = 0; i < stripe_len; i++)
1194 if (stripe[i] != NULL)
1195 dt_object_put(env, stripe[i]);
1197 OBD_FREE(stripe, sizeof(stripe[0]) * stripe_len);
1198 lod_comp->llc_stripe_count = 0;
1200 OBD_FREE(ost_indices,
1201 sizeof(*ost_indices) * stripe_len);
1203 lod_comp->llc_stripe = stripe;
1204 lod_comp->llc_ost_indices = ost_indices;
1205 lod_comp->llc_stripes_allocated = stripe_len;
1212 * Instantiate objects for striping.
1214 * Parse striping information in \a buf and instantiate the objects
1215 * representing the stripes.
1217 * \param[in] env execution environment for this thread
1218 * \param[in] lo LOD object
1219 * \param[in] buf buffer storing LOV EA to parse
1221 * \retval 0 if parsing and object creation succeed
1222 * \retval negative error number on failure
1224 int lod_parse_striping(const struct lu_env *env, struct lod_object *lo,
1225 const struct lu_buf *buf)
1227 struct lov_mds_md_v1 *lmm;
1228 struct lov_comp_md_v1 *comp_v1 = NULL;
1229 struct lov_ost_data_v1 *objs;
1230 __u32 magic, pattern;
1233 __u16 mirror_cnt = 0;
1237 LASSERT(buf->lb_buf);
1238 LASSERT(buf->lb_len);
1239 LASSERT(mutex_is_locked(&lo->ldo_layout_mutex));
1241 lmm = (struct lov_mds_md_v1 *)buf->lb_buf;
1242 magic = le32_to_cpu(lmm->lmm_magic);
1244 if (magic != LOV_MAGIC_V1 && magic != LOV_MAGIC_V3 &&
1245 magic != LOV_MAGIC_COMP_V1)
1246 GOTO(out, rc = -EINVAL);
1248 lod_free_comp_entries(lo);
1250 if (magic == LOV_MAGIC_COMP_V1) {
1251 comp_v1 = (struct lov_comp_md_v1 *)lmm;
1252 comp_cnt = le16_to_cpu(comp_v1->lcm_entry_count);
1254 GOTO(out, rc = -EINVAL);
1255 lo->ldo_layout_gen = le32_to_cpu(comp_v1->lcm_layout_gen);
1256 lo->ldo_is_composite = 1;
1257 lo->ldo_flr_state = le16_to_cpu(comp_v1->lcm_flags) &
1259 mirror_cnt = le16_to_cpu(comp_v1->lcm_mirror_count) + 1;
1262 lo->ldo_layout_gen = le16_to_cpu(lmm->lmm_layout_gen);
1263 lo->ldo_is_composite = 0;
1266 rc = lod_alloc_comp_entries(lo, mirror_cnt, comp_cnt);
1270 for (i = 0; i < comp_cnt; i++) {
1271 struct lod_layout_component *lod_comp;
1272 struct lu_extent *ext;
1275 lod_comp = &lo->ldo_comp_entries[i];
1276 if (lo->ldo_is_composite) {
1277 offs = le32_to_cpu(comp_v1->lcm_entries[i].lcme_offset);
1278 lmm = (struct lov_mds_md_v1 *)((char *)comp_v1 + offs);
1279 magic = le32_to_cpu(lmm->lmm_magic);
1281 ext = &comp_v1->lcm_entries[i].lcme_extent;
1282 lod_comp->llc_extent.e_start =
1283 le64_to_cpu(ext->e_start);
1284 lod_comp->llc_extent.e_end = le64_to_cpu(ext->e_end);
1285 lod_comp->llc_flags =
1286 le32_to_cpu(comp_v1->lcm_entries[i].lcme_flags);
1288 le32_to_cpu(comp_v1->lcm_entries[i].lcme_id);
1289 if (lod_comp->llc_id == LCME_ID_INVAL)
1290 GOTO(out, rc = -EINVAL);
1292 lod_comp_set_init(lod_comp);
1295 pattern = le32_to_cpu(lmm->lmm_pattern);
1296 if (lov_pattern(pattern) != LOV_PATTERN_RAID0 &&
1297 lov_pattern(pattern) != LOV_PATTERN_MDT)
1298 GOTO(out, rc = -EINVAL);
1300 lod_comp->llc_pattern = pattern;
1301 lod_comp->llc_stripe_size = le32_to_cpu(lmm->lmm_stripe_size);
1302 lod_comp->llc_stripe_count = le16_to_cpu(lmm->lmm_stripe_count);
1303 lod_comp->llc_layout_gen = le16_to_cpu(lmm->lmm_layout_gen);
1305 if (magic == LOV_MAGIC_V3) {
1306 struct lov_mds_md_v3 *v3 = (struct lov_mds_md_v3 *)lmm;
1307 lod_set_pool(&lod_comp->llc_pool, v3->lmm_pool_name);
1308 objs = &v3->lmm_objects[0];
1310 lod_set_pool(&lod_comp->llc_pool, NULL);
1311 objs = &lmm->lmm_objects[0];
1315 * If an uninstantiated template component has a valid l_ost_idx,
1316 * then the user has specified an OST list for this component.
1318 if (!lod_comp_inited(lod_comp)) {
1321 if (objs[0].l_ost_idx != (__u32)-1UL) {
1322 stripe_count = lod_comp_entry_stripe_count(
1323 lo, lod_comp, false);
1325 * load the user-specified OST list; when this
1326 * component is instantiated later, it will be
1327 * used in lod_alloc_ost_list().
1329 lod_comp->llc_ostlist.op_count = stripe_count;
1330 lod_comp->llc_ostlist.op_size =
1331 stripe_count * sizeof(__u32);
1332 OBD_ALLOC(lod_comp->llc_ostlist.op_array,
1333 lod_comp->llc_ostlist.op_size);
1334 if (!lod_comp->llc_ostlist.op_array)
1335 GOTO(out, rc = -ENOMEM);
1337 for (j = 0; j < stripe_count; j++)
1338 lod_comp->llc_ostlist.op_array[j] =
1339 le32_to_cpu(objs[j].l_ost_idx);
1342 * this component's OST objects start from the
1343 * first ost_idx; lod_alloc_ost_list() will
1346 lod_comp->llc_stripe_offset = objs[0].l_ost_idx;
1349 * for uninstantiated component,
1350 * lmm_layout_gen stores default stripe offset.
1352 lod_comp->llc_stripe_offset =
1353 lmm->lmm_layout_gen;
1357 /* skip un-instantiated component object initialization */
1358 if (!lod_comp_inited(lod_comp))
1361 if (!(lod_comp->llc_pattern & LOV_PATTERN_F_RELEASED) &&
1362 !(lod_comp->llc_pattern & LOV_PATTERN_MDT)) {
1363 rc = lod_initialize_objects(env, lo, objs, i);
1369 rc = lod_fill_mirrors(lo);
1375 lod_striping_free_nolock(env, lo);
1380 * Check whether the striping (LOVEA for regular file, LMVEA for directory)
1381 * is already cached.
1383 * \param[in] lo LOD object
1385 * \retval True if the striping is cached, otherwise
1388 static bool lod_striping_loaded(struct lod_object *lo)
1390 if (S_ISREG(lod2lu_obj(lo)->lo_header->loh_attr) &&
1391 lo->ldo_comp_cached)
1394 if (S_ISDIR(lod2lu_obj(lo)->lo_header->loh_attr)) {
1395 if (lo->ldo_dir_stripe_loaded)
1398 /* Never load LMV stripe for slaves of striped dir */
1399 if (lo->ldo_dir_slave_stripe)
1407 * A generic function to initialize the stripe objects.
1409 * A protected version of lod_striping_load_locked() - load the striping
1410 * information from storage, parse that and instantiate LU objects to
1411 * represent the stripes. The LOD object \a lo supplies a pointer to the
1412 * next sub-object in the LU stack so we can lock it. Also use \a lo to
1413 * return an array of references to the newly instantiated objects.
1415 * \param[in] env execution environment for this thread
1416 * \param[in,out] lo LOD object, where striping is stored and
1417 * which gets an array of references
1419 * \retval 0 if parsing and object creation succeed
1420 * \retval negative error number on failure
1422 int lod_striping_load(const struct lu_env *env, struct lod_object *lo)
1424 struct lod_thread_info *info = lod_env_info(env);
1425 struct dt_object *next = dt_object_child(&lo->ldo_obj);
1426 struct lu_buf *buf = &info->lti_buf;
1431 if (!dt_object_exists(next))
1434 if (lod_striping_loaded(lo))
1437 mutex_lock(&lo->ldo_layout_mutex);
1438 if (lod_striping_loaded(lo))
1439 GOTO(unlock, rc = 0);
1441 if (S_ISREG(lod2lu_obj(lo)->lo_header->loh_attr)) {
1442 rc = lod_get_lov_ea(env, lo);
1447 * there is LOV EA (striping information) in this object
1448 * let's parse it and create in-core objects for the stripes
1450 buf->lb_buf = info->lti_ea_store;
1451 buf->lb_len = info->lti_ea_store_size;
1452 rc = lod_parse_striping(env, lo, buf);
1454 lo->ldo_comp_cached = 1;
1455 } else if (S_ISDIR(lod2lu_obj(lo)->lo_header->loh_attr)) {
1456 rc = lod_get_lmv_ea(env, lo);
1457 if (rc < (typeof(rc))sizeof(struct lmv_mds_md_v1)) {
1458 /* Let's set stripe_loaded to avoid further
1459 * stripe loading especially for non-stripe directory,
1460 * which can hurt performance. (See LU-9840)
1463 lo->ldo_dir_stripe_loaded = 1;
1464 GOTO(unlock, rc = rc > 0 ? -EINVAL : rc);
1466 buf->lb_buf = info->lti_ea_store;
1467 buf->lb_len = info->lti_ea_store_size;
1468 if (rc == sizeof(struct lmv_mds_md_v1)) {
1469 rc = lod_load_lmv_shards(env, lo, buf, true);
1470 if (buf->lb_buf != info->lti_ea_store) {
1471 OBD_FREE_LARGE(info->lti_ea_store,
1472 info->lti_ea_store_size);
1473 info->lti_ea_store = buf->lb_buf;
1474 info->lti_ea_store_size = buf->lb_len;
1482 * there is LMV EA (striping information) in this object
1483 * let's parse it and create in-core objects for the stripes
1485 rc = lod_parse_dir_striping(env, lo, buf);
1487 lo->ldo_dir_stripe_loaded = 1;
1491 mutex_unlock(&lo->ldo_layout_mutex);
1496 int lod_striping_reload(const struct lu_env *env, struct lod_object *lo,
1497 const struct lu_buf *buf)
1503 mutex_lock(&lo->ldo_layout_mutex);
1504 lod_striping_free_nolock(env, lo);
1505 rc = lod_parse_striping(env, lo, buf);
1506 mutex_unlock(&lo->ldo_layout_mutex);
1512 * Verify lov_user_md_v1/v3 striping.
1514 * Check the validity of all fields including the magic, stripe size,
1515 * stripe count, stripe offset and that the pool is present. Also check
1516 * that each target index points to an existing target. The
1517 * \a is_from_disk flag enables additional checks. In some cases zero fields
1518 * are allowed (like pattern=0).
1520 * \param[in] d LOD device
1521 * \param[in] buf buffer with LOV EA to verify
1522 * \param[in] is_from_disk 0 - from user, allow some fields to be 0
1523 * 1 - from disk, do not allow
1525 * \retval 0 if the striping is valid
1526 * \retval -EINVAL if striping is invalid
1528 static int lod_verify_v1v3(struct lod_device *d, const struct lu_buf *buf,
1531 struct lov_user_md_v1 *lum;
1532 struct lov_user_md_v3 *lum3;
1533 struct pool_desc *pool = NULL;
1537 __u16 stripe_offset;
1544 if (buf->lb_len < sizeof(*lum)) {
1545 CDEBUG(D_LAYOUT, "buf len %zu too small for lov_user_md\n",
1547 GOTO(out, rc = -EINVAL);
1550 magic = le32_to_cpu(lum->lmm_magic) & ~LOV_MAGIC_DEFINED;
1551 if (magic != LOV_USER_MAGIC_V1 &&
1552 magic != LOV_USER_MAGIC_V3 &&
1553 magic != LOV_USER_MAGIC_SPECIFIC) {
1554 CDEBUG(D_LAYOUT, "bad userland LOV MAGIC: %#x\n",
1555 le32_to_cpu(lum->lmm_magic));
1556 GOTO(out, rc = -EINVAL);
1559 /* the user uses "0" for default stripe pattern normally. */
1560 if (!is_from_disk && lum->lmm_pattern == LOV_PATTERN_NONE)
1561 lum->lmm_pattern = cpu_to_le32(LOV_PATTERN_RAID0);
1563 if (!lov_pattern_supported(le32_to_cpu(lum->lmm_pattern))) {
1564 CDEBUG(D_LAYOUT, "bad userland stripe pattern: %#x\n",
1565 le32_to_cpu(lum->lmm_pattern));
1566 GOTO(out, rc = -EINVAL);
1569 /* a released lum comes from creating orphan on hsm release,
1570 * doesn't make sense to verify it. */
1571 if (le32_to_cpu(lum->lmm_pattern) & LOV_PATTERN_F_RELEASED)
1574 /* 64kB is the largest common page size we see (ia64), and matches the
1576 stripe_size = le32_to_cpu(lum->lmm_stripe_size);
1577 if (stripe_size & (LOV_MIN_STRIPE_SIZE - 1)) {
1578 CDEBUG(D_LAYOUT, "stripe size %u not a multiple of %u\n",
1579 stripe_size, LOV_MIN_STRIPE_SIZE);
1580 GOTO(out, rc = -EINVAL);
1583 stripe_offset = le16_to_cpu(lum->lmm_stripe_offset);
1584 if (!is_from_disk && stripe_offset != LOV_OFFSET_DEFAULT &&
1585 lov_pattern(le32_to_cpu(lum->lmm_pattern)) != LOV_PATTERN_MDT) {
1586 /* if offset is not within valid range [0, osts_size) */
1587 if (stripe_offset >= d->lod_osts_size) {
1588 CDEBUG(D_LAYOUT, "stripe offset %u >= bitmap size %u\n",
1589 stripe_offset, d->lod_osts_size);
1590 GOTO(out, rc = -EINVAL);
1593 /* if lmm_stripe_offset is *not* in bitmap */
1594 if (!cfs_bitmap_check(d->lod_ost_bitmap, stripe_offset)) {
1595 CDEBUG(D_LAYOUT, "stripe offset %u not in bitmap\n",
1597 GOTO(out, rc = -EINVAL);
1601 if (magic == LOV_USER_MAGIC_V1)
1602 lum_size = offsetof(struct lov_user_md_v1,
1604 else if (magic == LOV_USER_MAGIC_V3 || magic == LOV_USER_MAGIC_SPECIFIC)
1605 lum_size = offsetof(struct lov_user_md_v3,
1608 GOTO(out, rc = -EINVAL);
1610 stripe_count = le16_to_cpu(lum->lmm_stripe_count);
1611 if (buf->lb_len < lum_size) {
1612 CDEBUG(D_LAYOUT, "invalid buf len %zu/%zu for lov_user_md with "
1613 "magic %#x and stripe_count %u\n",
1614 buf->lb_len, lum_size, magic, stripe_count);
1615 GOTO(out, rc = -EINVAL);
1618 if (!(magic == LOV_USER_MAGIC_V3 || magic == LOV_USER_MAGIC_SPECIFIC))
1622 /* In the function below, .hs_keycmp resolves to
1623 * pool_hashkey_keycmp() */
1624 /* coverity[overrun-buffer-val] */
1625 pool = lod_find_pool(d, lum3->lmm_pool_name);
1629 if (!is_from_disk && stripe_offset != LOV_OFFSET_DEFAULT) {
1630 rc = lod_check_index_in_pool(stripe_offset, pool);
1632 GOTO(out, rc = -EINVAL);
1635 if (is_from_disk && stripe_count > pool_tgt_count(pool)) {
1636 CDEBUG(D_LAYOUT, "stripe count %u > # OSTs %u in the pool\n",
1637 stripe_count, pool_tgt_count(pool));
1638 GOTO(out, rc = -EINVAL);
1643 lod_pool_putref(pool);
1649 struct lov_comp_md_entry_v1 *comp_entry_v1(struct lov_comp_md_v1 *comp, int i)
1651 LASSERTF((le32_to_cpu(comp->lcm_magic) & ~LOV_MAGIC_DEFINED) ==
1652 LOV_USER_MAGIC_COMP_V1, "Wrong magic %x\n",
1653 le32_to_cpu(comp->lcm_magic));
1654 LASSERTF(i >= 0 && i < le16_to_cpu(comp->lcm_entry_count),
1655 "bad index %d, max = %d\n",
1656 i, le16_to_cpu(comp->lcm_entry_count));
1658 return &comp->lcm_entries[i];
1661 #define for_each_comp_entry_v1(comp, entry) \
1662 for (entry = comp_entry_v1(comp, 0); \
1663 entry <= comp_entry_v1(comp, \
1664 le16_to_cpu(comp->lcm_entry_count) - 1); \
1667 int lod_erase_dom_stripe(struct lov_comp_md_v1 *comp_v1)
1669 struct lov_comp_md_entry_v1 *ent, *dom_ent;
1671 __u32 dom_off, dom_size, comp_size;
1672 void *blob_src, *blob_dst;
1673 unsigned int blob_size, blob_shift;
1675 entries = le16_to_cpu(comp_v1->lcm_entry_count) - 1;
1676 /* if the file has only the DoM stripe, just return an error */
1680 comp_size = le32_to_cpu(comp_v1->lcm_size);
1681 dom_ent = &comp_v1->lcm_entries[0];
1682 dom_off = le32_to_cpu(dom_ent->lcme_offset);
1683 dom_size = le32_to_cpu(dom_ent->lcme_size);
1685 /* shift entries array first */
1686 comp_v1->lcm_entry_count = cpu_to_le16(entries);
1687 memmove(dom_ent, dom_ent + 1,
1688 entries * sizeof(struct lov_comp_md_entry_v1));
1690 /* now move blob of layouts */
1691 blob_dst = (void *)comp_v1 + dom_off - sizeof(*dom_ent);
1692 blob_src = (void *)comp_v1 + dom_off + dom_size;
1693 blob_size = (unsigned long)((void *)comp_v1 + comp_size - blob_src);
1694 blob_shift = sizeof(*dom_ent) + dom_size;
1696 memmove(blob_dst, blob_src, blob_size);
1698 for_each_comp_entry_v1(comp_v1, ent) {
1701 off = le32_to_cpu(ent->lcme_offset);
1702 ent->lcme_offset = cpu_to_le32(off - blob_shift);
1705 comp_v1->lcm_size = cpu_to_le32(comp_size - blob_shift);
1707 /* notify a caller to re-check entry */
1711 int lod_fix_dom_stripe(struct lod_device *d, struct lov_comp_md_v1 *comp_v1)
1713 struct lov_comp_md_entry_v1 *ent, *dom_ent;
1714 struct lu_extent *dom_ext, *ext;
1715 struct lov_user_md_v1 *lum;
1720 dom_ent = &comp_v1->lcm_entries[0];
1721 dom_ext = &dom_ent->lcme_extent;
1722 dom_mid = mirror_id_of(le32_to_cpu(dom_ent->lcme_id));
1723 stripe_size = d->lod_dom_max_stripesize;
1725 lum = (void *)comp_v1 + le32_to_cpu(dom_ent->lcme_offset);
1726 CDEBUG(D_LAYOUT, "DoM component size %u was bigger than MDT limit %u, "
1727 "new size is %u\n", le32_to_cpu(lum->lmm_stripe_size),
1728 d->lod_dom_max_stripesize, stripe_size);
1729 lum->lmm_stripe_size = cpu_to_le32(stripe_size);
1731 for_each_comp_entry_v1(comp_v1, ent) {
1735 mid = mirror_id_of(le32_to_cpu(ent->lcme_id));
1739 ext = &ent->lcme_extent;
1740 if (ext->e_start != dom_ext->e_end)
1743 /* Found the next component after the DoM one with the same
1744 * mirror_id; adjust its start to the end of the DoM component.
1746 * NOTE: we assume here that there can be only one
1747 * DoM component in a file; all replicas are always located on OSTs
1748 * and need no adjustment since they use their own layouts.
1750 ext->e_start = cpu_to_le64(stripe_size);
1754 if (stripe_size == 0) {
1755 /* DoM component size is zero due to server setting,
1756 * remove it from the layout */
1757 rc = lod_erase_dom_stripe(comp_v1);
1759 /* Update DoM extent end finally */
1760 dom_ext->e_end = cpu_to_le64(stripe_size);
1767 * Verify LOV striping.
1769 * \param[in] d LOD device
1770 * \param[in] buf buffer with LOV EA to verify
1771 * \param[in] is_from_disk 0 - from user, allow some fields to be 0
1772 * 1 - from disk, do not allow
1773 * \param[in] lo LOD object
1775 * \retval 0 if the striping is valid
1776 * \retval -EINVAL if striping is invalid
1778 int lod_verify_striping(struct lod_device *d, struct lod_object *lo,
1779 const struct lu_buf *buf, bool is_from_disk)
1781 struct lov_desc *desc = &d->lod_desc;
1782 struct lov_user_md_v1 *lum;
1783 struct lov_comp_md_v1 *comp_v1;
1784 struct lov_comp_md_entry_v1 *ent;
1785 struct lu_extent *ext;
1788 __u32 stripe_size = 0;
1789 __u16 prev_mid = -1, mirror_id = -1;
1797 if (buf->lb_len < sizeof(*lum)) {
1798 CDEBUG(D_LAYOUT, "buf len %zu too small for lov_user_md\n",
1803 magic = le32_to_cpu(lum->lmm_magic) & ~LOV_MAGIC_DEFINED;
1804 if (magic != LOV_USER_MAGIC_V1 &&
1805 magic != LOV_USER_MAGIC_V3 &&
1806 magic != LOV_USER_MAGIC_SPECIFIC &&
1807 magic != LOV_USER_MAGIC_COMP_V1) {
1808 CDEBUG(D_LAYOUT, "bad userland LOV MAGIC: %#x\n",
1809 le32_to_cpu(lum->lmm_magic));
1813 if (magic != LOV_USER_MAGIC_COMP_V1)
1814 RETURN(lod_verify_v1v3(d, buf, is_from_disk));
1816 /* magic == LOV_USER_MAGIC_COMP_V1 */
1817 comp_v1 = buf->lb_buf;
1818 if (buf->lb_len < le32_to_cpu(comp_v1->lcm_size)) {
1819 CDEBUG(D_LAYOUT, "buf len %zu is less than %u\n",
1820 buf->lb_len, le32_to_cpu(comp_v1->lcm_size));
1826 if (le16_to_cpu(comp_v1->lcm_entry_count) == 0) {
1827 CDEBUG(D_LAYOUT, "entry count is zero\n");
1831 if (S_ISREG(lod2lu_obj(lo)->lo_header->loh_attr) &&
1832 lo->ldo_comp_cnt > 0) {
1833 /* could be called from lustre.lov.add */
1834 __u32 cnt = lo->ldo_comp_cnt;
1836 ext = &lo->ldo_comp_entries[cnt - 1].llc_extent;
1837 prev_end = ext->e_end;
1842 for_each_comp_entry_v1(comp_v1, ent) {
1843 ext = &ent->lcme_extent;
1845 if (le64_to_cpu(ext->e_start) >= le64_to_cpu(ext->e_end)) {
1846 CDEBUG(D_LAYOUT, "invalid extent "DEXT"\n",
1847 le64_to_cpu(ext->e_start),
1848 le64_to_cpu(ext->e_end));
1853 /* lcme_id contains valid value */
1854 if (le32_to_cpu(ent->lcme_id) == 0 ||
1855 le32_to_cpu(ent->lcme_id) > LCME_ID_MAX) {
1856 CDEBUG(D_LAYOUT, "invalid id %u\n",
1857 le32_to_cpu(ent->lcme_id));
1861 if (le16_to_cpu(comp_v1->lcm_mirror_count) > 0) {
1862 mirror_id = mirror_id_of(
1863 le32_to_cpu(ent->lcme_id));
1865 /* the first component of each mirror must start at 0 */
1866 if (mirror_id != prev_mid &&
1867 le64_to_cpu(ext->e_start) != 0) {
1869 "invalid start:%llu, expect:0\n",
1870 le64_to_cpu(ext->e_start));
1874 prev_mid = mirror_id;
1878 if (le64_to_cpu(ext->e_start) == 0) {
1883 /* the next component must be adjacent to the previous one */
1884 if (le64_to_cpu(ext->e_start) != prev_end) {
1886 "invalid start actual:%llu, expect:%llu\n",
1887 le64_to_cpu(ext->e_start), prev_end);
1891 tmp.lb_buf = (char *)comp_v1 + le32_to_cpu(ent->lcme_offset);
1892 tmp.lb_len = le32_to_cpu(ent->lcme_size);
1894 /* Check DoM entry is always the first one */
1896 if (lov_pattern(le32_to_cpu(lum->lmm_pattern)) ==
1898 /* DoM component can be only the first stripe */
1899 if (le64_to_cpu(ext->e_start) > 0) {
1900 CDEBUG(D_LAYOUT, "invalid DoM component "
1901 "with %llu extent start\n",
1902 le64_to_cpu(ext->e_start));
1905 stripe_size = le32_to_cpu(lum->lmm_stripe_size);
1906 /* There is just one stripe on MDT and it must
1907 * cover whole component size. */
1908 if (stripe_size != le64_to_cpu(ext->e_end)) {
1909 CDEBUG(D_LAYOUT, "invalid DoM layout "
1910 "stripe size %u != %llu "
1911 "(component size)\n",
1912 stripe_size, prev_end);
1915 /* Check stripe size against the per-MDT limit */
1916 if (stripe_size > d->lod_dom_max_stripesize) {
1917 CDEBUG(D_LAYOUT, "DoM component size "
1918 "%u is bigger than MDT limit %u, check "
1919 "dom_max_stripesize parameter\n",
1920 stripe_size, d->lod_dom_max_stripesize);
1921 rc = lod_fix_dom_stripe(d, comp_v1);
1922 if (rc == -ERESTART) {
1923 /* DoM entry was removed, re-check
1924 * new layout from start */
1932 prev_end = le64_to_cpu(ext->e_end);
1934 rc = lod_verify_v1v3(d, &tmp, is_from_disk);
1938 if (prev_end == LUSTRE_EOF)
1941 /* extent end must be aligned with the stripe_size */
1942 stripe_size = le32_to_cpu(lum->lmm_stripe_size);
1943 if (stripe_size == 0)
1944 stripe_size = desc->ld_default_stripe_size;
1945 if (stripe_size == 0 || (prev_end & (stripe_size - 1))) {
1946 CDEBUG(D_LAYOUT, "stripe size isn't aligned, "
1947 "stripe_sz: %u, [%llu, %llu)\n",
1948 stripe_size, ext->e_start, prev_end);
1953 /* make sure that the mirror_count is telling the truth */
1954 if (mirror_count != le16_to_cpu(comp_v1->lcm_mirror_count) + 1)
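/*
 * Example of the extent rules enforced above for a plain (un-mirrored)
 * composite layout, assuming a 1M stripe size (values are for illustration
 * only):
 *
 *	component 0: [0, 16M)      e_start must be 0
 *	component 1: [16M, 256M)   e_start must equal the previous e_end
 *	component 2: [256M, EOF)   the last e_end must be LUSTRE_EOF
 *
 * Every boundary except the final EOF must be a multiple of the component's
 * stripe size.
 */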
1961 * set the default stripe size, if unset.
1963 * \param[in,out] val number of bytes per OST stripe
1965 * The minimum stripe size is 64KB to ensure that a single stripe is an
1966 * even multiple of a client PAGE_SIZE (IA64, PPC, etc). Otherwise, it
1967 * is difficult to split dirty pages across OSCs during writes.
1969 void lod_fix_desc_stripe_size(__u64 *val)
1971 if (*val < LOV_MIN_STRIPE_SIZE) {
1973 LCONSOLE_INFO("Increasing default stripe size to "
1974 "minimum value %u\n",
1975 LOV_DESC_STRIPE_SIZE_DEFAULT);
1976 *val = LOV_DESC_STRIPE_SIZE_DEFAULT;
1977 } else if (*val & (LOV_MIN_STRIPE_SIZE - 1)) {
1978 *val &= ~(LOV_MIN_STRIPE_SIZE - 1);
1979 LCONSOLE_WARN("Changing default stripe size to %llu (a "
1980 "multiple of %u)\n",
1981 *val, LOV_MIN_STRIPE_SIZE);
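/*
 * For example, assuming LOV_MIN_STRIPE_SIZE is 64KB (the minimum noted
 * above): a configured default of 100000 bytes is rounded down to a 64KB
 * multiple,
 *
 *	100000 & ~(65536 - 1) == 65536
 *
 * while a value below 64KB is bumped up to LOV_DESC_STRIPE_SIZE_DEFAULT.
 */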
1986 * set the filesystem default number of stripes, if unset.
1988 * \param[in,out] val number of stripes
1990 * A value of "0" means "use the system-wide default stripe count", which
1991 * has either been inherited by now, or falls back to 1 stripe per file.
1992 * A value of "-1" (0xffffffff) means "stripe over all available OSTs",
1993 * and is a valid value, so is left unchanged here.
1995 void lod_fix_desc_stripe_count(__u32 *val)
2002 * set the filesystem default layout pattern
2004 * \param[in,out] val LOV_PATTERN_* layout
2006 * A value of "0" means "use the system-wide default layout type", which
2007 * has either been inherited by now, or falls back to plain RAID0 striping.
2009 void lod_fix_desc_pattern(__u32 *val)
2011 /* from lov_setstripe */
2012 if ((*val != 0) && (*val != LOV_PATTERN_RAID0) &&
2013 (*val != LOV_PATTERN_MDT)) {
2014 LCONSOLE_WARN("Unknown stripe pattern: %#x\n", *val);
2019 void lod_fix_desc_qos_maxage(__u32 *val)
2021 /* fix qos_maxage */
2023 *val = LOV_DESC_QOS_MAXAGE_DEFAULT;
2027 * Used to fix insane default striping.
2029 * \param[in] desc striping description
2031 void lod_fix_desc(struct lov_desc *desc)
2033 lod_fix_desc_stripe_size(&desc->ld_default_stripe_size);
2034 lod_fix_desc_stripe_count(&desc->ld_default_stripe_count);
2035 lod_fix_desc_pattern(&desc->ld_pattern);
2036 lod_fix_desc_qos_maxage(&desc->ld_qos_maxage);
2040 * Initialize the structures used to store pools and default striping.
2042 * \param[in] lod LOD device
2043 * \param[in] lcfg configuration structure storing default striping.
2045 * \retval 0 if initialization succeeds
2046 * \retval negative error number on failure
2048 int lod_pools_init(struct lod_device *lod, struct lustre_cfg *lcfg)
2050 struct obd_device *obd;
2051 struct lov_desc *desc;
2055 obd = class_name2obd(lustre_cfg_string(lcfg, 0));
2056 LASSERT(obd != NULL);
2057 obd->obd_lu_dev = &lod->lod_dt_dev.dd_lu_dev;
2059 if (LUSTRE_CFG_BUFLEN(lcfg, 1) < 1) {
2060 CERROR("LOD setup requires a descriptor\n");
2064 desc = (struct lov_desc *)lustre_cfg_buf(lcfg, 1);
2066 if (sizeof(*desc) > LUSTRE_CFG_BUFLEN(lcfg, 1)) {
2067 CERROR("descriptor size wrong: %d > %d\n",
2068 (int)sizeof(*desc), LUSTRE_CFG_BUFLEN(lcfg, 1));
2072 if (desc->ld_magic != LOV_DESC_MAGIC) {
2073 if (desc->ld_magic == __swab32(LOV_DESC_MAGIC)) {
2074 CDEBUG(D_OTHER, "%s: Swabbing lov desc %p\n",
2075 obd->obd_name, desc);
2076 lustre_swab_lov_desc(desc);
2078 CERROR("%s: Bad lov desc magic: %#x\n",
2079 obd->obd_name, desc->ld_magic);
2086 desc->ld_active_tgt_count = 0;
2087 lod->lod_desc = *desc;
2089 lod->lod_sp_me = LUSTRE_SP_CLI;
2091 /* Set up allocation policy (QoS and RR) */
2092 INIT_LIST_HEAD(&lod->lod_qos.lq_oss_list);
2093 init_rwsem(&lod->lod_qos.lq_rw_sem);
2094 lod->lod_qos.lq_dirty = 1;
2095 lod->lod_qos.lq_rr.lqr_dirty = 1;
2096 lod->lod_qos.lq_reset = 1;
2097 /* Default priority is toward free space balance */
2098 lod->lod_qos.lq_prio_free = 232;
2099 /* Default threshold for rr (roughly 17%) */
2100 lod->lod_qos.lq_threshold_rr = 43;
2102 /* Set up OST pool environment */
2103 lod->lod_pools_hash_body = cfs_hash_create("POOLS", HASH_POOLS_CUR_BITS,
2104 HASH_POOLS_MAX_BITS,
2105 HASH_POOLS_BKT_BITS, 0,
2108 &pool_hash_operations,
2110 if (lod->lod_pools_hash_body == NULL)
2113 INIT_LIST_HEAD(&lod->lod_pool_list);
2114 lod->lod_pool_count = 0;
2115 rc = lod_ost_pool_init(&lod->lod_pool_info, 0);
2118 lod_qos_rr_init(&lod->lod_qos.lq_rr);
2119 rc = lod_ost_pool_init(&lod->lod_qos.lq_rr.lqr_pool, 0);
2121 GOTO(out_pool_info, rc);
2126 lod_ost_pool_free(&lod->lod_pool_info);
2128 cfs_hash_putref(lod->lod_pools_hash_body);
2134 * Release the structures describing the pools.
2136 * \param[in] lod LOD device from which we release the structures
2140 int lod_pools_fini(struct lod_device *lod)
2142 struct obd_device *obd = lod2obd(lod);
2143 struct pool_desc *pool, *tmp;
2146 list_for_each_entry_safe(pool, tmp, &lod->lod_pool_list, pool_list) {
2147 /* free pool structs */
2148 CDEBUG(D_INFO, "delete pool %p\n", pool);
2149 /* In the function below, .hs_keycmp resolves to
2150 * pool_hashkey_keycmp() */
2151 /* coverity[overrun-buffer-val] */
2152 lod_pool_del(obd, pool->pool_name);
2155 cfs_hash_putref(lod->lod_pools_hash_body);
2156 lod_ost_pool_free(&(lod->lod_qos.lq_rr.lqr_pool));
2157 lod_ost_pool_free(&lod->lod_pool_info);