__u64 llsd_touch_gen;
struct dt_object *llsd_rb_obj;
struct rb_root llsd_rb_root;
- rwlock_t llsd_rb_lock;
+ struct rw_semaphore llsd_rb_rwsem;
unsigned int llsd_rbtree_valid:1;
};
struct lfsck_layout_slave_target *llsaa_llst;
};
-static inline bool lfsck_comp_extent_aligned(__u64 size)
+/* Return true when @border is aligned on a @size boundary.
+ * NOTE(review): the mask trick only works when @size is a power of
+ * two — presumably guaranteed for stripe sizes; confirm at callers. */
+static inline bool lfsck_comp_extent_aligned(__u64 border, __u32 size)
{
- return (size & (LOV_MIN_STRIPE_SIZE - 1)) == 0;
+ return (border & (size - 1)) == 0;
}
static inline void
struct lfsck_assistant_req *lar)
{
struct lfsck_layout_req *llr =
- container_of0(lar, struct lfsck_layout_req, llr_lar);
+ container_of(lar, struct lfsck_layout_req, llr_lar);
lfsck_object_put(env, llr->llr_child);
lfsck_assistant_object_put(env, lar->lar_parent);
int rc = 0;
ENTRY;
- if (!lad->lad_incomplete)
+ if (!test_bit(LAD_INCOMPLETE, &lad->lad_flags))
RETURN_EXIT;
/* If the MDT has ever failed to verfiy some OST-objects,
up_read(<ds->ltd_rw_sem);
if (rc == 0 && atomic_read(&count) > 0)
- rc = ptlrpc_set_wait(set);
+ rc = ptlrpc_set_wait(env, set);
ptlrpc_set_destroy(set);
}
static int lfsck_layout_verify_header_v1v3(struct dt_object *obj,
- struct lov_mds_md_v1 *lmm)
+ struct lov_mds_md_v1 *lmm,
+ __u64 start, __u64 end,
+ __u32 comp_id,
+ bool ext, bool *dom)
{
__u32 magic;
__u32 pattern;
+ __u32 size;
magic = le32_to_cpu(lmm->lmm_magic);
/* If magic crashed, keep it there. Sometime later, during OST-object
else
rc = -EINVAL;
- CDEBUG(D_LFSCK, "%s LOV EA magic %u for the file "DFID"\n",
+ CDEBUG(D_LFSCK, "%s LOV EA magic 0x%X for the file "DFID"\n",
rc == -EINVAL ? "Unknown" : "Unsupported",
magic, PFID(lfsck_dto2fid(obj)));
}
pattern = le32_to_cpu(lmm->lmm_pattern);
- /* XXX: currently, we only support LOV_PATTERN_RAID0. */
- if (lov_pattern(pattern) != LOV_PATTERN_RAID0) {
+ *dom = !!(lov_pattern(pattern) == LOV_PATTERN_MDT);
+
+ /* XXX: DoM file verification will be supported via LU-11081. */
+ if (lov_pattern(pattern) == LOV_PATTERN_MDT) {
+#if 0
+ if (start != 0) {
+ CDEBUG(D_LFSCK, "The DoM entry for "DFID" is not "
+ "the first component in the mirror %x/%llu\n",
+ PFID(lfsck_dto2fid(obj)), comp_id, start);
+
+ return -EINVAL;
+ }
+#endif
+ } else if (!lov_pattern_supported_normal_comp(lov_pattern(pattern))) {
CDEBUG(D_LFSCK, "Unsupported LOV EA pattern %u for the file "
- DFID"\n", pattern, PFID(lfsck_dto2fid(obj)));
+ DFID" in the component %x\n",
+ pattern, PFID(lfsck_dto2fid(obj)), comp_id);
return -EOPNOTSUPP;
}
+ size = le32_to_cpu(lmm->lmm_stripe_size);
+ if (!ext && end != LUSTRE_EOF && start != end &&
+ !lfsck_comp_extent_aligned(end, size)){
+ CDEBUG(D_LFSCK, "not aligned border in PFL extent range "
+ "[%llu - %llu) stripesize %u for the file "DFID
+ " at idx %d\n", start, end, size,
+ PFID(lfsck_dto2fid(obj)), comp_id);
+
+ return -EINVAL;
+ }
+
return 0;
}
+/* Sanity-check a foreign LOV EA (caller has already verified the
+ * LOV_MAGIC_FOREIGN magic).
+ *
+ * Only logs the foreign layout fields and warns when the embedded
+ * lfm_length is inconsistent with the full EA size @len; lfm_type and
+ * lfm_flags are intentionally not validated here.
+ *
+ * Always returns -ENODATA: a foreign layout carries nothing LFSCK can
+ * repair, so callers treat it like "no layout data present". */
+static int lfsck_layout_verify_header_foreign(struct dt_object *obj,
+ struct lov_foreign_md *lfm,
+ size_t len)
+{
+ /* magic has been verified already */
+ __u32 value_len = le32_to_cpu(lfm->lfm_length);
+ /* type and flags are not checked for instance */
+
+ CDEBUG(D_INFO, "foreign LOV EA, magic %x, len %u, type %x, flags %x, for file "DFID"\n",
+ le32_to_cpu(lfm->lfm_magic), value_len,
+ le32_to_cpu(lfm->lfm_type), le32_to_cpu(lfm->lfm_flags),
+ PFID(lfsck_dto2fid(obj)));
+
+ if (len != value_len + offsetof(typeof(*lfm), lfm_value))
+ CDEBUG(D_LFSCK, "foreign LOV EA internal size %u does not match EA full size %zu for file "DFID"\n",
+ value_len, len, PFID(lfsck_dto2fid(obj)));
+
+ /* nothing to repair */
+ return -ENODATA;
+}
+
static int lfsck_layout_verify_header(struct dt_object *obj,
- struct lov_mds_md_v1 *lmm)
+ struct lov_mds_md_v1 *lmm, size_t len)
{
+ bool p_dom = false;
int rc = 0;
- if (le32_to_cpu(lmm->lmm_magic) == LOV_MAGIC_COMP_V1) {
+ if (le32_to_cpu(lmm->lmm_magic) == LOV_MAGIC_COMP_V1 ||
+ le32_to_cpu(lmm->lmm_magic) == LOV_MAGIC_SEL) {
struct lov_comp_md_v1 *lcm = (struct lov_comp_md_v1 *)lmm;
+ bool p_zero = false;
int i;
__u16 count = le16_to_cpu(lcm->lcm_entry_count);
return -EINVAL;
}
- for (i = 0; i < count; i++) {
+ for (i = 0; i < count && !rc; i++) {
struct lov_comp_md_entry_v1 *lcme =
&lcm->lcm_entries[i];
__u64 start = le64_to_cpu(lcme->lcme_extent.e_start);
__u64 end = le64_to_cpu(lcme->lcme_extent.e_end);
__u32 comp_id = le32_to_cpu(lcme->lcme_id);
+ bool ext, inited, zero;
+ __u32 flags;
if (unlikely(comp_id == LCME_ID_INVAL ||
comp_id > LCME_ID_MAX)) {
- CDEBUG(D_LFSCK, "found invalid FPL ID %u "
+ CDEBUG(D_LFSCK, "found invalid PFL ID %u "
"for the file "DFID" at idx %d\n",
comp_id, PFID(lfsck_dto2fid(obj)), i);
return -EINVAL;
}
- if (unlikely(start >= end ||
- !lfsck_comp_extent_aligned(start) ||
- (!lfsck_comp_extent_aligned(end) &&
- end != LUSTRE_EOF))) {
- CDEBUG(D_LFSCK, "found invalid FPL extent "
- "range [%llu - %llu) for the file "
- DFID" at idx %d\n",
- start, end, PFID(lfsck_dto2fid(obj)), i);
+ flags = le32_to_cpu(lcme->lcme_flags);
+ ext = flags & LCME_FL_EXTENSION;
+ inited = flags & LCME_FL_INIT;
+ zero = !!(start == end);
+
+ if ((i == 0) && zero) {
+ CDEBUG(D_LFSCK, "invalid PFL comp %d: [%llu "
+ "- %llu) for "DFID"\n", i, start, end,
+ PFID(lfsck_dto2fid(obj)));
+ return -EINVAL;
+ }
+
+ if ((zero && (inited || (i + 1 == count))) ||
+ (start > end)) {
+ CDEBUG(D_LFSCK, "invalid PFL comp %d/%d: "
+ "[%llu, %llu) for "DFID", %sinited\n",
+ i, count, start, end,
+ PFID(lfsck_dto2fid(obj)),
+ inited ? "" : "NOT ");
+ return -EINVAL;
+ }
+
+ if (!ext && p_zero) {
+ CDEBUG(D_LFSCK, "invalid PFL comp %d: [%llu, "
+ "%llu) for "DFID": NOT extension "
+ "after 0-length component\n", i,
+ start, end, PFID(lfsck_dto2fid(obj)));
+ return -EINVAL;
+ }
+ if (ext && (inited || p_dom || zero)) {
+ CDEBUG(D_LFSCK, "invalid PFL comp %d: [%llu, "
+ "%llu) for "DFID": %s\n", i,
+ start, end, PFID(lfsck_dto2fid(obj)),
+ inited ? "inited extension" :
+ p_dom ? "extension follows DOM" :
+ zero ? "zero length extension" : "");
return -EINVAL;
}
rc = lfsck_layout_verify_header_v1v3(obj,
- (struct lov_mds_md_v1 *)((char *)lmm +
- le32_to_cpu(lcme->lcme_offset)));
- if (rc)
- return rc;
+ (struct lov_mds_md_v1 *)((char *)lmm +
+ le32_to_cpu(lcme->lcme_offset)), start,
+ end, comp_id, ext, &p_dom);
+
+ p_zero = zero;
}
+ } else if (le32_to_cpu(lmm->lmm_magic) == LOV_MAGIC_FOREIGN) {
+ rc = lfsck_layout_verify_header_foreign(obj,
+ (struct lov_foreign_md *)lmm,
+ len);
} else {
- rc = lfsck_layout_verify_header_v1v3(obj, lmm);
+ rc = lfsck_layout_verify_header_v1v3(obj, lmm, 0, LUSTRE_EOF,
+ 0, false, &p_dom);
}
return rc;
if (rc == -ERANGE) {
rc = dt_xattr_get(env, obj, &LU_BUF_NULL, XATTR_NAME_LOV);
if (rc <= 0)
- return rc;
+ return !rc ? -ENODATA : rc;
lu_buf_realloc(buf, rc);
if (buf->lb_buf == NULL)
goto again;
}
- if (rc == -ENODATA)
- rc = 0;
-
if (rc <= 0)
- return rc;
+ return !rc ? -ENODATA : rc;
if (unlikely(buf->lb_buf == NULL)) {
lu_buf_alloc(buf, rc);
goto again;
}
- rc1 = lfsck_layout_verify_header(obj, buf->lb_buf);
+ rc1 = lfsck_layout_verify_header(obj, buf->lb_buf, rc);
return rc1 ? rc1 : rc;
}
return lrn;
}
-extern const struct dt_index_operations lfsck_orphan_index_ops;
+static const struct dt_index_operations lfsck_orphan_index_ops;
static int lfsck_rbtree_setup(const struct lu_env *env,
struct lfsck_component *com)
lfsck->li_bottom->dd_record_fid_accessed = 0;
/* Invalid the rbtree, then no others will use it. */
- write_lock(&llsd->llsd_rb_lock);
+ down_write(&llsd->llsd_rb_rwsem);
llsd->llsd_rbtree_valid = 0;
- write_unlock(&llsd->llsd_rb_lock);
+ up_write(&llsd->llsd_rb_rwsem);
while (node != NULL) {
next = rb_next(node);
if (!fid_is_idif(fid) && !fid_is_norm(fid))
RETURN_EXIT;
- read_lock(&llsd->llsd_rb_lock);
+ down_read(&llsd->llsd_rb_rwsem);
if (!llsd->llsd_rbtree_valid)
GOTO(unlock, rc = 0);
LASSERT(!insert);
- read_unlock(&llsd->llsd_rb_lock);
+ up_read(&llsd->llsd_rb_rwsem);
tmp = lfsck_rbtree_new(env, fid);
if (IS_ERR(tmp))
GOTO(out, rc = PTR_ERR(tmp));
insert = true;
- write_lock(&llsd->llsd_rb_lock);
+ down_write(&llsd->llsd_rb_rwsem);
if (!llsd->llsd_rbtree_valid) {
lfsck_rbtree_free(tmp);
GOTO(unlock, rc = 0);
unlock:
if (insert)
- write_unlock(&llsd->llsd_rb_lock);
+ up_write(&llsd->llsd_rb_rwsem);
else
- read_unlock(&llsd->llsd_rb_lock);
+ up_read(&llsd->llsd_rb_rwsem);
out:
if (rc != 0 && accessed) {
struct lfsck_layout *lo = com->lc_file_ram;
}
if (lo->ll_bitmap_size == 0) {
- lad->lad_incomplete = 0;
+ clear_bit(LAD_INCOMPLETE, &lad->lad_flags);
CFS_RESET_BITMAP(bitmap);
RETURN(0);
RETURN(rc >= 0 ? -EINVAL : rc);
if (cfs_bitmap_check_empty(bitmap))
- lad->lad_incomplete = 0;
+ clear_bit(LAD_INCOMPLETE, &lad->lad_flags);
else
- lad->lad_incomplete = 1;
+ set_bit(LAD_INCOMPLETE, &lad->lad_flags);
RETURN(0);
}
loa = &lfsck_env_info(env)->lti_loa;
rc = dt_xattr_get(env, obj, lfsck_buf_get(env, loa, sizeof(*loa)),
XATTR_NAME_LMA);
- if (rc >= sizeof(struct lustre_mdt_attrs)) {
+ if (rc >= (int)sizeof(struct lustre_mdt_attrs)) {
lustre_lma_swab(&loa->loa_lma);
return loa->loa_lma.lma_compat & LMAC_FID_ON_OST ? 1 : 0;
memset(dof, 0, sizeof(*dof));
dof->dof_type = dt_mode_to_dft(S_IFREG);
- th = dt_trans_create(env, dt);
+ th = lfsck_trans_create(env, dt, lfsck);
if (IS_ERR(th))
GOTO(log, rc = PTR_ERR(th));
continue;
}
- th = dt_trans_create(env, dt);
+ th = lfsck_trans_create(env, dt, lfsck);
if (IS_ERR(th)) {
rc1 = PTR_ERR(th);
CDEBUG(D_LFSCK, "%s: layout LFSCK failed to store "
if (OBD_FAIL_CHECK(OBD_FAIL_LFSCK_DELAY4) &&
cfs_fail_val > 0) {
- struct l_wait_info lwi = LWI_TIMEOUT(
- cfs_time_seconds(cfs_fail_val),
- NULL, NULL);
-
- /* Some others may changed the cfs_fail_val
- * as zero after above check, re-check it for
- * sure to avoid falling into wait for ever. */
- if (likely(lwi.lwi_timeout > 0)) {
- struct ptlrpc_thread *thread =
- &lfsck->li_thread;
-
- up_write(&com->lc_sem);
- l_wait_event(thread->t_ctl_waitq,
- !thread_is_running(thread),
- &lwi);
- down_write(&com->lc_sem);
- }
+ struct ptlrpc_thread *thread =
+ &lfsck->li_thread;
+
+ up_write(&com->lc_sem);
+ wait_event_idle_timeout(
+ thread->t_ctl_waitq,
+ !thread_is_running(thread),
+ cfs_time_seconds(cfs_fail_val));
+ down_write(&com->lc_sem);
}
}
if (lfsck->li_master) {
struct lfsck_assistant_data *lad = com->lc_data;
- if (lad->lad_incomplete)
+ if (test_bit(LAD_INCOMPLETE, &lad->lad_flags))
lo->ll_status = LS_PARTIAL;
else
lo->ll_status = LS_COMPLETED;
mutex_lock(&com->lc_sub_trace_objs[idx].lsto_mutex);
- th = dt_trans_create(env, dev);
+ th = lfsck_trans_create(env, dev, com->lc_lfsck);
if (IS_ERR(th))
GOTO(unlock, rc = PTR_ERR(th));
GOTO(unlock, rc);
rc = dt_insert(env, obj, (const struct dt_rec *)rec,
- (const struct dt_key *)key, th, 1);
+ (const struct dt_key *)key, th);
GOTO(unlock, rc);
mutex_lock(&com->lc_sub_trace_objs[idx].lsto_mutex);
- th = dt_trans_create(env, dev);
+ th = lfsck_trans_create(env, dev, com->lc_lfsck);
if (IS_ERR(th))
GOTO(unlock, rc = PTR_ERR(th));
}
static int lfsck_layout_new_comp_lovea(const struct lu_env *env,
- struct ost_layout *ol,
- struct dt_object *parent,
- struct lu_buf *buf, __u32 ea_off,
- struct lov_mds_md_v1 **lmm,
- struct lov_ost_data_v1 **objs)
+ struct lu_orphan_rec_v3 *rec,
+ struct dt_object *parent,
+ struct lu_buf *buf, __u32 ea_off,
+ struct lov_mds_md_v1 **lmm,
+ struct lov_ost_data_v1 **objs)
{
+ struct ost_layout *ol = &rec->lor_layout;
struct lov_comp_md_v1 *lcm;
struct lov_comp_md_entry_v1 *lcme;
__u32 pattern = LOV_PATTERN_RAID0;
lcm = buf->lb_buf;
lcm->lcm_magic = cpu_to_le32(LOV_MAGIC_COMP_V1);
lcm->lcm_size = cpu_to_le32(size);
- lcm->lcm_layout_gen = cpu_to_le32(1);
- lcm->lcm_flags = 0;
+ if (rec->lor_range) {
+ lcm->lcm_layout_gen = cpu_to_le32(rec->lor_layout_version +
+ rec->lor_range);
+ lcm->lcm_flags = cpu_to_le16(LCM_FL_WRITE_PENDING);
+ } else if (rec->lor_layout_version) {
+ lcm->lcm_layout_gen = cpu_to_le32(rec->lor_layout_version +
+ rec->lor_range);
+ lcm->lcm_flags = cpu_to_le16(LCM_FL_NONE);
+ } else {
+ lcm->lcm_layout_gen = cpu_to_le32(1);
+ lcm->lcm_flags = cpu_to_le16(LCM_FL_NONE);
+ }
lcm->lcm_entry_count = cpu_to_le16(1);
+ /* Currently, we do not know how many mirrors there will be, so set
+ * it to zero at the beginning. It will be updated when more mirrors
+ * are found. */
+ lcm->lcm_mirror_count = 0;
lcme = &lcm->lcm_entries[0];
lcme->lcme_id = cpu_to_le32(ol->ol_comp_id);
lcme->lcme_extent.e_end = cpu_to_le64(ol->ol_comp_end);
lcme->lcme_offset = cpu_to_le32(offset);
lcme->lcme_size = cpu_to_le32(lcme_size);
+ lcme->lcme_layout_gen = lcm->lcm_layout_gen;
if (ol->ol_stripe_count > 1)
pattern |= LOV_PATTERN_F_HOLE;
return size;
}
-static int lfsck_layout_add_comp_comp(const struct lu_env *env,
- struct lfsck_instance *lfsck,
- struct thandle *handle,
- struct ost_layout *ol,
- struct dt_object *parent,
- const struct lu_fid *cfid,
- struct lu_buf *buf, __u32 ost_idx,
- __u32 ea_off, int pos)
+/* Update mirror-related generation and staleness flags in a composite
+ * layout after (re)building component @lcme.
+ *
+ * The component's lcme_layout_gen is set from @version + @range (forced
+ * to at least 1), and lcm_layout_gen is raised to match if lower. When
+ * @range is non-zero the file is marked WRITE_PENDING; otherwise a
+ * mirrored file that was FL_NONE becomes RDONLY.
+ *
+ * Every other non-STALE component whose extent overlaps this one is then
+ * compared by generation: if any overlapping component is newer (or has
+ * gen 0, see below), @lcme itself is marked STALE; overlapping
+ * components that are older are marked STALE instead. */
+static void lfsck_layout_update_lcm(struct lov_comp_md_v1 *lcm,
+ struct lov_comp_md_entry_v1 *lcme,
+ __u32 version, __u32 range)
+{
+ struct lov_comp_md_entry_v1 *tmp;
+ __u64 start = le64_to_cpu(lcme->lcme_extent.e_start);
+ __u64 end = le64_to_cpu(lcme->lcme_extent.e_end);
+ __u32 gen = version + range;
+ __u32 tmp_gen;
+ int i;
+ __u16 count = le16_to_cpu(lcm->lcm_entry_count);
+ __u16 flags = le16_to_cpu(lcm->lcm_flags);
+
+ if (!gen)
+ gen = 1;
+ lcme->lcme_layout_gen = cpu_to_le32(gen);
+ if (le32_to_cpu(lcm->lcm_layout_gen) < gen)
+ lcm->lcm_layout_gen = cpu_to_le32(gen);
+
+ if (range)
+ lcm->lcm_flags = cpu_to_le16(LCM_FL_WRITE_PENDING);
+ else if (flags == LCM_FL_NONE && le16_to_cpu(lcm->lcm_mirror_count) > 0)
+ lcm->lcm_flags = cpu_to_le16(LCM_FL_RDONLY);
+
+ for (i = 0; i < count; i++) {
+ tmp = &lcm->lcm_entries[i];
+ if (le64_to_cpu(tmp->lcme_extent.e_end) <= start)
+ continue;
+
+ if (le64_to_cpu(tmp->lcme_extent.e_start) >= end)
+ continue;
+
+ if (le32_to_cpu(tmp->lcme_flags) & LCME_FL_STALE)
+ continue;
+
+ tmp_gen = le32_to_cpu(tmp->lcme_layout_gen);
+ /* A component with lcme_layout_gen == 0 but without the
+ * LCME_FL_STALE flag is treated as the latest version among
+ * all mirrors. */
+ if (tmp_gen == 0 || tmp_gen > gen) {
+ lcme->lcme_flags = cpu_to_le32(
+ le32_to_cpu(lcme->lcme_flags) | LCME_FL_STALE);
+ break;
+ }
+
+ if (tmp_gen < gen)
+ tmp->lcme_flags = cpu_to_le32(
+ le32_to_cpu(tmp->lcme_flags) | LCME_FL_STALE);
+ }
+}
+
+static int lfsck_layout_add_comp(const struct lu_env *env,
+ struct lfsck_instance *lfsck,
+ struct thandle *handle,
+ struct lu_orphan_rec_v3 *rec,
+ struct dt_object *parent,
+ const struct lu_fid *cfid,
+ struct lu_buf *buf, __u32 ost_idx,
+ __u32 ea_off, int pos, bool new_mirror)
{
+ struct ost_layout *ol = &rec->lor_layout;
struct lov_comp_md_v1 *lcm = buf->lb_buf;
struct lov_comp_md_entry_v1 *lcme;
struct lov_mds_md_v1 *lmm;
* have reallocated the buf. */
lcm = buf->lb_buf;
lcm->lcm_size = cpu_to_le32(size);
- le32_add_cpu(&lcm->lcm_layout_gen, 1);
lcm->lcm_entry_count = cpu_to_le16(count + 1);
+ if (new_mirror)
+ le16_add_cpu(&lcm->lcm_mirror_count, 1);
/* 1. Move the component bodies from [pos, count-1] to [pos+1, count]
* with distance of 'added'. */
ol->ol_stripe_size, ea_off,
pattern, ol->ol_stripe_count);
+ /* 6. Update mirror related flags and version. */
+ lfsck_layout_update_lcm(lcm, lcme, rec->lor_layout_version,
+ rec->lor_range);
+
rc = lfsck_layout_refill_lovea(env, lfsck, handle, parent, cfid, buf,
lmm, objs, LU_XATTR_REPLACE, ost_idx,
le32_to_cpu(lcm->lcm_size));
CDEBUG(D_LFSCK, "%s: layout LFSCK assistant add new COMP for "
DFID": parent "DFID", OST-index %u, stripe-index %u, "
"stripe_size %u, stripe_count %u, comp_id %u, comp_start %llu, "
- "comp_end %llu, %s LOV EA hole: rc = %d\n",
+ "comp_end %llu, layout version %u, range %u, "
+ "%s LOV EA hole: rc = %d\n",
lfsck_lfsck2name(lfsck), PFID(cfid), PFID(lfsck_dto2fid(parent)),
ost_idx, ea_off, ol->ol_stripe_size, ol->ol_stripe_count,
ol->ol_comp_id, ol->ol_comp_start, ol->ol_comp_end,
+ rec->lor_layout_version, rec->lor_range,
le32_to_cpu(lmm->lmm_pattern) & LOV_PATTERN_F_HOLE ?
"with" : "without", rc);
static int lfsck_layout_update_lovea(const struct lu_env *env,
struct lfsck_instance *lfsck,
struct thandle *handle,
- struct ost_layout *ol,
+ struct lu_orphan_rec_v3 *rec,
struct dt_object *parent,
const struct lu_fid *cfid,
struct lu_buf *buf, int fl,
__u32 ost_idx, __u32 ea_off)
{
+ struct ost_layout *ol = &rec->lor_layout;
struct lov_mds_md_v1 *lmm = NULL;
struct lov_ost_data_v1 *objs = NULL;
int rc = 0;
ENTRY;
if (ol->ol_comp_id != 0)
- rc = lfsck_layout_new_comp_lovea(env, ol, parent, buf, ea_off,
- &lmm, &objs);
+ rc = lfsck_layout_new_comp_lovea(env, rec, parent, buf, ea_off,
+ &lmm, &objs);
else
- rc = lfsck_layout_new_v1_lovea(env, lfsck, ol, parent, buf,
- ea_off, &lmm, &objs);
-
+ rc = lfsck_layout_new_v1_lovea(env, lfsck, &rec->lor_layout,
+ parent, buf, ea_off, &lmm,
+ &objs);
if (rc > 0)
rc = lfsck_layout_refill_lovea(env, lfsck, handle, parent, cfid,
buf, lmm, objs, fl, ost_idx, rc);
CDEBUG(D_LFSCK, "%s: layout LFSCK assistant created layout EA for "
DFID": parent "DFID", OST-index %u, stripe-index %u, "
"stripe_size %u, stripe_count %u, comp_id %u, comp_start %llu, "
- "comp_end %llu, fl %d, %s LOV EA hole: rc = %d\n",
+ "comp_end %llu, layout version %u, range %u, fl %d, "
+ "%s LOV EA hole: rc = %d\n",
lfsck_lfsck2name(lfsck), PFID(cfid), PFID(lfsck_dto2fid(parent)),
ost_idx, ea_off, ol->ol_stripe_size, ol->ol_stripe_count,
- ol->ol_comp_id, ol->ol_comp_start, ol->ol_comp_end, fl,
+ ol->ol_comp_id, ol->ol_comp_start, ol->ol_comp_end,
+ rec->lor_layout_version, rec->lor_range, fl,
le32_to_cpu(lmm->lmm_pattern) & LOV_PATTERN_F_HOLE ?
"with" : "without", rc);
}
static int __lfsck_layout_update_pfid(const struct lu_env *env,
+ struct lfsck_component *com,
struct dt_object *child,
const struct lu_fid *pfid,
- const struct ost_layout *ol, __u32 offset)
+ const struct ost_layout *ol, __u32 offset,
+ __u32 version, __u32 range)
{
struct dt_device *dev = lfsck_obj2dev(child);
struct filter_fid *ff = &lfsck_env_info(env)->lti_ff;
* parent MDT-object's layout EA. */
ff->ff_parent.f_stripe_idx = cpu_to_le32(offset);
ost_layout_cpu_to_le(&ff->ff_layout, ol);
+ ff->ff_layout_version = cpu_to_le32(version);
+ ff->ff_range = cpu_to_le32(range);
lfsck_buf_init(&buf, ff, sizeof(*ff));
- handle = dt_trans_create(env, dev);
+ handle = lfsck_trans_create(env, dev, com->lc_lfsck);
if (IS_ERR(handle))
RETURN(PTR_ERR(handle));
struct dt_object *parent,
struct lu_fid *cfid,
struct dt_device *cdev,
- struct ost_layout *ol, __u32 ea_off)
+ struct lu_orphan_rec_v3 *rec, __u32 ea_off)
{
struct dt_object *child;
int rc = 0;
if (IS_ERR(child))
RETURN(PTR_ERR(child));
- rc = __lfsck_layout_update_pfid(env, child,
+ rc = __lfsck_layout_update_pfid(env, com, child,
lu_object_fid(&parent->do_lu),
- ol, ea_off);
+ &rec->lor_layout, ea_off,
+ rec->lor_layout_version,
+ rec->lor_range);
lfsck_object_put(env, child);
RETURN(rc == 0 ? 1 : rc);
static int lfsck_layout_recreate_parent(const struct lu_env *env,
struct lfsck_component *com,
struct lfsck_tgt_desc *ltd,
- struct lu_orphan_rec_v2 *rec,
+ struct lu_orphan_rec_v3 *rec,
struct lu_fid *cfid,
const char *infix,
const char *type,
struct lu_attr *la = &info->lti_la2;
struct dt_object_format *dof = &info->lti_dof;
struct lfsck_instance *lfsck = com->lc_lfsck;
- struct ost_layout *ol = &rec->lor_layout;
struct lu_fid *pfid = &rec->lor_rec.lor_fid;
struct lu_fid *tfid = &info->lti_fid3;
struct dt_device *dev = lfsck->li_bottom;
int rc = 0;
ENTRY;
+ if (lfsck_is_dryrun(lfsck))
+ GOTO(log, rc = 0);
+
if (unlikely(lpf == NULL))
GOTO(log, rc = -ENXIO);
* the stripe(s). The LFSCK will specify the LOV EA via
* lfsck_layout_update_lovea(). */
- size = lfsck_lovea_size(ol, ea_off);
+ size = lfsck_lovea_size(&rec->lor_layout, ea_off);
if (ea_buf->lb_len < size) {
lu_buf_realloc(ea_buf, size);
if (ea_buf->lb_buf == NULL)
do {
snprintf(name, NAME_MAX, DFID"%s-%s-%d", PFID(pfid), infix,
type, idx++);
- rc = dt_lookup(env, lfsck->li_lpf_obj, (struct dt_rec *)tfid,
- (const struct dt_key *)name);
+ rc = dt_lookup_dir(env, lfsck->li_lpf_obj, name, tfid);
if (rc != 0 && rc != -ENOENT)
GOTO(log, rc);
} while (rc == 0);
/* Re-check whether the name conflict with othrs after taken
* the ldlm lock. */
- rc = dt_lookup(env, lfsck->li_lpf_obj, (struct dt_rec *)tfid,
- (const struct dt_key *)name);
+ rc = dt_lookup_dir(env, lfsck->li_lpf_obj, name, tfid);
if (unlikely(rc == 0)) {
lfsck_unlock(llh);
goto again;
GOTO(unlock, rc);
/* The 1st transaction. */
- th = dt_trans_create(env, dev);
+ th = lfsck_trans_create(env, dev, lfsck);
if (IS_ERR(th))
GOTO(unlock, rc = PTR_ERR(th));
dt_write_lock(env, pobj, 0);
rc = dt_create(env, pobj, la, NULL, dof, th);
if (rc == 0)
- rc = lfsck_layout_update_lovea(env, lfsck, th, ol, pobj, cfid,
+ rc = lfsck_layout_update_lovea(env, lfsck, th, rec, pobj, cfid,
&lov_buf, LU_XATTR_CREATE, ltd->ltd_index, ea_off);
dt_write_unlock(env, pobj);
if (rc < 0)
GOTO(stop, rc);
rc = dt_insert(env, lpf, (const struct dt_rec *)dtrec,
- (const struct dt_key *)name, th, 1);
+ (const struct dt_key *)name, th);
if (rc != 0)
GOTO(stop, rc);
th = NULL;
/* The 2nd transaction. */
- rc = __lfsck_layout_update_pfid(env, cobj, pfid, ol, ea_off);
+ rc = __lfsck_layout_update_pfid(env, com, cobj, pfid,
+ &rec->lor_layout, ea_off,
+ rec->lor_layout_version,
+ rec->lor_range);
}
GOTO(stop, rc);
memset(policy, 0, sizeof(*policy));
policy->l_extent.end = OBD_OBJECT_EOF;
ost_fid_build_resid(fid, resid);
- rc = ldlm_cli_enqueue_local(lfsck->li_namespace, resid, LDLM_EXTENT,
- policy, LCK_EX, &flags, ldlm_blocking_ast,
- ldlm_completion_ast, NULL, NULL, 0,
- LVB_T_NONE, NULL, &lh);
+ rc = ldlm_cli_enqueue_local(env, lfsck->li_namespace, resid,
+ LDLM_EXTENT, policy, LCK_EX, &flags,
+ ldlm_blocking_ast, ldlm_completion_ast,
+ NULL, NULL, 0, LVB_T_NONE, NULL, &lh);
if (rc != ELDLM_OK)
GOTO(put, rc = -EIO);
if (la->la_ctime != 0)
GOTO(unlock, rc = -ETXTBSY);
- th = dt_trans_create(env, dev);
+ th = lfsck_trans_create(env, dev, lfsck);
if (IS_ERR(th))
GOTO(unlock, rc = PTR_ERR(th));
static int lfsck_layout_conflict_create(const struct lu_env *env,
struct lfsck_component *com,
struct lfsck_tgt_desc *ltd,
- struct lu_orphan_rec_v2 *rec,
+ struct lu_orphan_rec_v3 *rec,
struct dt_object *parent,
struct lu_fid *cfid,
struct lu_buf *ea_buf,
if (rc != 0 && rc != -ENOENT)
GOTO(unlock, rc);
- th = dt_trans_create(env, dev);
+ if (lfsck_is_dryrun(com->lc_lfsck))
+ GOTO(unlock, rc = 0);
+
+ th = lfsck_trans_create(env, dev, com->lc_lfsck);
if (IS_ERR(th))
GOTO(unlock, rc = PTR_ERR(th));
static int lfsck_layout_recreate_lovea(const struct lu_env *env,
struct lfsck_component *com,
struct lfsck_tgt_desc *ltd,
- struct lu_orphan_rec_v2 *rec,
+ struct lu_orphan_rec_v3 *rec,
struct dt_object *parent,
struct lu_fid *cfid,
__u32 ost_idx, __u32 ea_off)
int rc = 0;
int rc1;
int i;
- __u16 count;
- bool locked = false;
+ int pos = 0;
+ __u16 count;
+ bool locked = false;
+ bool new_mirror = true;
ENTRY;
+ if (lfsck_is_dryrun(lfsck))
+ RETURN(0);
+
rc = lfsck_ibits_lock(env, lfsck, parent, &lh,
MDS_INODELOCK_LAYOUT | MDS_INODELOCK_XATTR,
LCK_EX);
CDEBUG(D_LFSCK, "%s: layout LFSCK assistant failed to recreate "
"LOV EA for "DFID": parent "DFID", OST-index %u, "
"stripe-index %u, comp_id %u, comp_start %llu, "
- "comp_end %llu: rc = %d\n",
+ "comp_end %llu, layout version %u, range %u: rc = %d\n",
lfsck_lfsck2name(lfsck), PFID(cfid),
PFID(lfsck_dto2fid(parent)), ost_idx, ea_off,
ol->ol_comp_id, ol->ol_comp_start,
- ol->ol_comp_end, rc);
+ ol->ol_comp_end, rec->lor_layout_version,
+ rec->lor_range, rc);
RETURN(rc);
}
}
if (!(bk->lb_param & LPF_DRYRUN)) {
- handle = dt_trans_create(env, dt);
+ handle = lfsck_trans_create(env, dt, lfsck);
if (IS_ERR(handle))
GOTO(unlock_layout, rc = PTR_ERR(handle));
LASSERT(buf->lb_len >= lovea_size);
- rc = lfsck_layout_update_lovea(env, lfsck, handle, ol, parent,
+ rc = lfsck_layout_update_lovea(env, lfsck, handle, rec, parent,
cfid, buf, fl, ost_idx, ea_off);
GOTO(unlock_parent, rc);
}
lmm = buf->lb_buf;
- rc1 = lfsck_layout_verify_header(parent, lmm);
+ rc1 = lfsck_layout_verify_header(parent, lmm, lovea_size);
/* If the LOV EA crashed, the rebuild it. */
if (rc1 == -EINVAL) {
LASSERT(buf->lb_len >= lovea_size);
- rc = lfsck_layout_update_lovea(env, lfsck, handle, ol, parent,
+ rc = lfsck_layout_update_lovea(env, lfsck, handle, rec, parent,
cfid, buf, fl, ost_idx, ea_off);
GOTO(unlock_parent, rc);
}
/* For other unknown magic/pattern, keep the current LOV EA. */
- if (rc1 != 0)
+ if (rc1 == -EOPNOTSUPP)
+ GOTO(unlock_parent, rc1 = 0);
+
+ if (rc1)
GOTO(unlock_parent, rc = rc1);
magic = le32_to_cpu(lmm->lmm_magic);
- if (magic == LOV_MAGIC_COMP_V1) {
+ if (magic == LOV_MAGIC_COMP_V1 || magic == LOV_MAGIC_SEL) {
__u64 start;
__u64 end;
+ __u16 mirror_id0 = mirror_id_of(ol->ol_comp_id);
+ __u16 mirror_id1;
+
+ if (bk->lb_param & LPF_DRYRUN)
+ GOTO(unlock_parent, rc = 1);
lcm = buf->lb_buf;
count = le16_to_cpu(lcm->lcm_entry_count);
- for (i = 0; i < count; i++) {
+ for (i = 0; i < count; pos = ++i) {
lcme = &lcm->lcm_entries[i];
start = le64_to_cpu(lcme->lcme_extent.e_start);
end = le64_to_cpu(lcme->lcme_extent.e_end);
+ mirror_id1 = mirror_id_of(le32_to_cpu(lcme->lcme_id));
+ if (mirror_id0 > mirror_id1)
+ continue;
+
+ if (mirror_id0 < mirror_id1)
+ break;
+
+ new_mirror = false;
if (end <= ol->ol_comp_start)
continue;
goto further;
}
- rc = lfsck_layout_add_comp_comp(env, lfsck, handle, ol, parent,
- cfid, buf, ost_idx, ea_off, i);
+ rc = lfsck_layout_add_comp(env, lfsck, handle, rec, parent,
+ cfid, buf, ost_idx, ea_off, pos, new_mirror);
GOTO(unlock_parent, rc);
}
goto again;
}
- if (lcme && !(flags & LCME_FL_INIT))
+ if (lcm) {
+ LASSERT(lcme);
+
lcme->lcme_flags = cpu_to_le32(flags | LCME_FL_INIT);
+ lfsck_layout_update_lcm(lcm, lcme,
+ rec->lor_layout_version,
+ rec->lor_range);
+ }
rc = lfsck_layout_extend_v1v3_lovea(env, lfsck, handle, ol,
parent, cfid, buf, ost_idx, ea_off);
GOTO(unlock_parent, rc = -EINVAL);
}
- le32_add_cpu(&lcm->lcm_layout_gen, 1);
lovea_size = le32_to_cpu(lcm->lcm_size);
- if (!(flags & LCME_FL_INIT))
- lcme->lcme_flags = cpu_to_le32(flags |
- LCME_FL_INIT);
+ lcme->lcme_flags = cpu_to_le32(flags |
+ LCME_FL_INIT);
+ lfsck_layout_update_lcm(lcm, lcme,
+ rec->lor_layout_version,
+ rec->lor_range);
}
LASSERTF(buf->lb_len >= lovea_size,
lfsck_ibits_unlock(&lh, LCK_EX);
rc = lfsck_layout_update_pfid(env, com, parent,
cfid, ltd->ltd_tgt,
- ol, i);
+ rec, i);
CDEBUG(D_LFSCK, "%s layout LFSCK assistant "
"updated OST-object's pfid for "DFID
static int lfsck_layout_scan_orphan_one(const struct lu_env *env,
struct lfsck_component *com,
struct lfsck_tgt_desc *ltd,
- struct lu_orphan_rec_v2 *rec,
+ struct lu_orphan_rec_v3 *rec,
struct lu_fid *cfid)
{
struct lfsck_layout *lo = com->lc_file_ram;
do {
struct dt_key *key;
- struct lu_orphan_rec_v2 *rec = &info->lti_rec;
+ struct lu_orphan_rec_v3 *rec = &info->lti_rec;
if (CFS_FAIL_TIMEOUT(OBD_FAIL_LFSCK_DELAY3, cfs_fail_val) &&
unlikely(!thread_is_running(&lfsck->li_thread)))
return rc > 0 ? 0 : rc;
}
-static int lfsck_lmm2layout(struct lov_mds_md_v1 *lmm, struct ost_layout *ol,
+static int lfsck_lov2layout(struct lov_mds_md_v1 *lmm, struct filter_fid *ff,
__u32 comp_id)
{
+ struct ost_layout *ol = &ff->ff_layout;
__u32 magic = le32_to_cpu(lmm->lmm_magic);
int rc = 0;
ENTRY;
ol->ol_comp_start = 0;
ol->ol_comp_end = 0;
ol->ol_comp_id = 0;
- } else if (magic == LOV_MAGIC_COMP_V1) {
+ ff->ff_layout_version = 0;
+ ff->ff_range = 0;
+ } else if (magic == LOV_MAGIC_COMP_V1 || magic == LOV_MAGIC_SEL) {
struct lov_comp_md_v1 *lcm = (struct lov_comp_md_v1 *)lmm;
struct lov_comp_md_entry_v1 *lcme = NULL;
__u16 count = le16_to_cpu(lcm->lcm_entry_count);
ol->ol_comp_start = le64_to_cpu(lcme->lcme_extent.e_start);
ol->ol_comp_end = le64_to_cpu(lcme->lcme_extent.e_end);
ol->ol_comp_id = le32_to_cpu(lcme->lcme_id);
+ ff->ff_layout_version = le32_to_cpu(lcme->lcme_layout_gen);
+ ff->ff_range = 0;
} else {
GOTO(out, rc = -EINVAL);
}
{
struct lfsck_thread_info *info = lfsck_env_info(env);
struct filter_fid *ff = &info->lti_ff;
- struct ost_layout *ol = &ff->ff_layout;
struct dt_object_format *dof = &info->lti_dof;
struct lu_attr *la = &info->lti_la;
struct lfsck_instance *lfsck = com->lc_lfsck;
ff->ff_parent.f_stripe_idx = cpu_to_le32(ea_off);
rc = lfsck_layout_get_lovea(env, parent, tbuf);
- if (rc < 0)
+ if (unlikely(rc == -ENODATA))
+ rc = 0;
+ if (rc <= 0)
GOTO(unlock1, rc);
- rc = lfsck_lmm2layout(tbuf->lb_buf, ol, comp_id);
+ rc = lfsck_lov2layout(tbuf->lb_buf, ff, comp_id);
if (rc)
GOTO(unlock1, rc);
buf = lfsck_buf_get(env, ff, sizeof(struct filter_fid));
- handle = dt_trans_create(env, dev);
+ handle = lfsck_trans_create(env, dev, lfsck);
if (IS_ERR(handle))
GOTO(unlock1, rc = PTR_ERR(handle));
int idx2;
rc = lfsck_layout_get_lovea(env, parent, lovea);
+ if (unlikely(rc == -ENODATA))
+ rc = 0;
if (rc <= 0)
GOTO(unlock2, rc);
lmm = lovea->lb_buf;
magic = le32_to_cpu(lmm->lmm_magic);
- if (magic == LOV_MAGIC_COMP_V1) {
+ if (magic == LOV_MAGIC_COMP_V1 || magic == LOV_MAGIC_SEL) {
struct lov_comp_md_v1 *lcm = buf->lb_buf;
struct lov_comp_md_entry_v1 *lcme;
__u16 count = le16_to_cpu(lcm->lcm_entry_count);
{
struct lfsck_thread_info *info = lfsck_env_info(env);
struct filter_fid *ff = &info->lti_ff;
- struct ost_layout *ol = &ff->ff_layout;
struct dt_object *child = llr->llr_child;
struct dt_device *dev = lfsck_obj2dev(child);
const struct lu_fid *tfid = lu_object_fid(&parent->do_lu);
ff->ff_parent.f_stripe_idx = cpu_to_le32(llr->llr_lov_idx);
rc = lfsck_layout_get_lovea(env, parent, tbuf);
- if (rc < 0)
+ if (unlikely(rc == -ENODATA))
+ rc = 0;
+ if (rc <= 0)
GOTO(unlock1, rc);
- rc = lfsck_lmm2layout(tbuf->lb_buf, ol, llr->llr_comp_id);
+ rc = lfsck_lov2layout(tbuf->lb_buf, ff, llr->llr_comp_id);
if (rc)
GOTO(unlock1, rc);
buf = lfsck_buf_get(env, ff, sizeof(*ff));
- handle = dt_trans_create(env, dev);
+ handle = lfsck_trans_create(env, dev, com->lc_lfsck);
if (IS_ERR(handle))
GOTO(unlock1, rc = PTR_ERR(handle));
memset(dof, 0, sizeof(*dof));
dev = lfsck_obj2dev(child);
- handle = dt_trans_create(env, dev);
+ handle = lfsck_trans_create(env, dev, lfsck);
if (IS_ERR(handle))
GOTO(log, rc = PTR_ERR(handle));
if (IS_ERR(parent))
GOTO(log, rc = PTR_ERR(parent));
- handle = dt_trans_create(env, dev);
+ handle = lfsck_trans_create(env, dev, lfsck);
if (IS_ERR(handle))
GOTO(log, rc = PTR_ERR(handle));
GOTO(unlock, rc = 0);
rc = lfsck_layout_get_lovea(env, parent, buf);
- if (unlikely(!rc || rc == -ENODATA))
- GOTO(unlock, rc = 0);
+ if (unlikely(rc == -ENODATA))
+ rc = 0;
+ if (rc <= 0)
+ GOTO(unlock, rc);
lmm = buf->lb_buf;
magic = le32_to_cpu(lmm->lmm_magic);
- if (magic == LOV_MAGIC_COMP_V1) {
+ if (magic == LOV_MAGIC_COMP_V1 || magic == LOV_MAGIC_SEL) {
struct lov_comp_md_v1 *lcm = buf->lb_buf;
struct lov_comp_md_entry_v1 *lcme;
__u16 count = le16_to_cpu(lcm->lcm_entry_count);
struct dt_device *dev = lfsck_obj2dev(child);
struct thandle *handle;
int rc;
+ dt_obj_version_t version;
ENTRY;
tla->la_uid = pla->la_uid;
tla->la_gid = pla->la_gid;
tla->la_valid = LA_UID | LA_GID;
- handle = dt_trans_create(env, dev);
+ handle = lfsck_trans_create(env, dev, com->lc_lfsck);
if (IS_ERR(handle))
GOTO(log, rc = PTR_ERR(handle));
if (unlikely(lfsck_is_dead_obj(parent)))
GOTO(unlock, rc = 1);
+ version = dt_version_get(env, child);
+ if (version == -EOPNOTSUPP)
+ version = 0;
+
/* Get the latest parent's owner. */
rc = dt_attr_get(env, parent, pla);
if (rc != 0)
GOTO(unlock, rc);
/* Some others chown/chgrp during the LFSCK, needs to do nothing. */
- if (unlikely(tla->la_uid != pla->la_uid ||
- tla->la_gid != pla->la_gid))
+ if (unlikely((!version && tla->la_ctime == 0) ||
+ tla->la_uid != pla->la_uid || tla->la_gid != pla->la_gid))
rc = 1;
else
rc = dt_attr_set(env, child, tla, handle);
return rc;
}
+/* Log one "unmatched pair" diagnostic for the OST-object back-pointer
+ * check: @lso is the MDT-side assistant object, @pfid the parent FID
+ * recorded in the OST-object's XATTR_NAME_FID EA, @cfid the OST-object's
+ * own FID, @msg the human-readable reason.
+ * NOTE(review): the macro body ends with ';', so call sites written as
+ * CDEBUG_UNMATCHED_PAIR(...); expand to a double semicolon — harmless
+ * in the current brace-enclosed uses, but worth confirming. */
+#define CDEBUG_UNMATCHED_PAIR(lfsck, lso, pfid, cfid, msg) \
+ CDEBUG(D_LFSCK, "%s:("DFID"|"DFID")/"DFID":XATTR %s: %s\n", \
+ lfsck_lfsck2name(lfsck), PFID(&lso->lso_fid), PFID(pfid), \
+ PFID(cfid), XATTR_NAME_FID, msg);
+
/* Check whether the OST-object correctly back points to the
* MDT-object (@parent) via the XATTR_NAME_FID xattr (@pfid). */
static int lfsck_layout_check_parent(const struct lu_env *env,
struct lov_mds_md_v1 *lmm;
struct lov_ost_data_v1 *objs;
struct lustre_handle lh = { 0 };
+ struct lfsck_instance *lfsck = com->lc_lfsck;
int rc;
int i;
__u32 magic;
idx = pfid->f_stripe_idx;
pfid->f_ver = 0;
- if (unlikely(!fid_is_sane(pfid)))
+ if (unlikely(!fid_is_sane(pfid))) {
+ CDEBUG_UNMATCHED_PAIR(lfsck, lso, pfid, cfid,
+ "the parent FID is invalid");
+
RETURN(LLIT_UNMATCHED_PAIR);
+ }
if (lu_fid_eq(pfid, &lso->lso_fid)) {
if (likely(llr->llr_lov_idx == idx))
RETURN(0);
+ CDEBUG_UNMATCHED_PAIR(lfsck, lso, pfid, cfid,
+ "the stripe index is unmatched");
+
RETURN(LLIT_UNMATCHED_PAIR);
}
if (IS_ERR(tobj))
RETURN(PTR_ERR(tobj));
- if (dt_object_exists(tobj) == 0 || lfsck_is_dead_obj(tobj) ||
- !S_ISREG(lfsck_object_type(tobj)))
+ if (dt_object_exists(tobj) == 0) {
+ CDEBUG_UNMATCHED_PAIR(lfsck, lso, pfid, cfid,
+ "the parent is nonexistent");
+
+ GOTO(out, rc = LLIT_UNMATCHED_PAIR);
+ }
+
+ if (lfsck_is_dead_obj(tobj)) {
+ CDEBUG_UNMATCHED_PAIR(lfsck, lso, pfid, cfid,
+ "the parent is dead object");
+
+ GOTO(out, rc = LLIT_UNMATCHED_PAIR);
+ }
+
+ if (!S_ISREG(lfsck_object_type(tobj))) {
+ CDEBUG_UNMATCHED_PAIR(lfsck, lso, pfid, cfid,
+ "the parent is not a regular file");
+
GOTO(out, rc = LLIT_UNMATCHED_PAIR);
+ }
/* Load the tobj's layout EA, in spite of it is a local MDT-object or
* remote one on another MDT. Then check whether the given OST-object
* is in such layout. If yes, it is multiple referenced, otherwise it
* is unmatched referenced case. */
rc = lfsck_layout_get_lovea(env, tobj, buf);
- if (rc == 0 || rc == -ENOENT)
+ if (rc == 0 || rc == -ENODATA || rc == -ENOENT) {
+ CDEBUG_UNMATCHED_PAIR(lfsck, lso, pfid, cfid,
+ "the parent has no stripe data");
+
GOTO(out, rc = LLIT_UNMATCHED_PAIR);
+ }
+
+ if (unlikely(rc == -EOPNOTSUPP))
+ GOTO(out, rc = LLIT_NONE);
if (rc < 0)
GOTO(out, rc);
lmm = buf->lb_buf;
magic = le32_to_cpu(lmm->lmm_magic);
- if (magic == LOV_MAGIC_COMP_V1) {
+ if (magic == LOV_MAGIC_COMP_V1 || magic == LOV_MAGIC_SEL) {
struct lov_comp_md_v1 *lcm = buf->lb_buf;
struct lov_comp_md_entry_v1 *lcme;
- if (ff->ff_layout.ol_comp_id == 0)
+ if (ff->ff_layout.ol_comp_id == 0) {
+ CDEBUG_UNMATCHED_PAIR(lfsck, lso, pfid, cfid,
+ "the parent has incorrect comp_id");
+
GOTO(out, rc = LLIT_UNMATCHED_PAIR);
+ }
count = le16_to_cpu(lcm->lcm_entry_count);
for (i = 0; i < count; i++) {
le32_to_cpu(lcme->lcme_offset);
magic = le32_to_cpu(lmm->lmm_magic);
if (!(le32_to_cpu(lcme->lcme_flags) &
- LCME_FL_INIT))
+ LCME_FL_INIT)) {
+ CDEBUG_UNMATCHED_PAIR(lfsck, lso, pfid,
+ cfid,
+ "the parent has uninitialized component");
+
GOTO(out, rc = LLIT_UNMATCHED_PAIR);
+ }
goto further;
}
}
+ CDEBUG_UNMATCHED_PAIR(lfsck, lso, pfid, cfid,
+ "the parent has no matched comp_id");
+
GOTO(out, rc = LLIT_UNMATCHED_PAIR);
}
* after taken the lock. */
if (!dt_object_remote(tobj)) {
if (dt_object_exists(tobj) == 0 ||
- lfsck_is_dead_obj(tobj))
+ lfsck_is_dead_obj(tobj)) {
+ CDEBUG_UNMATCHED_PAIR(lfsck, lso, pfid,
+ cfid,
+ "the parent doesn't exist anymore after lock");
+
rc = LLIT_UNMATCHED_PAIR;
- else
+ } else {
rc = LLIT_MULTIPLE_REFERENCED;
+ }
GOTO(unlock, rc);
}
* has been been removed or not. */
rc = dt_xattr_get(env, tobj, &LU_BUF_NULL,
XATTR_NAME_DUMMY);
- if (unlikely(rc == -ENOENT || rc >= 0))
+ if (unlikely(rc == -ENOENT || rc >= 0)) {
+ CDEBUG_UNMATCHED_PAIR(lfsck, lso, pfid, cfid,
+ "the parent is remote object and nonexistent after lock");
+
rc = LLIT_UNMATCHED_PAIR;
- else if (rc == -ENODATA)
+ } else if (rc == -ENODATA) {
rc = LLIT_MULTIPLE_REFERENCED;
+ }
GOTO(unlock, rc);
}
}
+ CDEBUG_UNMATCHED_PAIR(lfsck, lso, pfid, cfid,
+ "the parent has no matched stripe");
+
GOTO(out, rc = LLIT_UNMATCHED_PAIR);
unlock:
struct lfsck_assistant_req *lar)
{
struct lfsck_layout_req *llr =
- container_of0(lar, struct lfsck_layout_req, llr_lar);
+ container_of(lar, struct lfsck_layout_req, llr_lar);
struct lfsck_assistant_object *lso = lar->lar_parent;
struct lfsck_layout *lo = com->lc_file_ram;
struct lfsck_thread_info *info = lfsck_env_info(env);
if (lso->lso_dead)
RETURN(0);
- CFS_FAIL_TIMEOUT(OBD_FAIL_LFSCK_ASSISTANT_DIRECT, cfs_fail_val);
+ CFS_FAIL_TIMEOUT(OBD_FAIL_LFSCK_ENGINE_DELAY, cfs_fail_val);
rc = dt_attr_get(env, child, cla);
if (rc == -ENOENT) {
lfsck_buf_init(&buf, ff, sizeof(*ff));
rc = dt_xattr_get(env, child, &buf, XATTR_NAME_FID);
if (unlikely(rc > 0 && rc < sizeof(struct lu_fid))) {
+ CDEBUG(D_LFSCK, "%s:"DFID"/"DFID": "
+ "the child object's %s is corrupted\n",
+ lfsck_lfsck2name(lfsck), PFID(&lso->lso_fid),
+ PFID(lu_object_fid(&child->do_lu)),
+ XATTR_NAME_FID);
+
type = LLIT_UNMATCHED_PAIR;
goto repair;
}
if (rc < 0) {
struct lfsck_assistant_data *lad = com->lc_data;
- if (unlikely(lad->lad_exit)) {
+ if (unlikely(test_bit(LAD_EXIT, &lad->lad_flags))) {
rc = 0;
} else if (rc == -ENOTCONN || rc == -ESHUTDOWN ||
rc == -ETIMEDOUT || rc == -EHOSTDOWN ||
if (rc != 0 && bk->lb_param & LPF_FAILOUT)
RETURN(rc);
- if (unlikely(lad->lad_exit ||
+ if (unlikely(test_bit(LAD_EXIT, &lad->lad_flags) ||
!thread_is_running(&lfsck->li_thread)))
RETURN(0);
spin_lock(<ds->ltd_lock);
void *args, int rc)
{
struct lfsck_layout_slave_async_args *llsaa = args;
- struct obd_export *exp = llsaa->llsaa_exp;
- struct lfsck_component *com = llsaa->llsaa_com;
- struct lfsck_layout_slave_target *llst = llsaa->llsaa_llst;
- struct lfsck_layout_slave_data *llsd = com->lc_data;
- struct lfsck_reply *lr = NULL;
- bool done = false;
+ struct obd_export *exp = llsaa->llsaa_exp;
+ struct lfsck_component *com = llsaa->llsaa_com;
+ struct lfsck_layout_slave_target *llst = llsaa->llsaa_llst;
+ struct lfsck_layout_slave_data *llsd = com->lc_data;
+ struct lfsck_reply *lr = NULL;
+ bool done = false;
if (rc != 0) {
/* It is probably caused by network trouble, or target crash,
*tmp = *lr;
ptlrpc_request_set_replen(req);
- llsaa = ptlrpc_req_async_args(req);
+ llsaa = ptlrpc_req_async_args(llsaa, req);
llsaa->llsaa_exp = exp;
llsaa->llsaa_com = lfsck_component_get(com);
llsaa->llsaa_llst = llst;
req->rq_interpret_reply = lfsck_layout_slave_async_interpret;
req->rq_allow_intr = 1;
+ req->rq_no_delay = 1;
ptlrpc_set_add_req(set, req);
RETURN(0);
*tmp = *lr;
ptlrpc_request_set_replen(req);
req->rq_allow_intr = 1;
+ req->rq_no_delay = 1;
ptlrpc_set_add_req(set, req);
RETURN(0);
}
spin_unlock(&llsd->llsd_lock);
- rc = ptlrpc_set_wait(set);
+ rc = ptlrpc_set_wait(env, set);
ptlrpc_set_destroy(set);
GOTO(log, rc = (rc1 != 0 ? rc1 : rc));
}
spin_unlock(&llsd->llsd_lock);
- ptlrpc_set_wait(set);
+ ptlrpc_set_wait(env, set);
ptlrpc_set_destroy(set);
RETURN_EXIT;
if (rc < 0)
GOTO(unlock, rc);
- if (rc == 0)
- GOTO(unlock, rc = -ENODATA);
-
lmm = buf->lb_buf;
magic = le32_to_cpu(lmm->lmm_magic);
- if (magic == LOV_MAGIC_COMP_V1) {
+ if (magic == LOV_MAGIC_COMP_V1 || magic == LOV_MAGIC_SEL) {
struct lov_comp_md_v1 *lcm = buf->lb_buf;
struct lov_comp_md_entry_v1 *lcme;
lfsck_is_dead_obj(obj)))
GOTO(unlock, rc = 0);
- rc = __lfsck_layout_update_pfid(env, obj, &lrl->lrl_ff_client.ff_parent,
+ rc = __lfsck_layout_update_pfid(env, com, obj,
+ &lrl->lrl_ff_client.ff_parent,
&lrl->lrl_ff_client.ff_layout,
+ lrl->lrl_ff_client.ff_layout_version,
+ lrl->lrl_ff_client.ff_range,
lrl->lrl_ff_client.ff_parent.f_ver);
GOTO(unlock, rc);
if (com->lc_lfsck->li_master) {
struct lfsck_assistant_data *lad = com->lc_data;
- lad->lad_incomplete = 0;
+ clear_bit(LAD_INCOMPLETE, &lad->lad_flags);
CFS_RESET_BITMAP(lad->lad_bitmap);
}
if (rc == 0 && start != NULL && start->ls_flags & LPF_OST_ORPHAN) {
LASSERT(!llsd->llsd_rbtree_valid);
- write_lock(&llsd->llsd_rb_lock);
+ down_write(&llsd->llsd_rb_rwsem);
rc = lfsck_rbtree_setup(env, com);
- write_unlock(&llsd->llsd_rb_lock);
+ up_write(&llsd->llsd_rb_rwsem);
}
CDEBUG(D_LFSCK, "%s: layout LFSCK slave prep done, start pos ["
struct dt_object *parent,
struct lov_mds_md_v1 *lmm, __u32 comp_id)
{
- struct lfsck_thread_info *info = lfsck_env_info(env);
+ struct lfsck_thread_info *info = lfsck_env_info(env);
struct lfsck_instance *lfsck = com->lc_lfsck;
struct lfsck_bookmark *bk = &lfsck->li_bookmark_ram;
struct lfsck_layout *lo = com->lc_file_ram;
struct lfsck_tgt_descs *ltds = &lfsck->li_ost_descs;
struct ptlrpc_thread *mthread = &lfsck->li_thread;
struct ptlrpc_thread *athread = &lad->lad_thread;
- struct l_wait_info lwi = { 0 };
struct lu_buf buf;
int rc = 0;
int i;
if (unlikely(lovea_slot_is_dummy(objs)))
continue;
- l_wait_event(mthread->t_ctl_waitq,
- lad->lad_prefetched < bk->lb_async_windows ||
- !thread_is_running(mthread) ||
- thread_is_stopped(athread),
- &lwi);
+ wait_event_idle(mthread->t_ctl_waitq,
+ lad->lad_prefetched < bk->lb_async_windows ||
+ !thread_is_running(mthread) ||
+ thread_is_stopped(athread));
if (unlikely(!thread_is_running(mthread)) ||
thread_is_stopped(athread))
goto next;
}
- if (!OBD_FAIL_CHECK(OBD_FAIL_LFSCK_ASSISTANT_DIRECT)) {
- rc = dt_declare_attr_get(env, cobj);
- if (rc != 0)
- goto next;
+ rc = dt_declare_attr_get(env, cobj);
+ if (rc)
+ goto next;
- rc = dt_declare_xattr_get(env, cobj, &buf,
- XATTR_NAME_FID);
- if (rc != 0)
- goto next;
- }
+ rc = dt_declare_xattr_get(env, cobj, &buf, XATTR_NAME_FID);
+ if (rc)
+ goto next;
if (lso == NULL) {
struct lu_attr *attr = &info->lti_la;
lad->lad_prefetched++;
spin_unlock(&lad->lad_lock);
if (wakeup)
- wake_up_all(&athread->t_ctl_waitq);
+ wake_up(&athread->t_ctl_waitq);
next:
down_write(&com->lc_sem);
GOTO(out, rc = 0);
rc = lfsck_layout_get_lovea(env, obj, buf);
- if (rc <= 0)
+ if (rc == -EINVAL || rc == -ENODATA || rc == -EOPNOTSUPP)
/* Skip bad lov EA during the 1st cycle scanning, and
* try to recover it via orphan in the 2nd scanning. */
- GOTO(out, rc = (rc == -EINVAL ? 0 : rc));
+ rc = 0;
+ if (rc <= 0)
+ GOTO(out, rc);
size = rc;
lmm = buf->lb_buf;
magic = le32_to_cpu(lmm->lmm_magic);
- if (magic == LOV_MAGIC_COMP_V1) {
+ if (magic == LOV_MAGIC_COMP_V1 || magic == LOV_MAGIC_SEL) {
+ struct lov_mds_md_v1 *v1;
int i;
lcm = buf->lb_buf;
count = le16_to_cpu(lcm->lcm_entry_count);
for (i = 0; i < count; i++) {
lcme = &lcm->lcm_entries[i];
- lmm = buf->lb_buf + le32_to_cpu(lcme->lcme_offset);
- if (memcmp(oi, &lmm->lmm_oi, sizeof(*oi)) != 0)
+ v1 = buf->lb_buf + le32_to_cpu(lcme->lcme_offset);
+ if (memcmp(oi, &v1->lmm_oi, sizeof(*oi)) != 0)
goto fix;
}
if (rc != 0)
GOTO(out, rc);
- handle = dt_trans_create(env, dev);
+ handle = lfsck_trans_create(env, dev, lfsck);
if (IS_ERR(handle))
GOTO(out, rc = PTR_ERR(handle));
- lfsck_buf_init(&ea_buf, lmm, size);
+ lfsck_buf_init(&ea_buf, buf->lb_buf, size);
rc = dt_declare_xattr_set(env, obj, &ea_buf, XATTR_NAME_LOV,
LU_XATTR_REPLACE, handle);
if (rc != 0)
goto again;
}
- if (magic == LOV_MAGIC_COMP_V1) {
+ if (magic == LOV_MAGIC_COMP_V1 || magic == LOV_MAGIC_SEL) {
+ struct lov_mds_md_v1 *v1;
int i;
for (i = 0; i < count; i++) {
lcme = &lcm->lcm_entries[i];
- lmm = buf->lb_buf + le32_to_cpu(lcme->lcme_offset);
- lmm->lmm_oi = *oi;
+ v1 = buf->lb_buf + le32_to_cpu(lcme->lcme_offset);
+ v1->lmm_oi = *oi;
}
} else {
lmm->lmm_oi = *oi;
PFID(lfsck_dto2fid(obj)), rc);
if (stripe) {
- if (magic == LOV_MAGIC_COMP_V1) {
+ if (magic == LOV_MAGIC_COMP_V1 || magic == LOV_MAGIC_SEL) {
int i;
for (i = 0; i < count; i++) {
if (OBD_FAIL_CHECK(OBD_FAIL_LFSCK_DELAY5) &&
cfs_fail_val == lfsck_dev_idx(lfsck)) {
- struct l_wait_info lwi = LWI_TIMEOUT(cfs_time_seconds(1),
- NULL, NULL);
struct ptlrpc_thread *thread = &lfsck->li_thread;
- l_wait_event(thread->t_ctl_waitq,
- !thread_is_running(thread),
- &lwi);
+ wait_event_idle_timeout(thread->t_ctl_waitq,
+ !thread_is_running(thread),
+ cfs_time_seconds(1));
}
lfsck_rbtree_update_bitmap(env, com, fid, false);
"run_time_phase2: %lld seconds\n"
"average_speed_phase1: %llu items/sec\n"
"average_speed_phase2: N/A\n"
- "real-time_speed_phase1: %llu items/sec\n"
- "real-time_speed_phase2: N/A\n",
+ "real_time_speed_phase1: %llu items/sec\n"
+ "real_time_speed_phase2: N/A\n",
checked,
lo->ll_objs_checked_phase2,
rtime,
"run_time_phase2: %lld seconds\n"
"average_speed_phase1: %llu items/sec\n"
"average_speed_phase2: %llu items/sec\n"
- "real-time_speed_phase1: N/A\n"
- "real-time_speed_phase2: %llu items/sec\n"
+ "real_time_speed_phase1: N/A\n"
+ "real_time_speed_phase2: %llu items/sec\n"
"current_position: "DFID"\n",
lo->ll_objs_checked_phase1,
checked,
"run_time_phase2: %lld seconds\n"
"average_speed_phase1: %llu items/sec\n"
"average_speed_phase2: %llu objs/sec\n"
- "real-time_speed_phase1: N/A\n"
- "real-time_speed_phase2: N/A\n"
+ "real_time_speed_phase1: N/A\n"
+ "real_time_speed_phase2: N/A\n"
"current_position: N/A\n",
lo->ll_objs_checked_phase1,
lo->ll_objs_checked_phase2,
LFSCK_CHECKPOINT_INTERVAL;
while (1) {
- struct l_wait_info lwi = LWI_TIMEOUT(cfs_time_seconds(30),
- NULL, NULL);
-
rc = lfsck_layout_slave_query_master(env, com);
if (list_empty(&llsd->llsd_master_list)) {
if (unlikely(!thread_is_running(thread)))
if (rc < 0)
GOTO(done, rc);
- rc = l_wait_event(thread->t_ctl_waitq,
- !thread_is_running(thread) ||
- lo->ll_flags & LF_INCOMPLETE ||
- list_empty(&llsd->llsd_master_list),
- &lwi);
+ rc = wait_event_idle_timeout(
+ thread->t_ctl_waitq,
+ !thread_is_running(thread) ||
+ lo->ll_flags & LF_INCOMPLETE ||
+ list_empty(&llsd->llsd_master_list),
+ cfs_time_seconds(30));
if (unlikely(!thread_is_running(thread)))
GOTO(done, rc = 0);
if (lo->ll_flags & LF_INCOMPLETE)
GOTO(done, rc = 1);
- if (rc == -ETIMEDOUT)
+ if (rc == 0)
continue;
- GOTO(done, rc = (rc < 0 ? rc : 1));
+ GOTO(done, rc = 1);
}
done:
(rc > 0 && lo->ll_flags & LF_INCOMPLETE) ? 0 : rc);
lfsck_layout_slave_quit(env, com);
if (atomic_dec_and_test(&lfsck->li_double_scan_count))
- wake_up_all(&lfsck->li_thread.t_ctl_waitq);
+ wake_up(&lfsck->li_thread.t_ctl_waitq);
CDEBUG(D_LFSCK, "%s: layout LFSCK slave phase2 scan finished, "
"status %d: rc = %d\n",
stop->ls_flags = lr->lr_param & ~LPF_BROADCAST;
lfsck_stop(env, lfsck->li_bottom, stop);
} else if (lfsck_phase2_next_ready(lad)) {
- wake_up_all(&lad->lad_thread.t_ctl_waitq);
+ wake_up(&lad->lad_thread.t_ctl_waitq);
}
RETURN(0);
true);
if (llst != NULL) {
lfsck_layout_llst_put(llst);
- wake_up_all(&lfsck->li_thread.t_ctl_waitq);
+ wake_up(&lfsck->li_thread.t_ctl_waitq);
}
}
lfsck_layout_llst_put(llst);
if (list_empty(&llsd->llsd_master_list))
- wake_up_all(&lfsck->li_thread.t_ctl_waitq);
+ wake_up(&lfsck->li_thread.t_ctl_waitq);
if (lr->lr_event == LE_PEER_EXIT &&
(lfsck->li_bookmark_ram.lb_param & LPF_FAILOUT ||
RETURN(rc);
}
-static struct lfsck_operations lfsck_layout_master_ops = {
+static const struct lfsck_operations lfsck_layout_master_ops = {
.lfsck_reset = lfsck_layout_reset,
.lfsck_fail = lfsck_layout_fail,
.lfsck_checkpoint = lfsck_layout_master_checkpoint,
.lfsck_query = lfsck_layout_query,
};
-static struct lfsck_operations lfsck_layout_slave_ops = {
+static const struct lfsck_operations lfsck_layout_slave_ops = {
.lfsck_reset = lfsck_layout_reset,
.lfsck_fail = lfsck_layout_fail,
.lfsck_checkpoint = lfsck_layout_slave_checkpoint,
pos->lp_oit_cookie = llr->llr_lar.lar_parent->lso_oit_cookie - 1;
}
-struct lfsck_assistant_operations lfsck_layout_assistant_ops = {
+const struct lfsck_assistant_operations lfsck_layout_assistant_ops = {
.la_handler_p1 = lfsck_layout_assistant_handler_p1,
.la_handler_p2 = lfsck_layout_assistant_handler_p2,
.la_fill_pos = lfsck_layout_assistant_fill_pos,
INIT_LIST_HEAD(&llsd->llsd_master_list);
spin_lock_init(&llsd->llsd_lock);
llsd->llsd_rb_root = RB_ROOT;
- rwlock_init(&llsd->llsd_rb_lock);
+ init_rwsem(&llsd->llsd_rb_rwsem);
com->lc_data = llsd;
}
com->lc_file_size = sizeof(*lo);
com->lc_obj = obj;
rc = lfsck_layout_load(env, com);
- if (rc > 0)
+ if (rc > 0) {
rc = lfsck_layout_reset(env, com, true);
- else if (rc == -ENOENT)
+ } else if (rc == -ENOENT) {
rc = lfsck_layout_init(env, com);
- else if (lfsck->li_master)
+ } else if (lfsck->li_master) {
rc = lfsck_load_sub_trace_files(env, com,
&dt_lfsck_layout_dangling_features,
LFSCK_LAYOUT, false);
+ if (rc)
+ rc = lfsck_layout_reset(env, com, true);
+ }
if (rc != 0)
GOTO(out, rc);
struct lfsck_rbtree_node *loi_lrn;
struct lfsck_layout_slave_target *loi_llst;
struct lu_fid loi_key;
- struct lu_orphan_rec_v2 loi_rec;
+ struct lu_orphan_rec_v3 loi_rec;
__u64 loi_hash;
unsigned int loi_over:1;
};
}
static void lfsck_layout_destroy_orphan(const struct lu_env *env,
+ struct lfsck_instance *lfsck,
struct dt_object *obj)
{
struct dt_device *dev = lfsck_obj2dev(obj);
int rc;
ENTRY;
- handle = dt_trans_create(env, dev);
+ handle = lfsck_trans_create(env, dev, lfsck);
if (IS_ERR(handle))
RETURN_EXIT;
struct dt_object *dt,
const struct dt_rec *rec,
const struct dt_key *key,
- struct thandle *handle,
- int ignore_quota)
+ struct thandle *handle)
{
return -EOPNOTSUPP;
}
if (dev->dd_record_fid_accessed) {
/* The first iteration against the rbtree, scan the whole rbtree
* to remove the nodes which do NOT need to be handled. */
- write_lock(&llsd->llsd_rb_lock);
+ down_write(&llsd->llsd_rb_rwsem);
if (dev->dd_record_fid_accessed) {
struct rb_node *node;
struct rb_node *next;
node = next;
}
}
- write_unlock(&llsd->llsd_rb_lock);
+ up_write(&llsd->llsd_rb_rwsem);
}
/* read lock the rbtree when init, and unlock when fini */
- read_lock(&llsd->llsd_rb_lock);
+ down_read(&llsd->llsd_rb_rwsem);
it->loi_com = com;
com = NULL;
lfsck_lfsck2name(com->lc_lfsck));
llsd = com->lc_data;
- read_unlock(&llsd->llsd_rb_lock);
+ up_read(&llsd->llsd_rb_rwsem);
llst = it->loi_llst;
LASSERT(llst != NULL);
struct lu_attr *la = &info->lti_la;
struct lfsck_orphan_it *it = (struct lfsck_orphan_it *)di;
struct lu_fid *key = &it->loi_key;
- struct lu_orphan_rec_v2 *rec = &it->loi_rec;
+ struct lu_orphan_rec_v3 *rec = &it->loi_rec;
struct ost_layout *ol = &rec->lor_layout;
struct lfsck_component *com = it->loi_com;
struct lfsck_instance *lfsck = com->lc_lfsck;
* OST-object there. Destroy it now! */
if (unlikely(!(la->la_mode & S_ISUID))) {
dt_read_unlock(env, obj);
- lfsck_layout_destroy_orphan(env, obj);
+ lfsck_layout_destroy_orphan(env, lfsck, obj);
lfsck_object_put(env, obj);
pos++;
goto again1;
rec->lor_rec.lor_uid = la->la_uid;
rec->lor_rec.lor_gid = la->la_gid;
memset(ol, 0, sizeof(*ol));
+ rec->lor_layout_version = 0;
+ rec->lor_range = 0;
GOTO(out, rc = 0);
}
rec->lor_rec.lor_uid = la->la_uid;
rec->lor_rec.lor_gid = la->la_gid;
ost_layout_le_to_cpu(ol, &ff->ff_layout);
+ rec->lor_layout_version =
+ le32_to_cpu(ff->ff_layout_version & ~LU_LAYOUT_RESYNC);
+ rec->lor_range = le32_to_cpu(ff->ff_range);
CDEBUG(D_LFSCK, "%s: return orphan "DFID", PFID "DFID", owner %u:%u, "
"stripe size %u, stripe count %u, COMP id %u, COMP start %llu, "
- "COMP end %llu\n", lfsck_lfsck2name(com->lc_lfsck), PFID(key),
+ "COMP end %llu, layout version %u, range %u\n",
+ lfsck_lfsck2name(com->lc_lfsck), PFID(key),
PFID(&rec->lor_rec.lor_fid), rec->lor_rec.lor_uid,
rec->lor_rec.lor_gid, ol->ol_stripe_size, ol->ol_stripe_count,
- ol->ol_comp_id, ol->ol_comp_start, ol->ol_comp_end);
+ ol->ol_comp_id, ol->ol_comp_start, ol->ol_comp_end,
+ rec->lor_layout_version, rec->lor_range);
GOTO(out, rc = 0);
{
struct lfsck_orphan_it *it = (struct lfsck_orphan_it *)di;
- *(struct lu_orphan_rec_v2 *)rec = it->loi_rec;
+ *(struct lu_orphan_rec_v3 *)rec = it->loi_rec;
return 0;
}
return 0;
}
-const struct dt_index_operations lfsck_orphan_index_ops = {
+static const struct dt_index_operations lfsck_orphan_index_ops = {
.dio_lookup = lfsck_orphan_index_lookup,
.dio_declare_insert = lfsck_orphan_index_declare_insert,
.dio_insert = lfsck_orphan_index_insert,