+ if (mlc->mlc_opc == MD_LAYOUT_WRITE) {
+ struct layout_intent *layout = mlc->mlc_intent;
+ int write = layout->li_opc == LAYOUT_INTENT_WRITE;
+
+ LASSERT(mlc->mlc_intent != NULL);
+
+ extent = mlc->mlc_intent->li_extent;
+
+ CDEBUG(D_LAYOUT, DFID": intent to write: "DEXT"\n",
+ PFID(lod_object_fid(lo)), PEXT(&extent));
+
+ /* 1. Update extents of primary before staling */
+ rc = lod_declare_update_extents(env, lo, &extent, th, primary,
+ write);
+ if (rc < 0)
+ GOTO(out, rc);
+
+ if (mlc->mlc_intent->li_opc == LAYOUT_INTENT_TRUNC) {
+ /**
+ * trunc transfers [0, size) in the intent extent, we'd
+ * stale components overlapping [size, eof).
+ */
+ extent.e_start = extent.e_end;
+ extent.e_end = OBD_OBJECT_EOF;
+ }
+
+ /* 2. stale overlapping components */
+ rc = lod_stale_components(env, lo, primary, &extent, th);
+ if (rc < 0)
+ GOTO(out, rc);
+
+ /* 3. find the components which need instantiating.
+ * instantiate [0, mlc->mlc_intent->e_end) */
+
+ /* restore truncate intent extent */
+ if (mlc->mlc_intent->li_opc == LAYOUT_INTENT_TRUNC)
+ extent.e_end = extent.e_start;
+ extent.e_start = 0;
+
+ lod_foreach_mirror_comp(lod_comp, lo, primary) {
+ if (!lu_extent_is_overlapped(&extent,
+ &lod_comp->llc_extent))
+ break;
+
+ if (!lod_is_instantiation_needed(lod_comp))
+ continue;
+
+ CDEBUG(D_LAYOUT, "write instantiate %d / %d\n",
+ primary, lod_comp_index(lo, lod_comp));
+ info->lti_comp_idx[info->lti_count++] =
+ lod_comp_index(lo, lod_comp);
+ }
+ } else { /* MD_LAYOUT_RESYNC */
+ if (mlc->mlc_mirror_id == 0) {
+ /* normal resync */
+ lod_foreach_mirror_comp(lod_comp, lo, primary) {
+ if (!lod_comp_inited(lod_comp))
+ break;
+
+ extent.e_end = lod_comp->llc_extent.e_end;
+ }
+
+ rc = lod_prepare_resync(env, lo, &extent);
+ if (rc)
+ GOTO(out, rc);
+ } else {
+ /* mirror write, try to init its all components */
+ rc = lod_prepare_resync_mirror(env, lo,
+ mlc->mlc_mirror_id);
+ if (rc)
+ GOTO(out, rc);
+ }
+
+ /* change the file state to SYNC_PENDING */
+ lo->ldo_flr_state = LCM_FL_SYNC_PENDING;
+ }
+
+ rc = lod_declare_instantiate_components(env, lo, th);
+ if (rc)
+ GOTO(out, rc);
+
+ /* 3. transfer layout version to OST objects.
+ * transfer new layout version to OST objects so that stale writes
+ * can be denied. It also ends an era of writing by setting
+ * LU_LAYOUT_RESYNC. Normal client can never use this bit to
+ * send write RPC; only resync RPCs could do it. */
+ layout_attr->la_valid = LA_LAYOUT_VERSION;
+ layout_attr->la_layout_version = 0; /* set current version */
+ if (mlc->mlc_opc == MD_LAYOUT_RESYNC)
+ layout_attr->la_layout_version = LU_LAYOUT_RESYNC;
+ rc = lod_declare_attr_set(env, &lo->ldo_obj, layout_attr, th);
+ if (rc)
+ GOTO(out, rc);
+
+ lod_obj_inc_layout_gen(lo);
+out:
+ if (rc)
+ lod_striping_free(env, lo);
+ RETURN(rc);
+}
+
+/**
+ * Declare a layout change against a file in SYNC_PENDING state.
+ *
+ * For MD_LAYOUT_WRITE a write arrived while a resync was outstanding, so
+ * the file is pushed back to WRITE_PENDING and handled there.  For
+ * MD_LAYOUT_RESYNC_DONE the STALE flag is cleared on the components listed
+ * in \a mlc->mlc_resync_ids and, provided at least one mirror was in sync,
+ * the file returns to RDONLY state.
+ *
+ * \param[in] env	execution environment
+ * \param[in] lo	LOD object being changed
+ * \param[in] mlc	layout change descriptor
+ * \param[in] th	transaction handle
+ *
+ * \retval 0 on success
+ * \retval negative error code on failure (the in-memory striping is freed)
+ */
+static int lod_declare_update_sync_pending(const struct lu_env *env,
+		struct lod_object *lo, struct md_layout_change *mlc,
+		struct thandle *th)
+{
+	struct lod_thread_info *info = lod_env_info(env);
+	unsigned sync_components = 0;
+	unsigned resync_components = 0;
+	int i;
+	int rc;
+	ENTRY;
+
+	LASSERT(lo->ldo_flr_state == LCM_FL_SYNC_PENDING);
+	LASSERT(mlc->mlc_opc == MD_LAYOUT_RESYNC_DONE ||
+		mlc->mlc_opc == MD_LAYOUT_WRITE);
+
+	CDEBUG(D_LAYOUT, DFID ": received op %d in sync pending\n",
+	       PFID(lod_object_fid(lo)), mlc->mlc_opc);
+
+	if (mlc->mlc_opc == MD_LAYOUT_WRITE) {
+		CDEBUG(D_LAYOUT, DFID": concurrent write to sync pending\n",
+		       PFID(lod_object_fid(lo)));
+
+		lo->ldo_flr_state = LCM_FL_WRITE_PENDING;
+		return lod_declare_update_write_pending(env, lo, mlc, th);
+	}
+
+	/* MD_LAYOUT_RESYNC_DONE */
+
+	for (i = 0; i < lo->ldo_comp_cnt; i++) {
+		struct lod_layout_component *lod_comp;
+		int j;
+
+		lod_comp = &lo->ldo_comp_entries[i];
+
+		/* components without STALE were in sync all along */
+		if (!(lod_comp->llc_flags & LCME_FL_STALE)) {
+			sync_components++;
+			continue;
+		}
+
+		/* clear STALE on components covered by the resync request;
+		 * consumed ids are marked INVAL for the validity check below */
+		for (j = 0; j < mlc->mlc_resync_count; j++) {
+			if (lod_comp->llc_id != mlc->mlc_resync_ids[j])
+				continue;
+
+			mlc->mlc_resync_ids[j] = LCME_ID_INVAL;
+			lod_comp->llc_flags &= ~LCME_FL_STALE;
+			resync_components++;
+			break;
+		}
+	}
+
+	/* valid check - any id not consumed above refers to a component that
+	 * doesn't exist in the layout or was never stale */
+	for (i = 0; i < mlc->mlc_resync_count; i++) {
+		if (mlc->mlc_resync_ids[i] == LCME_ID_INVAL)
+			continue;
+
+		CDEBUG(D_LAYOUT, DFID": lcme id %u (%d / %zu) not exist "
+		       "or already synced\n", PFID(lod_object_fid(lo)),
+		       mlc->mlc_resync_ids[i], i, mlc->mlc_resync_count);
+		GOTO(out, rc = -EINVAL);
+	}
+
+	if (!sync_components || (mlc->mlc_resync_count && !resync_components)) {
+		CDEBUG(D_LAYOUT, DFID": no mirror in sync\n",
+		       PFID(lod_object_fid(lo)));
+
+		/* tend to return an error code here to prevent
+		 * the MDT from setting SoM attribute */
+		GOTO(out, rc = -EINVAL);
+	}
+
+	CDEBUG(D_LAYOUT, DFID": synced %u resynced %u/%zu components\n",
+	       PFID(lod_object_fid(lo)),
+	       sync_components, resync_components, mlc->mlc_resync_count);
+
+	lo->ldo_flr_state = LCM_FL_RDONLY;
+	lod_obj_inc_layout_gen(lo);
+
+	/* declare rewrite of the LOV EA reflecting the cleared STALE flags */
+	info->lti_buf.lb_len = lod_comp_md_size(lo, false);
+	rc = lod_sub_declare_xattr_set(env, lod_object_child(lo),
+			&info->lti_buf, XATTR_NAME_LOV, 0, th);
+	EXIT;
+
+out:
+	if (rc)
+		lod_striping_free(env, lo);
+	RETURN(rc);
+}
+
+/*
+ * Handler for one md_layout_change operation against a dt object inside
+ * transaction \a th; returns 0 on success or a negative error code.
+ */
+typedef int (*mlc_handler)(const struct lu_env *env, struct dt_object *dt,
+			   const struct md_layout_change *mlc,
+			   struct thandle *th);
+
+/**
+ * Attach the source directory's stripes after the target's own stripes
+ * for directory migration. NB: we only need to declare this here; the
+ * actual work is done inside lod_xattr_set_lmv().
+ *
+ * \param[in] env execution environment
+ * \param[in] dt target object
+ * \param[in] mlc layout change data
+ * \param[in] th transaction handle
+ *
+ * \retval 0 on success
+ * \retval negative if failed
+ */
+static int lod_dir_declare_layout_attach(const struct lu_env *env,
+					 struct dt_object *dt,
+					 const struct md_layout_change *mlc,
+					 struct thandle *th)
+{
+	struct lod_thread_info *info = lod_env_info(env);
+	struct lod_device *lod = lu2lod_dev(dt->do_lu.lo_dev);
+	struct lod_tgt_descs *ltd = &lod->lod_mdt_descs;
+	struct lod_object *lo = lod_dt_obj(dt);
+	struct dt_object *next = dt_object_child(dt);
+	struct dt_object_format *dof = &info->lti_format;
+	/* LMV describing the stripes to attach, passed in by the caller */
+	struct lmv_mds_md_v1 *lmv = mlc->mlc_buf.lb_buf;
+	struct dt_object **stripes;
+	__u32 stripe_count = le32_to_cpu(lmv->lmv_stripe_count);
+	struct lu_fid *fid = &info->lti_fid;
+	struct lod_tgt_desc *tgt;
+	struct dt_object *dto;
+	struct dt_device *tgt_dt;
+	int type = LU_SEQ_RANGE_ANY;
+	struct dt_insert_rec *rec = &info->lti_dt_rec;
+	char *stripe_name = info->lti_key;
+	struct lu_name *sname;
+	struct linkea_data ldata = { NULL };
+	struct lu_buf linkea_buf;
+	__u32 idx;
+	int i;
+	int rc;
+
+	ENTRY;
+
+	if (!lmv_is_sane(lmv))
+		RETURN(-EINVAL);
+
+	if (!dt_try_as_dir(env, dt))
+		return -ENOTDIR;
+
+	dof->dof_type = DFT_DIR;
+
+	/* enlarged stripe array: current stripes first, attached ones after */
+	OBD_ALLOC_PTR_ARRAY(stripes, (lo->ldo_dir_stripe_count + stripe_count));
+	if (!stripes)
+		RETURN(-ENOMEM);
+
+	for (i = 0; i < lo->ldo_dir_stripe_count; i++)
+		stripes[i] = lo->ldo_stripe[i];
+
+	rec->rec_type = S_IFDIR;
+
+	for (i = 0; i < stripe_count; i++) {
+		fid_le_to_cpu(fid,
+			&lmv->lmv_stripe_fids[i]);
+		/* NOTE(review): skipping an insane FID leaves a NULL hole in
+		 * stripes[]; the cleanup loop at out: stops at the first NULL
+		 * slot, so entries after a hole would not be put on error -
+		 * confirm insane FIDs cannot actually occur here */
+		if (!fid_is_sane(fid))
+			continue;
+
+		/* find which MDT the stripe object lives on */
+		rc = lod_fld_lookup(env, lod, fid, &idx, &type);
+		if (rc)
+			GOTO(out, rc);
+
+		if (idx == lod2lu_dev(lod)->ld_site->ld_seq_site->ss_node_id) {
+			/* stripe is local to this MDT */
+			tgt_dt = lod->lod_child;
+		} else {
+			tgt = LTD_TGT(ltd, idx);
+			if (tgt == NULL)
+				GOTO(out, rc = -ESTALE);
+			tgt_dt = tgt->ltd_tgt;
+		}
+
+		dto = dt_locate_at(env, tgt_dt, fid,
+				  lo->ldo_obj.do_lu.lo_dev->ld_site->ls_top_dev,
+				  NULL);
+		if (IS_ERR(dto))
+			GOTO(out, rc = PTR_ERR(dto));
+
+		stripes[i + lo->ldo_dir_stripe_count] = dto;
+
+		if (!dt_try_as_dir(env, dto))
+			GOTO(out, rc = -ENOTDIR);
+
+		/* declare nlink increment on the stripe */
+		rc = lod_sub_declare_ref_add(env, dto, th);
+		if (rc)
+			GOTO(out, rc);
+
+		/* declare insertion of "." and ".." into the stripe */
+		rec->rec_fid = lu_object_fid(&dto->do_lu);
+		rc = lod_sub_declare_insert(env, dto,
+					    (const struct dt_rec *)rec,
+					    (const struct dt_key *)dot, th);
+		if (rc)
+			GOTO(out, rc);
+
+		rc = lod_sub_declare_insert(env, dto,
+					    (const struct dt_rec *)rec,
+					    (const struct dt_key *)dotdot, th);
+		if (rc)
+			GOTO(out, rc);
+
+		/* declare setting the caller-provided LMV on the stripe */
+		rc = lod_sub_declare_xattr_set(env, dto, &mlc->mlc_buf,
+					       XATTR_NAME_LMV, 0, th);
+		if (rc)
+			GOTO(out, rc);
+
+		/* stripe entry name inside the master: "<FID>:<index>" */
+		snprintf(stripe_name, sizeof(info->lti_key), DFID":%u",
+			 PFID(lu_object_fid(&dto->do_lu)),
+			 i + lo->ldo_dir_stripe_count);
+
+		/* declare a linkEA on the stripe pointing back at the master */
+		sname = lod_name_get(env, stripe_name, strlen(stripe_name));
+		rc = linkea_links_new(&ldata, &info->lti_linkea_buf,
+				      sname, lu_object_fid(&dt->do_lu));
+		if (rc)
+			GOTO(out, rc);
+
+		linkea_buf.lb_buf = ldata.ld_buf->lb_buf;
+		linkea_buf.lb_len = ldata.ld_leh->leh_len;
+		rc = lod_sub_declare_xattr_set(env, dto, &linkea_buf,
+					       XATTR_NAME_LINK, 0, th);
+		if (rc)
+			GOTO(out, rc);
+
+		/* declare the stripe's entry in the master plus its nlink */
+		rc = lod_sub_declare_insert(env, next,
+					    (const struct dt_rec *)rec,
+					    (const struct dt_key *)stripe_name,
+					    th);
+		if (rc)
+			GOTO(out, rc);
+
+		rc = lod_sub_declare_ref_add(env, next, th);
+		if (rc)
+			GOTO(out, rc);
+	}
+
+	/* swap in the enlarged stripe array and record migration state */
+	if (lo->ldo_stripe)
+		OBD_FREE_PTR_ARRAY(lo->ldo_stripe,
+				   lo->ldo_dir_stripes_allocated);
+	lo->ldo_stripe = stripes;
+	lo->ldo_dir_migrate_offset = lo->ldo_dir_stripe_count;
+	lo->ldo_dir_migrate_hash = le32_to_cpu(lmv->lmv_hash_type);
+	lo->ldo_dir_stripe_count += stripe_count;
+	lo->ldo_dir_stripes_allocated += stripe_count;
+
+	/* plain directory split creates target as a plain directory, while
+	 * after source attached as the first stripe, it becomes a striped
+	 * directory, set correct do_index_ops, otherwise it can't be unlinked.
+	 */
+	dt->do_index_ops = &lod_striped_index_ops;
+
+	RETURN(0);
+out:
+	/* put only the stripe objects located by this function; the
+	 * preexisting stripes at the front are still owned by lo */
+	i = lo->ldo_dir_stripe_count;
+	while (i < lo->ldo_dir_stripe_count + stripe_count && stripes[i])
+		dt_object_put(env, stripes[i++]);
+
+	OBD_FREE_PTR_ARRAY(stripes, stripe_count + lo->ldo_dir_stripe_count);
+	return rc;
+}
+
+/*
+ * Declare detaching all stripes from a (possibly striped) directory:
+ * each stripe loses its "..", and the master loses the stripe entry plus
+ * one link per stripe.  A plain directory only has its ".." removed.
+ */
+static int lod_dir_declare_layout_detach(const struct lu_env *env,
+					 struct dt_object *dt,
+					 const struct md_layout_change *unused,
+					 struct thandle *th)
+{
+	struct lod_thread_info *info = lod_env_info(env);
+	struct lod_object *lo = lod_dt_obj(dt);
+	struct dt_object *next = dt_object_child(dt);
+	char *stripe_name = info->lti_key;
+	struct dt_object *stripe;
+	int idx;
+	int rc = 0;
+
+	if (!dt_try_as_dir(env, dt))
+		return -ENOTDIR;
+
+	/* plain directory: only its own ".." entry needs removing */
+	if (!lo->ldo_dir_stripe_count)
+		return lod_sub_declare_delete(env, next,
+				(const struct dt_key *)dotdot, th);
+
+	for (idx = 0; idx < lo->ldo_dir_stripe_count; idx++) {
+		stripe = lo->ldo_stripe[idx];
+		if (!stripe)
+			continue;
+
+		if (!dt_try_as_dir(env, stripe))
+			return -ENOTDIR;
+
+		/* remove ".." from the stripe itself */
+		rc = lod_sub_declare_delete(env, stripe,
+				(const struct dt_key *)dotdot, th);
+		if (rc)
+			return rc;
+
+		/* then drop the stripe's name entry and one link from the
+		 * master directory */
+		snprintf(stripe_name, sizeof(info->lti_key), DFID":%d",
+			 PFID(lu_object_fid(&stripe->do_lu)), idx);
+
+		rc = lod_sub_declare_delete(env, next,
+				(const struct dt_key *)stripe_name, th);
+		if (rc)
+			return rc;
+
+		rc = lod_sub_declare_ref_del(env, next, th);
+		if (rc)
+			return rc;
+	}
+
+	return rc;
+}
+
+/*
+ * Check whether \a obj is an empty directory.
+ *
+ * Returns 0 when only "." and ".." exist, -ENOTEMPTY when a third entry
+ * is found, -ENOTDIR/-EIO on malformed objects, or a negative error from
+ * the index iterator.
+ */
+static int dt_dir_is_empty(const struct lu_env *env,
+			   struct dt_object *obj)
+{
+	const struct dt_it_ops *ops;
+	struct dt_it *iter;
+	int rc;
+
+	ENTRY;
+
+	if (!dt_try_as_dir(env, obj))
+		RETURN(-ENOTDIR);
+
+	ops = &obj->do_index_ops->dio_it;
+	iter = ops->init(env, obj, LUDA_64BITHASH);
+	if (IS_ERR(iter))
+		RETURN(PTR_ERR(iter));
+
+	rc = ops->get(env, iter, (const struct dt_key *)"");
+	if (rc > 0) {
+		/* step past "." and ".."; a third successful step means the
+		 * directory contains a real entry */
+		int steps = 0;
+
+		rc = 0;
+		while (rc == 0 && steps++ < 3)
+			rc = ops->next(env, iter);
+		if (rc == 0)
+			rc = -ENOTEMPTY;
+		else if (rc == 1)
+			rc = 0;
+	} else if (rc == 0) {
+		/* Huh? Index contains no zero key? */
+		rc = -EIO;
+	}
+
+	ops->put(env, iter);
+	ops->fini(env, iter);
+
+	RETURN(rc);
+}
+
+/*
+ * Declare shrinking a striped directory down to lum_stripe_count stripes:
+ * surviving stripes get their slave LMV replaced, stripes beyond the new
+ * count must be empty and are declared destroyed and unlinked from the
+ * master, whose own LMV is finally declared replaced as well.
+ */
+static int lod_dir_declare_layout_shrink(const struct lu_env *env,
+					 struct dt_object *dt,
+					 const struct md_layout_change *mlc,
+					 struct thandle *th)
+{
+	struct lod_thread_info *info = lod_env_info(env);
+	struct lod_object *lo = lod_dt_obj(dt);
+	struct dt_object *next = dt_object_child(dt);
+	/* user-supplied target layout carrying the new stripe count */
+	struct lmv_user_md *lmu = mlc->mlc_buf.lb_buf;
+	__u32 final_stripe_count;
+	char *stripe_name = info->lti_key;
+	struct lu_buf *lmv_buf = &info->lti_buf;
+	struct dt_object *dto;
+	int i;
+	int rc;
+
+	LASSERT(lmu);
+
+	if (!dt_try_as_dir(env, dt))
+		return -ENOTDIR;
+
+	/* shouldn't be called on plain directory */
+	LASSERT(lo->ldo_dir_stripe_count);
+
+	/* buffer for the replacement LMV written to master and stripes;
+	 * presumably filled in by the caller/lower layer - the declaration
+	 * only needs its size (NOTE(review): confirm) */
+	lmv_buf->lb_buf = &info->lti_lmv.lmv_md_v1;
+	lmv_buf->lb_len = sizeof(info->lti_lmv.lmv_md_v1);
+
+	/* shrink only: the new count must be smaller and non-zero */
+	final_stripe_count = le32_to_cpu(lmu->lum_stripe_count);
+	LASSERT(final_stripe_count &&
+		final_stripe_count < lo->ldo_dir_stripe_count);
+
+	for (i = 0; i < lo->ldo_dir_stripe_count; i++) {
+		dto = lo->ldo_stripe[i];
+		if (!dto)
+			continue;
+
+		if (i < final_stripe_count) {
+			/* surviving stripe; with a single stripe left no
+			 * slave LMV update is declared */
+			if (final_stripe_count == 1)
+				continue;
+
+			rc = lod_sub_declare_xattr_set(env, dto, lmv_buf,
+						       XATTR_NAME_LMV,
+						       LU_XATTR_REPLACE, th);
+			if (rc)
+				return rc;
+
+			continue;
+		}
+
+		/* a stripe being removed must be empty */
+		rc = dt_dir_is_empty(env, dto);
+		if (rc < 0)
+			return rc;
+
+		/* declare dropping and destroying the stripe object */
+		rc = lod_sub_declare_ref_del(env, dto, th);
+		if (rc)
+			return rc;
+
+		rc = lod_sub_declare_destroy(env, dto, th);
+		if (rc)
+			return rc;
+
+		/* declare removing the stripe's name entry and one link
+		 * from the master directory */
+		snprintf(stripe_name, sizeof(info->lti_key), DFID":%d",
+			 PFID(lu_object_fid(&dto->do_lu)), i);
+
+		rc = lod_sub_declare_delete(env, next,
+				(const struct dt_key *)stripe_name, th);
+		if (rc)
+			return rc;
+
+		rc = lod_sub_declare_ref_del(env, next, th);
+		if (rc)
+			return rc;
+	}
+
+	/* finally declare replacing the master's own LMV */
+	rc = lod_sub_declare_xattr_set(env, next, lmv_buf, XATTR_NAME_LMV,
+				       LU_XATTR_REPLACE, th);
+	return rc;
+}
+
+/**
+ * Allocate stripes for split directory.
+ *
+ * \param[in] env execution environment
+ * \param[in] dt target object
+ * \param[in] mlc layout change data
+ * \param[in] th transaction handle
+ *
+ * \retval 0 on success
+ * \retval negative if failed
+ */
+static int lod_dir_declare_layout_split(const struct lu_env *env,
+ struct dt_object *dt,
+ const struct md_layout_change *mlc,
+ struct thandle *th)
+{
+ struct lod_thread_info *info = lod_env_info(env);
+ struct lod_device *lod = lu2lod_dev(dt->do_lu.lo_dev);
+ struct lod_object *lo = lod_dt_obj(dt);
+ struct dt_object_format *dof = &info->lti_format;
+ struct lmv_user_md_v1 *lum = mlc->mlc_spec->u.sp_ea.eadata;
+ struct dt_object **stripes;
+ u32 stripe_count;
+ u32 saved_count;
+ int i;
+ int rc;
+
+ ENTRY;