*/
/*
* This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
*
* lustre/mdd/mdd_object.c
*
{
struct lu_buf *buf;
- buf = &mdd_env_info(env)->mti_buf[0];
+ buf = &mdd_env_info(env)->mdi_buf[0];
buf->lb_buf = area;
buf->lb_len = len;
return buf;
{
struct lu_buf *buf;
- buf = &mdd_env_info(env)->mti_buf[0];
+ buf = &mdd_env_info(env)->mdi_buf[0];
buf->lb_buf = (void *)area;
buf->lb_len = len;
return buf;
const struct md_op_spec *spec,
struct dt_allocation_hint *hint)
{
- struct dt_object_format *dof = &mdd_env_info(env)->mti_dof;
+ struct dt_object_format *dof = &mdd_env_info(env)->mdi_dof;
const struct dt_index_features *feat = spec->sp_feat;
int rc;
ENTRY;
const struct md_op_spec *spec,
struct dt_allocation_hint *hint)
{
- struct dt_object_format *dof = &mdd_env_info(env)->mti_dof;
+ struct dt_object_format *dof = &mdd_env_info(env)->mdi_dof;
int rc;
ENTRY;
RETURN(0);
if (is_project_state_change(oattr, la)) {
- if (!md_capable(uc, CFS_CAP_SYS_RESOURCE) &&
+ if (!md_capable(uc, CAP_SYS_RESOURCE) &&
!lustre_in_group_p(uc, ma->ma_enable_chprojid_gid) &&
!(ma->ma_enable_chprojid_gid == -1 &&
mdd_permission_internal(env, obj, oattr, MAY_WRITE)))
(LUSTRE_IMMUTABLE_FL | LUSTRE_APPEND_FL);
if ((uc->uc_fsuid != oattr->la_uid) &&
- !md_capable(uc, CFS_CAP_FOWNER))
+ !md_capable(uc, CAP_FOWNER))
RETURN(-EPERM);
/* The IMMUTABLE and APPEND_ONLY flags can
* only be changed by the relevant capability. */
if ((oldflags ^ newflags) &&
- !md_capable(uc, CFS_CAP_LINUX_IMMUTABLE))
+ !md_capable(uc, CAP_LINUX_IMMUTABLE))
RETURN(-EPERM);
if (!S_ISDIR(oattr->la_mode)) {
if ((la->la_valid & (LA_MTIME | LA_ATIME | LA_CTIME)) &&
!(la->la_valid & ~(LA_MTIME | LA_ATIME | LA_CTIME))) {
if ((uc->uc_fsuid != oattr->la_uid) &&
- !md_capable(uc, CFS_CAP_FOWNER)) {
+ !md_capable(uc, CAP_FOWNER)) {
rc = mdd_permission_internal(env, obj, oattr,
MAY_WRITE);
if (rc)
if (la->la_valid & LA_MODE) {
if (!(flags & MDS_PERM_BYPASS) &&
(uc->uc_fsuid != oattr->la_uid) &&
- !md_capable(uc, CFS_CAP_FOWNER))
+ !md_capable(uc, CAP_FOWNER))
RETURN(-EPERM);
if (la->la_mode == (umode_t) -1)
/* Also check the setgid bit! */
if (!lustre_in_group_p(uc, (la->la_valid & LA_GID) ?
la->la_gid : oattr->la_gid) &&
- !md_capable(uc, CFS_CAP_FSETID))
+ !md_capable(uc, CAP_FSETID))
la->la_mode &= ~S_ISGID;
} else {
la->la_mode = oattr->la_mode;
la->la_uid = oattr->la_uid;
if (((uc->uc_fsuid != oattr->la_uid) ||
(la->la_uid != oattr->la_uid)) &&
- !md_capable(uc, CFS_CAP_CHOWN))
+ !md_capable(uc, CAP_CHOWN))
RETURN(-EPERM);
/* If the user or group of a non-directory has been
if (((uc->uc_fsuid != oattr->la_uid) ||
((la->la_gid != oattr->la_gid) &&
!lustre_in_group_p(uc, la->la_gid))) &&
- !md_capable(uc, CFS_CAP_CHOWN))
+ !md_capable(uc, CAP_CHOWN))
RETURN(-EPERM);
/* Likewise, if the user or group of a non-directory
}
if (la->la_valid & LA_CTIME) {
- /* The pure setattr, it has the priority over what is
- * already set, do not drop it if ctime is equal. */
+ /**
+ * The pure setattr, it has the priority over what is
+ * already set, do not drop it if ctime is equal.
+ */
if (la->la_ctime < oattr->la_ctime)
la->la_valid &= ~(LA_ATIME | LA_MTIME | LA_CTIME);
}
reclen = llog_data_len(LLOG_CHANGELOG_HDR_SZ +
changelog_rec_offset(clf_flags & CLF_SUPPORTED,
xflags & CLFE_SUPPORTED));
- buf = lu_buf_check_and_alloc(&mdd_env_info(env)->mti_big_buf, reclen);
+ buf = lu_buf_check_and_alloc(&mdd_env_info(env)->mdi_chlg_buf, reclen);
if (buf->lb_buf == NULL)
RETURN(-ENOMEM);
rec = buf->lb_buf;
bits |= (valid & LA_MTIME) ? BIT(CL_MTIME) : 0;
bits |= (valid & LA_CTIME) ? BIT(CL_CTIME) : 0;
bits |= (valid & LA_ATIME) ? BIT(CL_ATIME) : 0;
- bits = bits & mdd->mdd_cl.mc_mask;
+ bits = bits & mdd->mdd_cl.mc_current_mask;
/* This is an implementation limit rather than a protocol limit */
BUILD_BUG_ON(CL_LAST > sizeof(int) * 8);
if (bits == 0)
struct mdd_object *mdd_obj = md2mdd_obj(obj);
struct mdd_device *mdd = mdo2mdd(obj);
struct thandle *handle = NULL;
- struct lu_attr *la_copy = &mdd_env_info(env)->mti_la_for_fix;
+ struct lu_attr *la_copy = &mdd_env_info(env)->mdi_la_for_fix;
struct lu_attr *attr = MDD_ENV_VAR(env, cattr);
const struct lu_attr *la = &ma->ma_attr;
struct lu_ucred *uc;
+ bool quota_reserved = false;
bool chrgrp_by_unprivileged_user = false;
+ __s64 quota_size = 0;
int rc;
ENTRY;
uc = lu_ucred_check(env);
if (S_ISREG(attr->la_mode) && la->la_valid & LA_GID &&
la->la_gid != attr->la_gid && uc != NULL && uc->uc_fsuid != 0) {
- /* LU-10048: disable synchronous chgrp operation for it will
- * cause deadlock between MDT and OST.
- la_copy->la_valid |= LA_FLAGS;
- la_copy->la_flags |= LUSTRE_SET_SYNC_FL;
- */
+ CDEBUG(D_QUOTA, "%s: reserve quota for changing group: gid=%u size=%llu\n",
+ mdd2obd_dev(mdd)->obd_name, la->la_gid, la->la_size);
+
+ if (la->la_valid & LA_BLOCKS)
+ quota_size = la->la_blocks << 9;
+ else if (la->la_valid & LA_SIZE)
+ quota_size = la->la_size;
+ /* use the local attr fetched above */
+ else if (attr->la_valid & LA_BLOCKS)
+ quota_size = attr->la_blocks << 9;
+ else if (attr->la_valid & LA_SIZE)
+ quota_size = attr->la_size;
+
+ if (quota_size > 0) {
+ rc = dt_reserve_or_free_quota(env, mdd->mdd_bottom,
+ GRPQUOTA, attr->la_uid,
+ la->la_gid, quota_size,
+ false);
+
+ if (rc) {
+ CDEBUG(D_QUOTA, "%s: failed to reserve quota for gid %d size %llu\n",
+ mdd2obd_dev(mdd)->obd_name,
+ la->la_gid, quota_size);
+
+ GOTO(out, rc);
+ }
+
+ quota_reserved = true;
+ la_copy->la_valid |= LA_FLAGS;
+ }
+
chrgrp_by_unprivileged_user = true;
/* Flush the possible existing client setattr requests to OSTs
rc = mdd_attr_set_changelog(env, obj, handle, &ma->ma_pfid,
la_copy->la_valid);
+ if (rc == 0 && quota_reserved) {
+ struct thandle *sub_th;
+
+ sub_th = thandle_get_sub_by_dt(env, handle, mdd->mdd_bottom);
+ if (unlikely(IS_ERR(sub_th))) {
+ dt_reserve_or_free_quota(env, mdd->mdd_bottom, GRPQUOTA,
+ attr->la_uid, la->la_gid,
+ -quota_size, false);
+ } else {
+ sub_th->th_reserved_quota.qrr_type = GRPQUOTA;
+ sub_th->th_reserved_quota.qrr_id.qid_gid = la->la_gid;
+ sub_th->th_reserved_quota.qrr_count = quota_size;
+ }
+ }
+
if (handle != NULL)
rc = mdd_trans_stop(env, mdd, rc, handle);
* can write attributes. */
if (S_ISDIR(attr->la_mode) && (attr->la_mode & S_ISVTX) &&
(uc->uc_fsuid != attr->la_uid) &&
- !md_capable(uc, CFS_CAP_FOWNER))
+ !md_capable(uc, CAP_FOWNER))
RETURN(-EPERM);
} else if (strcmp(name, XATTR_NAME_SOM) != 0 &&
(uc->uc_fsuid != attr->la_uid) &&
- !md_capable(uc, CFS_CAP_FOWNER)) {
+ !md_capable(uc, CAP_FOWNER)) {
RETURN(-EPERM);
}
RETURN(-ENOMEM);
/* Read HSM attrs from disk */
- current_buf = lu_buf_check_and_alloc(&info->mti_xattr_buf,
+ current_buf = lu_buf_check_and_alloc(&info->mdi_xattr_buf,
min_t(unsigned int,
mdd_obj2mdd_dev(mdd_obj)->mdd_dt_conf.ddp_max_ea_size,
XATTR_SIZE_MAX));
struct mdd_device *mdd = mdo2mdd(md_obj);
struct mdd_object *obj = md2mdd_obj(md_obj);
struct mdd_object *vic = md2mdd_obj(md_vic);
- struct lu_buf *buf = &mdd_env_info(env)->mti_buf[0];
- struct lu_buf *buf_vic = &mdd_env_info(env)->mti_buf[1];
+ struct lu_buf *buf = &mdd_env_info(env)->mdi_buf[0];
+ struct lu_buf *buf_vic = &mdd_env_info(env)->mdi_buf[1];
struct lov_mds_md *lmm;
struct thandle *handle;
- int rc;
+ int rc, lock_order;
ENTRY;
- rc = lu_fid_cmp(mdd_object_fid(obj), mdd_object_fid(vic));
- if (rc == 0) /* same fid */
+ lock_order = lu_fid_cmp(mdd_object_fid(obj), mdd_object_fid(vic));
+ if (lock_order == 0) /* same fid */
RETURN(-EPERM);
handle = mdd_trans_create(env, mdd);
if (IS_ERR(handle))
RETURN(PTR_ERR(handle));
- if (rc > 0) {
- mdd_write_lock(env, obj, DT_TGT_CHILD);
- mdd_write_lock(env, vic, DT_TGT_CHILD);
- } else {
- mdd_write_lock(env, vic, DT_TGT_CHILD);
- mdd_write_lock(env, obj, DT_TGT_CHILD);
- }
-
/* get EA of victim file */
memset(buf_vic, 0, sizeof(*buf_vic));
rc = mdd_stripe_get(env, vic, buf_vic, XATTR_NAME_LOV);
if (rc < 0) {
if (rc == -ENODATA)
rc = 0;
- GOTO(out, rc);
+ GOTO(stop, rc);
}
/* parse the layout of victim file */
lmm = buf_vic->lb_buf;
if (le32_to_cpu(lmm->lmm_magic) != LOV_MAGIC_COMP_V1)
- GOTO(out, rc = -EINVAL);
+ GOTO(stop, rc = -EINVAL);
/* save EA of target file for restore */
memset(buf, 0, sizeof(*buf));
rc = mdd_stripe_get(env, obj, buf, XATTR_NAME_LOV);
if (rc < 0)
- GOTO(out, rc);
+ GOTO(stop, rc);
/* Get rid of the layout from victim object */
rc = mdd_declare_xattr_del(env, mdd, vic, XATTR_NAME_LOV, handle);
if (rc)
- GOTO(out, rc);
+ GOTO(stop, rc);
- rc = mdd_declare_xattr_set(env, mdd, obj, buf_vic, XATTR_LUSTRE_LOV,
+ rc = mdd_declare_xattr_set(env, mdd, obj, buf_vic, XATTR_NAME_LOV,
LU_XATTR_MERGE, handle);
if (rc)
- GOTO(out, rc);
+ GOTO(stop, rc);
rc = mdd_trans_start(env, mdd, handle);
if (rc != 0)
- GOTO(out, rc);
+ GOTO(stop, rc);
- rc = mdo_xattr_set(env, obj, buf_vic, XATTR_LUSTRE_LOV, LU_XATTR_MERGE,
+ if (lock_order > 0) {
+ mdd_write_lock(env, obj, DT_TGT_CHILD);
+ mdd_write_lock(env, vic, DT_TGT_CHILD);
+ } else {
+ mdd_write_lock(env, vic, DT_TGT_CHILD);
+ mdd_write_lock(env, obj, DT_TGT_CHILD);
+ }
+
+ rc = mdo_xattr_set(env, obj, buf_vic, XATTR_NAME_LOV, LU_XATTR_MERGE,
handle);
if (rc)
GOTO(out, rc);
}
out:
- mdd_trans_stop(env, mdd, rc, handle);
mdd_write_unlock(env, obj);
mdd_write_unlock(env, vic);
+stop:
+ mdd_trans_stop(env, mdd, rc, handle);
lu_buf_free(buf);
lu_buf_free(buf_vic);
{
struct mdd_device *mdd = mdo2mdd(md_obj);
struct mdd_object *obj = md2mdd_obj(md_obj);
- struct mdd_object *vic = md2mdd_obj(mrd->mrd_obj);
- struct lu_buf *buf = &mdd_env_info(env)->mti_buf[0];
- struct lu_buf *buf_save = &mdd_env_info(env)->mti_buf[1];
- struct lu_buf *buf_vic = &mdd_env_info(env)->mti_buf[2];
+ struct mdd_object *vic = NULL;
+ struct lu_buf *buf = &mdd_env_info(env)->mdi_buf[0];
+ struct lu_buf *buf_save = &mdd_env_info(env)->mdi_buf[1];
+ struct lu_buf *buf_vic = &mdd_env_info(env)->mdi_buf[2];
struct lov_comp_md_v1 *lcm;
struct thandle *handle;
int rc;
ENTRY;
- rc = lu_fid_cmp(mdd_object_fid(obj), mdd_object_fid(vic));
- if (rc == 0) /* same fid */
- RETURN(-EPERM);
+ /**
+ * A NULL @mrd_obj means mirror deletion; vic is left NULL below to
+ * flag the mirror-delete case
+ */
+ if (mrd->mrd_obj)
+ vic = md2mdd_obj(mrd->mrd_obj);
handle = mdd_trans_create(env, mdd);
if (IS_ERR(handle))
RETURN(PTR_ERR(handle));
- if (rc > 0) {
- mdd_write_lock(env, obj, DT_TGT_CHILD);
- mdd_write_lock(env, vic, DT_TGT_CHILD);
- } else {
- mdd_write_lock(env, vic, DT_TGT_CHILD);
- mdd_write_lock(env, obj, DT_TGT_CHILD);
- }
-
/* get EA of mirrored file */
memset(buf_save, 0, sizeof(*buf));
rc = mdd_stripe_get(env, obj, buf_save, XATTR_NAME_LOV);
if (rc < 0)
- GOTO(out, rc);
+ GOTO(stop, rc);
lcm = buf_save->lb_buf;
if (le32_to_cpu(lcm->lcm_magic) != LOV_MAGIC_COMP_V1)
- GOTO(out, rc = -EINVAL);
+ GOTO(stop, rc = -EINVAL);
/**
* Extract the mirror with specified mirror id, and store the splitted
- * mirror layout to the victim file.
+ * mirror layout to the victim buffer.
*/
memset(buf, 0, sizeof(*buf));
memset(buf_vic, 0, sizeof(*buf_vic));
rc = mdd_split_ea(lcm, mrd->mrd_mirror_id, buf, buf_vic);
if (rc < 0)
- GOTO(out, rc);
+ GOTO(stop, rc);
+ /**
+ * @buf stores the layout without the specified mirror, @buf_vic stores
+ * the split-off mirror
+ */
dom_stripe = mdd_lmm_dom_size(buf_vic->lb_buf) > 0;
- rc = mdd_declare_xattr_set(env, mdd, obj, buf, XATTR_NAME_LOV,
- LU_XATTR_SPLIT, handle);
- if (rc)
- GOTO(out, rc);
- rc = mdd_declare_xattr_set(env, mdd, vic, buf_vic, XATTR_NAME_LOV,
- LU_XATTR_SPLIT, handle);
- if (rc)
- GOTO(out, rc);
+ if (vic) {
+ /**
+ * non-delete mirror split
+ *
+ * declare obj set remaining layout in @buf, will set obj's
+ * in-memory layout
+ */
+ rc = mdd_declare_xattr_set(env, mdd, obj, buf, XATTR_NAME_LOV,
+ LU_XATTR_SPLIT, handle);
+ if (rc)
+ GOTO(stop, rc);
+
+ /* declare vic set split layout in @buf_vic */
+ rc = mdd_declare_xattr_set(env, mdd, vic, buf_vic,
+ XATTR_NAME_LOV, LU_XATTR_SPLIT,
+ handle);
+ if (rc)
+ GOTO(stop, rc);
+ } else {
+ /**
+ * declare delete mirror objects in @buf_vic, will change obj's
+ * in-memory layout
+ */
+ rc = mdd_declare_xattr_set(env, mdd, obj, buf_vic,
+ XATTR_NAME_LOV, LU_XATTR_PURGE,
+ handle);
+ if (rc)
+ GOTO(stop, rc);
+
+ /* declare obj set remaining layout in @buf */
+ rc = mdd_declare_xattr_set(env, mdd, obj, buf,
+ XATTR_NAME_LOV, LU_XATTR_SPLIT,
+ handle);
+ if (rc)
+ GOTO(stop, rc);
+ }
rc = mdd_trans_start(env, mdd, handle);
if (rc)
- GOTO(out, rc);
+ GOTO(stop, rc);
- rc = mdo_xattr_set(env, obj, buf, XATTR_NAME_LOV, LU_XATTR_REPLACE,
- handle);
- if (rc)
- GOTO(out, rc);
+ if (vic) {
+ /* don't use the same file to save the split mirror */
+ rc = lu_fid_cmp(mdd_object_fid(obj), mdd_object_fid(vic));
+ if (rc == 0)
+ GOTO(stop, rc = -EPERM);
+
+ if (rc > 0) {
+ mdd_write_lock(env, obj, DT_TGT_CHILD);
+ mdd_write_lock(env, vic, DT_TGT_CHILD);
+ } else {
+ mdd_write_lock(env, vic, DT_TGT_CHILD);
+ mdd_write_lock(env, obj, DT_TGT_CHILD);
+ }
+ } else {
+ mdd_write_lock(env, obj, DT_TGT_CHILD);
+ }
- rc = mdo_xattr_set(env, vic, buf_vic, XATTR_NAME_LOV, LU_XATTR_CREATE,
+ /* set obj's layout in @buf */
+ rc = mdo_xattr_set(env, obj, buf, XATTR_NAME_LOV, LU_XATTR_SPLIT,
handle);
if (rc)
- GOTO(out_restore, rc);
+ GOTO(unlock, rc);
+
+ if (vic) {
+ /* set vic's layout in @buf_vic */
+ rc = mdo_xattr_set(env, vic, buf_vic, XATTR_NAME_LOV,
+ LU_XATTR_CREATE, handle);
+ if (rc)
+ GOTO(out_restore, rc);
+ } else {
+ /* delete mirror objects */
+ rc = mdo_xattr_set(env, obj, buf_vic, XATTR_NAME_LOV,
+ LU_XATTR_PURGE, handle);
+ if (rc)
+ GOTO(out_restore, rc);
+ }
rc = mdd_changelog_data_store(env, mdd, CL_LAYOUT, 0, obj, handle,
NULL);
if (rc)
- GOTO(out, rc);
+ GOTO(out_restore, rc);
- rc = mdd_changelog_data_store(env, mdd, CL_LAYOUT, 0, vic, handle,
- NULL);
- if (rc)
- GOTO(out, rc);
- EXIT;
+ if (vic) {
+ rc = mdd_changelog_data_store(env, mdd, CL_LAYOUT, 0, vic,
+ handle, NULL);
+ if (rc)
+ GOTO(out_restore, rc);
+ }
out_restore:
if (rc) {
- /* restore obj's layout */
+ /* restore obj's in-memory and on-disk layout */
int rc2 = mdo_xattr_set(env, obj, buf_save, XATTR_NAME_LOV,
LU_XATTR_REPLACE, handle);
if (rc2)
- CERROR("%s: failed rollback "DFID" layout: file state unkonwn: rc = %d\n",
+ CERROR("%s: failed rollback "DFID
+ " layout: file state unknown: rc = %d\n",
mdd_obj_dev_name(obj),
- PFID(mdd_object_fid(obj)), rc2);
+ PFID(mdd_object_fid(obj)), rc);
}
-out:
+
+unlock:
+ mdd_write_unlock(env, obj);
+ if (vic)
+ mdd_write_unlock(env, vic);
+stop:
rc = mdd_trans_stop(env, mdd, rc, handle);
/* Truncate local DOM data if all went well */
if (!rc && dom_stripe)
mdd_dom_data_truncate(env, mdd, obj);
- mdd_write_unlock(env, obj);
- mdd_write_unlock(env, vic);
lu_buf_free(buf_save);
lu_buf_free(buf);
lu_buf_free(buf_vic);
if (buf->lb_len != sizeof(*mrd))
RETURN(-EINVAL);
- rc = mdd_layout_merge_allowed(env, obj, victim);
- if (rc)
- RETURN(rc);
- if (fl == LU_XATTR_MERGE)
+ if (fl == LU_XATTR_MERGE) {
+ rc = mdd_layout_merge_allowed(env, obj, victim);
+ if (rc)
+ RETURN(rc);
/* merge layout of victim as a mirror of obj's. */
rc = mdd_xattr_merge(env, obj, victim);
- else
+ } else {
rc = mdd_xattr_split(env, obj, mrd);
+ }
RETURN(rc);
}
int mdd_stripe_get(const struct lu_env *env, struct mdd_object *obj,
struct lu_buf *lmm_buf, const char *name)
{
- struct lu_buf *buf = &mdd_env_info(env)->mti_big_buf;
+ struct lu_buf *buf = &mdd_env_info(env)->mdi_big_buf;
int rc;
ENTRY;
repeat:
rc = mdo_xattr_get(env, obj, buf, name);
if (rc == -ERANGE) {
- /* mti_big_buf is allocated but is too small
+ /* mdi_big_buf is allocated but is too small
* we need to increase it */
- buf = lu_buf_check_and_alloc(&mdd_env_info(env)->mti_big_buf,
+ buf = lu_buf_check_and_alloc(&mdd_env_info(env)->mdi_big_buf,
buf->lb_len * 2);
if (buf->lb_buf == NULL)
RETURN(-ENOMEM);
/*
* we don't use lmm_buf directly, because we don't know xattr size, so
- * by using mti_big_buf we can avoid calling mdo_xattr_get() twice.
+ * by using mdi_big_buf we can avoid calling mdo_xattr_get() twice.
*/
memcpy(lmm_buf->lb_buf, buf->lb_buf, rc);
struct lu_attr *snd_la = MDD_ENV_VAR(env, tattr);
struct mdd_device *mdd = mdo2mdd(obj1);
struct lov_mds_md *fst_lmm, *snd_lmm;
- struct lu_buf *fst_buf = &info->mti_buf[0];
- struct lu_buf *snd_buf = &info->mti_buf[1];
- struct lu_buf *fst_hsm_buf = &info->mti_buf[2];
- struct lu_buf *snd_hsm_buf = &info->mti_buf[3];
+ struct lu_buf *fst_buf = &info->mdi_buf[0];
+ struct lu_buf *snd_buf = &info->mdi_buf[1];
+ struct lu_buf *fst_hsm_buf = &info->mdi_buf[2];
+ struct lu_buf *snd_hsm_buf = &info->mdi_buf[3];
struct ost_id *saved_oi = NULL;
struct thandle *handle;
struct mdd_object *dom_o = NULL;
ENTRY;
- BUILD_BUG_ON(ARRAY_SIZE(info->mti_buf) < 4);
- memset(info->mti_buf, 0, sizeof(info->mti_buf));
+ BUILD_BUG_ON(ARRAY_SIZE(info->mdi_buf) < 4);
+ memset(info->mdi_buf, 0, sizeof(info->mdi_buf));
/* we have to sort the 2 obj, so locking will always
* be in the same order, even in case of 2 concurrent swaps */
if (fst_lmm != NULL) {
struct ost_id temp_oi;
- saved_oi = &info->mti_oa.o_oi;
+ saved_oi = &info->mdi_oa.o_oi;
mdd_get_lmm_oi(fst_lmm, saved_oi);
mdd_get_lmm_oi(snd_lmm, &temp_oi);
mdd_set_lmm_gen(fst_lmm, &snd_gen);
if (rc2 < 0)
goto do_lbug;
- ++steps;
- rc2 = mdd_xattr_hsm_replace(env, fst_o, fst_hsm_buf, handle);
- if (rc2 < 0)
- goto do_lbug;
+ if (flags & SWAP_LAYOUTS_MDS_HSM) {
+ ++steps;
+ rc2 = mdd_xattr_hsm_replace(env, fst_o, fst_hsm_buf,
+ handle);
+ if (rc2 < 0)
+ goto do_lbug;
- ++steps;
- rc2 = mdd_xattr_hsm_replace(env, snd_o, snd_hsm_buf, handle);
+ ++steps;
+ rc2 = mdd_xattr_hsm_replace(env, snd_o, snd_hsm_buf,
+ handle);
+ }
do_lbug:
if (rc2 < 0) {
struct md_layout_change *mlc, struct thandle *handle)
{
struct mdd_device *mdd = mdd_obj2mdd_dev(obj);
- struct lu_buf *som_buf = &mdd_env_info(env)->mti_buf[1];
+ struct lu_buf *som_buf = &mdd_env_info(env)->mdi_buf[1];
struct lustre_som_attrs *som = &mlc->mlc_som;
int fl = 0;
int rc;
lustre_som_swab(som);
if (som->lsa_valid & SOM_FL_STRICT)
fl = LU_XATTR_REPLACE;
+
+ if (mlc->mlc_opc == MD_LAYOUT_WRITE &&
+ mlc->mlc_intent->li_extent.e_end > som->lsa_size) {
+ som->lsa_size = mlc->mlc_intent->li_extent.e_end + 1;
+ fl = LU_XATTR_REPLACE;
+ }
}
rc = mdd_declare_layout_change(env, mdd, obj, mlc, handle);
struct thandle *handle)
{
struct mdd_device *mdd = mdd_obj2mdd_dev(obj);
+ struct lu_buf *som_buf = &mdd_env_info(env)->mdi_buf[1];
+ struct lustre_som_attrs *som = &mlc->mlc_som;
+ int fl = 0;
int rc;
ENTRY;
* resync state. */
break;
case MD_LAYOUT_WRITE:
- /* legal race for concurrent write, the file state has been
- * changed by another client. */
+ /**
+ * legal race for concurrent write, the file state has been
+ * changed by another client. Or a jump over file size and
+ * write.
+ */
+ som_buf->lb_buf = som;
+ som_buf->lb_len = sizeof(*som);
+ rc = mdo_xattr_get(env, obj, som_buf, XATTR_NAME_SOM);
+ if (rc < 0 && rc != -ENODATA)
+ RETURN(rc);
+
+ if (rc > 0) {
+ lustre_som_swab(som);
+ if (mlc->mlc_intent->li_extent.e_end > som->lsa_size) {
+ som->lsa_size =
+ mlc->mlc_intent->li_extent.e_end + 1;
+ fl = LU_XATTR_REPLACE;
+ }
+ }
break;
default:
RETURN(-EBUSY);
if (rc)
GOTO(out, rc);
+ if (fl) {
+ rc = mdd_declare_xattr_set(env, mdd, obj, som_buf,
+ XATTR_NAME_SOM, fl, handle);
+ if (rc)
+ GOTO(out, rc);
+ }
+
rc = mdd_trans_start(env, mdd, handle);
if (rc)
GOTO(out, rc);
mdd_write_lock(env, obj, DT_TGT_CHILD);
rc = mdo_layout_change(env, obj, mlc, handle);
+ if (!rc && fl) {
+ som->lsa_valid = SOM_FL_STALE;
+ lustre_som_swab(som);
+ rc = mdo_xattr_set(env, obj, som_buf, XATTR_NAME_SOM,
+ fl, handle);
+ }
mdd_write_unlock(env, obj);
if (rc)
GOTO(out, rc);
struct md_layout_change *mlc, struct thandle *handle)
{
struct mdd_device *mdd = mdd_obj2mdd_dev(obj);
- struct lu_buf *som_buf = &mdd_env_info(env)->mti_buf[1];
+ struct lu_buf *som_buf = &mdd_env_info(env)->mdi_buf[1];
int fl = 0;
int rc;
ENTRY;
nc->do_ops->do_ah_init(env, hint, np, nc, attr->la_mode & S_IFMT);
}
-static int accmode(const struct lu_env *env, const struct lu_attr *la,
- u64 open_flags)
+static int mdd_accmode(const struct lu_env *env, const struct lu_attr *la,
+ u64 open_flags)
{
/* Sadly, NFSD reopens a file repeatedly during operation, so the
* "acc_mode = 0" allowance for newly-created files isn't honoured.
const struct lu_attr *attr, u64 open_flags,
int is_replay)
{
- int mode, rc;
+ unsigned int may_mask;
+ int rc;
ENTRY;
/* EEXIST check, also opening of *open* orphans is allowed so we can
if (S_ISLNK(attr->la_mode))
RETURN(-ELOOP);
- mode = accmode(env, attr, open_flags);
+ may_mask = mdd_accmode(env, attr, open_flags);
- if (S_ISDIR(attr->la_mode) && (mode & MAY_WRITE))
+ if (S_ISDIR(attr->la_mode) && (may_mask & MAY_WRITE))
RETURN(-EISDIR);
if (!(open_flags & MDS_OPEN_CREATED)) {
- rc = mdd_permission_internal(env, obj, attr, mode);
+ rc = mdd_permission_internal(env, obj, attr, may_mask);
if (rc)
RETURN(rc);
}
rc = mdd_open_sanity_check(env, mdd_obj, attr, open_flags,
spec->no_create);
- if ((rc == -EACCES) && (mdd->mdd_cl.mc_mask & BIT(CL_DN_OPEN)))
+ if ((rc == -EACCES) && (mdd->mdd_cl.mc_current_mask & BIT(CL_DN_OPEN)))
type = CL_DN_OPEN;
else if (rc != 0)
GOTO(out, rc);
* this is not a big deal if we have a CL_CLOSE entry with no matching
* CL_OPEN. Plus Changelogs mask may not change often.
*/
- if (((!(mdd->mdd_cl.mc_mask & BIT(CL_OPEN)) &&
+ if (((!(mdd->mdd_cl.mc_current_mask & BIT(CL_OPEN)) &&
(open_flags & (MDS_FMODE_WRITE | MDS_OPEN_APPEND |
MDS_OPEN_TRUNC))) ||
- ((mdd->mdd_cl.mc_mask & BIT(CL_OPEN)) && last_close_by_uid)) &&
+ ((mdd->mdd_cl.mc_current_mask & BIT(CL_OPEN)) &&
+ last_close_by_uid)) &&
!(ma->ma_valid & MA_FLAGS && ma->ma_attr_flags & MDS_RECOV_OPEN)) {
if (handle == NULL) {
handle = mdd_trans_create(env, mdo2mdd(obj));