static const struct lu_object_operations mdd_lu_obj_ops;
struct mdd_object_user {
- struct list_head mou_list; /**< linked off mod_users */
- u64 mou_open_flags; /**< open mode by client */
- __u64 mou_uidgid; /**< uid_gid on client */
- int mou_opencount; /**< # opened */
- ktime_t mou_deniednext; /**< time of next access denied
- * notfication
- */
+ struct list_head mou_list; /* linked off mod_users */
+ u64 mou_open_flags; /* open mode by client */
+ __u64 mou_uidgid; /* uid_gid on client */
+ int mou_opencount; /* # opened */
+ /* time of next access denied notification */
+ ktime_t mou_deniednext;
};
static int mdd_xattr_get(const struct lu_env *env,
- struct md_object *obj, struct lu_buf *buf,
- const char *name);
+ struct md_object *obj, struct lu_buf *buf,
+ const char *name);
static int mdd_changelog_data_store_by_fid(const struct lu_env *env,
struct mdd_device *mdd,
}
const struct lu_buf *mdd_buf_get_const(const struct lu_env *env,
- const void *area, ssize_t len)
+ const void *area, ssize_t len)
{
struct lu_buf *buf;
}
static int mdd_object_init(const struct lu_env *env, struct lu_object *o,
- const struct lu_object_conf *unused)
+ const struct lu_object_conf *unused)
{
- struct mdd_device *d = lu2mdd_dev(o->lo_dev);
- struct mdd_object *mdd_obj = lu2mdd_obj(o);
- struct lu_object *below;
- struct lu_device *under;
- ENTRY;
+ struct mdd_device *d = lu2mdd_dev(o->lo_dev);
+ struct mdd_object *mdd_obj = lu2mdd_obj(o);
+ struct lu_object *below;
+ struct lu_device *under;
+
+ ENTRY;
mdd_obj->mod_cltime = ktime_set(0, 0);
- under = &d->mdd_child->dd_lu_dev;
- below = under->ld_ops->ldo_object_alloc(env, o->lo_header, under);
+ under = &d->mdd_child->dd_lu_dev;
+ below = under->ld_ops->ldo_object_alloc(env, o->lo_header, under);
if (IS_ERR(below))
RETURN(PTR_ERR(below));
- lu_object_add(o, below);
+ lu_object_add(o, below);
- RETURN(0);
+ RETURN(0);
}
static int mdd_object_start(const struct lu_env *env, struct lu_object *o)
}
static int mdd_object_print(const struct lu_env *env, void *cookie,
- lu_printer_t p, const struct lu_object *o)
+ lu_printer_t p, const struct lu_object *o)
{
struct mdd_object *mdd = lu2mdd_obj((struct lu_object *)o);
return (*p)(env, cookie,
LUSTRE_MDD_NAME"-object@%p(open_count=%d, valid=%x, cltime=%lldns, flags=%lx)",
- mdd, mdd->mod_count, mdd->mod_valid,
+ mdd, mdd->mod_count, mdd->mod_valid,
ktime_to_ns(mdd->mod_cltime), mdd->mod_flags);
}
static const struct lu_object_operations mdd_lu_obj_ops = {
- .loo_object_init = mdd_object_init,
- .loo_object_start = mdd_object_start,
- .loo_object_free = mdd_object_free,
- .loo_object_print = mdd_object_print,
+ .loo_object_init = mdd_object_init,
+ .loo_object_start = mdd_object_start,
+ .loo_object_free = mdd_object_free,
+ .loo_object_print = mdd_object_print,
};
struct mdd_object *mdd_object_find(const struct lu_env *env,
- struct mdd_device *d,
- const struct lu_fid *f)
+ struct mdd_device *d,
+ const struct lu_fid *f)
{
- return md2mdd_obj(md_object_find_slice(env, &d->mdd_md_dev, f));
+ return md2mdd_obj(md_object_find_slice(env, &d->mdd_md_dev, f));
}
/*
* No permission check is needed.
*/
static int mdd_xattr_get(const struct lu_env *env,
- struct md_object *obj, struct lu_buf *buf,
- const char *name)
+ struct md_object *obj, struct lu_buf *buf,
+ const char *name)
{
struct mdd_object *mdd_obj = md2mdd_obj(obj);
struct mdd_device *mdd;
ENTRY;
if (mdd_object_exists(mdd_obj) == 0) {
- CERROR("%s: object "DFID" not found: rc = -2\n",
+ rc = -ENOENT;
+ CERROR("%s: object "DFID" not found: rc = %d\n",
mdd_obj_dev_name(mdd_obj),
- PFID(mdd_object_fid(mdd_obj)));
- return -ENOENT;
+ PFID(mdd_object_fid(mdd_obj)), rc);
+ return rc;
}
/* If the object has been destroyed, then do not get LMVEA, because
* it needs to load stripes from the iteration of the master object,
* and it will cause problem if master object has been destroyed, see
- * LU-6427 */
+ * LU-6427
+ */
if (unlikely((mdd_obj->mod_flags & DEAD_OBJ) &&
!(mdd_obj->mod_flags & ORPHAN_OBJ) &&
strcmp(name, XATTR_NAME_LMV) == 0))
RETURN(-ENOENT);
/* If the object has been delete from the namespace, then
- * get linkEA should return -ENOENT as well */
+ * get linkEA should return -ENOENT as well
+ */
if (unlikely((mdd_obj->mod_flags & (DEAD_OBJ | ORPHAN_OBJ)) &&
strcmp(name, XATTR_NAME_LINK) == 0))
RETURN(-ENOENT);
RETURN(rc);
}
-/*
- * Permission check is done when open,
- * no need check again.
- */
+/* Permission check is done when open, no need to check again. */
int mdd_readlink(const struct lu_env *env, struct md_object *obj,
struct lu_buf *buf)
{
- struct mdd_object *mdd_obj = md2mdd_obj(obj);
- struct dt_object *next;
- loff_t pos = 0;
- int rc;
- ENTRY;
-
- if (mdd_object_exists(mdd_obj) == 0) {
- CERROR("%s: object "DFID" not found: rc = -2\n",
- mdd_obj_dev_name(mdd_obj),PFID(mdd_object_fid(mdd_obj)));
- return -ENOENT;
- }
-
- next = mdd_object_child(mdd_obj);
+ struct mdd_object *mdd_obj = md2mdd_obj(obj);
+ struct dt_object *next;
+ loff_t pos = 0;
+ int rc;
+
+ ENTRY;
+
+ if (mdd_object_exists(mdd_obj) == 0) {
+ rc = -ENOENT;
+ CERROR("%s: object "DFID" not found: rc = %d\n",
+ mdd_obj_dev_name(mdd_obj),
+ PFID(mdd_object_fid(mdd_obj)), rc);
+ return rc;
+ }
+
+ next = mdd_object_child(mdd_obj);
LASSERT(next != NULL);
LASSERT(next->do_body_ops != NULL);
LASSERT(next->do_body_ops->dbo_read != NULL);
mdd_read_lock(env, mdd_obj, DT_TGT_CHILD);
rc = dt_read(env, next, buf, &pos);
- mdd_read_unlock(env, mdd_obj);
- RETURN(rc);
+ mdd_read_unlock(env, mdd_obj);
+ RETURN(rc);
}
-/*
- * No permission check is needed.
- */
+/* No permission check is needed. */
static int mdd_xattr_list(const struct lu_env *env, struct md_object *obj,
- struct lu_buf *buf)
+ struct lu_buf *buf)
{
- struct mdd_object *mdd_obj = md2mdd_obj(obj);
- int rc;
+ struct mdd_object *mdd_obj = md2mdd_obj(obj);
+ int rc;
- ENTRY;
+ ENTRY;
mdd_read_lock(env, mdd_obj, DT_TGT_CHILD);
rc = mdo_xattr_list(env, mdd_obj, buf);
- mdd_read_unlock(env, mdd_obj);
+ mdd_read_unlock(env, mdd_obj);
/* If the buffer is NULL then we are only here to get the
- * length of the xattr name list. */
+ * length of the xattr name list.
+ */
if (rc < 0 || buf->lb_buf == NULL)
RETURN(rc);
struct dt_object_format *dof = &mdd_env_info(env)->mdi_dof;
const struct dt_index_features *feat = spec->sp_feat;
int rc;
+
ENTRY;
if (feat != &dt_directory_features && feat != NULL) {
{
struct dt_object_format *dof = &mdd_env_info(env)->mdi_dof;
int rc;
+
ENTRY;
LASSERT(!mdd_object_exists(c));
int needacl)
{
int rc;
+
ENTRY;
rc = mdo_attr_set(env, obj, attr, handle);
struct thandle *handle)
{
int rc = 0;
+
ENTRY;
LASSERT(attr->la_valid & LA_CTIME);
/* Make sure the ctime is increased only, however, it's not strictly
* reliable at here because there is not guarantee to hold lock on
* object, so we just bypass some unnecessary cmtime setting first
- * and OSD has to check it again. */
+ * and OSD has to check it again.
+ */
if (attr->la_ctime < oattr->la_ctime)
attr->la_valid &= ~(LA_MTIME | LA_CTIME);
else if (attr->la_valid == LA_CTIME &&
if (la->la_valid == LA_CTIME) {
if (!(flags & MDS_PERM_BYPASS))
/* This is only for set ctime when rename's source is
- * on remote MDS. */
+ * on remote MDS.
+ */
rc = mdd_may_delete(env, NULL, NULL, obj, oattr, NULL,
1, 0);
if (rc == 0 && la->la_ctime <= oattr->la_ctime)
RETURN(-EPERM);
/* The IMMUTABLE and APPEND_ONLY flags can
- * only be changed by the relevant capability. */
+ * only be changed by the relevant capability.
+ */
if ((oldflags ^ newflags) &&
!cap_raised(uc->uc_cap, CAP_LINUX_IMMUTABLE))
RETURN(-EPERM);
if (la->la_valid & LA_KILL_SGID) {
la->la_valid &= ~LA_KILL_SGID;
- if (((oattr->la_mode & (S_ISGID | S_IXGRP)) ==
- (S_ISGID | S_IXGRP)) &&
+ if (((oattr->la_mode & (S_ISGID | 0010)) ==
+ (S_ISGID | 0010)) &&
!(la->la_valid & LA_MODE)) {
la->la_mode = oattr->la_mode;
la->la_valid |= LA_MODE;
!cap_raised(uc->uc_cap, CAP_FSETID))
la->la_mode &= ~S_ISGID;
} else {
- la->la_mode = oattr->la_mode;
+ la->la_mode = oattr->la_mode;
}
/* Make sure a caller can chown. */
* to avoid some races. This is the behavior we had in
* 2.0. The check for non-root was definitely wrong
* for 2.2 anyway, as it should have been using
- * CAP_FSETID rather than fsuid -- 19990830 SD. */
+ * CAP_FSETID rather than fsuid -- 19990830 SD.
+ */
if (((oattr->la_mode & S_ISUID) == S_ISUID) &&
!S_ISDIR(oattr->la_mode)) {
la->la_mode &= ~S_ISUID;
* locking). 19981026 David C Niemi <niemi@tux.org>
*
* Removed the fsuid check (see the comment above) --
- * 19990830 SD. */
- if (((oattr->la_mode & (S_ISGID | S_IXGRP)) ==
- (S_ISGID | S_IXGRP)) && !S_ISDIR(oattr->la_mode)) {
+ * 19990830 SD.
+ */
+ if (((oattr->la_mode & (S_ISGID | 0010)) ==
+ (S_ISGID | 0010)) && !S_ISDIR(oattr->la_mode)) {
la->la_mode &= ~S_ISGID;
la->la_valid |= LA_MODE;
}
if ((type >= CL_MTIME) && (type <= CL_ATIME) &&
ktime_before(mdd->mdd_cl.mc_starttime, mdd_obj->mod_cltime)) {
/* Don't need multiple updates in this log */
- /* Don't check under lock - no big deal if we get an extra
- entry */
+ /* Don't check under lock - no big deal if we get an extra entry */
RETURN(0);
}
struct thandle *handle;
struct mdd_device *mdd = lu2mdd_dev(&m->md_lu_dev);
int rc;
+
ENTRY;
LASSERT(fid != NULL);
/* We'll check this again below, but we check now before we
- * start a transaction. */
+ * start a transaction.
+ */
if (!mdd_changelog_enabled(env, mdd, type))
RETURN(0);
/* Precedence for choosing record type when multiple
* attributes change: setattr > mtime > ctime > atime
* (ctime changes when mtime does, plus chmod/chown.
- * atime and ctime are independent.) */
+ * atime and ctime are independent.)
+ */
static int mdd_attr_set_changelog(const struct lu_env *env,
- struct md_object *obj, struct thandle *handle,
+ struct md_object *obj, struct thandle *handle,
const struct lu_fid *pfid, __u64 valid)
{
struct mdd_device *mdd = mdo2mdd(obj);
}
static int mdd_declare_attr_set(const struct lu_env *env,
- struct mdd_device *mdd,
- struct mdd_object *obj,
+ struct mdd_device *mdd,
+ struct mdd_object *obj,
const struct lu_attr *attr,
- struct thandle *handle)
+ struct thandle *handle)
{
int rc;
rc = mdo_declare_attr_set(env, obj, attr, handle);
- if (rc)
- return rc;
+ if (rc)
+ return rc;
#ifdef CONFIG_LUSTRE_FS_POSIX_ACL
if (attr->la_valid & LA_MODE) {
mdd_read_lock(env, obj, DT_TGT_CHILD);
rc = mdo_xattr_get(env, obj, &LU_BUF_NULL,
XATTR_NAME_ACL_ACCESS);
- mdd_read_unlock(env, obj);
- if (rc == -EOPNOTSUPP || rc == -ENODATA)
- rc = 0;
- else if (rc < 0)
- return rc;
+ mdd_read_unlock(env, obj);
+ if (rc == -EOPNOTSUPP || rc == -ENODATA)
+ rc = 0;
+ else if (rc < 0)
+ return rc;
- if (rc != 0) {
+ if (rc != 0) {
struct lu_buf *buf = mdd_buf_get(env, NULL, rc);
- rc = mdo_declare_xattr_set(env, obj, buf,
- XATTR_NAME_ACL_ACCESS, 0,
- handle);
- if (rc)
- return rc;
- }
- }
+
+ rc = mdo_declare_xattr_set(env, obj, buf,
+ XATTR_NAME_ACL_ACCESS, 0,
+ handle);
+ if (rc)
+ return rc;
+ }
+ }
#endif
rc = mdd_declare_changelog_store(env, mdd, CL_SETXATTR, NULL, NULL,
bool quota_reserved = false;
bool chrgrp_by_unprivileged_user = false;
int rc;
+
ENTRY;
/* we do not use ->attr_set() for LOV/HSM EA any more */
/* If an unprivileged user changes group of some file,
* the setattr operation will be processed synchronously to
- * honor the quota limit of the corresponding group. see LU-5152 */
+ * honor the quota limit of the corresponding group. see LU-5152
+ */
uc = lu_ucred_check(env);
memset(&qi, 0, sizeof(qi));
if (S_ISREG(attr->la_mode) && la->la_valid & LA_GID &&
/* Flush the possible existing client setattr requests to OSTs
* to keep the order with the current setattr operation that
- * will be sent directly to OSTs. see LU-5152 */
- /* LU-11303 disable sync as this is too heavyweight.
+ * will be sent directly to OSTs. see LU-5152
+ *
+ * LU-11303 disable sync as this is too heavyweight.
* This should be replaced with a sync only for the object
* being modified here, not the whole filesystem.
- rc = dt_sync(env, mdd->mdd_child);
- if (rc)
+ rc = dt_sync(env, mdd->mdd_child);
+ if (rc)
GOTO(out, rc);
*/
}
const char *name)
{
struct lu_ucred *uc = lu_ucred_assert(env);
+
ENTRY;
if (attr->la_flags & (LUSTRE_IMMUTABLE_FL | LUSTRE_APPEND_FL))
if (strncmp(XATTR_USER_PREFIX, name,
sizeof(XATTR_USER_PREFIX) - 1) == 0) {
/* For sticky directories, only the owner and privileged user
- * can write attributes. */
+ * can write attributes.
+ */
if (S_ISDIR(attr->la_mode) && (attr->la_mode & S_ISVTX) &&
(uc->uc_fsuid != attr->la_uid) &&
!cap_raised(uc->uc_cap, CAP_FOWNER))
current_buf = lu_buf_check_and_alloc(&info->mdi_xattr_buf,
min_t(unsigned int,
mdd_obj2mdd_dev(mdd_obj)->mdd_dt_conf.ddp_max_ea_size,
- XATTR_SIZE_MAX));
+ XATTR_SIZE_MAX));
rc = mdo_xattr_get(env, mdd_obj, current_buf, XATTR_NAME_HSM);
rc = lustre_buf2hsm(current_buf->lb_buf, rc, current_mh);
if (rc < 0 && rc != -ENODATA)
handle->th_complex = 1;
/* it doesn't need to track the PFID update via llog, because LFSCK
- * will repair it even it goes wrong */
+ * will repair it even it goes wrong
+ */
rc = mdd_declare_xattr_set(env, mdd, o, NULL, XATTR_NAME_FID,
0, handle);
if (rc)
int rc2 = mdo_xattr_set(env, obj, buf_save, XATTR_NAME_LOV,
LU_XATTR_REPLACE, handle);
if (rc2)
- CERROR("%s: failed rollback "DFID
- " layout: file state unknown: rc = %d\n",
+ CERROR("%s: failed rollback "DFID" layout: file state unknown: rc = %d\n",
mdd_obj_dev_name(obj),
PFID(mdd_object_fid(obj)), rc);
}
struct md_object *victim)
{
struct mdd_object *o1 = md2mdd_obj(target);
+ int rc = 0;
/* cannot extend directory's LOVEA */
if (S_ISDIR(mdd_object_type(o1))) {
- CERROR("%s: Don't extend directory's LOVEA, just set it.\n",
- mdd_obj_dev_name(o1));
- RETURN(-EISDIR);
+ rc = -EISDIR;
+ CERROR("%s: Don't extend directory's LOVEA, just set it: rc = %d\n",
+ mdd_obj_dev_name(o1), rc);
+ RETURN(rc);
}
- RETURN(0);
+ RETURN(rc);
}
/**
strcmp(name, XATTR_NAME_ACL_DEFAULT) == 0) {
struct posix_acl *acl;
- /* user may set empty ACL, which should be treated as removing
- * ACL. */
+ /* user may set empty ACL, which should be treated as removing ACL */
acl = posix_acl_from_xattr(&init_user_ns, buf->lb_buf,
buf->lb_len);
if (IS_ERR(acl))
}
static int mdd_declare_xattr_del(const struct lu_env *env,
- struct mdd_device *mdd,
- struct mdd_object *obj,
- const char *name,
- struct thandle *handle)
+ struct mdd_device *mdd,
+ struct mdd_object *obj,
+ const char *name,
+ struct thandle *handle)
{
enum changelog_rec_type type;
int rc;
struct mdd_device *mdd = mdo2mdd(obj);
struct thandle *handle;
int rc;
+
ENTRY;
rc = mdd_la_get(env, mdd_obj, attr);
repeat:
rc = mdo_xattr_get(env, obj, buf, name);
if (rc == -ERANGE) {
- /* mdi_big_buf is allocated but is too small
- * we need to increase it */
+ /* mdi_big_buf is allocated but too small, need to increase it */
buf = lu_buf_check_and_alloc(&mdd_env_info(env)->mdi_big_buf,
buf->lb_len * 2);
if (buf->lb_buf == NULL)
enum hsm_states hsm_flags;
struct hsm_attrs *attrs;
+ ENTRY;
attrs = hsm_buf->lb_buf;
hsm_flags = le32_to_cpu(attrs->hsm_flags);
__u64 flags)
{
const struct lu_fid *fid1, *fid2;
+
ENTRY;
fid1 = mdd_object_fid(o1);
} else if (le32_to_cpu(lmm->lmm_magic) == LOV_MAGIC_V1 ||
le32_to_cpu(lmm->lmm_magic) == LOV_MAGIC_V3) {
__u16 tmp_gen = *gen;
+
if (get)
*gen = le16_to_cpu(lmm->lmm_layout_gen);
else
return 0;
}
-/**
- * swap layouts between 2 lustre objects
- */
+/* swap layouts between 2 lustre objects */
static int mdd_swap_layouts(const struct lu_env *env,
struct md_object *obj1, struct md_object *obj2,
__u64 dv1, __u64 dv2, __u64 flags)
memset(info->mdi_buf, 0, sizeof(info->mdi_buf));
/* we have to sort the 2 obj, so locking will always
- * be in the same order, even in case of 2 concurrent swaps */
+ * be in the same order, even in case of 2 concurrent swaps
+ */
rc = lu_fid_cmp(mdd_object_fid(fst_o), mdd_object_fid(snd_o));
if (rc == 0) /* same fid ? */
RETURN(-EPERM);
} else if (domsize_dom > 0 || domsize_vlt > 0) {
/* 'lfs swap_layouts' case, neither file should have DoM */
rc = -EOPNOTSUPP;
- CDEBUG(D_LAYOUT, "cannot swap layouts with DOM component, "
- "use migration instead: rc = %d\n", rc);
+ CDEBUG(D_LAYOUT, "cannot swap layouts with DOM component, use migration instead: rc = %d\n",
+ rc);
GOTO(stop, rc);
}
GOTO(stop, rc = 0);
/* to help inode migration between MDT, it is better to
- * start by the no layout file (if one), so we order the swap */
+ * start by the no layout file (if one), so we order the swap
+ */
if (snd_buf->lb_buf == NULL) {
swap(fst_o, snd_o);
swap(fst_buf, snd_buf);
int steps = 0;
/* failure on second file, but first was done, so we have
- * to roll back first. */
+ * to roll back first.
+ */
if (fst_buf->lb_buf != NULL) {
mdd_set_lmm_oi(fst_lmm, saved_oi);
mdd_set_lmm_gen(fst_lmm, &saved_gen);
handle);
}
- do_lbug:
+do_lbug:
if (rc2 < 0) {
/* very bad day */
CERROR("%s: unable to roll back layout swap of "DFID" and "DFID", steps: %d: rc = %d/%d\n",
{
struct mdd_device *mdd = mdd_obj2mdd_dev(obj);
int rc;
+
ENTRY;
if (mlc->mlc_opc != MD_LAYOUT_WRITE)
struct lustre_som_attrs *som = &mlc->mlc_som;
int fl = 0;
int rc;
+
ENTRY;
/* Verify acceptable operations */
case MD_LAYOUT_WRITE:
case MD_LAYOUT_RESYNC:
/* these are legal operations - this represents the case that
- * a few mirrors were missed in the last resync. */
+ * a few mirrors were missed in the last resync.
+ */
break;
case MD_LAYOUT_RESYNC_DONE:
default:
struct lustre_som_attrs *som = &mlc->mlc_som;
int fl = 0;
int rc;
+
ENTRY;
switch (mlc->mlc_opc) {
* instantiate all stale components right away to get ready
* for mirror copy. In order to avoid layout version change,
* client should avoid sending LAYOUT_WRITE request at the
- * resync state. */
+ * resync state.
+ */
break;
case MD_LAYOUT_WRITE:
/**
* to WRITE_PENDING in a sync tx. It doesn't have to change the layout
* version because the version will be increased in the transition to
* SYNC_PENDING later so that it can deny the write request from potential
- * evicted SYNC clients. */
+ * evicted SYNC clients.
+ */
static int
mdd_object_update_sync_pending(const struct lu_env *env, struct mdd_object *obj,
struct md_layout_change *mlc, struct thandle *handle)
struct lu_buf *som_buf = &mdd_env_info(env)->mdi_buf[1];
int fl = 0;
int rc;
+
ENTRY;
/* operation validation */
break;
case MD_LAYOUT_RESYNC:
/* resync again, most likely the previous run failed.
- * no-op if it's already in SYNC_PENDING state */
+ * no-op if it's already in SYNC_PENDING state
+ */
RETURN(0);
default:
RETURN(-EBUSY);
RETURN(rc);
}
-/**
- * Update the layout for PCC-RO.
- */
+/* Update the layout for PCC-RO. */
static int
mdd_layout_update_pccro(const struct lu_env *env, struct md_object *o,
struct md_layout_change *mlc)
memset(hint, 0, sizeof(*hint));
/* For striped directory, give striping EA to lod_ah_init, which will
- * decide the stripe_offset and stripe count by it. */
+ * decide the stripe_offset and stripe count by it.
+ */
if (S_ISDIR(attr->la_mode) && spec) {
if (unlikely(spec->sp_cr_flags & MDS_OPEN_HAS_EA)) {
hint->dah_eadata = spec->u.sp_ea.eadata;
* "acc_mode = 0" allowance for newly-created files isn't honoured.
* NFSD uses the MDS_OPEN_OWNEROVERRIDE flag to say that a file
* owner can write to a file even if it is marked readonly to hide
- * its brokenness. (bug 5781) */
+ * its brokenness. (bug 5781)
+ */
if (open_flags & MDS_OPEN_OWNEROVERRIDE) {
struct lu_ucred *uc = lu_ucred_check(env);
{
unsigned int may_mask;
int rc;
+
ENTRY;
/* EEXIST check, also opening of *open* orphans is allowed so we can
struct mdd_device *mdd = mdo2mdd(obj);
enum changelog_rec_type type = CL_OPEN;
int rc = 0;
+
ENTRY;
mdd_write_lock(env, mdd_obj, DT_TGT_CHILD);
}
/* we can't hold object lock over transaction start
- * and we don't actually need the object to be locked */
+ * and we don't actually need the object to be locked
+ */
mdd_write_unlock(env, mdd_obj);
/* FYI, only the bottom 32 bits of open_flags are recorded */
return mdo_declare_destroy(env, obj, handle);
}
-/*
- * No permission check is needed.
- */
+/* No permission check is needed. */
static int mdd_close(const struct lu_env *env, struct md_object *obj,
struct md_attr *ma, u64 open_flags)
{
bool blocked = false;
bool last_close_by_uid = false;
const struct lu_ucred *uc = lu_ucred(env);
+
ENTRY;
if (ma->ma_valid & MA_FLAGS && ma->ma_attr_flags & MDS_KEEP_ORPHAN) {
mdd_write_unlock(env, mdd_obj);
if (mdd_obj->mod_flags & ORPHAN_OBJ && !mdd_obj->mod_count)
- CDEBUG(D_HA, "Object "DFID" is retained in orphan "
- "list\n", PFID(mdd_object_fid(mdd_obj)));
+ CDEBUG(D_HA, "Object "DFID" is retained in orphan list\n",
+ PFID(mdd_object_fid(mdd_obj)));
RETURN(0);
}
/* mdd_finish_unlink() will always set orphan object as DEAD_OBJ, but
- * it might fail to add the object to orphan list (w/o ORPHAN_OBJ). */
+ * it might fail to add the object to orphan list (w/o ORPHAN_OBJ).
+ */
/* check without any lock */
is_orphan = mdd_obj->mod_count == 1 &&
(mdd_obj->mod_flags & (ORPHAN_OBJ | DEAD_OBJ)) != 0;
* mdd_trans_create() failed because of barrier_entry(), the
* MDT-object will become real orphan that is neither in the
* namespace nor in the orphan list. Such bad case should be
- * very rare and will be handled by e2fsck/lfsck. */
+ * very rare and will be handled by e2fsck/lfsck.
+ */
handle = mdd_trans_create(env, mdo2mdd(obj));
if (IS_ERR(handle)) {
rc = PTR_ERR(handle);
/* under mdd write lock */
/* If recording, see if we need to remove UID from list. uc is not
- * initialized if the client has been evicted. */
+ * initialized if the client has been evicted.
+ */
if (mdd_changelog_enabled(env, mdd, CL_OPEN) && uc) {
struct mdd_object_user *mou;
/* Orphan object */
/* NB: Object maybe not in orphan list originally, it is rare case for
* mdd_finish_unlink() failure, in that case, the object doesn't have
- * ORPHAN_OBJ flag */
+ * ORPHAN_OBJ flag
+ */
if ((mdd_obj->mod_flags & ORPHAN_OBJ) != 0) {
/* remove link to object from orphan index */
LASSERT(handle != NULL);
rc = mdd_orphan_delete(env, mdd_obj, handle);
if (rc != 0) {
- CERROR("%s: unable to delete "DFID" from orphan list: "
- "rc = %d\n", lu_dev_name(mdd2lu_dev(mdd)),
+ CERROR("%s: unable to delete "DFID" from orphan list: rc = %d\n",
+ lu_dev_name(mdd2lu_dev(mdd)),
PFID(mdd_object_fid(mdd_obj)), rc);
/* If object was not deleted from orphan list, do not
* destroy OSS objects, which will be done when next
- * recovery. */
+ * recovery.
+ */
GOTO(out, rc);
}
- CDEBUG(D_HA, "Object "DFID" is deleted from orphan "
- "list, OSS objects to be destroyed.\n",
+ CDEBUG(D_HA, "Object "DFID" is deleted from orphan list, OSS objects to be destroyed.\n",
PFID(mdd_object_fid(mdd_obj)));
}
rc = mdo_destroy(env, mdd_obj, handle);
if (rc != 0) {
- CERROR("%s: unable to delete "DFID" from orphan list: "
- "rc = %d\n", lu_dev_name(mdd2lu_dev(mdd)),
+ CERROR("%s: unable to delete "DFID" from orphan list: rc = %d\n",
+ lu_dev_name(mdd2lu_dev(mdd)),
PFID(mdd_object_fid(mdd_obj)), rc);
}
EXIT;
return rc;
}
-/*
- * Permission check is done when open,
- * no need check again.
- */
+/* Permission check is done when open, no need to check again. */
static int mdd_readpage_sanity_check(const struct lu_env *env,
struct mdd_object *obj)
{
}
int mdd_readpage(const struct lu_env *env, struct md_object *obj,
- const struct lu_rdpg *rdpg)
+ const struct lu_rdpg *rdpg)
{
- struct mdd_object *mdd_obj = md2mdd_obj(obj);
- int rc;
- ENTRY;
+ struct mdd_object *mdd_obj = md2mdd_obj(obj);
+ int rc;
- if (mdd_object_exists(mdd_obj) == 0) {
- CERROR("%s: object "DFID" not found: rc = -2\n",
- mdd_obj_dev_name(mdd_obj),PFID(mdd_object_fid(mdd_obj)));
- return -ENOENT;
- }
+ ENTRY;
+
+ if (mdd_object_exists(mdd_obj) == 0) {
+ rc = -ENOENT;
+ CERROR("%s: object "DFID" not found: rc = %d\n",
+ mdd_obj_dev_name(mdd_obj),
+ PFID(mdd_object_fid(mdd_obj)), rc);
+ return rc;
+ }
mdd_read_lock(env, mdd_obj, DT_TGT_CHILD);
- rc = mdd_readpage_sanity_check(env, mdd_obj);
- if (rc)
- GOTO(out_unlock, rc);
+ rc = mdd_readpage_sanity_check(env, mdd_obj);
+ if (rc)
+ GOTO(out_unlock, rc);
- if (mdd_is_dead_obj(mdd_obj)) {
- struct page *pg;
- struct lu_dirpage *dp;
+ if (mdd_is_dead_obj(mdd_obj)) {
+ struct page *pg;
+ struct lu_dirpage *dp;
/*
* According to POSIX, please do not return any entry to client:
CDEBUG(D_INODE, "readdir from dead object: "DFID"\n",
PFID(mdd_object_fid(mdd_obj)));
- if (rdpg->rp_count <= 0)
- GOTO(out_unlock, rc = -EFAULT);
- LASSERT(rdpg->rp_pages != NULL);
+ if (rdpg->rp_count <= 0)
+ GOTO(out_unlock, rc = -EFAULT);
+ LASSERT(rdpg->rp_pages != NULL);
- pg = rdpg->rp_pages[0];
+ pg = rdpg->rp_pages[0];
dp = (struct lu_dirpage *)kmap(pg);
- memset(dp, 0 , sizeof(struct lu_dirpage));
- dp->ldp_hash_start = cpu_to_le64(rdpg->rp_hash);
- dp->ldp_hash_end = cpu_to_le64(MDS_DIR_END_OFF);
- dp->ldp_flags = cpu_to_le32(LDF_EMPTY);
+ memset(dp, 0, sizeof(struct lu_dirpage));
+ dp->ldp_hash_start = cpu_to_le64(rdpg->rp_hash);
+ dp->ldp_hash_end = cpu_to_le64(MDS_DIR_END_OFF);
+ dp->ldp_flags = cpu_to_le32(LDF_EMPTY);
kunmap(pg);
- GOTO(out_unlock, rc = LU_PAGE_SIZE);
- }
+ GOTO(out_unlock, rc = LU_PAGE_SIZE);
+ }
rc = dt_index_walk(env, mdd_object_child(mdd_obj), rdpg,
mdd_dir_page_build, NULL);
GOTO(out_unlock, rc);
out_unlock:
- mdd_read_unlock(env, mdd_obj);
- return rc;
+ mdd_read_unlock(env, mdd_obj);
+ return rc;
}
static int mdd_object_sync(const struct lu_env *env, struct md_object *obj)
union ldlm_policy_data *policy)
{
struct mdd_object *mdd_obj = md2mdd_obj(obj);
+
return dt_object_lock(env, mdd_object_child(mdd_obj), lh,
einfo, policy);
}
union ldlm_policy_data *policy)
{
struct mdd_object *mdd_obj = md2mdd_obj(obj);
+
return dt_object_unlock(env, mdd_object_child(mdd_obj), einfo, policy);
}