#endif
extern int lustre_posix_acl_permission(struct lu_ucred *mu,
- const struct lu_attr *la, int want,
+ const struct lu_attr *la,
+ unsigned int may_mask,
posix_acl_xattr_entry *entry,
int count);
extern int lustre_posix_acl_chmod_masq(posix_acl_xattr_entry *entry,
/* do NOT or the MAY_*'s, you'll get the weakest */
static inline int mds_accmode(u64 open_flags)
{
- int res = 0;
+ unsigned int may_mask = 0;
if (open_flags & MDS_FMODE_READ)
- res |= MAY_READ;
+ may_mask |= MAY_READ;
if (open_flags & (MDS_FMODE_WRITE | MDS_OPEN_TRUNC | MDS_OPEN_APPEND))
- res |= MAY_WRITE;
+ may_mask |= MAY_WRITE;
if (open_flags & MDS_FMODE_EXEC)
- res = MAY_EXEC;
+ may_mask = MAY_EXEC;
- return res;
+ return may_mask;
}
/** @} mds */
struct md_object_operations {
int (*moo_permission)(const struct lu_env *env,
struct md_object *pobj, struct md_object *cobj,
- struct md_attr *attr, int mask);
+ struct md_attr *attr, unsigned int may_mask);
int (*moo_attr_get)(const struct lu_env *env, struct md_object *obj,
struct md_attr *attr);
/** md operations */
static inline int mo_permission(const struct lu_env *env, struct md_object *p,
struct md_object *c, struct md_attr *at,
- int mask)
+ unsigned int may_mask)
{
LASSERT(c->mo_ops->moo_permission);
- return c->mo_ops->moo_permission(env, p, c, at, mask);
+ return c->mo_ops->moo_permission(env, p, c, at, may_mask);
}
static inline int mo_attr_get(const struct lu_env *env, struct md_object *m,
static int
__mdd_lookup(const struct lu_env *env, struct md_object *pobj,
const struct lu_attr *pattr, const struct lu_name *lname,
- struct lu_fid* fid, int mask)
+ struct lu_fid *fid, unsigned int may_mask)
{
const char *name = lname->ln_name;
const struct dt_key *key = (const struct dt_key *)name;
PFID(mdd_object_fid(mdd_obj)));
}
- rc = mdd_permission_internal_locked(env, mdd_obj, pattr, mask,
+ rc = mdd_permission_internal_locked(env, mdd_obj, pattr, may_mask,
DT_TGT_PARENT);
if (rc)
RETURN(rc);
int __mdd_fix_mode_acl(const struct lu_env *env, struct lu_buf *buf,
__u32 *mode);
int __mdd_permission_internal(const struct lu_env *env, struct mdd_object *obj,
- const struct lu_attr *la, int mask, int role);
+ const struct lu_attr *la, unsigned int may_mask,
+ int role);
int mdd_permission(const struct lu_env *env, struct md_object *pobj,
- struct md_object *cobj, struct md_attr *ma, int mask);
+ struct md_object *cobj, struct md_attr *ma,
+ unsigned int may_mask);
int mdd_generic_thread_start(struct mdd_generic_thread *thread,
int (*func)(void *), void *data, char *name);
void mdd_generic_thread_stop(struct mdd_generic_thread *thread);
static inline int mdd_permission_internal(const struct lu_env *env,
struct mdd_object *obj,
- const struct lu_attr *la, int mask)
+ const struct lu_attr *la,
+ unsigned int may_mask)
{
- return __mdd_permission_internal(env, obj, la, mask, -1);
+ return __mdd_permission_internal(env, obj, la, may_mask, -1);
}
static inline int mdd_permission_internal_locked(const struct lu_env *env,
struct mdd_object *obj,
const struct lu_attr *la,
- int mask,
+ unsigned int may_mask,
enum dt_object_role role)
{
- return __mdd_permission_internal(env, obj, la, mask, role);
+ return __mdd_permission_internal(env, obj, la, may_mask, role);
}
/* mdd inline func for calling osd_dt_object ops */
nc->do_ops->do_ah_init(env, hint, np, nc, attr->la_mode & S_IFMT);
}
-static int accmode(const struct lu_env *env, const struct lu_attr *la,
- u64 open_flags)
+static int mdd_accmode(const struct lu_env *env, const struct lu_attr *la,
+ u64 open_flags)
{
/* Sadly, NFSD reopens a file repeatedly during operation, so the
* "acc_mode = 0" allowance for newly-created files isn't honoured.
const struct lu_attr *attr, u64 open_flags,
int is_replay)
{
- int mode, rc;
+ unsigned int may_mask;
+ int rc;
ENTRY;
/* EEXIST check, also opening of *open* orphans is allowed so we can
if (S_ISLNK(attr->la_mode))
RETURN(-ELOOP);
- mode = accmode(env, attr, open_flags);
+ may_mask = mdd_accmode(env, attr, open_flags);
- if (S_ISDIR(attr->la_mode) && (mode & MAY_WRITE))
+ if (S_ISDIR(attr->la_mode) && (may_mask & MAY_WRITE))
RETURN(-EISDIR);
if (!(open_flags & MDS_OPEN_CREATED)) {
- rc = mdd_permission_internal(env, obj, attr, mode);
+ rc = mdd_permission_internal(env, obj, attr, may_mask);
if (rc)
RETURN(rc);
}
* Hold read_lock for obj.
*/
static int mdd_check_acl(const struct lu_env *env, struct mdd_object *obj,
- const struct lu_attr *la, int mask)
+ const struct lu_attr *la, unsigned int may_mask)
{
#ifdef CONFIG_LUSTRE_FS_POSIX_ACL
struct lu_ucred *uc = lu_ucred_assert(env);
if (entry_count <= 0)
RETURN(-EAGAIN);
- rc = lustre_posix_acl_permission(uc, la, mask, entry, entry_count);
+ rc = lustre_posix_acl_permission(uc, la, may_mask, entry, entry_count);
RETURN(rc);
#else
ENTRY;
}
int __mdd_permission_internal(const struct lu_env *env, struct mdd_object *obj,
- const struct lu_attr *la, int mask, int role)
+ const struct lu_attr *la, unsigned int may_mask,
+ int role)
{
struct lu_ucred *uc = lu_ucred(env);
__u32 mode;
int rc;
ENTRY;
- if (mask == 0)
+ if (may_mask == 0)
RETURN(0);
/* These means unnecessary for permission check */
/*
* Nobody gets write access to an immutable file.
*/
- if ((mask & MAY_WRITE) && (la->la_valid & LA_FLAGS) &&
+ if ((may_mask & MAY_WRITE) && (la->la_valid & LA_FLAGS) &&
(la->la_flags & LUSTRE_IMMUTABLE_FL))
RETURN(-EACCES);
mode = la->la_mode;
if (uc->uc_fsuid == la->la_uid) {
mode >>= 6;
- } else {
- if (mode & S_IRWXG) {
- if (role != -1)
- mdd_read_lock(env, obj, role);
- rc = mdd_check_acl(env, obj, la, mask);
- if (role != -1)
- mdd_read_unlock(env, obj);
- if (rc == -EACCES)
- goto check_capabilities;
- else if ((rc != -EAGAIN) && (rc != -EOPNOTSUPP) &&
- (rc != -ENODATA))
- RETURN(rc);
- }
- if (lustre_in_group_p(uc, la->la_gid))
- mode >>= 3;
- }
-
- if (((mode & mask & S_IRWXO) == mask))
- RETURN(0);
+ } else {
+ if (mode & S_IRWXG) {
+ if (role != -1)
+ mdd_read_lock(env, obj, role);
+ rc = mdd_check_acl(env, obj, la, may_mask);
+ if (role != -1)
+ mdd_read_unlock(env, obj);
+ if (rc == -EACCES)
+ goto check_capabilities;
+ else if ((rc != -EAGAIN) && (rc != -EOPNOTSUPP) &&
+ (rc != -ENODATA))
+ RETURN(rc);
+ }
+ if (lustre_in_group_p(uc, la->la_gid))
+ mode >>= 3;
+ }
+
+ if ((mode & may_mask & S_IRWXO) == may_mask)
+ RETURN(0);
check_capabilities:
- if (!(mask & MAY_EXEC) ||
+ if (!(may_mask & MAY_EXEC) ||
(la->la_mode & S_IXUGO) || S_ISDIR(la->la_mode))
if (md_capable(uc, CAP_DAC_OVERRIDE))
RETURN(0);
- if ((mask == MAY_READ) ||
- (S_ISDIR(la->la_mode) && !(mask & MAY_WRITE)))
+ if ((may_mask == MAY_READ) ||
+ (S_ISDIR(la->la_mode) && !(may_mask & MAY_WRITE)))
if (md_capable(uc, CAP_DAC_READ_SEARCH))
RETURN(0);
}
int mdd_permission(const struct lu_env *env, struct md_object *pobj,
- struct md_object *cobj, struct md_attr *ma, int mask)
+ struct md_object *cobj, struct md_attr *ma,
+ unsigned int may_mask)
{
struct mdd_object *mdd_pobj = NULL;
struct mdd_object *mdd_cobj;
RETURN(rc);
rc = mdd_permission_internal_locked(env, mdd_cobj, cattr,
- mask & ~MAY_RGETFACL,
+ may_mask & ~MAY_RGETFACL,
DT_TGT_CHILD);
- if (unlikely(rc == 0 && (mask & MAY_RGETFACL))) {
+ if (unlikely(rc == 0 && (may_mask & MAY_RGETFACL))) {
if (likely(!uc))
uc = lu_ucred_assert(env);
* Check permission based on POSIX ACL.
*/
int lustre_posix_acl_permission(struct lu_ucred *mu, const struct lu_attr *la,
- int want, posix_acl_xattr_entry *entry,
- int count)
+ unsigned int may_mask,
+ posix_acl_xattr_entry *entry, int count)
{
posix_acl_xattr_entry *pa, *pe, *mask_obj;
posix_acl_xattr_entry ae, me;
+ __u16 acl_want;
int found = 0;
if (count <= 0)
return -EACCES;
+ /* There is implicit conversion between MAY_* modes and ACL_* modes.
+ * Don't bother explicitly converting them unless they actually change.
+ */
+ if (0) {
+ acl_want = (may_mask & MAY_READ ? ACL_READ : 0) |
+ (may_mask & MAY_WRITE ? ACL_WRITE : 0) |
+ (may_mask & MAY_EXEC ? ACL_EXECUTE : 0);
+ } else {
+ BUILD_BUG_ON(MAY_READ != ACL_READ);
+ BUILD_BUG_ON(MAY_WRITE != ACL_WRITE);
+ BUILD_BUG_ON(MAY_EXEC != ACL_EXECUTE);
+
+ /* NOTE(review): narrowing to __u16 assumes only rwx MAY_* bits
+  * reach this point; e.g. mdd_permission() strips MAY_RGETFACL
+  * before calling — confirm for other callers, since any extra
+  * bit in acl_want would make every ACL entry check fail.
+  */
+ acl_want = may_mask;
+ }
+
for (pa = &entry[0], pe = &entry[count - 1]; pa <= pe; pa++) {
lustre_posix_acl_le_to_cpu(&ae, pa);
switch (ae.e_tag) {
case ACL_GROUP_OBJ:
if (lustre_in_group_p(mu, la->la_gid)) {
found = 1;
- if ((ae.e_perm & want) == want)
+ if ((ae.e_perm & acl_want) == acl_want)
goto mask;
}
break;
case ACL_GROUP:
if (lustre_in_group_p(mu, ae.e_id)) {
found = 1;
- if ((ae.e_perm & want) == want)
+ if ((ae.e_perm & acl_want) == acl_want)
goto mask;
}
break;
for (mask_obj = pa + 1; mask_obj <= pe; mask_obj++) {
lustre_posix_acl_le_to_cpu(&me, mask_obj);
if (me.e_tag == ACL_MASK) {
- if ((ae.e_perm & me.e_perm & want) == want)
+ if ((ae.e_perm & me.e_perm & acl_want) ==
+     acl_want)
return 0;
return -EACCES;
}
check_perm:
- if ((ae.e_perm & want) == want)
+ if ((ae.e_perm & acl_want) == acl_want)
return 0;
return -EACCES;
{
posix_acl_xattr_entry *group_obj = NULL, *mask_obj = NULL, *pa, *pe;
+ /* There is implicit conversion between S_IRWX modes and ACL_* modes.
+ * Don't bother explicitly converting them unless they actually change.
+ */
+ BUILD_BUG_ON(S_IROTH != ACL_READ);
+ BUILD_BUG_ON(S_IWOTH != ACL_WRITE);
+ BUILD_BUG_ON(S_IXOTH != ACL_EXECUTE);
+
for (pa = &entry[0], pe = &entry[count - 1]; pa <= pe; pa++) {
switch (le16_to_cpu(pa->e_tag)) {
case ACL_USER_OBJ: