4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
20 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
21 * CA 95054 USA or visit www.sun.com if you need additional information or
27 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
28 * Use is subject to license terms.
30 * Copyright (c) 2012, Intel Corporation.
33 * This file is part of Lustre, http://www.lustre.org/
34 * Lustre is a trademark of Sun Microsystems, Inc.
36 * lustre/obdclass/acl.c
38 * Lustre Access Control List.
40 * Author: Fan Yong <fanyong@clusterfs.com>
43 #define DEBUG_SUBSYSTEM S_SEC
45 #include <lustre_acl.h>
46 #include <lustre_eacl.h>
47 #include <obd_support.h>
49 #ifdef CONFIG_FS_POSIX_ACL
51 #define CFS_ACL_XATTR_VERSION POSIX_ACL_XATTR_VERSION
/*
 * Per-entry change status carried in ext_acl_xattr_entry.e_stat.
 * Used by the merge helpers below to reconcile a client-side extended
 * ACL against the server-side POSIX ACL.
 * NOTE(review): the enum's opening/closing lines are not visible in this
 * fragment; entries left byte-identical, comments only.
 */
54 ES_UNK = 0, /* unknown stat */
55 ES_UNC = 1, /* ACL entry is not changed */
56 ES_MOD = 2, /* ACL entry is modified */
57 ES_ADD = 3, /* ACL entry is added */
58 ES_DEL = 4 /* ACL entry is deleted */
/*
 * Convert one extended ACL entry from little-endian (on-wire/on-disk)
 * byte order to CPU byte order. @d and @s may alias.
 * NOTE(review): braces not visible in this fragment; code left
 * byte-identical, comments only.
 */
61 static inline void lustre_ext_acl_le_to_cpu(ext_acl_xattr_entry *d,
62 ext_acl_xattr_entry *s)
64 d->e_tag = le16_to_cpu(s->e_tag);
65 d->e_perm = le16_to_cpu(s->e_perm);
66 d->e_id = le32_to_cpu(s->e_id);
67 d->e_stat = le32_to_cpu(s->e_stat);
/*
 * Convert one extended ACL entry from CPU byte order to little-endian
 * (on-wire/on-disk) byte order. Inverse of lustre_ext_acl_le_to_cpu().
 */
70 static inline void lustre_ext_acl_cpu_to_le(ext_acl_xattr_entry *d,
71 ext_acl_xattr_entry *s)
73 d->e_tag = cpu_to_le16(s->e_tag);
74 d->e_perm = cpu_to_le16(s->e_perm);
75 d->e_id = cpu_to_le32(s->e_id);
76 d->e_stat = cpu_to_le32(s->e_stat);
/*
 * Convert one POSIX ACL xattr entry from little-endian byte order to
 * CPU byte order (posix entries carry no e_stat field).
 */
79 static inline void lustre_posix_acl_le_to_cpu(posix_acl_xattr_entry *d,
80 posix_acl_xattr_entry *s)
82 d->e_tag = le16_to_cpu(s->e_tag);
83 d->e_perm = le16_to_cpu(s->e_perm);
84 d->e_id = le32_to_cpu(s->e_id);
/*
 * Convert one POSIX ACL xattr entry from CPU byte order to little-endian
 * byte order. Inverse of lustre_posix_acl_le_to_cpu().
 */
87 static inline void lustre_posix_acl_cpu_to_le(posix_acl_xattr_entry *d,
88 posix_acl_xattr_entry *s)
90 d->e_tag = cpu_to_le16(s->e_tag);
91 d->e_perm = cpu_to_le16(s->e_perm);
92 d->e_id = cpu_to_le32(s->e_id);
/*
 * Check whether credential @mu is granted the @want access bits on an
 * object with attributes @la, according to the little-endian POSIX ACL
 * entries in @entry.
 * NOTE(review): large parts of the body (case labels, returns, braces)
 * are not visible in this fragment; code left byte-identical, comments
 * only. Presumably follows the classic posix_acl_permission() algorithm:
 * owner, named users, owning/named groups masked by ACL_MASK, then
 * other — TODO confirm against the full source.
 */
96 * Check permission based on POSIX ACL.
98 int lustre_posix_acl_permission(struct lu_ucred *mu, struct lu_attr *la,
99 int want, posix_acl_xattr_entry *entry,
102 posix_acl_xattr_entry *pa, *pe, *mask_obj;
103 posix_acl_xattr_entry ae, me;
/* walk every entry; each is converted to CPU order into the local ae */
109 for (pa = &entry[0], pe = &entry[count - 1]; pa <= pe; pa++) {
110 lustre_posix_acl_le_to_cpu(&ae, pa);
113 /* (May have been checked already) */
/* object owner check uses la_uid vs the caller's fsuid */
114 if (la->la_uid == mu->uc_fsuid)
/* named-user (ACL_USER) entry matching the caller's fsuid */
118 if (ae.e_id == mu->uc_fsuid)
/* owning-group membership check */
122 if (lustre_in_group_p(mu, la->la_gid)) {
124 if ((ae.e_perm & want) == want)
/* named-group (ACL_GROUP) membership check */
129 if (lustre_in_group_p(mu, ae.e_id)) {
131 if ((ae.e_perm & want) == want)
/* group-class entries are additionally restricted by a later ACL_MASK:
 * scan the remaining entries for it */
149 for (mask_obj = pa + 1; mask_obj <= pe; mask_obj++) {
150 lustre_posix_acl_le_to_cpu(&me, mask_obj);
151 if (me.e_tag == ACL_MASK) {
152 if ((ae.e_perm & me.e_perm & want) == want)
160 if ((ae.e_perm & want) == want)
165 EXPORT_SYMBOL(lustre_posix_acl_permission);
/*
 * Update the @count little-endian ACL entries in @entry so they agree
 * with the new file @mode after a chmod: owner entries take the S_IRWXU
 * bits, "other" entries take S_IRWXO, and the S_IRWXG bits go to the
 * ACL_MASK entry if present, otherwise to the owning-group entry.
 * NOTE(review): case labels, braces and the return path are not visible
 * in this fragment; code left byte-identical, comments only.
 */
168 * Modify the ACL for the chmod.
170 int lustre_posix_acl_chmod_masq(posix_acl_xattr_entry *entry, __u32 mode,
173 posix_acl_xattr_entry *group_obj = NULL, *mask_obj = NULL, *pa, *pe;
175 for (pa = &entry[0], pe = &entry[count - 1]; pa <= pe; pa++) {
176 switch (le16_to_cpu(pa->e_tag)) {
/* ACL_USER_OBJ: owner bits, shifted down from S_IRWXU */
178 pa->e_perm = cpu_to_le16((mode & S_IRWXU) >> 6);
/* ACL_OTHER: low three bits apply directly */
190 pa->e_perm = cpu_to_le16(mode & S_IRWXO);
/* a mask entry, when present, receives the group bits ... */
198 mask_obj->e_perm = cpu_to_le16((mode & S_IRWXG) >> 3);
/* ... otherwise the group-owner entry does */
202 group_obj->e_perm = cpu_to_le16((mode & S_IRWXG) >> 3);
207 EXPORT_SYMBOL(lustre_posix_acl_chmod_masq);
/*
 * Decide whether the ACL is exactly expressible as classic rwxrwxrwx
 * mode bits; accumulate those bits into *@mode_p.
 * NOTE(review): the return logic and several case labels are not
 * visible in this fragment; code left byte-identical, comments only.
 */
210 * Returns 0 if the acl can be exactly represented in the traditional
211 * file mode permission bits, or else 1. Returns -E... on error.
214 lustre_posix_acl_equiv_mode(posix_acl_xattr_entry *entry, mode_t *mode_p,
217 posix_acl_xattr_entry *pa, *pe;
221 for (pa = &entry[0], pe = &entry[count - 1]; pa <= pe; pa++) {
222 __u16 perm = le16_to_cpu(pa->e_perm);
223 switch (le16_to_cpu(pa->e_tag)) {
/* owner bits live in S_IRWXU (<< 6) */
225 mode |= (perm & S_IRWXO) << 6;
/* group bits live in S_IRWXG (<< 3) */
228 mode |= (perm & S_IRWXO) << 3;
/* other bits apply as-is */
231 mode |= perm & S_IRWXO;
/* ACL_MASK overrides whatever the group entry contributed */
234 mode = (mode & ~S_IRWXG) |
235 ((perm & S_IRWXO) << 3);
/* write back only the permission bits, preserving the rest of *mode_p */
247 *mode_p = (*mode_p & ~S_IRWXUGO) | mode;
250 EXPORT_SYMBOL(lustre_posix_acl_equiv_mode);
/*
 * Apply the create-time mode (umask-filtered) to an inherited default
 * ACL: each ACL entry is restricted by the corresponding mode bits and,
 * symmetrically, *@pmode is restricted by the resulting ACL permissions.
 * NOTE(review): case labels, braces and the return path are not visible
 * in this fragment; code left byte-identical, comments only.
 */
253 * Modify acl when creating a new object.
255 int lustre_posix_acl_create_masq(posix_acl_xattr_entry *entry, __u32 *pmode,
258 posix_acl_xattr_entry *group_obj = NULL, *mask_obj = NULL, *pa, *pe;
259 posix_acl_xattr_entry ae;
263 for (pa = &entry[0], pe = &entry[count - 1]; pa <= pe; pa++) {
264 lustre_posix_acl_le_to_cpu(&ae, pa);
/* ACL_USER_OBJ: intersect entry with the owner bits of mode, then
 * narrow mode's owner bits to what the entry still allows */
267 ae.e_perm &= (mode >> 6) | ~S_IRWXO;
268 pa->e_perm = cpu_to_le16(ae.e_perm);
269 mode &= (ae.e_perm << 6) | ~S_IRWXU;
/* ACL_OTHER: same intersection on the low three bits */
279 ae.e_perm &= mode | ~S_IRWXO;
280 pa->e_perm = cpu_to_le16(ae.e_perm);
281 mode &= ae.e_perm | ~S_IRWXO;
/* if a mask entry exists, the group-class bits of mode constrain it */
293 ae.e_perm = le16_to_cpu(mask_obj->e_perm) &
294 ((mode >> 3) | ~S_IRWXO);
295 mode &= (ae.e_perm << 3) | ~S_IRWXG;
296 mask_obj->e_perm = cpu_to_le16(ae.e_perm);
/* without a mask, the owning-group entry plays that role */
300 ae.e_perm = le16_to_cpu(group_obj->e_perm) &
301 ((mode >> 3) | ~S_IRWXO);
302 mode &= (ae.e_perm << 3) | ~S_IRWXG;
303 group_obj->e_perm = cpu_to_le16(ae.e_perm);
/* hand the narrowed permission bits back to the caller */
306 *pmode = (*pmode & ~S_IRWXUGO) | mode;
309 EXPORT_SYMBOL(lustre_posix_acl_create_masq);
/*
 * Shrink a posix ACL xattr buffer from @old_count to @new_count entries
 * by allocating a smaller buffer, copying the prefix and freeing the
 * old one; *@header is updated in place. No-op when nothing shrinks.
 * NOTE(review): the early-return, pointer reassignment and RETURN lines
 * are not visible in this fragment; code left byte-identical, comments
 * only.
 */
311 /* if "new_count == 0", then "new = {a_version, NULL}", NOT NULL. */
312 static int lustre_posix_acl_xattr_reduce_space(posix_acl_xattr_header **header,
313 int old_count, int new_count)
315 int old_size = CFS_ACL_XATTR_SIZE(old_count, posix_acl_xattr);
316 int new_size = CFS_ACL_XATTR_SIZE(new_count, posix_acl_xattr);
317 posix_acl_xattr_header *new;
/* only shrink; growing or equal sizes are left untouched */
319 if (unlikely(old_count <= new_count))
322 OBD_ALLOC(new, new_size);
323 if (unlikely(new == NULL))
/* new_size <= old_size here, so copying new_size bytes is safe */
326 memcpy(new, *header, new_size);
327 OBD_FREE(*header, old_size);
/*
 * Extended-ACL counterpart of the reducer above: shrink *@header from
 * @old_count entries down to its current a_count. The target count is
 * read from the header itself rather than passed in.
 * NOTE(review): the early-return, pointer reassignment and RETURN lines
 * are not visible in this fragment; code left byte-identical, comments
 * only.
 */
332 /* if "new_count == 0", then "new = {0, NULL}", NOT NULL. */
333 static int lustre_ext_acl_xattr_reduce_space(ext_acl_xattr_header **header,
336 int ext_count = le32_to_cpu((*header)->a_count);
337 int ext_size = CFS_ACL_XATTR_SIZE(ext_count, ext_acl_xattr);
338 int old_size = CFS_ACL_XATTR_SIZE(old_count, ext_acl_xattr);
339 ext_acl_xattr_header *new;
/* only shrink; nothing to do when the buffer is already tight */
341 if (unlikely(old_count <= ext_count))
344 OBD_ALLOC(new, ext_size);
345 if (unlikely(new == NULL))
348 memcpy(new, *header, ext_size);
349 OBD_FREE(*header, old_size);
/*
 * Build a freshly-allocated extended ACL from a posix ACL xattr of
 * @size bytes. Entries are copied in their little-endian form (no
 * byte-swapping needed) and every e_stat starts as ES_UNK.
 * Returns the new header, or ERR_PTR(-errno) on failure; the caller
 * owns the result and frees it with lustre_ext_acl_xattr_free().
 * NOTE(review): braces and the final RETURN are not visible in this
 * fragment; code left byte-identical, comments only.
 */
355 * Generate new extended ACL based on the posix ACL.
357 ext_acl_xattr_header *
358 lustre_posix_acl_xattr_2ext(posix_acl_xattr_header *header, int size)
361 ext_acl_xattr_header *new;
364 if (unlikely(size < 0))
365 RETURN(ERR_PTR(-EINVAL));
369 count = CFS_ACL_XATTR_COUNT(size, posix_acl_xattr);
370 esize = CFS_ACL_XATTR_SIZE(count, ext_acl_xattr);
371 OBD_ALLOC(new, esize);
372 if (unlikely(new == NULL))
373 RETURN(ERR_PTR(-ENOMEM));
375 new->a_count = cpu_to_le32(count);
/* copy fields verbatim (already little-endian on the wire) */
376 for (i = 0; i < count; i++) {
377 new->a_entries[i].e_tag = header->a_entries[i].e_tag;
378 new->a_entries[i].e_perm = header->a_entries[i].e_perm;
379 new->a_entries[i].e_id = header->a_entries[i].e_id;
380 new->a_entries[i].e_stat = cpu_to_le32(ES_UNK);
385 EXPORT_SYMBOL(lustre_posix_acl_xattr_2ext);
/*
 * Copy @header into a new posix ACL, dropping ACL_USER entries whose id
 * is NOBODY_UID and ACL_GROUP entries whose id is NOBODY_GID; the
 * filtered copy is returned through *@out. Base entries must carry
 * ACL_UNDEFINED_ID or the ACL is treated as corrupt (-EIO).
 * NOTE(review): case labels, the _out cleanup label and the return path
 * are not visible in this fragment; code left byte-identical, comments
 * only.
 */
388 * Filter out the "nobody" entries in the posix ACL.
390 int lustre_posix_acl_xattr_filter(posix_acl_xattr_header *header, int size,
391 posix_acl_xattr_header **out)
393 int count, i, j, rc = 0;
395 posix_acl_xattr_header *new;
398 if (unlikely(size < 0))
403 OBD_ALLOC(new, size);
404 if (unlikely(new == NULL))
407 new->a_version = cpu_to_le32(CFS_ACL_XATTR_VERSION);
408 count = CFS_ACL_XATTR_COUNT(size, posix_acl_xattr);
/* i walks the source entries, j writes the surviving ones */
409 for (i = 0, j = 0; i < count; i++) {
410 id = le32_to_cpu(header->a_entries[i].e_id);
411 switch (le16_to_cpu(header->a_entries[i].e_tag)) {
/* base entries (owner/group/other/mask) must be id-less */
416 if (id != ACL_UNDEFINED_ID)
417 GOTO(_out, rc = -EIO);
419 memcpy(&new->a_entries[j++], &header->a_entries[i],
420 sizeof(posix_acl_xattr_entry));
/* ACL_USER: keep unless it is the "nobody" uid */
423 if (id != NOBODY_UID)
424 memcpy(&new->a_entries[j++],
425 &header->a_entries[i],
426 sizeof(posix_acl_xattr_entry));
/* ACL_GROUP: keep unless it is the "nobody" gid */
429 if (id != NOBODY_GID)
430 memcpy(&new->a_entries[j++],
431 &header->a_entries[i],
432 sizeof(posix_acl_xattr_entry));
/* unknown tag: corrupt ACL */
435 GOTO(_out, rc = -EIO);
439 /* free unused space. */
440 rc = lustre_posix_acl_xattr_reduce_space(&new, count, j);
455 EXPORT_SYMBOL(lustre_posix_acl_xattr_filter);
/*
 * Remap the uid/gid in each ACL_USER/ACL_GROUP entry of @header from
 * server-side ids to client-side ids through idmap table @t, in place.
 * @flags selects the policy (see the comment fragments below):
 * CFS_IC_NOTHING leaves the ACL untouched; CFS_IC_ALL maps everything,
 * sending unmapped ids to "nobody"; CFS_IC_MAPPED / CFS_IC_UNMAPPED
 * replace only the mapped / unmapped subset with "nobody".
 * NOTE(review): case labels, braces and several statements are not
 * visible in this fragment; code left byte-identical, comments only.
 */
458 * Convert server-side uid/gid in the posix ACL items to the client-side ones.
461 * nothing to be converted.
463 * mapped ids are converted to client-side ones,
464 * unmapped ones are converted to "nobody".
466 * only mapped ids are converted to "nobody".
468 * only unmapped ids are converted to "nobody".
470 int lustre_posix_acl_xattr_id2client(struct lu_ucred *mu,
471 struct lustre_idmap_table *t,
472 posix_acl_xattr_header *header,
479 if (unlikely(size < 0))
484 if (unlikely(flags == CFS_IC_NOTHING))
487 count = CFS_ACL_XATTR_COUNT(size, posix_acl_xattr);
488 for (i = 0; i < count; i++) {
489 id = le32_to_cpu(header->a_entries[i].e_id);
490 switch (le16_to_cpu(header->a_entries[i].e_tag)) {
/* base entries carry no id; anything else is corrupt */
495 if (id != ACL_UNDEFINED_ID)
/* ACL_USER: look up server uid -> client uid (reverse=1) */
499 id = lustre_idmap_lookup_uid(mu, t, 1, id);
500 if (flags == CFS_IC_ALL) {
501 if (id == CFS_IDMAP_NOTFOUND)
503 header->a_entries[i].e_id = cpu_to_le32(id);
504 } else if (flags == CFS_IC_MAPPED) {
505 if (id != CFS_IDMAP_NOTFOUND)
506 header->a_entries[i].e_id =
507 cpu_to_le32(NOBODY_UID);
508 } else if (flags == CFS_IC_UNMAPPED) {
509 if (id == CFS_IDMAP_NOTFOUND)
510 header->a_entries[i].e_id =
511 cpu_to_le32(NOBODY_UID);
/* ACL_GROUP: same policy with the gid lookup and NOBODY_GID */
515 id = lustre_idmap_lookup_gid(mu, t, 1, id);
516 if (flags == CFS_IC_ALL) {
517 if (id == CFS_IDMAP_NOTFOUND)
519 header->a_entries[i].e_id = cpu_to_le32(id);
520 } else if (flags == CFS_IC_MAPPED) {
521 if (id != CFS_IDMAP_NOTFOUND)
522 header->a_entries[i].e_id =
523 cpu_to_le32(NOBODY_GID);
524 } else if (flags == CFS_IC_UNMAPPED) {
525 if (id == CFS_IDMAP_NOTFOUND)
526 header->a_entries[i].e_id =
527 cpu_to_le32(NOBODY_GID);
536 EXPORT_SYMBOL(lustre_posix_acl_xattr_id2client);
/*
 * Free a posix ACL xattr buffer of @size bytes previously obtained from
 * OBD_ALLOC (e.g. by lustre_posix_acl_xattr_filter()).
 */
539 * Release the posix ACL space.
541 void lustre_posix_acl_xattr_free(posix_acl_xattr_header *header, int size)
543 OBD_FREE(header, size);
545 EXPORT_SYMBOL(lustre_posix_acl_xattr_free);
/*
 * Remap uid/gid in each extended ACL entry from client-side ids to
 * server-side ids through idmap table @t, in place. An id with no
 * server mapping is an error (EPERM, per the header comment) rather
 * than being silently replaced.
 * NOTE(review): case labels, braces and the error/return paths are not
 * visible in this fragment; code left byte-identical, comments only.
 */
548 * Converts client-side uid/gid in the extended ACL items to server-side ones.
550 * mapped ids are converted to server-side ones,
551 * unmapped ones cause "EPERM" error.
553 int lustre_ext_acl_xattr_id2server(struct lu_ucred *mu,
554 struct lustre_idmap_table *t,
555 ext_acl_xattr_header *header)
558 int i, count = le32_to_cpu(header->a_count);
562 for (i = 0; i < count; i++) {
563 id = le32_to_cpu(header->a_entries[i].e_id);
564 switch (le16_to_cpu(header->a_entries[i].e_tag)) {
/* base entries carry no id; anything else is corrupt */
569 if (id != ACL_UNDEFINED_ID)
/* ACL_USER: client uid -> server uid (reverse=0) */
573 id = lustre_idmap_lookup_uid(mu, t, 0, id);
574 if (id == CFS_IDMAP_NOTFOUND)
577 header->a_entries[i].e_id = cpu_to_le32(id);
/* ACL_GROUP: client gid -> server gid */
580 id = lustre_idmap_lookup_gid(mu, t, 0, id);
581 if (id == CFS_IDMAP_NOTFOUND)
584 header->a_entries[i].e_id = cpu_to_le32(id);
592 EXPORT_SYMBOL(lustre_ext_acl_xattr_id2server);
/*
 * Free an extended ACL buffer; its size is recomputed from the header's
 * own a_count, so the caller need not remember it.
 */
595 * Release the extended ACL space.
597 void lustre_ext_acl_xattr_free(ext_acl_xattr_header *header)
599 OBD_FREE(header, CFS_ACL_XATTR_SIZE(le32_to_cpu(header->a_count), \
602 EXPORT_SYMBOL(lustre_ext_acl_xattr_free);
/*
 * Find the extended ACL entry matching @entry's (e_tag, e_id) pair,
 * returning a pointer into @header or NULL when absent. *@pos appears
 * to be a search-position hint carried between successive calls —
 * TODO confirm: the hint/wrap-around logic is not visible in this
 * fragment; code left byte-identical, comments only.
 */
604 static ext_acl_xattr_entry *
605 lustre_ext_acl_xattr_search(ext_acl_xattr_header *header,
606 posix_acl_xattr_entry *entry, int *pos)
608 int once, start, end, i, j, count = le32_to_cpu(header->a_count);
615 for (i = start; i < end; i++) {
/* both sides are little-endian, so raw field comparison is valid */
616 if (header->a_entries[i].e_tag == entry->e_tag &&
617 header->a_entries[i].e_id == entry->e_id) {
622 return &header->a_entries[j];
/*
 * Merge the server-side posix ACL (@posix_header, @size bytes) with the
 * client-side extended ACL (@ext_header) into a newly allocated posix
 * ACL returned through *@out. Two regimes, keyed on whether the
 * extended ACL still has a live ACL_MASK entry:
 *  - no mask (or mask deleted): the result can contain only base
 *    entries, taken from the non-deleted extended entries;
 *  - mask present: unchanged server-side entries are kept and all
 *    non-deleted extended entries are appended, then the buffer is
 *    shrunk to fit.
 * NOTE(review): many control-flow lines (braces, case labels, ENTRY/
 * RETURN, the _out label) are not visible in this fragment; code left
 * byte-identical, comments only.
 */
637 * Merge the posix ACL and the extended ACL into new posix ACL.
639 int lustre_acl_xattr_merge2posix(posix_acl_xattr_header *posix_header, int size,
640 ext_acl_xattr_header *ext_header,
641 posix_acl_xattr_header **out)
643 int posix_count, posix_size, i, j;
644 int ext_count = le32_to_cpu(ext_header->a_count), pos = 0, rc = 0;
645 posix_acl_xattr_entry pe = {ACL_MASK, 0, ACL_UNDEFINED_ID};
646 posix_acl_xattr_header *new;
647 ext_acl_xattr_entry *ee, ae;
/* build a little-endian ACL_MASK probe and look it up in the ext ACL */
650 lustre_posix_acl_cpu_to_le(&pe, &pe);
651 ee = lustre_ext_acl_xattr_search(ext_header, &pe, &pos);
652 if (ee == NULL || le32_to_cpu(ee->e_stat) == ES_DEL) {
653 /* there are only base ACL entries at most. */
655 posix_size = CFS_ACL_XATTR_SIZE(posix_count, posix_acl_xattr);
656 OBD_ALLOC(new, posix_size);
657 if (unlikely(new == NULL))
660 new->a_version = cpu_to_le32(CFS_ACL_XATTR_VERSION);
661 for (i = 0, j = 0; i < ext_count; i++) {
662 lustre_ext_acl_le_to_cpu(&ae,
663 &ext_header->a_entries[i]);
/* base entries must be id-less, else the ext ACL is corrupt */
668 if (ae.e_id != ACL_UNDEFINED_ID)
669 GOTO(_out, rc = -EIO);
/* keep every entry not marked deleted, copying the raw
 * little-endian fields straight across */
671 if (ae.e_stat != ES_DEL) {
672 new->a_entries[j].e_tag =
673 ext_header->a_entries[i].e_tag;
674 new->a_entries[j].e_perm =
675 ext_header->a_entries[i].e_perm;
676 new->a_entries[j++].e_id =
677 ext_header->a_entries[i].e_id;
/* with no mask, named user/group entries may only appear deleted */
683 if (ae.e_stat == ES_DEL)
686 GOTO(_out, rc = -EIO);
690 /* maybe there are valid ACL_USER or ACL_GROUP entries in the
691 * original server-side ACL, they are regarded as ES_UNC stat.*/
694 if (unlikely(size < 0))
/* worst case: all original entries plus all extended entries */
700 CFS_ACL_XATTR_COUNT(size, posix_acl_xattr);
701 posix_count = ori_posix_count + ext_count;
703 CFS_ACL_XATTR_SIZE(posix_count, posix_acl_xattr);
704 OBD_ALLOC(new, posix_size);
705 if (unlikely(new == NULL))
708 new->a_version = cpu_to_le32(CFS_ACL_XATTR_VERSION);
709 /* 1. process the unchanged ACL entries
710 * in the original server-side ACL. */
712 for (i = 0, j = 0; i < ori_posix_count; i++) {
713 ee = lustre_ext_acl_xattr_search(ext_header,
714 &posix_header->a_entries[i], &pos);
716 memcpy(&new->a_entries[j++],
717 &posix_header->a_entries[i],
718 sizeof(posix_acl_xattr_entry));
721 /* 2. process the non-deleted entries
722 * from client-side extended ACL. */
723 for (i = 0; i < ext_count; i++) {
724 if (le16_to_cpu(ext_header->a_entries[i].e_stat) !=
726 new->a_entries[j].e_tag =
727 ext_header->a_entries[i].e_tag;
728 new->a_entries[j].e_perm =
729 ext_header->a_entries[i].e_perm;
730 new->a_entries[j++].e_id =
731 ext_header->a_entries[i].e_id;
736 /* free unused space. */
737 rc = lustre_posix_acl_xattr_reduce_space(&new, posix_count, j);
/* error path: release the partially built ACL */
747 OBD_FREE(new, posix_size);
752 EXPORT_SYMBOL(lustre_acl_xattr_merge2posix);
/*
 * Merge the server-side posix ACL (@posix_header, @size bytes) into the
 * client-side extended ACL (@ext_header), producing a newly allocated
 * extended ACL whose entries carry e_stat tags: ES_UNC for entries the
 * client left alone, ES_MOD where permissions differ, ES_ADD for
 * server-only entries, ES_DEL for client entries absent from the posix
 * ACL. "nobody" user/group entries are skipped throughout. Returns the
 * new header or ERR_PTR(-errno).
 * NOTE(review): many control-flow lines (case labels, braces, the out
 * label, several cpu_to_le32(ES_*) operands) are not visible in this
 * fragment; code left byte-identical, comments only.
 */
755 * Merge the posix ACL and the extended ACL into new extended ACL.
757 ext_acl_xattr_header *
758 lustre_acl_xattr_merge2ext(posix_acl_xattr_header *posix_header, int size,
759 ext_acl_xattr_header *ext_header)
761 int ori_ext_count, posix_count, ext_count, ext_size;
762 int i, j, pos = 0, rc = 0;
763 posix_acl_xattr_entry pae;
764 ext_acl_xattr_header *new;
765 ext_acl_xattr_entry *ee, eae;
768 if (unlikely(size < 0))
769 RETURN(ERR_PTR(-EINVAL));
/* worst case: every posix entry plus every existing ext entry */
773 posix_count = CFS_ACL_XATTR_COUNT(size, posix_acl_xattr);
774 ori_ext_count = le32_to_cpu(ext_header->a_count);
775 ext_count = posix_count + ori_ext_count;
776 ext_size = CFS_ACL_XATTR_SIZE(ext_count, ext_acl_xattr);
778 OBD_ALLOC(new, ext_size);
779 if (unlikely(new == NULL))
780 RETURN(ERR_PTR(-ENOMEM));
/* pass 1: walk the posix ACL, classifying each entry against the
 * extended ACL */
782 for (i = 0, j = 0; i < posix_count; i++) {
783 lustre_posix_acl_le_to_cpu(&pae, &posix_header->a_entries[i]);
/* base entries must be id-less, else the posix ACL is corrupt */
789 if (pae.e_id != ACL_UNDEFINED_ID)
790 GOTO(out, rc = -EIO);
792 /* ignore "nobody" entry. */
793 if (pae.e_id == NOBODY_UID)
/* ACL_USER: copy raw fields, then derive e_stat from whether a
 * matching ext entry exists and whether its e_perm differs */
796 new->a_entries[j].e_tag =
797 posix_header->a_entries[i].e_tag;
798 new->a_entries[j].e_perm =
799 posix_header->a_entries[i].e_perm;
800 new->a_entries[j].e_id =
801 posix_header->a_entries[i].e_id;
802 ee = lustre_ext_acl_xattr_search(ext_header,
803 &posix_header->a_entries[i], &pos);
805 if (posix_header->a_entries[i].e_perm !=
807 /* entry modified. */
809 new->a_entries[j++].e_stat =
812 /* entry unchanged. */
814 new->a_entries[j++].e_stat =
/* no match in the ext ACL: the server added this entry */
818 new->a_entries[j++].e_stat =
823 /* ignore "nobody" entry. */
824 if (pae.e_id == NOBODY_GID)
/* ACL_GROUP: identical treatment to ACL_USER above */
826 new->a_entries[j].e_tag =
827 posix_header->a_entries[i].e_tag;
828 new->a_entries[j].e_perm =
829 posix_header->a_entries[i].e_perm;
830 new->a_entries[j].e_id =
831 posix_header->a_entries[i].e_id;
832 ee = lustre_ext_acl_xattr_search(ext_header,
833 &posix_header->a_entries[i], &pos);
835 if (posix_header->a_entries[i].e_perm !=
837 /* entry modified. */
839 new->a_entries[j++].e_stat =
842 /* entry unchanged. */
844 new->a_entries[j++].e_stat =
848 new->a_entries[j++].e_stat =
/* unknown tag: corrupt ACL */
853 GOTO(out, rc = -EIO);
857 /* process deleted entries. */
/* pass 2: ext entries still marked ES_UNK were never matched by any
 * posix entry, so the client must delete them */
858 for (i = 0; i < ori_ext_count; i++) {
859 lustre_ext_acl_le_to_cpu(&eae, &ext_header->a_entries[i]);
860 if (eae.e_stat == ES_UNK) {
861 /* ignore "nobody" entry. */
862 if ((eae.e_tag == ACL_USER && eae.e_id == NOBODY_UID) ||
863 (eae.e_tag == ACL_GROUP && eae.e_id == NOBODY_GID))
866 new->a_entries[j].e_tag =
867 ext_header->a_entries[i].e_tag;
868 new->a_entries[j].e_perm =
869 ext_header->a_entries[i].e_perm;
870 new->a_entries[j].e_id = ext_header->a_entries[i].e_id;
871 new->a_entries[j++].e_stat = cpu_to_le32(ES_DEL);
875 new->a_count = cpu_to_le32(j);
876 /* free unused space. */
877 rc = lustre_ext_acl_xattr_reduce_space(&new, ext_count);
/* error path: release the partially built ACL */
882 OBD_FREE(new, ext_size);
887 EXPORT_SYMBOL(lustre_acl_xattr_merge2ext);