1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
4 * Copyright (c) 2003 Cluster File Systems, Inc.
6 * This file is part of Lustre, http://www.lustre.org.
8 * Lustre is free software; you can redistribute it and/or
9 * modify it under the terms of version 2 of the GNU General Public
10 * License as published by the Free Software Foundation.
12 * Lustre is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
17 * You should have received a copy of the GNU General Public License
18 * along with Lustre; if not, write to the Free Software
19 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
22 #define DEBUG_SUBSYSTEM S_MDS
24 #include <linux/config.h>
25 #include <linux/module.h>
26 #include <linux/kernel.h>
28 #include <linux/string.h>
29 #include <linux/stat.h>
30 #include <linux/errno.h>
31 #include <linux/version.h>
32 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
33 # include <linux/locks.h> // for wait_on_buffer
35 # include <linux/buffer_head.h> // for wait_on_buffer
37 #include <linux/unistd.h>
39 #include <asm/system.h>
40 #include <asm/uaccess.h>
43 #include <linux/stat.h>
44 #include <asm/uaccess.h>
45 #include <linux/slab.h>
46 #include <asm/segment.h>
48 #include <linux/obd_support.h>
49 #include <linux/lustre_lib.h>
50 #include <linux/lustre_sec.h>
51 #include <linux/lustre_ucache.h>
52 #include "mds_internal.h"
/* Compat for kernels < 2.6.4: minimal group_info allocator.
 * Only supports up to NGROUPS_SMALL gids, all stored in the inline
 * small_block which blocks[0] is pointed at; usage count starts at 1.
 * NOTE(review): listing is elided here — alloc-failure check not visible. */
54 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,4)
55 struct group_info *groups_alloc(int ngroups)
57 struct group_info *ginfo;
59 LASSERT(ngroups <= NGROUPS_SMALL);
61 OBD_ALLOC(ginfo, sizeof(*ginfo) + 1 * sizeof(gid_t *));
64 ginfo->ngroups = ngroups;
66 ginfo->blocks[0] = ginfo->small_block;
67 atomic_set(&ginfo->usage, 1);
/* Free a group_info created by groups_alloc() above; the OBD_FREE size
 * must mirror the single-block OBD_ALLOC done there. */
72 void groups_free(struct group_info *ginfo)
74 LASSERT(ginfo->ngroups <= NGROUPS_SMALL);
75 LASSERT(ginfo->nblocks == 1);
76 LASSERT(ginfo->blocks[0] == ginfo->small_block);
78 OBD_FREE(ginfo, sizeof(*ginfo) + 1 * sizeof(gid_t *));
81 /* for 2.4 the group number is small, so simply search the
/* Linear membership test: returns nonzero iff @grp is in @ginfo
 * (2.4-kernel variant; no sort order required). */
84 int groups_search(struct group_info *ginfo, gid_t grp)
91 for (i = 0; i < ginfo->ngroups; i++)
92 if (GROUP_AT(ginfo, i) == grp)
/* Sort the gids in @ginfo ascending with a Shell sort; the gap sequence
 * is stride = 3*stride + 1 (Knuth). Insertion within each stride moves
 * elements right until the saved value fits.
 * NOTE(review): 'left' initialization is elided from this listing —
 * presumably left = base; confirm against full source. */
99 void groups_sort(struct group_info *ginfo)
101 int base, max, stride;
102 int gidsetsize = ginfo->ngroups;
104 for (stride = 1; stride < gidsetsize; stride = 3 * stride + 1)
109 max = gidsetsize - stride;
110 for (base = 0; base < max; base++) {
112 int right = left + stride;
113 gid_t tmp = GROUP_AT(ginfo, right);
115 while (left >= 0 && GROUP_AT(ginfo, left) > tmp) {
116 GROUP_AT(ginfo, right) =
117 GROUP_AT(ginfo, left);
121 GROUP_AT(ginfo, right) = tmp;
/* Binary-search membership test on a groups_sort()ed @ginfo.
 * Requires the array to be sorted ascending. */
127 int groups_search(struct group_info *ginfo, gid_t grp)
135 right = ginfo->ngroups;
136 while (left < right) {
137 int mid = (left + right) / 2;
138 int cmp = grp - GROUP_AT(ginfo, mid);
/* Copy ginfo->ngroups 32-bit gids from the flat wire buffer @gids into
 * the (possibly multi-block) group_info storage, one block per pass. */
150 void groups_from_buffer(struct group_info *ginfo, __u32 *gids)
152 int i, ngroups = ginfo->ngroups;
154 for (i = 0; i < ginfo->nblocks; i++) {
155 int count = min(NGROUPS_PER_BLOCK, ngroups);
157 memcpy(ginfo->blocks[i], gids, count * sizeof(__u32));
158 gids += NGROUPS_PER_BLOCK;
/* Fill a lustre_id from fields cached on an MDS dentry: inode number,
 * generation, fid and owning MDS number. */
163 void mds_pack_dentry2id(struct obd_device *obd,
164 struct lustre_id *id,
165 struct dentry *dentry,
168 id_ino(id) = dentry->d_inum;
169 id_gen(id) = dentry->d_generation;
172 id_fid(id) = dentry->d_fid;
173 id_group(id) = dentry->d_mdsnum;
/* Pack dentry identity into the reply body's id1 and raise the
 * corresponding OBD_MD_* valid bits. */
177 void mds_pack_dentry2body(struct obd_device *obd,
179 struct dentry *dentry,
182 b->valid |= OBD_MD_FLID | OBD_MD_FLGENER |
186 b->valid |= OBD_MD_FID;
188 mds_pack_dentry2id(obd, &b->id1, dentry, fid);
/* Build a lustre_id for @inode. Reading the stored id (sid) wants
 * i_sem; down_trylock() is used so we don't deadlock when the caller
 * may already hold it — on trylock failure the sid is read without
 * taking the lock (see the elided branch). */
191 int mds_pack_inode2id(struct obd_device *obd,
192 struct lustre_id *id,
200 /* we have to avoid deadlock. */
201 if (!down_trylock(&inode->i_sem)) {
202 rc = mds_read_inode_sid(obd, inode, id);
205 rc = mds_read_inode_sid(obd, inode, id);
210 id_ino(id) = inode->i_ino;
211 id_gen(id) = inode->i_generation;
212 id_type(id) = (S_IFMT & inode->i_mode);
217 /* Note that we can copy all of the fields, just some will not be "valid" */
/* Copy inode attributes (times, mode, size, owner, flags, rdev, nlink)
 * into the wire mds_body and set the valid mask accordingly. For
 * regular files size/blocks are left to the OSTs (only set here when
 * !S_ISREG); orphans and directories get special nlink handling. */
218 void mds_pack_inode2body(struct obd_device *obd, struct mds_body *b,
219 struct inode *inode, int fid)
221 b->valid |= OBD_MD_FLID | OBD_MD_FLCTIME | OBD_MD_FLUID |
222 OBD_MD_FLGID | OBD_MD_FLFLAGS | OBD_MD_FLTYPE |
223 OBD_MD_FLMODE | OBD_MD_FLNLINK | OBD_MD_FLGENER |
224 OBD_MD_FLATIME | OBD_MD_FLMTIME; /* bug 2020 */
226 if (!S_ISREG(inode->i_mode)) {
227 b->valid |= OBD_MD_FLSIZE | OBD_MD_FLBLOCKS |
228 OBD_MD_FLATIME | OBD_MD_FLMTIME |
231 b->atime = LTIME_S(inode->i_atime);
232 b->mtime = LTIME_S(inode->i_mtime);
233 b->ctime = LTIME_S(inode->i_ctime);
234 b->mode = inode->i_mode;
235 b->size = inode->i_size;
236 b->blocks = inode->i_blocks;
237 b->uid = inode->i_uid;
238 b->gid = inode->i_gid;
239 b->flags = inode->i_flags;
240 b->rdev = inode->i_rdev;
242 /* Return the correct link count for orphan inodes */
243 if (mds_inode_is_orphan(inode)) {
245 } else if (S_ISDIR(inode->i_mode)) {
248 b->nlink = inode->i_nlink;
252 b->valid |= OBD_MD_FID;
254 mds_pack_inode2id(obd, &b->id1, inode, fid);
/* Unpack a REINT_SETATTR request at @offset into @r: swab the
 * mds_rec_setattr record, translate it into an iattr, and point
 * ur_eadata/ur_ea2data at the optional trailing buffers (no copies —
 * the pointers alias the request message). */
258 static int mds_setattr_unpack(struct ptlrpc_request *req, int offset,
259 struct mds_update_record *r)
261 struct iattr *attr = &r->ur_iattr;
262 struct mds_rec_setattr *rec;
265 rec = lustre_swab_reqbuf(req, offset, sizeof(*rec),
266 lustre_swab_mds_rec_setattr);
270 r->ur_id1 = &rec->sa_id;
271 attr->ia_valid = rec->sa_valid;
272 attr->ia_mode = rec->sa_mode;
273 attr->ia_uid = rec->sa_uid;
274 attr->ia_gid = rec->sa_gid;
275 attr->ia_size = rec->sa_size;
276 LTIME_S(attr->ia_atime) = rec->sa_atime;
277 LTIME_S(attr->ia_mtime) = rec->sa_mtime;
278 LTIME_S(attr->ia_ctime) = rec->sa_ctime;
279 attr->ia_attr_flags = rec->sa_attr_flags;
281 LASSERT_REQSWAB (req, offset + 1);
282 if (req->rq_reqmsg->bufcount > offset + 1) {
283 r->ur_eadata = lustre_msg_buf (req->rq_reqmsg,
285 if (r->ur_eadata == NULL)
287 r->ur_eadatalen = req->rq_reqmsg->buflens[offset + 1];
290 if (req->rq_reqmsg->bufcount > offset + 2) {
291 r->ur_ea2data = lustre_msg_buf(req->rq_reqmsg, offset + 2, 0);
292 if (r->ur_ea2data == NULL)
295 r->ur_ea2datalen = req->rq_reqmsg->buflens[offset + 2];
/* Unpack a REINT_CREATE request: swab the mds_rec_create record, then
 * fetch the name buffer, and interpret the optional third buffer by
 * create mode — symlink target for S_ISLNK, a 16-bit stripe count for
 * S_ISDIR (swabbed in place), raw EA data for S_ISREG. */
301 static int mds_create_unpack(struct ptlrpc_request *req, int offset,
302 struct mds_update_record *r)
304 struct mds_rec_create *rec;
307 rec = lustre_swab_reqbuf (req, offset, sizeof (*rec),
308 lustre_swab_mds_rec_create);
312 r->ur_id1 = &rec->cr_id;
313 r->ur_id2 = &rec->cr_replayid;
314 r->ur_mode = rec->cr_mode;
315 r->ur_rdev = rec->cr_rdev;
316 r->ur_time = rec->cr_time;
317 r->ur_flags = rec->cr_flags;
319 LASSERT_REQSWAB (req, offset + 1);
320 r->ur_name = lustre_msg_string (req->rq_reqmsg, offset + 1, 0);
321 if (r->ur_name == NULL)
323 r->ur_namelen = req->rq_reqmsg->buflens[offset + 1];
325 LASSERT_REQSWAB (req, offset + 2);
326 if (req->rq_reqmsg->bufcount > offset + 2) {
327 if (S_ISLNK(r->ur_mode)) {
328 r->ur_tgt = lustre_msg_string(req->rq_reqmsg,
330 if (r->ur_tgt == NULL)
332 r->ur_tgtlen = req->rq_reqmsg->buflens[offset + 2];
333 } else if (S_ISDIR(r->ur_mode) ) {
334 /* Stripe info for mkdir - just a 16bit integer */
335 if (req->rq_reqmsg->buflens[offset + 2] != 2) {
336 CERROR("mkdir stripe info does not match "
337 "expected size %d vs 2\n",
338 req->rq_reqmsg->buflens[offset + 2]);
341 r->ur_eadata = lustre_swab_buf (req->rq_reqmsg,
342 offset + 2, 2, __swab16s);
343 r->ur_eadatalen = req->rq_reqmsg->buflens[offset + 2];
344 } else if (S_ISREG(r->ur_mode)){
345 r->ur_eadata = lustre_msg_buf (req->rq_reqmsg,
347 r->ur_eadatalen = req->rq_reqmsg->buflens[offset + 2];
349 /* Hm, no other users so far? */
/* Unpack a REINT_LINK request: swab mds_rec_link (source and target
 * directory ids plus time) and fetch the new link name. */
356 static int mds_link_unpack(struct ptlrpc_request *req, int offset,
357 struct mds_update_record *r)
359 struct mds_rec_link *rec;
362 rec = lustre_swab_reqbuf (req, offset, sizeof (*rec),
363 lustre_swab_mds_rec_link);
367 r->ur_id1 = &rec->lk_id1;
368 r->ur_id2 = &rec->lk_id2;
369 r->ur_time = rec->lk_time;
371 LASSERT_REQSWAB (req, offset + 1);
372 r->ur_name = lustre_msg_string (req->rq_reqmsg, offset + 1, 0);
373 if (r->ur_name == NULL)
375 r->ur_namelen = req->rq_reqmsg->buflens[offset + 1];
/* Unpack a REINT_UNLINK request: swab mds_rec_unlink (mode, parent and
 * child ids, time) and fetch the victim name. */
379 static int mds_unlink_unpack(struct ptlrpc_request *req, int offset,
380 struct mds_update_record *r)
382 struct mds_rec_unlink *rec;
385 rec = lustre_swab_reqbuf (req, offset, sizeof (*rec),
386 lustre_swab_mds_rec_unlink);
390 r->ur_mode = rec->ul_mode;
391 r->ur_id1 = &rec->ul_id1;
392 r->ur_id2 = &rec->ul_id2;
393 r->ur_time = rec->ul_time;
395 LASSERT_REQSWAB (req, offset + 1);
396 r->ur_name = lustre_msg_string(req->rq_reqmsg, offset + 1, 0);
397 if (r->ur_name == NULL)
399 r->ur_namelen = req->rq_reqmsg->buflens[offset + 1];
/* Unpack a REINT_RENAME request: swab mds_rec_rename (the two directory
 * ids plus time) and fetch both the source name and the target name. */
403 static int mds_rename_unpack(struct ptlrpc_request *req, int offset,
404 struct mds_update_record *r)
406 struct mds_rec_rename *rec;
409 rec = lustre_swab_reqbuf (req, offset, sizeof (*rec),
410 lustre_swab_mds_rec_rename);
414 r->ur_id1 = &rec->rn_id1;
415 r->ur_id2 = &rec->rn_id2;
416 r->ur_time = rec->rn_time;
418 LASSERT_REQSWAB (req, offset + 1);
419 r->ur_name = lustre_msg_string(req->rq_reqmsg, offset + 1, 0);
420 if (r->ur_name == NULL)
422 r->ur_namelen = req->rq_reqmsg->buflens[offset + 1];
424 LASSERT_REQSWAB (req, offset + 2);
425 r->ur_tgt = lustre_msg_string(req->rq_reqmsg, offset + 2, 0);
426 if (r->ur_tgt == NULL)
428 r->ur_tgtlen = req->rq_reqmsg->buflens[offset + 2];
/* Unpack a REINT_OPEN request. Open reuses mds_rec_create (open may
 * create); fetch the name and the optional EA buffer at offset + 2. */
432 static int mds_open_unpack(struct ptlrpc_request *req, int offset,
433 struct mds_update_record *r)
435 struct mds_rec_create *rec;
438 rec = lustre_swab_reqbuf (req, offset, sizeof (*rec),
439 lustre_swab_mds_rec_create);
443 r->ur_id1 = &rec->cr_id;
444 r->ur_id2 = &rec->cr_replayid;
445 r->ur_mode = rec->cr_mode;
446 r->ur_rdev = rec->cr_rdev;
447 r->ur_time = rec->cr_time;
448 r->ur_flags = rec->cr_flags;
450 LASSERT_REQSWAB (req, offset + 1);
451 r->ur_name = lustre_msg_string (req->rq_reqmsg, offset + 1, 0);
452 if (r->ur_name == NULL)
454 r->ur_namelen = req->rq_reqmsg->buflens[offset + 1];
456 LASSERT_REQSWAB (req, offset + 2);
457 if (req->rq_reqmsg->bufcount > offset + 2) {
458 r->ur_eadata = lustre_msg_buf(req->rq_reqmsg, offset + 2, 0);
459 if (r->ur_eadata == NULL)
461 r->ur_eadatalen = req->rq_reqmsg->buflens[offset + 2];
/* Dispatch table mapping a REINT_* opcode to its unpacker; slots not
 * listed are NULL and rejected by mds_update_unpack(). */
466 typedef int (*update_unpacker)(struct ptlrpc_request *req, int offset,
467 struct mds_update_record *r);
469 static update_unpacker mds_unpackers[REINT_MAX + 1] = {
470 [REINT_SETATTR] mds_setattr_unpack,
471 [REINT_CREATE] mds_create_unpack,
472 [REINT_LINK] mds_link_unpack,
473 [REINT_UNLINK] mds_unlink_unpack,
474 [REINT_RENAME] mds_rename_unpack,
475 [REINT_OPEN] mds_open_unpack,
/* Entry point for reint unpacking: peek at the opcode (swabbing it
 * manually if the message is byte-swapped — the record itself is left
 * for the per-opcode unpacker), validate it against mds_unpackers[],
 * then dispatch. */
478 int mds_update_unpack(struct ptlrpc_request *req, int offset,
479 struct mds_update_record *rec)
487 * NB don't lustre_swab_reqbuf() here. We're just taking a peek and we
488 * want to leave it to the specific unpacker once we've identified the
491 opcodep = lustre_msg_buf (req->rq_reqmsg, offset, sizeof(*opcodep));
496 if (lustre_msg_swabbed (req->rq_reqmsg))
499 if (opcode > REINT_MAX ||
500 mds_unpackers[opcode] == NULL) {
501 CERROR ("Unexpected opcode %d\n", opcode);
507 rec->ur_opcode = opcode;
509 rc = mds_unpackers[opcode](req, offset, rec);
512 rec->ur_fsuid = req->rq_uid;
518 * here we take simple rule: once uid/fsuid is root, we also squash
519 * the gid/fsgid, don't care setuid/setgid attributes.
/* Apply root squash to the request's sec desc: no-op when squashing is
 * disabled, the peer is the nosquash nid, or neither uid nor fsuid is
 * root. Otherwise replace root uid/gid (and/or fsuid/fsgid) with the
 * configured squash ids and strip CAP_FS_MASK capabilities. */
522 int mds_squash_root(struct mds_obd *mds, struct mds_req_sec_desc *rsd,
525 if (!mds->mds_squash_uid || *peernid == mds->mds_nosquash_nid)
528 if (rsd->rsd_uid && rsd->rsd_fsuid)
531 CDEBUG(D_SEC, "squash req from "LPX64":"
532 "(%u:%u-%u:%u/%x)=>(%u:%u-%u:%u/%x)\n", *peernid,
533 rsd->rsd_uid, rsd->rsd_gid,
534 rsd->rsd_fsuid, rsd->rsd_fsgid, rsd->rsd_cap,
535 rsd->rsd_uid ? rsd->rsd_uid : mds->mds_squash_uid,
536 rsd->rsd_uid ? rsd->rsd_gid : mds->mds_squash_gid,
537 rsd->rsd_fsuid ? rsd->rsd_fsuid : mds->mds_squash_uid,
538 rsd->rsd_fsuid ? rsd->rsd_fsgid : mds->mds_squash_gid,
539 rsd->rsd_cap & ~CAP_FS_MASK);
541 if (rsd->rsd_uid == 0) {
542 rsd->rsd_uid = mds->mds_squash_uid;
543 rsd->rsd_gid = mds->mds_squash_gid;
545 if (rsd->rsd_fsuid == 0) {
546 rsd->rsd_fsuid = mds->mds_squash_uid;
547 rsd->rsd_fsgid = mds->mds_squash_gid;
549 rsd->rsd_cap &= ~CAP_FS_MASK;
554 /********************************
555 * MDS uid/gid mapping handling *
556 ********************************/
/* Allocate an idmap entry mapping remote id @rmt_id to local @lcl_id.
 * Both hash links start empty; refcount starts at 1. */
559 struct mds_idmap_entry* idmap_alloc_entry(__u32 rmt_id, __u32 lcl_id)
561 struct mds_idmap_entry *e;
563 OBD_ALLOC(e, sizeof(*e));
567 INIT_LIST_HEAD(&e->rmt_hash);
568 INIT_LIST_HEAD(&e->lcl_hash);
569 atomic_set(&e->refcount, 1);
/* Unlink @e from whichever hash chains it is on (safe on an entry that
 * was never inserted, thanks to the list_empty checks) and free it. */
576 void idmap_free_entry(struct mds_idmap_entry *e)
578 if (!list_empty(&e->rmt_hash))
579 list_del(&e->rmt_hash);
580 if (!list_empty(&e->lcl_hash))
581 list_del(&e->lcl_hash);
582 OBD_FREE(e, sizeof(*e));
586 int idmap_insert_entry(struct list_head *rmt_hash, struct list_head *lcl_hash,
587 struct mds_idmap_entry *new, const char *warn_msg)
589 struct list_head *rmt_head = &rmt_hash[MDS_IDMAP_HASHFUNC(new->rmt_id)];
590 struct list_head *lcl_head = &lcl_hash[MDS_IDMAP_HASHFUNC(new->lcl_id)];
591 struct mds_idmap_entry *e;
593 list_for_each_entry(e, rmt_head, rmt_hash) {
594 if (e->rmt_id == new->rmt_id &&
595 e->lcl_id == new->lcl_id) {
596 atomic_inc(&e->refcount);
599 if (e->rmt_id == new->rmt_id && warn_msg)
600 CWARN("%s: rmt id %u already map to %u (new %u)\n",
601 warn_msg, e->rmt_id, e->lcl_id, new->lcl_id);
602 if (e->lcl_id == new->lcl_id && warn_msg)
603 CWARN("%s: lcl id %u already be mapped from %u "
604 "(new %u)\n", warn_msg,
605 e->lcl_id, e->rmt_id, new->rmt_id);
608 list_add_tail(rmt_head, &new->rmt_hash);
609 list_add_tail(lcl_head, &new->lcl_hash);
/* Drop one reference on the entry mapping @rmt_id -> @lcl_id; when the
 * refcount hits zero the entry is unhashed from both chains and freed.
 * Caller must hold the table lock. */
614 int idmap_remove_entry(struct list_head *rmt_hash, struct list_head *lcl_hash,
615 __u32 rmt_id, __u32 lcl_id)
617 struct list_head *rmt_head = &rmt_hash[MDS_IDMAP_HASHFUNC(rmt_id)];
618 struct mds_idmap_entry *e;
620 list_for_each_entry(e, rmt_head, rmt_hash) {
621 if (e->rmt_id == rmt_id && e->lcl_id == lcl_id) {
622 if (atomic_dec_and_test(&e->refcount)) {
623 list_del(&e->rmt_hash);
624 list_del(&e->lcl_hash);
625 OBD_FREE(e, sizeof(*e));
/* Add a uid mapping and a gid mapping to @tbl. Entries are allocated
 * outside the lock; if idmap_insert_entry() reports a duplicate the
 * freshly allocated entry is freed (the existing one took a ref). */
634 int mds_idmap_add(struct mds_idmap_table *tbl,
635 uid_t rmt_uid, uid_t lcl_uid,
636 gid_t rmt_gid, gid_t lcl_gid)
638 struct mds_idmap_entry *ue, *ge;
644 ue = idmap_alloc_entry(rmt_uid, lcl_uid);
647 ge = idmap_alloc_entry(rmt_gid, lcl_gid);
649 idmap_free_entry(ue);
653 spin_lock(&tbl->mit_lock);
655 if (idmap_insert_entry(tbl->mit_idmaps[MDS_RMT_UIDMAP_IDX],
656 tbl->mit_idmaps[MDS_LCL_UIDMAP_IDX],
657 ue, "UID mapping")) {
658 idmap_free_entry(ue);
661 if (idmap_insert_entry(tbl->mit_idmaps[MDS_RMT_GIDMAP_IDX],
662 tbl->mit_idmaps[MDS_LCL_GIDMAP_IDX],
663 ge, "GID mapping")) {
664 idmap_free_entry(ge);
667 spin_unlock(&tbl->mit_lock);
/* Remove (one reference of) the given uid and gid mappings from @tbl
 * under the table lock. */
671 int mds_idmap_del(struct mds_idmap_table *tbl,
672 uid_t rmt_uid, uid_t lcl_uid,
673 gid_t rmt_gid, gid_t lcl_gid)
680 spin_lock(&tbl->mit_lock);
681 idmap_remove_entry(tbl->mit_idmaps[MDS_RMT_UIDMAP_IDX],
682 tbl->mit_idmaps[MDS_LCL_UIDMAP_IDX],
684 idmap_remove_entry(tbl->mit_idmaps[MDS_RMT_GIDMAP_IDX],
685 tbl->mit_idmaps[MDS_LCL_GIDMAP_IDX],
687 spin_unlock(&tbl->mit_lock);
/* Look up @id in a hash chain: forward (remote->local, walking the
 * rmt_hash links) or, with @reverse set, local->remote via lcl_hash.
 * Returns the mapped id, or MDS_IDMAP_NOTFOUND when absent.
 * NOTE(review): the match/return lines are elided from this listing. */
692 __u32 idmap_lookup_id(struct list_head *hash, int reverse, __u32 id)
694 struct list_head *head = &hash[MDS_IDMAP_HASHFUNC(id)];
695 struct mds_idmap_entry *e;
698 list_for_each_entry(e, head, rmt_hash) {
702 return MDS_IDMAP_NOTFOUND;
704 list_for_each_entry(e, head, lcl_hash) {
708 return MDS_IDMAP_NOTFOUND;
/* Map a uid through @tbl: forward uses the remote-uid table, @reverse
 * uses the local-uid table. Returns MDS_IDMAP_NOTFOUND on a miss. */
712 int mds_idmap_lookup_uid(struct mds_idmap_table *tbl, int reverse, uid_t uid)
714 struct list_head *hash;
717 return MDS_IDMAP_NOTFOUND;
720 hash = tbl->mit_idmaps[MDS_RMT_UIDMAP_IDX];
722 hash = tbl->mit_idmaps[MDS_LCL_UIDMAP_IDX];
724 spin_lock(&tbl->mit_lock);
725 uid = idmap_lookup_id(hash, reverse, uid);
726 spin_unlock(&tbl->mit_lock);
/* Gid counterpart of mds_idmap_lookup_uid(): same direction selection
 * and locking, against the gid tables. */
731 int mds_idmap_lookup_gid(struct mds_idmap_table *tbl, int reverse, gid_t gid)
733 struct list_head *hash;
736 return MDS_IDMAP_NOTFOUND;
739 hash = tbl->mit_idmaps[MDS_RMT_GIDMAP_IDX];
741 hash = tbl->mit_idmaps[MDS_LCL_GIDMAP_IDX];
743 spin_lock(&tbl->mit_lock);
744 gid = idmap_lookup_id(hash, reverse, gid);
745 spin_unlock(&tbl->mit_lock);
/* Allocate an idmap table and initialize its lock and every hash-chain
 * head in all MDS_IDMAP_N_HASHES x MDS_IDMAP_HASHSIZE buckets. */
750 struct mds_idmap_table *mds_idmap_alloc()
752 struct mds_idmap_table *tbl;
755 OBD_ALLOC(tbl, sizeof(*tbl));
759 spin_lock_init(&tbl->mit_lock);
760 for (i = 0; i < MDS_IDMAP_N_HASHES; i++)
761 for (j = 0; j < MDS_IDMAP_HASHSIZE; j++)
762 INIT_LIST_HEAD(&tbl->mit_idmaps[i][j]);
/* Empty every bucket of a remote-id hash array, freeing each entry
 * (idmap_free_entry also unlinks it from the paired local chain). */
767 static void idmap_clear_rmt_hash(struct list_head *list)
769 struct mds_idmap_entry *e;
772 for (i = 0; i < MDS_IDMAP_HASHSIZE; i++) {
773 while (!list_empty(&list[i])) {
774 e = list_entry(list[i].next, struct mds_idmap_entry,
/* Tear down an idmap table: clear both remote hash arrays (which also
 * empties the local chains), assert the local chains are indeed empty,
 * then free the table itself. */
781 void mds_idmap_free(struct mds_idmap_table *tbl)
785 spin_lock(&tbl->mit_lock);
786 idmap_clear_rmt_hash(tbl->mit_idmaps[MDS_RMT_UIDMAP_IDX]);
787 idmap_clear_rmt_hash(tbl->mit_idmaps[MDS_RMT_GIDMAP_IDX]);
789 /* paranoid checking */
790 for (i = 0; i < MDS_IDMAP_HASHSIZE; i++) {
791 LASSERT(list_empty(&tbl->mit_idmaps[MDS_LCL_UIDMAP_IDX][i]));
792 LASSERT(list_empty(&tbl->mit_idmaps[MDS_LCL_GIDMAP_IDX][i]));
794 spin_unlock(&tbl->mit_lock);
796 OBD_FREE(tbl, sizeof(*tbl));
799 /*********************************
800 * helpers doing mapping for MDS *
801 *********************************/
804 * we allow remote setuid/setgid to an "authencated" one,
805 * this policy probably change later.
/* Map every id in a remote client's sec desc (uid, gid, fsuid, fsgid)
 * to its local counterpart via the export's idmap table. fsuid/fsgid
 * lookups are skipped when equal to uid/gid. Any missing mapping is an
 * error (CERROR + elided failure return). */
808 int mds_req_secdesc_do_map(struct mds_export_data *med,
809 struct mds_req_sec_desc *rsd)
811 struct mds_idmap_table *idmap = med->med_idmap;
815 uid = mds_idmap_lookup_uid(idmap, 0, rsd->rsd_uid);
816 if (uid == MDS_IDMAP_NOTFOUND) {
817 CERROR("can't find map for uid %u\n", rsd->rsd_uid);
821 if (rsd->rsd_uid == rsd->rsd_fsuid)
824 fsuid = mds_idmap_lookup_uid(idmap, 0, rsd->rsd_fsuid);
825 if (fsuid == MDS_IDMAP_NOTFOUND) {
826 CERROR("can't find map for fsuid %u\n", rsd->rsd_fsuid);
831 gid = mds_idmap_lookup_gid(idmap, 0, rsd->rsd_gid);
832 if (gid == MDS_IDMAP_NOTFOUND) {
833 CERROR("can't find map for gid %u\n", rsd->rsd_gid);
837 if (rsd->rsd_gid == rsd->rsd_fsgid)
840 fsgid = mds_idmap_lookup_gid(idmap, 0, rsd->rsd_fsgid);
841 if (fsgid == MDS_IDMAP_NOTFOUND) {
842 CERROR("can't find map for fsgid %u\n", rsd->rsd_fsgid);
849 rsd->rsd_fsuid = fsuid;
850 rsd->rsd_fsgid = fsgid;
/* For a remote client, reverse-map the uid/gid in an outgoing mds_body
 * back to the client's remote ids. When no reverse mapping exists the
 * mode's owner (resp. group) permission bits are replaced with a copy
 * of the 'other' bits, so the unmapped id gets only world access. */
855 void mds_body_do_reverse_map(struct mds_export_data *med,
856 struct mds_body *body)
861 if (!med->med_remote)
865 if (body->valid & OBD_MD_FLUID) {
866 uid = mds_idmap_lookup_uid(med->med_idmap, 1, body->uid);
867 if (uid == MDS_IDMAP_NOTFOUND) {
869 if (body->valid & OBD_MD_FLMODE) {
870 body->mode = (body->mode & ~S_IRWXU) |
871 ((body->mode & S_IRWXO) << 6);
876 if (body->valid & OBD_MD_FLGID) {
877 gid = mds_idmap_lookup_gid(med->med_idmap, 1, body->gid);
878 if (gid == MDS_IDMAP_NOTFOUND) {
880 if (body->valid & OBD_MD_FLMODE) {
881 body->mode = (body->mode & ~S_IRWXG) |
882 ((body->mode & S_IRWXO) << 3);
891 /**********************
892 * MDS ucred handling *
893 **********************/
/* Release the ucred's group_info reference, if any, and clear it. */
895 static inline void drop_ucred_ginfo(struct lvfs_ucred *ucred)
897 if (ucred->luc_ginfo) {
898 put_group_info(ucred->luc_ginfo);
899 ucred->luc_ginfo = NULL;
/* Release the ucred's LSD reference, if any, and clear it. */
903 static inline void drop_ucred_lsd(struct lvfs_ucred *ucred)
905 if (ucred->luc_lsd) {
906 mds_put_lsd(ucred->luc_lsd);
907 ucred->luc_lsd = NULL;
912 * the heart of the uid/gid handling and security checking.
914 * root could set any group_info if we allowed setgroups, while
915 * normal user only could 'reduce' their group members -- which
916 * is somewhat expensive.
918 * authenticated as mds user (using mds service credential) could
919 * bypass all checkings.
/* Build the lvfs_ucred used for filesystem operations on behalf of a
 * client request. Pipeline, per the visible code:
 *   1. decide whether the request came over strong (GSS auth/priv) sec;
 *   2. cross-check the claimed uid against the authenticated/mapped
 *      uid (local vs remote client), mapping the sec desc for remotes;
 *   3. look up the LSD for the uid and fetch its permission bits;
 *   4. enforce setuid/setgid/setgroups permissions (skipped for the
 *      special mds user);
 *   5. apply root squash and strip fs capabilities for non-root;
 *   6. install supplementary groups: LSD groups, then either the
 *      client-supplied set (root) or the intersection of client-sent
 *      gids with the LSD set (normal users).
 * NOTE(review): many failure-return lines are elided in this listing. */
921 int mds_init_ucred(struct lvfs_ucred *ucred,
922 struct ptlrpc_request *req,
923 struct mds_req_sec_desc *rsd)
925 struct mds_obd *mds = &req->rq_export->exp_obd->u.mds;
926 struct mds_export_data *med = &req->rq_export->u.eu_mds_data;
927 struct lustre_sec_desc *lsd;
928 ptl_nid_t peernid = req->rq_peer.peer_id.nid;
929 struct group_info *gnew;
930 unsigned int setuid, setgid, strong_sec, root_squashed;
936 LASSERT(rsd->rsd_ngroups <= LUSTRE_MAX_GROUPS);
938 if (SEC_FLAVOR_MAJOR(req->rq_req_secflvr) == PTLRPCS_FLVR_MAJOR_GSS &&
939 (SEC_FLAVOR_SVC(req->rq_req_secflvr) == PTLRPCS_SVC_AUTH ||
940 SEC_FLAVOR_SVC(req->rq_req_secflvr) == PTLRPCS_SVC_PRIV))
945 LASSERT(!(req->rq_remote_realm && !strong_sec));
947 if (strong_sec && req->rq_auth_uid == -1) {
948 CWARN("user not authenticated, deny access\n");
952 /* sanity check: if we use strong authentication, we expect the
953 * uid which client claimed is true.
954 * not apply to special mds user .
956 if (!req->rq_auth_usr_mds && strong_sec) {
957 if (!med->med_remote) {
958 if (req->rq_auth_uid != rsd->rsd_uid) {
959 CERROR("local client "LPU64": auth uid %u "
960 "while client claim %u:%u/%u:%u\n",
961 peernid, req->rq_auth_uid,
962 rsd->rsd_uid, rsd->rsd_gid,
963 rsd->rsd_fsuid, rsd->rsd_fsgid);
967 if (req->rq_mapped_uid == MDS_IDMAP_NOTFOUND) {
968 CWARN("no mapping found, deny\n");
972 if (mds_req_secdesc_do_map(med, rsd))
975 if (req->rq_mapped_uid != rsd->rsd_uid) {
976 CERROR("remote client "LPU64": auth uid %u "
977 "while client claim %u:%u/%u:%u\n",
978 peernid, req->rq_auth_uid,
979 rsd->rsd_uid, rsd->rsd_gid,
980 rsd->rsd_fsuid, rsd->rsd_fsgid);
986 /* now LSD come into play */
987 ucred->luc_ginfo = NULL;
988 ucred->luc_lsd = lsd = mds_get_lsd(rsd->rsd_uid);
991 CERROR("Deny access without LSD: uid %d\n", rsd->rsd_uid);
995 lsd_perms = mds_lsd_get_perms(lsd, med->med_remote, 0, peernid);
997 /* check setuid/setgid permissions.
998 * again not apply to special mds user.
1000 if (!req->rq_auth_usr_mds) {
1001 /* find out the setuid/setgid attempt */
1002 setuid = (rsd->rsd_uid != rsd->rsd_fsuid);
1003 setgid = (rsd->rsd_gid != rsd->rsd_fsgid ||
1004 rsd->rsd_gid != lsd->lsd_gid);
1006 /* check permission of setuid */
1007 if (setuid && !(lsd_perms & LSD_PERM_SETUID)) {
1008 CWARN("mds blocked setuid attempt (%u -> %u) "
1009 "from "LPU64"\n", rsd->rsd_uid, rsd->rsd_fsuid,
1014 /* check permission of setgid */
1015 if (setgid && !(lsd_perms & LSD_PERM_SETGID)) {
1016 CWARN("mds blocked setgid attempt (%u:%u/%u:%u -> %u) "
1017 "from "LPU64"\n", rsd->rsd_uid, rsd->rsd_gid,
1018 rsd->rsd_fsuid, rsd->rsd_fsgid, lsd->lsd_gid,
1024 root_squashed = mds_squash_root(mds, rsd, &peernid);
1026 /* remove privilege for non-root user */
1028 rsd->rsd_cap &= ~CAP_FS_MASK;
1030 /* by now every fields other than groups in rsd have been granted */
1031 ucred->luc_uid = rsd->rsd_uid;
1032 ucred->luc_gid = rsd->rsd_gid;
1033 ucred->luc_fsuid = rsd->rsd_fsuid;
1034 ucred->luc_fsgid = rsd->rsd_fsgid;
1035 ucred->luc_cap = rsd->rsd_cap;
1037 /* don't use any supplementary group if we squashed root.
1038 * XXX The exact behavior of root_squash is not defined, we just
1039 * keep the reminder here */
1043 /* install groups from LSD */
1044 if (lsd->lsd_ginfo) {
1045 ucred->luc_ginfo = lsd->lsd_ginfo;
1046 get_group_info(ucred->luc_ginfo);
1049 /* everything is done if we don't allow setgroups, or it is
1050 * from remote client (which implies forced to be no-setgroups).
1052 * Note: remote user's supplementary groups sent along the request
1053 * (if any) are all ignored, but we make the mapped local user's
1054 * supplementary groups take effect.
1056 if (med->med_remote || !(lsd_perms & LSD_PERM_SETGRP))
1059 /* root could set any groups as he want (if allowed), normal
1060 * users only could reduce his group array.
1062 if (ucred->luc_uid == 0) {
1063 drop_ucred_ginfo(ucred);
1065 if (rsd->rsd_ngroups == 0)
1068 gnew = groups_alloc(rsd->rsd_ngroups);
1070 CERROR("out of memory\n");
1071 drop_ucred_lsd(ucred);
1074 groups_from_buffer(gnew, rsd->rsd_groups);
1075 groups_sort(gnew); /* don't rely on client doing this */
1077 ucred->luc_ginfo = gnew;
1079 __u32 set = 0, cur = 0;
1080 struct group_info *ginfo = ucred->luc_ginfo;
1085 /* Note: freeing a group_info count on 'nblocks' instead of
1086 * 'ngroups', thus we can safely alloc enough buffer and reduce
1087 * and ngroups number later.
1089 gnew = groups_alloc(rsd->rsd_ngroups);
1091 CERROR("out of memory\n");
1092 drop_ucred_ginfo(ucred);
1093 drop_ucred_lsd(ucred);
1097 while (cur < rsd->rsd_ngroups) {
1098 if (groups_search(ginfo, rsd->rsd_groups[cur])) {
1099 GROUP_AT(gnew, set) = rsd->rsd_groups[cur];
1104 gnew->ngroups = set;
1106 put_group_info(ucred->luc_ginfo);
1107 ucred->luc_ginfo = gnew;
/* Counterpart of mds_init_ucred(): drop the group_info and LSD
 * references held by @ucred. */
1112 void mds_exit_ucred(struct lvfs_ucred *ucred)
1115 drop_ucred_ginfo(ucred);
1116 drop_ucred_lsd(ucred);