1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
4 * Copyright (c) 2003 Cluster File Systems, Inc.
6 * This file is part of Lustre, http://www.lustre.org.
8 * Lustre is free software; you can redistribute it and/or
9 * modify it under the terms of version 2 of the GNU General Public
10 * License as published by the Free Software Foundation.
12 * Lustre is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
17 * You should have received a copy of the GNU General Public License
18 * along with Lustre; if not, write to the Free Software
19 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
22 #define DEBUG_SUBSYSTEM S_MDS
24 #include <linux/config.h>
25 #include <linux/module.h>
26 #include <linux/kernel.h>
28 #include <linux/string.h>
29 #include <linux/stat.h>
30 #include <linux/errno.h>
31 #include <linux/version.h>
32 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
33 # include <linux/locks.h> // for wait_on_buffer
35 # include <linux/buffer_head.h> // for wait_on_buffer
37 #include <linux/unistd.h>
39 #include <asm/system.h>
40 #include <asm/uaccess.h>
43 #include <linux/stat.h>
44 #include <asm/uaccess.h>
45 #include <linux/slab.h>
46 #include <asm/segment.h>
47 #include <linux/random.h>
49 #include <linux/obd_support.h>
50 #include <linux/lustre_lib.h>
51 #include <linux/lustre_sec.h>
52 #include <linux/lustre_ucache.h>
53 #include <linux/lustre_gs.h>
54 #include <linux/lustre_fsfilt.h>
56 #include "mds_internal.h"
58 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,4)
59 struct group_info *groups_alloc(int ngroups)
61 struct group_info *ginfo;
63 LASSERT(ngroups <= NGROUPS_SMALL);
65 OBD_ALLOC(ginfo, sizeof(*ginfo) + 1 * sizeof(gid_t *));
68 ginfo->ngroups = ngroups;
70 ginfo->blocks[0] = ginfo->small_block;
71 atomic_set(&ginfo->usage, 1);
76 void groups_free(struct group_info *ginfo)
78 LASSERT(ginfo->ngroups <= NGROUPS_SMALL);
79 LASSERT(ginfo->nblocks == 1);
80 LASSERT(ginfo->blocks[0] == ginfo->small_block);
82 OBD_FREE(ginfo, sizeof(*ginfo) + 1 * sizeof(gid_t *));
85 /* for 2.4 the group number is small, so simply search the
88 int groups_search(struct group_info *ginfo, gid_t grp)
95 for (i = 0; i < ginfo->ngroups; i++)
96 if (GROUP_AT(ginfo, i) == grp)
103 void groups_sort(struct group_info *ginfo)
105 int base, max, stride;
106 int gidsetsize = ginfo->ngroups;
108 for (stride = 1; stride < gidsetsize; stride = 3 * stride + 1)
113 max = gidsetsize - stride;
114 for (base = 0; base < max; base++) {
116 int right = left + stride;
117 gid_t tmp = GROUP_AT(ginfo, right);
119 while (left >= 0 && GROUP_AT(ginfo, left) > tmp) {
120 GROUP_AT(ginfo, right) =
121 GROUP_AT(ginfo, left);
125 GROUP_AT(ginfo, right) = tmp;
131 int groups_search(struct group_info *ginfo, gid_t grp)
139 right = ginfo->ngroups;
140 while (left < right) {
141 int mid = (left + right) / 2;
142 int cmp = grp - GROUP_AT(ginfo, mid);
154 void groups_from_buffer(struct group_info *ginfo, __u32 *gids)
156 int i, ngroups = ginfo->ngroups;
158 for (i = 0; i < ginfo->nblocks; i++) {
159 int count = min(NGROUPS_PER_BLOCK, ngroups);
161 memcpy(ginfo->blocks[i], gids, count * sizeof(__u32));
162 gids += NGROUPS_PER_BLOCK;
/*
 * Pack the identifiers cached on @dentry into @id: inode number,
 * generation, fid and the owning MDS number.
 * NOTE(review): this view of the source is truncated (the trailing
 * parameter(s), braces and any conditional structure are missing);
 * code left byte-identical.
 */
void mds_pack_dentry2id(struct obd_device *obd,
                        struct lustre_id *id,
                        struct dentry *dentry,
        id_ino(id) = dentry->d_inum;
        id_gen(id) = dentry->d_generation;
        /* cluster-wide identity: fid plus the MDS that owns the object */
        id_fid(id) = dentry->d_fid;
        id_group(id) = dentry->d_mdsnum;
/*
 * Fill the id1 field of a reply body from @dentry via
 * mds_pack_dentry2id() and set the corresponding valid bits.
 * NOTE(review): source truncated here (body parameter, braces and the
 * rest of the valid-mask expression are missing); code left untouched.
 */
void mds_pack_dentry2body(struct obd_device *obd,
                          struct dentry *dentry,
        b->valid |= OBD_MD_FLID | OBD_MD_FLGENER |
        b->valid |= OBD_MD_FID;
        mds_pack_dentry2id(obd, &b->id1, dentry, fid);
/*
 * Pack @inode's identity into @id: reads the stored self-id (sid) from
 * the inode EA via mds_read_inode_sid(), then fills in inode number,
 * generation and type bits.
 * NOTE(review): source truncated (missing parameters, braces, the
 * else-branch structure around the two mds_read_inode_sid() calls and
 * the return); code left byte-identical.
 */
int mds_pack_inode2id(struct obd_device *obd,
                      struct lustre_id *id,
        /* we have to avoid deadlock: only read the sid under i_sem if we
         * can take it without blocking. */
        if (!down_trylock(&inode->i_sem)) {
                rc = mds_read_inode_sid(obd, inode, id);
                rc = mds_read_inode_sid(obd, inode, id);
        id_ino(id) = inode->i_ino;
        id_gen(id) = inode->i_generation;
        id_type(id) = (S_IFMT & inode->i_mode);
/*
 * Fill @id directly from @inode fields: inode number, this MDS's
 * number as the group, generation and type bits.  Unlike
 * mds_pack_inode2id() this does not read the stored sid EA.
 * NOTE(review): source truncated (braces and any use of @fid are not
 * visible); code left byte-identical.
 */
void mds_inode2id(struct obd_device *obd, struct lustre_id *id,
                  struct inode *inode, __u64 fid)
        struct mds_obd *mds = &obd->u.mds;

        LASSERT(inode != NULL);

        id_ino(id) = inode->i_ino;
        id_group(id) = mds->mds_num;
        id_gen(id) = inode->i_generation;
        id_type(id) = (S_IFMT & inode->i_mode);
/*
 * Copy the inode's GS crypto key (stored in the EA_KEY extended
 * attribute) into two reply-message buffers at *offset: first the key
 * size, then the key itself.  Advances *offset past both buffers and
 * sets OBD_MD_FLKEY in @body->valid on success.
 * NOTE(review): source truncated (error-handling branches, size/buf
 * declarations and the return path are partially missing); code left
 * byte-identical.
 */
int mds_pack_gskey(struct obd_device *obd, struct lustre_msg *repmsg,
                   int *offset, struct mds_body *body, struct inode *inode)
        struct crypto_key_md *md_key;
        struct crypto_key *ckey;
        __u32 buflen, *sizep;

        /* first reply buffer carries the key size */
        sizep = lustre_msg_buf(repmsg, (*offset)++, 4);
                CERROR("can't locate returned ckey size buf\n");
        *sizep = cpu_to_le32(sizeof(*ckey));

        OBD_ALLOC(md_key, sizeof(*md_key));

        /* second reply buffer receives the key payload */
        buflen = repmsg->buflens[*offset];
        buf = lustre_msg_buf(repmsg, (*offset)++, buflen);

        size = fsfilt_get_md(obd, inode, md_key, sizeof(*md_key),
                CERROR("Can not get gskey from MDS ino %lu rc %d\n",
                GOTO(out, rc = size);

        /* sanity: the stored EA must carry the expected magic */
        if (le32_to_cpu(md_key->md_magic) != MD_KEY_MAGIC) {
                CDEBUG(D_INFO, "given match %x != magic %x\n",
                       md_key->md_magic, MD_KEY_MAGIC);

        CDEBUG(D_INFO, "get key %s mac %s for ino %lu size %d \n",
               md_key->md_ck.ck_key, md_key->md_ck.ck_mac, inode->i_ino, size);
        ckey=(struct crypto_key*)buf;
        memcpy(ckey, &md_key->md_ck, sizeof(*ckey));
        body->valid |= OBD_MD_FLKEY;

        OBD_FREE(md_key, sizeof(*md_key));
/*
 * Generate a fresh random key into @ckey->ck_key for MKS-type keys.
 * NOTE(review): braces and return are missing from this view; code
 * left byte-identical.
 */
static int mds_get_gskey(struct inode *inode, struct crypto_key *ckey)
        /* tmp: create the gs key here with kernel randomness */
        LASSERT(ckey->ck_type == MKS_TYPE);
        get_random_bytes(ckey->ck_key, KEY_SIZE);
/*
 * Store a crypto key on @inode's EA_KEY extended attribute.  For
 * MKS-type keys a fresh key is generated first; for GKS the caller's
 * key/mac are used.  @valid's ATTR_KEY / ATTR_MAC bits select which
 * parts of the caller-supplied @key are copied in.
 * NOTE(review): source truncated (allocation failure check, some
 * parameters and the return path are missing); code left
 * byte-identical.
 */
int mds_set_gskey(struct obd_device *obd, void *handle,
                  struct inode *inode, void *key, int key_len,
        struct crypto_key_md *md_key = NULL;
        struct crypto_key *ckey = (struct crypto_key *)key;

        LASSERT(ckey->ck_type == MKS_TYPE || ckey->ck_type == GKS_TYPE);

        OBD_ALLOC(md_key, sizeof(*md_key));
        if (ckey->ck_type == MKS_TYPE)
                mds_get_gskey(inode, ckey);

        /* read any existing key EA so we can detect a reset */
        rc = fsfilt_get_md(obd, inode, md_key, sizeof(*md_key),

        LASSERT(le32_to_cpu(md_key->md_magic) == MD_KEY_MAGIC ||
                md_key->md_magic == 0);

        if (le32_to_cpu(md_key->md_magic) == MD_KEY_MAGIC) {
                CDEBUG(D_INFO, "reset key %s mac %s", md_key->md_ck.ck_mac,
                       md_key->md_ck.ck_key);

        md_key->md_magic = cpu_to_le32(MD_KEY_MAGIC);
        /* get key and mac from request buffer */
        if (valid & ATTR_MAC) {
                memcpy(md_key->md_ck.ck_mac, ckey->ck_mac, MAC_SIZE);
                CDEBUG(D_INFO, "set mac %s for ino %lu \n",
                       md_key->md_ck.ck_mac, inode->i_ino);
        if (valid & ATTR_KEY) {
                memcpy(md_key->md_ck.ck_key, ckey->ck_key, KEY_SIZE);
                CDEBUG(D_INFO, "set key %s for ino %lu \n",
                       md_key->md_ck.ck_key, inode->i_ino);
        rc = fsfilt_set_md(obd, inode, handle, md_key, sizeof(*md_key), EA_KEY);

        OBD_FREE(md_key, sizeof(*md_key));
347 int mds_set_crypto_type(struct obd_device *obd, void *val, __u32 vallen)
349 struct mds_obd *mds = &obd->u.mds;
351 if (vallen >= strlen("mks") &&
352 memcmp(val, "mks", vallen) == 0) {
353 mds->mds_crypto_type = MKS_TYPE;
354 CDEBUG(D_IOCTL, "mks type\n");
356 if (vallen >= strlen("gks") &&
357 memcmp(val, "gks", vallen) == 0) {
358 mds->mds_crypto_type = GKS_TYPE;
359 CDEBUG(D_IOCTL, "gks type \n");
/* Note that we can copy all of the fields, just some will not be "valid" */
/*
 * Fill reply body @b from @inode: timestamps, mode, size, ownership,
 * flags, rdev and link count, then pack the lustre_id via
 * mds_pack_inode2id().  Orphan inodes get an adjusted link count.
 * NOTE(review): source truncated (some branch bodies and brace lines
 * are missing); code left byte-identical.
 */
void mds_pack_inode2body(struct obd_device *obd, struct mds_body *b,
                         struct inode *inode, int fid)
        b->valid |= OBD_MD_FLID | OBD_MD_FLCTIME | OBD_MD_FLUID |
                OBD_MD_FLGID | OBD_MD_FLFLAGS | OBD_MD_FLTYPE |
                OBD_MD_FLMODE | OBD_MD_FLNLINK | OBD_MD_FLGENER |
                OBD_MD_FLATIME | OBD_MD_FLMTIME; /* bug 2020 */

        /* non-regular files keep size/blocks/time validity on the MDS */
        if (!S_ISREG(inode->i_mode)) {
                b->valid |= OBD_MD_FLSIZE | OBD_MD_FLBLOCKS |
                        OBD_MD_FLATIME | OBD_MD_FLMTIME |
        b->atime = LTIME_S(inode->i_atime);
        b->mtime = LTIME_S(inode->i_mtime);
        b->ctime = LTIME_S(inode->i_ctime);
        b->mode = inode->i_mode;
        b->size = inode->i_size;
        b->blocks = inode->i_blocks;
        b->uid = inode->i_uid;
        b->gid = inode->i_gid;
        b->flags = inode->i_flags;
        b->rdev = inode->i_rdev;

        /* Return the correct link count for orphan inodes */
        if (mds_inode_is_orphan(inode)) {
        } else if (S_ISDIR(inode->i_mode)) {
        b->nlink = inode->i_nlink;

        b->valid |= OBD_MD_FID;

        mds_pack_inode2id(obd, &b->id1, inode, fid);
/*
 * Unpack a REINT_SETATTR request at @offset into @r: swab the
 * mds_rec_setattr record, copy it into the iattr, then pick up the
 * optional EA buffers at offset+1..offset+3.
 * NOTE(review): source truncated (error returns for missing buffers
 * and the final return are not visible); code left byte-identical.
 */
static int mds_setattr_unpack(struct ptlrpc_request *req, int offset,
                              struct mds_update_record *r)
        struct iattr *attr = &r->ur_iattr;
        struct mds_rec_setattr *rec;

        rec = lustre_swab_reqbuf(req, offset, sizeof(*rec),
                                 lustre_swab_mds_rec_setattr);

        r->ur_id1 = &rec->sa_id;
        r->ur_flags = rec->sa_flags;
        attr->ia_valid = rec->sa_valid;
        attr->ia_mode = rec->sa_mode;
        attr->ia_uid = rec->sa_uid;
        attr->ia_gid = rec->sa_gid;
        attr->ia_size = rec->sa_size;
        LTIME_S(attr->ia_atime) = rec->sa_atime;
        LTIME_S(attr->ia_mtime) = rec->sa_mtime;
        LTIME_S(attr->ia_ctime) = rec->sa_ctime;
        attr->ia_attr_flags = rec->sa_attr_flags;

        LASSERT_REQSWAB(req, offset + 1);
        /* optional trailing EA buffers: eadata, ea2data, ea3data */
        if (req->rq_reqmsg->bufcount > offset + 1) {
                r->ur_eadata = lustre_msg_buf(req->rq_reqmsg,
                if (r->ur_eadata == NULL)
                r->ur_eadatalen = req->rq_reqmsg->buflens[offset + 1];

        if (req->rq_reqmsg->bufcount > offset + 2) {
                r->ur_ea2data = lustre_msg_buf(req->rq_reqmsg, offset + 2, 0);
                if (r->ur_ea2data == NULL)
                r->ur_ea2datalen = req->rq_reqmsg->buflens[offset + 2];

        if (req->rq_reqmsg->bufcount > offset + 3) {
                r->ur_ea3data = lustre_msg_buf(req->rq_reqmsg, offset + 3, 0);
                if (r->ur_ea3data == NULL)
                r->ur_ea3datalen = req->rq_reqmsg->buflens[offset + 3];
/*
 * Unpack a REINT_CREATE request at @offset into @r: the create record,
 * the name at offset+1, and a mode-dependent buffer at offset+2
 * (symlink target, mkdir stripe count, or regular-file EA).
 * NOTE(review): source truncated (error returns and some buffer-size
 * arguments are not visible); code left byte-identical.
 */
static int mds_create_unpack(struct ptlrpc_request *req, int offset,
                             struct mds_update_record *r)
        struct mds_rec_create *rec;

        rec = lustre_swab_reqbuf(req, offset, sizeof(*rec),
                                 lustre_swab_mds_rec_create);

        r->ur_id1 = &rec->cr_id;
        r->ur_id2 = &rec->cr_replayid;
        r->ur_mode = rec->cr_mode;
        r->ur_rdev = rec->cr_rdev;
        r->ur_time = rec->cr_time;
        r->ur_flags = rec->cr_flags;

        LASSERT_REQSWAB(req, offset + 1);
        r->ur_name = lustre_msg_string(req->rq_reqmsg, offset + 1, 0);
        if (r->ur_name == NULL)
        r->ur_namelen = req->rq_reqmsg->buflens[offset + 1];

        LASSERT_REQSWAB(req, offset + 2);
        if (req->rq_reqmsg->bufcount > offset + 2) {
                if (S_ISLNK(r->ur_mode)) {
                        /* symlink: buffer is the link target string */
                        r->ur_tgt = lustre_msg_string(req->rq_reqmsg,
                        if (r->ur_tgt == NULL)
                        r->ur_tgtlen = req->rq_reqmsg->buflens[offset + 2];
                } else if (S_ISDIR(r->ur_mode) ) {
                        /* Stripe info for mkdir - just a 16bit integer */
                        if (req->rq_reqmsg->buflens[offset + 2] != 2) {
                                CERROR("mkdir stripe info does not match "
                                       "expected size %d vs 2\n",
                                       req->rq_reqmsg->buflens[offset + 2]);
                        r->ur_eadata = lustre_swab_buf(req->rq_reqmsg,
                        r->ur_eadatalen = req->rq_reqmsg->buflens[offset + 2];
                } else if (S_ISREG(r->ur_mode)){
                        /* regular file: opaque EA blob, no swabbing */
                        r->ur_eadata = lustre_msg_buf(req->rq_reqmsg,
                        r->ur_eadatalen = req->rq_reqmsg->buflens[offset + 2];
                        /* Hm, no other users so far? */
/*
 * Unpack a REINT_LINK request at @offset into @r: source and target
 * ids from the link record, plus the new name at offset+1.
 * NOTE(review): source truncated (error return and final return are
 * not visible); code left byte-identical.
 */
static int mds_link_unpack(struct ptlrpc_request *req, int offset,
                           struct mds_update_record *r)
        struct mds_rec_link *rec;

        rec = lustre_swab_reqbuf(req, offset, sizeof(*rec),
                                 lustre_swab_mds_rec_link);

        r->ur_id1 = &rec->lk_id1;
        r->ur_id2 = &rec->lk_id2;
        r->ur_time = rec->lk_time;
        r->ur_flags = rec->lk_flags;

        LASSERT_REQSWAB(req, offset + 1);
        r->ur_name = lustre_msg_string(req->rq_reqmsg, offset + 1, 0);
        if (r->ur_name == NULL)
        r->ur_namelen = req->rq_reqmsg->buflens[offset + 1];
/*
 * Unpack a REINT_UNLINK request at @offset into @r: mode, parent and
 * child ids from the unlink record, plus the victim name at offset+1.
 * NOTE(review): source truncated (error return and final return are
 * not visible); code left byte-identical.
 */
static int mds_unlink_unpack(struct ptlrpc_request *req, int offset,
                             struct mds_update_record *r)
        struct mds_rec_unlink *rec;

        rec = lustre_swab_reqbuf(req, offset, sizeof (*rec),
                                 lustre_swab_mds_rec_unlink);

        r->ur_mode = rec->ul_mode;
        r->ur_id1 = &rec->ul_id1;
        r->ur_id2 = &rec->ul_id2;
        r->ur_time = rec->ul_time;
        r->ur_flags = rec->ul_flags;

        LASSERT_REQSWAB(req, offset + 1);
        r->ur_name = lustre_msg_string(req->rq_reqmsg, offset + 1, 0);
        if (r->ur_name == NULL)
        r->ur_namelen = req->rq_reqmsg->buflens[offset + 1];
/*
 * Unpack a REINT_RENAME request at @offset into @r: the two directory
 * ids from the rename record, the source name at offset+1 and the
 * target name at offset+2.
 * NOTE(review): source truncated (error returns and final return are
 * not visible); code left byte-identical.
 */
static int mds_rename_unpack(struct ptlrpc_request *req, int offset,
                             struct mds_update_record *r)
        struct mds_rec_rename *rec;

        rec = lustre_swab_reqbuf(req, offset, sizeof (*rec),
                                 lustre_swab_mds_rec_rename);

        r->ur_id1 = &rec->rn_id1;
        r->ur_id2 = &rec->rn_id2;
        r->ur_time = rec->rn_time;
        r->ur_flags = rec->rn_flags;

        LASSERT_REQSWAB(req, offset + 1);
        r->ur_name = lustre_msg_string(req->rq_reqmsg, offset + 1, 0);
        if (r->ur_name == NULL)
        r->ur_namelen = req->rq_reqmsg->buflens[offset + 1];

        LASSERT_REQSWAB(req, offset + 2);
        r->ur_tgt = lustre_msg_string(req->rq_reqmsg, offset + 2, 0);
        if (r->ur_tgt == NULL)
        r->ur_tgtlen = req->rq_reqmsg->buflens[offset + 2];
/*
 * Unpack a REINT_OPEN request at @offset into @r.  Same record layout
 * as create (mds_rec_create), plus the IO epoch; the name sits at
 * offset+1, optional EA at offset+2, and — when MDS_OPEN_HAS_KEY is
 * set — a crypto key buffer at offset+3.
 * NOTE(review): source truncated (error returns and final return are
 * not visible); code left byte-identical.
 */
static int mds_open_unpack(struct ptlrpc_request *req, int offset,
                           struct mds_update_record *r)
        struct mds_rec_create *rec;

        rec = lustre_swab_reqbuf(req, offset, sizeof (*rec),
                                 lustre_swab_mds_rec_create);

        r->ur_id1 = &rec->cr_id;
        r->ur_id2 = &rec->cr_replayid;
        r->ur_mode = rec->cr_mode;
        r->ur_rdev = rec->cr_rdev;
        r->ur_time = rec->cr_time;
        r->ur_flags = rec->cr_flags;
        r->ur_ioepoch = rec->cr_ioepoch;

        LASSERT_REQSWAB(req, offset + 1);
        r->ur_name = lustre_msg_string(req->rq_reqmsg, offset + 1, 0);
        if (r->ur_name == NULL)
        r->ur_namelen = req->rq_reqmsg->buflens[offset + 1];

        LASSERT_REQSWAB(req, offset + 2);
        if (req->rq_reqmsg->bufcount > offset + 2) {
                r->ur_eadata = lustre_msg_buf(req->rq_reqmsg, offset + 2, 0);
                if (r->ur_eadata == NULL)
                r->ur_eadatalen = req->rq_reqmsg->buflens[offset + 2];

        /* open-with-key carries the crypto key in one more buffer */
        if (rec->cr_flags & MDS_OPEN_HAS_KEY) {
                LASSERT(req->rq_reqmsg->bufcount > offset + 3);
                r->ur_ea2data = lustre_msg_buf(req->rq_reqmsg, offset + 3, 0);
                r->ur_ea2datalen = req->rq_reqmsg->buflens[offset + 3];
/* signature shared by all per-opcode request unpackers */
typedef int (*update_unpacker)(struct ptlrpc_request *req, int offset,
                               struct mds_update_record *r);

/* dispatch table: REINT opcode -> unpacker (NULL slots are invalid
 * opcodes, checked in mds_update_unpack()) */
static update_unpacker mds_unpackers[REINT_MAX + 1] = {
        [REINT_SETATTR] mds_setattr_unpack,
        [REINT_CREATE] mds_create_unpack,
        [REINT_LINK] mds_link_unpack,
        [REINT_UNLINK] mds_unlink_unpack,
        [REINT_RENAME] mds_rename_unpack,
        [REINT_OPEN] mds_open_unpack,
/*
 * Entry point for reint request unpacking: peek at the opcode in the
 * first buffer, validate it, then dispatch to the matching unpacker
 * from mds_unpackers[].  Also records the request uid into the record.
 * NOTE(review): source truncated (opcode byte-swap branch body, error
 * returns and final return are not visible); code left byte-identical.
 */
int mds_update_unpack(struct ptlrpc_request *req, int offset,
                      struct mds_update_record *rec)
        /* NB don't lustre_swab_reqbuf() here. We're just taking a peek and we
         * want to leave it to the specific unpacker once we've identified the
         * opcode. */
        opcodep = lustre_msg_buf(req->rq_reqmsg, offset, sizeof(*opcodep));

        if (lustre_msg_swabbed(req->rq_reqmsg))

        if (opcode > REINT_MAX ||
            mds_unpackers[opcode] == NULL) {
                CERROR("Unexpected opcode %d\n", opcode);

        rec->ur_opcode = opcode;

        rc = mds_unpackers[opcode](req, offset, rec);

        rec->ur_fsuid = req->rq_uid;
/*
 * here we take simple rule: once uid/fsuid is root, we also squash
 * the gid/fsgid, don't care setuid/setgid attributes.
 *
 * Maps root (uid/fsuid 0) in @rsd to the configured squash uid/gid and
 * strips filesystem capabilities, unless squashing is disabled or the
 * peer is the nosquash nid.  Return value is used by the caller as a
 * "was squashed" flag (exact values not visible in this view).
 */
int mds_squash_root(struct mds_obd *mds, struct mds_req_sec_desc *rsd,
        /* nothing to do when squashing is off or peer is exempt */
        if (!mds->mds_squash_uid || *peernid == mds->mds_nosquash_nid)

        /* neither uid nor fsuid is root: nothing to squash */
        if (rsd->rsd_uid && rsd->rsd_fsuid)

        CDEBUG(D_SEC, "squash req from "LPX64":"
               "(%u:%u-%u:%u/%x)=>(%u:%u-%u:%u/%x)\n", *peernid,
               rsd->rsd_uid, rsd->rsd_gid,
               rsd->rsd_fsuid, rsd->rsd_fsgid, rsd->rsd_cap,
               rsd->rsd_uid ? rsd->rsd_uid : mds->mds_squash_uid,
               rsd->rsd_uid ? rsd->rsd_gid : mds->mds_squash_gid,
               rsd->rsd_fsuid ? rsd->rsd_fsuid : mds->mds_squash_uid,
               rsd->rsd_fsuid ? rsd->rsd_fsgid : mds->mds_squash_gid,
               rsd->rsd_cap & ~CAP_FS_MASK);

        if (rsd->rsd_uid == 0) {
                rsd->rsd_uid = mds->mds_squash_uid;
                rsd->rsd_gid = mds->mds_squash_gid;

        if (rsd->rsd_fsuid == 0) {
                rsd->rsd_fsuid = mds->mds_squash_uid;
                rsd->rsd_fsgid = mds->mds_squash_gid;

        rsd->rsd_cap &= ~CAP_FS_MASK;
720 /********************************
721 * MDS uid/gid mapping handling *
722 ********************************/
/*
 * Allocate and initialise an idmap entry mapping @rmt_id -> @lcl_id.
 * Both hash-list heads start empty and the refcount starts at 1.
 * NOTE(review): OOM check, id-field assignments and return are missing
 * from this view; code left byte-identical.
 */
struct mds_idmap_entry* idmap_alloc_entry(__u32 rmt_id, __u32 lcl_id)
        struct mds_idmap_entry *e;

        OBD_ALLOC(e, sizeof(*e));

        INIT_LIST_HEAD(&e->rmt_hash);
        INIT_LIST_HEAD(&e->lcl_hash);
        atomic_set(&e->refcount, 1);
742 void idmap_free_entry(struct mds_idmap_entry *e)
744 if (!list_empty(&e->rmt_hash))
745 list_del(&e->rmt_hash);
746 if (!list_empty(&e->lcl_hash))
747 list_del(&e->lcl_hash);
748 OBD_FREE(e, sizeof(*e));
/*
 * Insert @new into both the remote-id and local-id hash tables.  If an
 * identical mapping already exists, its refcount is bumped instead
 * (and @new presumably freed by the caller — TODO confirm, return
 * values not visible in this view).  Half-matching mappings only log a
 * warning via @warn_msg.
 * NOTE(review): the list_add_tail() calls pass (head, &new->...) —
 * the kernel API is list_add_tail(new, head), so the argument order
 * looks reversed; confirm against the original source before relying
 * on this code.
 */
int idmap_insert_entry(struct list_head *rmt_hash, struct list_head *lcl_hash,
                       struct mds_idmap_entry *new, const char *warn_msg)
        struct list_head *rmt_head = &rmt_hash[MDS_IDMAP_HASHFUNC(new->rmt_id)];
        struct list_head *lcl_head = &lcl_hash[MDS_IDMAP_HASHFUNC(new->lcl_id)];
        struct mds_idmap_entry *e;

        list_for_each_entry(e, rmt_head, rmt_hash) {
                if (e->rmt_id == new->rmt_id &&
                    e->lcl_id == new->lcl_id) {
                        atomic_inc(&e->refcount);

                if (e->rmt_id == new->rmt_id && warn_msg)
                        CWARN("%s: rmt id %u already map to %u (new %u)\n",
                              warn_msg, e->rmt_id, e->lcl_id, new->lcl_id);
                if (e->lcl_id == new->lcl_id && warn_msg)
                        CWARN("%s: lcl id %u already be mapped from %u "
                              "(new %u)\n", warn_msg,
                              e->lcl_id, e->rmt_id, new->rmt_id);

        list_add_tail(rmt_head, &new->rmt_hash);
        list_add_tail(lcl_head, &new->lcl_hash);
/*
 * Drop one reference on the mapping @rmt_id -> @lcl_id; when the
 * refcount hits zero the entry is unhashed from both chains and freed.
 * NOTE(review): return statements are not visible in this view; code
 * left byte-identical.
 */
int idmap_remove_entry(struct list_head *rmt_hash, struct list_head *lcl_hash,
                       __u32 rmt_id, __u32 lcl_id)
        struct list_head *rmt_head = &rmt_hash[MDS_IDMAP_HASHFUNC(rmt_id)];
        struct mds_idmap_entry *e;

        list_for_each_entry(e, rmt_head, rmt_hash) {
                if (e->rmt_id == rmt_id && e->lcl_id == lcl_id) {
                        if (atomic_dec_and_test(&e->refcount)) {
                                list_del(&e->rmt_hash);
                                list_del(&e->lcl_hash);
                                OBD_FREE(e, sizeof(*e));
/*
 * Add a uid mapping (@rmt_uid -> @lcl_uid) and a gid mapping
 * (@rmt_gid -> @lcl_gid) to @tbl.  Entries are allocated outside the
 * lock; duplicates detected by idmap_insert_entry() are freed again.
 * NOTE(review): OOM-failure returns and the final return are not
 * visible in this view; code left byte-identical.
 */
int mds_idmap_add(struct mds_idmap_table *tbl,
                  uid_t rmt_uid, uid_t lcl_uid,
                  gid_t rmt_gid, gid_t lcl_gid)
        struct mds_idmap_entry *ue, *ge;

        ue = idmap_alloc_entry(rmt_uid, lcl_uid);
        ge = idmap_alloc_entry(rmt_gid, lcl_gid);
                idmap_free_entry(ue);

        spin_lock(&tbl->mit_lock);

        if (idmap_insert_entry(tbl->mit_idmaps[MDS_RMT_UIDMAP_IDX],
                               tbl->mit_idmaps[MDS_LCL_UIDMAP_IDX],
                               ue, "UID mapping")) {
                idmap_free_entry(ue);

        if (idmap_insert_entry(tbl->mit_idmaps[MDS_RMT_GIDMAP_IDX],
                               tbl->mit_idmaps[MDS_LCL_GIDMAP_IDX],
                               ge, "GID mapping")) {
                idmap_free_entry(ge);

        spin_unlock(&tbl->mit_lock);
/*
 * Remove (drop one reference on) the given uid and gid mappings from
 * @tbl, under the table lock.
 * NOTE(review): the id arguments to the two idmap_remove_entry()
 * calls and the final return are not visible in this view; code left
 * byte-identical.
 */
int mds_idmap_del(struct mds_idmap_table *tbl,
                  uid_t rmt_uid, uid_t lcl_uid,
                  gid_t rmt_gid, gid_t lcl_gid)
        spin_lock(&tbl->mit_lock);
        idmap_remove_entry(tbl->mit_idmaps[MDS_RMT_UIDMAP_IDX],
                           tbl->mit_idmaps[MDS_LCL_UIDMAP_IDX],
        idmap_remove_entry(tbl->mit_idmaps[MDS_RMT_GIDMAP_IDX],
                           tbl->mit_idmaps[MDS_LCL_GIDMAP_IDX],
        spin_unlock(&tbl->mit_lock);
/*
 * Look up @id in one hash table.  @reverse selects the direction:
 * forward walks the rmt_hash chain (remote -> local), reverse walks
 * the lcl_hash chain (local -> remote).  Returns the mapped id, or
 * MDS_IDMAP_NOTFOUND when no entry matches.
 * NOTE(review): the match conditions and mapped-id returns inside the
 * loops are not visible in this view; code left byte-identical.
 */
__u32 idmap_lookup_id(struct list_head *hash, int reverse, __u32 id)
        struct list_head *head = &hash[MDS_IDMAP_HASHFUNC(id)];
        struct mds_idmap_entry *e;

        list_for_each_entry(e, head, rmt_hash) {
        return MDS_IDMAP_NOTFOUND;

        list_for_each_entry(e, head, lcl_hash) {
        return MDS_IDMAP_NOTFOUND;
/*
 * Map @uid through @tbl: forward (remote -> local) when @reverse is 0,
 * local -> remote otherwise.  Returns MDS_IDMAP_NOTFOUND when @tbl is
 * absent or no mapping exists.
 * NOTE(review): the reverse/forward branch structure and return are
 * partly missing from this view; code left byte-identical.
 */
int mds_idmap_lookup_uid(struct mds_idmap_table *tbl, int reverse, uid_t uid)
        struct list_head *hash;

        return MDS_IDMAP_NOTFOUND;

        hash = tbl->mit_idmaps[MDS_RMT_UIDMAP_IDX];
        hash = tbl->mit_idmaps[MDS_LCL_UIDMAP_IDX];

        spin_lock(&tbl->mit_lock);
        uid = idmap_lookup_id(hash, reverse, uid);
        spin_unlock(&tbl->mit_lock);
/*
 * Map @gid through @tbl, same contract as mds_idmap_lookup_uid():
 * forward when @reverse is 0, reverse otherwise; MDS_IDMAP_NOTFOUND
 * when @tbl is absent or the id is unmapped.
 * NOTE(review): the reverse/forward branch structure and return are
 * partly missing from this view; code left byte-identical.
 */
int mds_idmap_lookup_gid(struct mds_idmap_table *tbl, int reverse, gid_t gid)
        struct list_head *hash;

        return MDS_IDMAP_NOTFOUND;

        hash = tbl->mit_idmaps[MDS_RMT_GIDMAP_IDX];
        hash = tbl->mit_idmaps[MDS_LCL_GIDMAP_IDX];

        spin_lock(&tbl->mit_lock);
        gid = idmap_lookup_id(hash, reverse, gid);
        spin_unlock(&tbl->mit_lock);
916 struct mds_idmap_table *mds_idmap_alloc()
918 struct mds_idmap_table *tbl;
921 OBD_ALLOC(tbl, sizeof(*tbl));
925 spin_lock_init(&tbl->mit_lock);
926 for (i = 0; i < MDS_IDMAP_N_HASHES; i++)
927 for (j = 0; j < MDS_IDMAP_HASHSIZE; j++)
928 INIT_LIST_HEAD(&tbl->mit_idmaps[i][j]);
/*
 * Free every entry reachable from the remote-id hash buckets in @list
 * (MDS_IDMAP_HASHSIZE buckets).  Freeing via the rmt_hash linkage also
 * unlinks each entry from its lcl_hash chain.
 * NOTE(review): the free call inside the loop is not visible in this
 * view; code left byte-identical.
 */
static void idmap_clear_rmt_hash(struct list_head *list)
        struct mds_idmap_entry *e;

        for (i = 0; i < MDS_IDMAP_HASHSIZE; i++) {
                while (!list_empty(&list[i])) {
                        e = list_entry(list[i].next, struct mds_idmap_entry,
/*
 * Tear down @tbl: free all entries via the remote-id hashes (which
 * also empties the local-id hashes), assert the local hashes really
 * are empty, then free the table itself.
 */
void mds_idmap_free(struct mds_idmap_table *tbl)
        spin_lock(&tbl->mit_lock);
        idmap_clear_rmt_hash(tbl->mit_idmaps[MDS_RMT_UIDMAP_IDX]);
        idmap_clear_rmt_hash(tbl->mit_idmaps[MDS_RMT_GIDMAP_IDX]);

        /* paranoid checking */
        for (i = 0; i < MDS_IDMAP_HASHSIZE; i++) {
                LASSERT(list_empty(&tbl->mit_idmaps[MDS_LCL_UIDMAP_IDX][i]));
                LASSERT(list_empty(&tbl->mit_idmaps[MDS_LCL_GIDMAP_IDX][i]));
        spin_unlock(&tbl->mit_lock);

        OBD_FREE(tbl, sizeof(*tbl));
965 /*********************************
966 * helpers doing mapping for MDS *
967 *********************************/
970 * we allow remote setuid/setgid to an "authencated" one,
971 * this policy probably change later.
/*
 * Map the remote uid/gid/fsuid/fsgid in @rsd to local ids using the
 * export's idmap table.  Any id with no mapping fails the whole call;
 * when uid==fsuid (resp. gid==fsgid) the same mapped value is reused.
 * NOTE(review): error returns, the uid/gid assignments into rsd and
 * the final return are not visible in this view; code left
 * byte-identical.
 */
int mds_req_secdesc_do_map(struct mds_export_data *med,
                           struct mds_req_sec_desc *rsd)
        struct mds_idmap_table *idmap = med->med_idmap;

        uid = mds_idmap_lookup_uid(idmap, 0, rsd->rsd_uid);
        if (uid == MDS_IDMAP_NOTFOUND) {
                CERROR("can't find map for uid %u\n", rsd->rsd_uid);

        if (rsd->rsd_uid == rsd->rsd_fsuid)

        fsuid = mds_idmap_lookup_uid(idmap, 0, rsd->rsd_fsuid);
        if (fsuid == MDS_IDMAP_NOTFOUND) {
                CERROR("can't find map for fsuid %u\n", rsd->rsd_fsuid);

        gid = mds_idmap_lookup_gid(idmap, 0, rsd->rsd_gid);
        if (gid == MDS_IDMAP_NOTFOUND) {
                CERROR("can't find map for gid %u\n", rsd->rsd_gid);

        if (rsd->rsd_gid == rsd->rsd_fsgid)

        fsgid = mds_idmap_lookup_gid(idmap, 0, rsd->rsd_fsgid);
        if (fsgid == MDS_IDMAP_NOTFOUND) {
                CERROR("can't find map for fsgid %u\n", rsd->rsd_fsgid);

        rsd->rsd_fsuid = fsuid;
        rsd->rsd_fsgid = fsgid;
/*
 * Reverse-map the uid/gid in reply @body (local -> remote) for a
 * remote client.  An unmapped id falls back to the export's nllu/nllg
 * ("nobody") id, and the mode's owner (resp. group) permission bits
 * are replaced by the "other" bits so the remote user doesn't gain
 * owner rights on an object it doesn't really own.  No-op for local
 * clients.
 * NOTE(review): the assignments of the mapped uid/gid back into body
 * are not visible in this view; code left byte-identical.
 */
void mds_body_do_reverse_map(struct mds_export_data *med,
                             struct mds_body *body)
        if (!med->med_remote)

        if (body->valid & OBD_MD_FLUID) {
                uid = mds_idmap_lookup_uid(med->med_idmap, 1, body->uid);
                if (uid == MDS_IDMAP_NOTFOUND) {
                        uid = med->med_nllu;
                        if (body->valid & OBD_MD_FLMODE) {
                                /* demote owner bits to the "other" bits */
                                body->mode = (body->mode & ~S_IRWXU) |
                                             ((body->mode & S_IRWXO) << 6);

        if (body->valid & OBD_MD_FLGID) {
                gid = mds_idmap_lookup_gid(med->med_idmap, 1, body->gid);
                if (gid == MDS_IDMAP_NOTFOUND) {
                        gid = med->med_nllg;
                        if (body->valid & OBD_MD_FLMODE) {
                                /* demote group bits to the "other" bits */
                                body->mode = (body->mode & ~S_IRWXG) |
                                             ((body->mode & S_IRWXO) << 3);
1058 * return error if can't find mapping, it's a error so should not
1059 * fall into nllu/nllg.
1061 int mds_remote_perm_do_reverse_map(struct mds_export_data *med,
1062 struct mds_remote_perm *perm)
1067 LASSERT(med->med_remote);
1069 uid = mds_idmap_lookup_uid(med->med_idmap, 1, perm->mrp_auth_uid);
1070 if (uid == MDS_IDMAP_NOTFOUND) {
1071 CERROR("no map for uid %u\n", perm->mrp_auth_uid);
1074 gid = mds_idmap_lookup_gid(med->med_idmap, 1, perm->mrp_auth_gid);
1075 if (gid == MDS_IDMAP_NOTFOUND) {
1076 CERROR("no map for uid %u\n", perm->mrp_auth_uid);
1080 perm->mrp_auth_uid = uid;
1081 perm->mrp_auth_gid = gid;
1085 /**********************
1086 * MDS ucred handling *
1087 **********************/
1089 static inline void drop_ucred_ginfo(struct lvfs_ucred *ucred)
1091 if (ucred->luc_ginfo) {
1092 put_group_info(ucred->luc_ginfo);
1093 ucred->luc_ginfo = NULL;
1097 static inline void drop_ucred_lsd(struct lvfs_ucred *ucred)
1099 if (ucred->luc_lsd) {
1100 mds_put_lsd(ucred->luc_lsd);
1101 ucred->luc_lsd = NULL;
1106 * the heart of the uid/gid handling and security checking.
1108 * root could set any group_info if we allowed setgroups, while
1109 * normal user only could 'reduce' their group members -- which
1110 * is somewhat expensive.
1112 * authenticated as mds user (using mds service credential) could
1113 * bypass all checkings.
/*
 * Build the lvfs_ucred used for filesystem operations from the
 * request's security descriptor @rsd.  Steps visible in this view:
 *   1. verify strong (GSS) authentication when present, and that the
 *      claimed uid matches the authenticated/mapped uid;
 *   2. fetch the LSD for the uid and derive the permission mask;
 *   3. enforce setuid/setgid/setgroups permissions (skipped for the
 *      special mds service user);
 *   4. apply root squashing and strip fs capabilities if squashed;
 *   5. copy ids/capabilities into @ucred and construct the
 *      supplementary group list (root may install any groups, other
 *      users may only reduce the LSD's group set).
 * NOTE(review): this function is heavily truncated in this view —
 * error returns, several closing braces and some assignments are
 * missing.  Code left byte-identical; only comments added.
 */
int mds_init_ucred(struct lvfs_ucred *ucred,
                   struct ptlrpc_request *req,
                   struct mds_req_sec_desc *rsd)
        struct mds_obd *mds = &req->rq_export->exp_obd->u.mds;
        struct mds_export_data *med = &req->rq_export->u.eu_mds_data;
        struct lustre_sec_desc *lsd;
        ptl_nid_t peernid = req->rq_peer.peer_id.nid;
        struct group_info *gnew;
        unsigned int setuid, setgid, strong_sec, root_squashed;

        LASSERT(rsd->rsd_ngroups <= LUSTRE_MAX_GROUPS);

        /* strong_sec: GSS flavor with integrity or privacy service */
        if (SEC_FLAVOR_MAJOR(req->rq_req_secflvr) == PTLRPCS_FLVR_MAJOR_GSS &&
            (SEC_FLAVOR_SVC(req->rq_req_secflvr) == PTLRPCS_SVC_AUTH ||
             SEC_FLAVOR_SVC(req->rq_req_secflvr) == PTLRPCS_SVC_PRIV))

        /* cross-realm clients must always use strong security */
        LASSERT(!(req->rq_remote_realm && !strong_sec));

        if (strong_sec && req->rq_auth_uid == -1) {
                CWARN("user not authenticated, deny access\n");

        /* sanity check: if we use strong authentication, we expect the
         * uid which client claimed is true.
         * not apply to special mds user. */
        if (!req->rq_auth_usr_mds && strong_sec) {
                if (!med->med_remote) {
                        if (req->rq_auth_uid != rsd->rsd_uid) {
                                CERROR("local client "LPU64": auth uid %u "
                                       "while client claim %u:%u/%u:%u\n",
                                       peernid, req->rq_auth_uid,
                                       rsd->rsd_uid, rsd->rsd_gid,
                                       rsd->rsd_fsuid, rsd->rsd_fsgid);
                        /* remote client: the authenticated uid must have
                         * been mapped, and the claim must match the map */
                        if (req->rq_mapped_uid == MDS_IDMAP_NOTFOUND) {
                                CWARN("no mapping found, deny\n");
                        if (mds_req_secdesc_do_map(med, rsd))
                        if (req->rq_mapped_uid != rsd->rsd_uid) {
                                CERROR("remote client "LPU64": auth uid %u "
                                       "while client claim %u:%u/%u:%u\n",
                                       peernid, req->rq_auth_uid,
                                       rsd->rsd_uid, rsd->rsd_gid,
                                       rsd->rsd_fsuid, rsd->rsd_fsgid);

        /* now LSD come into play */
        ucred->luc_ginfo = NULL;
        ucred->luc_lsd = lsd = mds_get_lsd(rsd->rsd_uid);
                CERROR("Deny access without LSD: uid %d\n", rsd->rsd_uid);

        lsd_perms = mds_lsd_get_perms(lsd, med->med_remote, 0, peernid);

        /* check setuid/setgid permissions.
         * again not apply to special mds user. */
        if (!req->rq_auth_usr_mds) {
                /* find out the setuid/setgid attempt */
                setuid = (rsd->rsd_uid != rsd->rsd_fsuid);
                setgid = (rsd->rsd_gid != rsd->rsd_fsgid ||
                          rsd->rsd_gid != lsd->lsd_gid);

                /* check permission of setuid */
                if (setuid && !(lsd_perms & LSD_PERM_SETUID)) {
                        CWARN("mds blocked setuid attempt (%u -> %u) "
                              "from "LPU64"\n", rsd->rsd_uid, rsd->rsd_fsuid,

                /* check permission of setgid */
                if (setgid && !(lsd_perms & LSD_PERM_SETGID)) {
                        CWARN("mds blocked setgid attempt (%u:%u/%u:%u -> %u) "
                              "from "LPU64"\n", rsd->rsd_uid, rsd->rsd_gid,
                              rsd->rsd_fsuid, rsd->rsd_fsgid, lsd->lsd_gid,

        root_squashed = mds_squash_root(mds, rsd, &peernid);

        /* remove privilege for non-root user */
                rsd->rsd_cap &= ~CAP_FS_MASK;

        /* by now every fields other than groups in rsd have been granted */
        ucred->luc_nid = peernid;
        ucred->luc_uid = rsd->rsd_uid;
        ucred->luc_gid = rsd->rsd_gid;
        ucred->luc_fsuid = rsd->rsd_fsuid;
        ucred->luc_fsgid = rsd->rsd_fsgid;
        ucred->luc_cap = rsd->rsd_cap;

        /* don't use any supplementary group if we squashed root.
         * XXX The exact behavior of root_squash is not defined, we just
         * keep the reminder here */

        /* install groups from LSD */
        if (lsd->lsd_ginfo) {
                ucred->luc_ginfo = lsd->lsd_ginfo;
                get_group_info(ucred->luc_ginfo);

        /* everything is done if we don't allow setgroups, or it is
         * from remote client (which implies forced to be no-setgroups).
         *
         * Note: remote user's supplementary groups sent along the request
         * (if any) are all ignored, but we make the mapped local user's
         * supplementary groups take effect. */
        if (med->med_remote || !(lsd_perms & LSD_PERM_SETGRP))

        /* root could set any groups as he want (if allowed), normal
         * users only could reduce his group array. */
        if (ucred->luc_uid == 0) {
                drop_ucred_ginfo(ucred);

                if (rsd->rsd_ngroups == 0)

                gnew = groups_alloc(rsd->rsd_ngroups);
                        CERROR("out of memory\n");
                        drop_ucred_lsd(ucred);
                groups_from_buffer(gnew, rsd->rsd_groups);
                groups_sort(gnew); /* don't rely on client doing this */

                ucred->luc_ginfo = gnew;
                /* non-root: intersect the requested groups with the LSD's */
                __u32 set = 0, cur = 0;
                struct group_info *ginfo = ucred->luc_ginfo;

                /* Note: freeing a group_info count on 'nblocks' instead of
                 * 'ngroups', thus we can safely alloc enough buffer and reduce
                 * and ngroups number later. */
                gnew = groups_alloc(rsd->rsd_ngroups);
                        CERROR("out of memory\n");
                        drop_ucred_ginfo(ucred);
                        drop_ucred_lsd(ucred);

                while (cur < rsd->rsd_ngroups) {
                        if (groups_search(ginfo, rsd->rsd_groups[cur])) {
                                GROUP_AT(gnew, set) = rsd->rsd_groups[cur];
                gnew->ngroups = set;

                put_group_info(ucred->luc_ginfo);
                ucred->luc_ginfo = gnew;
/*
 * Tear down a ucred built by mds_init_ucred(): drop the group_info
 * and LSD references.  (Both helpers are idempotent.)
 */
void mds_exit_ucred(struct lvfs_ucred *ucred)
        drop_ucred_ginfo(ucred);
        drop_ucred_lsd(ucred);