/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * Copyright (c) 2003 Cluster File Systems, Inc.
 *
 * This file is part of Lustre, http://www.lustre.org.
 *
 * Lustre is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * Lustre is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with Lustre; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#define DEBUG_SUBSYSTEM S_MDS

#include <linux/config.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/stat.h>
#include <linux/errno.h>
#include <linux/version.h>
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
# include <linux/locks.h>       // for wait_on_buffer
#else
# include <linux/buffer_head.h> // for wait_on_buffer
#endif
#include <linux/unistd.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <linux/stat.h>
#include <asm/uaccess.h>
#include <linux/slab.h>
#include <asm/segment.h>
#include <linux/random.h>

#include <linux/obd_support.h>
#include <linux/lustre_lib.h>
#include <linux/lustre_sec.h>
#include <linux/lustre_ucache.h>
#include <linux/lustre_gs.h>
#include <linux/lustre_fsfilt.h>
#include "mds_internal.h"
57 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,4)
58 struct group_info *groups_alloc(int ngroups)
60 struct group_info *ginfo;
62 LASSERT(ngroups <= NGROUPS_SMALL);
64 OBD_ALLOC(ginfo, sizeof(*ginfo) + 1 * sizeof(gid_t *));
67 ginfo->ngroups = ngroups;
69 ginfo->blocks[0] = ginfo->small_block;
70 atomic_set(&ginfo->usage, 1);
75 void groups_free(struct group_info *ginfo)
77 LASSERT(ginfo->ngroups <= NGROUPS_SMALL);
78 LASSERT(ginfo->nblocks == 1);
79 LASSERT(ginfo->blocks[0] == ginfo->small_block);
81 OBD_FREE(ginfo, sizeof(*ginfo) + 1 * sizeof(gid_t *));
84 /* for 2.4 the group number is small, so simply search the
87 int groups_search(struct group_info *ginfo, gid_t grp)
94 for (i = 0; i < ginfo->ngroups; i++)
95 if (GROUP_AT(ginfo, i) == grp)
102 void groups_sort(struct group_info *ginfo)
104 int base, max, stride;
105 int gidsetsize = ginfo->ngroups;
107 for (stride = 1; stride < gidsetsize; stride = 3 * stride + 1)
112 max = gidsetsize - stride;
113 for (base = 0; base < max; base++) {
115 int right = left + stride;
116 gid_t tmp = GROUP_AT(ginfo, right);
118 while (left >= 0 && GROUP_AT(ginfo, left) > tmp) {
119 GROUP_AT(ginfo, right) =
120 GROUP_AT(ginfo, left);
124 GROUP_AT(ginfo, right) = tmp;
130 int groups_search(struct group_info *ginfo, gid_t grp)
138 right = ginfo->ngroups;
139 while (left < right) {
140 int mid = (left + right) / 2;
141 int cmp = grp - GROUP_AT(ginfo, mid);
153 void groups_from_buffer(struct group_info *ginfo, __u32 *gids)
155 int i, ngroups = ginfo->ngroups;
157 for (i = 0; i < ginfo->nblocks; i++) {
158 int count = min(NGROUPS_PER_BLOCK, ngroups);
160 memcpy(ginfo->blocks[i], gids, count * sizeof(__u32));
161 gids += NGROUPS_PER_BLOCK;
166 void mds_pack_dentry2id(struct obd_device *obd,
167 struct lustre_id *id,
168 struct dentry *dentry,
171 id_ino(id) = dentry->d_inum;
172 id_gen(id) = dentry->d_generation;
175 id_fid(id) = dentry->d_fid;
176 id_group(id) = dentry->d_mdsnum;
180 void mds_pack_dentry2body(struct obd_device *obd,
182 struct dentry *dentry,
185 b->valid |= OBD_MD_FLID | OBD_MD_FLGENER |
189 b->valid |= OBD_MD_FID;
191 mds_pack_dentry2id(obd, &b->id1, dentry, fid);
194 int mds_pack_inode2id(struct obd_device *obd,
195 struct lustre_id *id,
203 /* we have to avoid deadlock. */
204 if (!down_trylock(&inode->i_sem)) {
205 rc = mds_read_inode_sid(obd, inode, id);
208 rc = mds_read_inode_sid(obd, inode, id);
213 id_ino(id) = inode->i_ino;
214 id_gen(id) = inode->i_generation;
215 id_type(id) = (S_IFMT & inode->i_mode);
220 void mds_inode2id(struct obd_device *obd, struct lustre_id *id,
221 struct inode *inode, __u64 fid)
223 struct mds_obd *mds = &obd->u.mds;
226 LASSERT(inode != NULL);
231 id_ino(id) = inode->i_ino;
232 id_group(id) = mds->mds_num;
233 id_gen(id) = inode->i_generation;
234 id_type(id) = (S_IFMT & inode->i_mode);
239 int mds_pack_gskey(struct obd_device *obd, struct lustre_msg *repmsg,
240 int *offset, struct mds_body *body, struct inode *inode)
242 struct mds_obd *mds = &obd->u.mds;
243 struct crypto_key_md *md_key;
244 struct crypto_key *ckey;
245 __u32 buflen, *sizep;
250 if ((mds->mds_crypto_type != MKS_TYPE &&
251 mds->mds_crypto_type != GKS_TYPE))
254 sizep = lustre_msg_buf(repmsg, (*offset)++, 4);
256 CERROR("can't locate returned ckey size buf\n");
259 *sizep = cpu_to_le32(sizeof(*ckey));
261 OBD_ALLOC(md_key, sizeof(*md_key));
263 buflen = repmsg->buflens[*offset];
264 buf = lustre_msg_buf(repmsg, (*offset)++, buflen);
266 size = fsfilt_get_md(obd, inode, md_key, sizeof(*md_key),
269 CERROR("Can not get gskey from MDS ino %lu rc %d\n",
271 GOTO(out, rc = size);
273 if (le32_to_cpu(md_key->md_magic) != MD_KEY_MAGIC) {
274 CDEBUG(D_INFO, "given match %x != magic %x\n",
275 md_key->md_magic, MD_KEY_MAGIC);
279 CDEBUG(D_INFO, "get key %s mac %s for ino %lu size %d \n",
280 md_key->md_ck.ck_key, md_key->md_ck.ck_mac, inode->i_ino, size);
281 ckey=(struct crypto_key*)buf;
283 memcpy(ckey, &md_key->md_ck, sizeof(*ckey));
284 body->valid |= OBD_MD_FLKEY;
286 OBD_FREE(md_key, sizeof(*md_key));
290 static int mds_get_gskey(struct inode *inode, struct crypto_key *ckey)
293 /*tmp create gs key here*/
294 get_random_bytes(ckey->ck_key, KEY_SIZE);
295 ckey->ck_type = MKS_TYPE;
299 int mds_set_gskey(struct obd_device *obd, void *handle,
300 struct inode *inode, void *key, int key_len,
303 struct crypto_key_md *md_key = NULL;
304 struct crypto_key *ckey = (struct crypto_key *)key;
305 struct mds_obd *mds = &obd->u.mds;
309 if ((mds->mds_crypto_type != MKS_TYPE &&
310 mds->mds_crypto_type != GKS_TYPE) || key_len == 0)
313 OBD_ALLOC(md_key, sizeof(*md_key));
314 LASSERT(ckey != NULL);
315 if (mds->mds_crypto_type == MKS_TYPE) {
316 mds_get_gskey(inode, ckey);
318 rc = fsfilt_get_md(obd, inode, md_key, sizeof(*md_key),
322 LASSERT(le32_to_cpu(md_key->md_magic) == MD_KEY_MAGIC ||
323 md_key->md_magic == 0);
324 if (le32_to_cpu(md_key->md_magic) == MD_KEY_MAGIC) {
325 CDEBUG(D_INFO, "reset key %s mac %s", md_key->md_ck.ck_mac,
326 md_key->md_ck.ck_key);
329 md_key->md_magic = cpu_to_le32(MD_KEY_MAGIC);
330 if (valid & ATTR_MAC) {
331 memcpy(md_key->md_ck.ck_mac, ckey->ck_mac, MAC_SIZE);
332 CDEBUG(D_INFO, "set mac %s for ino %lu \n",
333 md_key->md_ck.ck_mac, inode->i_ino);
335 if (valid & ATTR_KEY) {
336 memcpy(md_key->md_ck.ck_key, ckey->ck_key, KEY_SIZE);
337 CDEBUG(D_INFO, "set key %s for ino %lu \n",
338 md_key->md_ck.ck_key, inode->i_ino);
340 rc = fsfilt_set_md(obd, inode, handle, md_key,
341 sizeof(*md_key), EA_KEY);
344 OBD_FREE(md_key, sizeof(*md_key));
348 int mds_set_crypto_type(struct obd_device *obd, void *val, __u32 vallen)
350 struct mds_obd *mds = &obd->u.mds;
353 if (vallen >= strlen("mks") &&
354 memcmp(val, "mks", vallen) == 0) {
355 mds->mds_crypto_type = MKS_TYPE;
357 if (vallen >= strlen("gks") &&
358 memcmp(val, "gks", vallen) == 0) {
359 mds->mds_crypto_type = GKS_TYPE;
362 CDEBUG(D_IOCTL, "invalid key\n");
366 /* Note that we can copy all of the fields, just some will not be "valid" */
367 void mds_pack_inode2body(struct obd_device *obd, struct mds_body *b,
368 struct inode *inode, int fid)
370 b->valid |= OBD_MD_FLID | OBD_MD_FLCTIME | OBD_MD_FLUID |
371 OBD_MD_FLGID | OBD_MD_FLFLAGS | OBD_MD_FLTYPE |
372 OBD_MD_FLMODE | OBD_MD_FLNLINK | OBD_MD_FLGENER |
373 OBD_MD_FLATIME | OBD_MD_FLMTIME; /* bug 2020 */
375 if (!S_ISREG(inode->i_mode)) {
376 b->valid |= OBD_MD_FLSIZE | OBD_MD_FLBLOCKS |
377 OBD_MD_FLATIME | OBD_MD_FLMTIME |
380 b->atime = LTIME_S(inode->i_atime);
381 b->mtime = LTIME_S(inode->i_mtime);
382 b->ctime = LTIME_S(inode->i_ctime);
383 b->mode = inode->i_mode;
384 b->size = inode->i_size;
385 b->blocks = inode->i_blocks;
386 b->uid = inode->i_uid;
387 b->gid = inode->i_gid;
388 b->flags = inode->i_flags;
389 b->rdev = inode->i_rdev;
391 /* Return the correct link count for orphan inodes */
392 if (mds_inode_is_orphan(inode)) {
394 } else if (S_ISDIR(inode->i_mode)) {
397 b->nlink = inode->i_nlink;
401 b->valid |= OBD_MD_FID;
403 mds_pack_inode2id(obd, &b->id1, inode, fid);
407 static int mds_setattr_unpack(struct ptlrpc_request *req, int offset,
408 struct mds_update_record *r)
410 struct iattr *attr = &r->ur_iattr;
411 struct mds_rec_setattr *rec;
414 rec = lustre_swab_reqbuf(req, offset, sizeof(*rec),
415 lustre_swab_mds_rec_setattr);
419 r->ur_id1 = &rec->sa_id;
420 attr->ia_valid = rec->sa_valid;
421 attr->ia_mode = rec->sa_mode;
422 attr->ia_uid = rec->sa_uid;
423 attr->ia_gid = rec->sa_gid;
424 attr->ia_size = rec->sa_size;
425 LTIME_S(attr->ia_atime) = rec->sa_atime;
426 LTIME_S(attr->ia_mtime) = rec->sa_mtime;
427 LTIME_S(attr->ia_ctime) = rec->sa_ctime;
428 attr->ia_attr_flags = rec->sa_attr_flags;
430 LASSERT_REQSWAB (req, offset + 1);
431 if (req->rq_reqmsg->bufcount > offset + 1) {
432 r->ur_eadata = lustre_msg_buf (req->rq_reqmsg,
434 if (r->ur_eadata == NULL)
436 r->ur_eadatalen = req->rq_reqmsg->buflens[offset + 1];
439 if (req->rq_reqmsg->bufcount > offset + 2) {
440 r->ur_ea2data = lustre_msg_buf(req->rq_reqmsg, offset + 2, 0);
441 if (r->ur_ea2data == NULL)
444 r->ur_ea2datalen = req->rq_reqmsg->buflens[offset + 2];
447 if (req->rq_reqmsg->bufcount > offset + 3) {
448 r->ur_ea3data = lustre_msg_buf(req->rq_reqmsg, offset + 3, 0);
449 if (r->ur_ea3data == NULL)
452 r->ur_ea3datalen = req->rq_reqmsg->buflens[offset + 3];
458 static int mds_create_unpack(struct ptlrpc_request *req, int offset,
459 struct mds_update_record *r)
461 struct mds_rec_create *rec;
464 rec = lustre_swab_reqbuf (req, offset, sizeof (*rec),
465 lustre_swab_mds_rec_create);
469 r->ur_id1 = &rec->cr_id;
470 r->ur_id2 = &rec->cr_replayid;
471 r->ur_mode = rec->cr_mode;
472 r->ur_rdev = rec->cr_rdev;
473 r->ur_time = rec->cr_time;
474 r->ur_flags = rec->cr_flags;
476 LASSERT_REQSWAB (req, offset + 1);
477 r->ur_name = lustre_msg_string (req->rq_reqmsg, offset + 1, 0);
478 if (r->ur_name == NULL)
480 r->ur_namelen = req->rq_reqmsg->buflens[offset + 1];
482 LASSERT_REQSWAB (req, offset + 2);
483 if (req->rq_reqmsg->bufcount > offset + 2) {
484 if (S_ISLNK(r->ur_mode)) {
485 r->ur_tgt = lustre_msg_string(req->rq_reqmsg,
487 if (r->ur_tgt == NULL)
489 r->ur_tgtlen = req->rq_reqmsg->buflens[offset + 2];
490 } else if (S_ISDIR(r->ur_mode) ) {
491 /* Stripe info for mkdir - just a 16bit integer */
492 if (req->rq_reqmsg->buflens[offset + 2] != 2) {
493 CERROR("mkdir stripe info does not match "
494 "expected size %d vs 2\n",
495 req->rq_reqmsg->buflens[offset + 2]);
498 r->ur_eadata = lustre_swab_buf (req->rq_reqmsg,
499 offset + 2, 2, __swab16s);
500 r->ur_eadatalen = req->rq_reqmsg->buflens[offset + 2];
501 } else if (S_ISREG(r->ur_mode)){
502 r->ur_eadata = lustre_msg_buf (req->rq_reqmsg,
504 r->ur_eadatalen = req->rq_reqmsg->buflens[offset + 2];
506 /* Hm, no other users so far? */
513 static int mds_link_unpack(struct ptlrpc_request *req, int offset,
514 struct mds_update_record *r)
516 struct mds_rec_link *rec;
519 rec = lustre_swab_reqbuf (req, offset, sizeof (*rec),
520 lustre_swab_mds_rec_link);
524 r->ur_id1 = &rec->lk_id1;
525 r->ur_id2 = &rec->lk_id2;
526 r->ur_time = rec->lk_time;
528 LASSERT_REQSWAB (req, offset + 1);
529 r->ur_name = lustre_msg_string (req->rq_reqmsg, offset + 1, 0);
530 if (r->ur_name == NULL)
532 r->ur_namelen = req->rq_reqmsg->buflens[offset + 1];
536 static int mds_unlink_unpack(struct ptlrpc_request *req, int offset,
537 struct mds_update_record *r)
539 struct mds_rec_unlink *rec;
542 rec = lustre_swab_reqbuf (req, offset, sizeof (*rec),
543 lustre_swab_mds_rec_unlink);
547 r->ur_mode = rec->ul_mode;
548 r->ur_id1 = &rec->ul_id1;
549 r->ur_id2 = &rec->ul_id2;
550 r->ur_time = rec->ul_time;
552 LASSERT_REQSWAB (req, offset + 1);
553 r->ur_name = lustre_msg_string(req->rq_reqmsg, offset + 1, 0);
554 if (r->ur_name == NULL)
556 r->ur_namelen = req->rq_reqmsg->buflens[offset + 1];
560 static int mds_rename_unpack(struct ptlrpc_request *req, int offset,
561 struct mds_update_record *r)
563 struct mds_rec_rename *rec;
566 rec = lustre_swab_reqbuf (req, offset, sizeof (*rec),
567 lustre_swab_mds_rec_rename);
571 r->ur_id1 = &rec->rn_id1;
572 r->ur_id2 = &rec->rn_id2;
573 r->ur_time = rec->rn_time;
575 LASSERT_REQSWAB (req, offset + 1);
576 r->ur_name = lustre_msg_string(req->rq_reqmsg, offset + 1, 0);
577 if (r->ur_name == NULL)
579 r->ur_namelen = req->rq_reqmsg->buflens[offset + 1];
581 LASSERT_REQSWAB (req, offset + 2);
582 r->ur_tgt = lustre_msg_string(req->rq_reqmsg, offset + 2, 0);
583 if (r->ur_tgt == NULL)
585 r->ur_tgtlen = req->rq_reqmsg->buflens[offset + 2];
589 static int mds_open_unpack(struct ptlrpc_request *req, int offset,
590 struct mds_update_record *r)
592 struct mds_rec_create *rec;
595 rec = lustre_swab_reqbuf (req, offset, sizeof (*rec),
596 lustre_swab_mds_rec_create);
600 r->ur_id1 = &rec->cr_id;
601 r->ur_id2 = &rec->cr_replayid;
602 r->ur_mode = rec->cr_mode;
603 r->ur_rdev = rec->cr_rdev;
604 r->ur_time = rec->cr_time;
605 r->ur_flags = rec->cr_flags;
607 LASSERT_REQSWAB (req, offset + 1);
608 r->ur_name = lustre_msg_string (req->rq_reqmsg, offset + 1, 0);
609 if (r->ur_name == NULL)
611 r->ur_namelen = req->rq_reqmsg->buflens[offset + 1];
613 LASSERT_REQSWAB (req, offset + 2);
615 if (req->rq_reqmsg->bufcount > offset + 2) {
616 r->ur_eadata = lustre_msg_buf(req->rq_reqmsg, offset + 2, 0);
617 if (r->ur_eadata == NULL)
619 r->ur_eadatalen = req->rq_reqmsg->buflens[offset + 2];
622 if (rec->cr_flags & MDS_OPEN_HAS_KEY) {
623 LASSERT(req->rq_reqmsg->bufcount > offset + 3);
624 r->ur_ea2data = lustre_msg_buf(req->rq_reqmsg, offset + 3, 0);
625 r->ur_ea2datalen = req->rq_reqmsg->buflens[offset + 3];
630 typedef int (*update_unpacker)(struct ptlrpc_request *req, int offset,
631 struct mds_update_record *r);
633 static update_unpacker mds_unpackers[REINT_MAX + 1] = {
634 [REINT_SETATTR] mds_setattr_unpack,
635 [REINT_CREATE] mds_create_unpack,
636 [REINT_LINK] mds_link_unpack,
637 [REINT_UNLINK] mds_unlink_unpack,
638 [REINT_RENAME] mds_rename_unpack,
639 [REINT_OPEN] mds_open_unpack,
642 int mds_update_unpack(struct ptlrpc_request *req, int offset,
643 struct mds_update_record *rec)
651 * NB don't lustre_swab_reqbuf() here. We're just taking a peek and we
652 * want to leave it to the specific unpacker once we've identified the
655 opcodep = lustre_msg_buf (req->rq_reqmsg, offset, sizeof(*opcodep));
660 if (lustre_msg_swabbed (req->rq_reqmsg))
663 if (opcode > REINT_MAX ||
664 mds_unpackers[opcode] == NULL) {
665 CERROR ("Unexpected opcode %d\n", opcode);
671 rec->ur_opcode = opcode;
673 rc = mds_unpackers[opcode](req, offset, rec);
676 rec->ur_fsuid = req->rq_uid;
682 * here we take simple rule: once uid/fsuid is root, we also squash
683 * the gid/fsgid, don't care setuid/setgid attributes.
686 int mds_squash_root(struct mds_obd *mds, struct mds_req_sec_desc *rsd,
689 if (!mds->mds_squash_uid || *peernid == mds->mds_nosquash_nid)
692 if (rsd->rsd_uid && rsd->rsd_fsuid)
695 CDEBUG(D_SEC, "squash req from "LPX64":"
696 "(%u:%u-%u:%u/%x)=>(%u:%u-%u:%u/%x)\n", *peernid,
697 rsd->rsd_uid, rsd->rsd_gid,
698 rsd->rsd_fsuid, rsd->rsd_fsgid, rsd->rsd_cap,
699 rsd->rsd_uid ? rsd->rsd_uid : mds->mds_squash_uid,
700 rsd->rsd_uid ? rsd->rsd_gid : mds->mds_squash_gid,
701 rsd->rsd_fsuid ? rsd->rsd_fsuid : mds->mds_squash_uid,
702 rsd->rsd_fsuid ? rsd->rsd_fsgid : mds->mds_squash_gid,
703 rsd->rsd_cap & ~CAP_FS_MASK);
705 if (rsd->rsd_uid == 0) {
706 rsd->rsd_uid = mds->mds_squash_uid;
707 rsd->rsd_gid = mds->mds_squash_gid;
709 if (rsd->rsd_fsuid == 0) {
710 rsd->rsd_fsuid = mds->mds_squash_uid;
711 rsd->rsd_fsgid = mds->mds_squash_gid;
713 rsd->rsd_cap &= ~CAP_FS_MASK;
/********************************
 * MDS uid/gid mapping handling *
 ********************************/
723 struct mds_idmap_entry* idmap_alloc_entry(__u32 rmt_id, __u32 lcl_id)
725 struct mds_idmap_entry *e;
727 OBD_ALLOC(e, sizeof(*e));
731 INIT_LIST_HEAD(&e->rmt_hash);
732 INIT_LIST_HEAD(&e->lcl_hash);
733 atomic_set(&e->refcount, 1);
740 void idmap_free_entry(struct mds_idmap_entry *e)
742 if (!list_empty(&e->rmt_hash))
743 list_del(&e->rmt_hash);
744 if (!list_empty(&e->lcl_hash))
745 list_del(&e->lcl_hash);
746 OBD_FREE(e, sizeof(*e));
750 int idmap_insert_entry(struct list_head *rmt_hash, struct list_head *lcl_hash,
751 struct mds_idmap_entry *new, const char *warn_msg)
753 struct list_head *rmt_head = &rmt_hash[MDS_IDMAP_HASHFUNC(new->rmt_id)];
754 struct list_head *lcl_head = &lcl_hash[MDS_IDMAP_HASHFUNC(new->lcl_id)];
755 struct mds_idmap_entry *e;
757 list_for_each_entry(e, rmt_head, rmt_hash) {
758 if (e->rmt_id == new->rmt_id &&
759 e->lcl_id == new->lcl_id) {
760 atomic_inc(&e->refcount);
763 if (e->rmt_id == new->rmt_id && warn_msg)
764 CWARN("%s: rmt id %u already map to %u (new %u)\n",
765 warn_msg, e->rmt_id, e->lcl_id, new->lcl_id);
766 if (e->lcl_id == new->lcl_id && warn_msg)
767 CWARN("%s: lcl id %u already be mapped from %u "
768 "(new %u)\n", warn_msg,
769 e->lcl_id, e->rmt_id, new->rmt_id);
772 list_add_tail(rmt_head, &new->rmt_hash);
773 list_add_tail(lcl_head, &new->lcl_hash);
778 int idmap_remove_entry(struct list_head *rmt_hash, struct list_head *lcl_hash,
779 __u32 rmt_id, __u32 lcl_id)
781 struct list_head *rmt_head = &rmt_hash[MDS_IDMAP_HASHFUNC(rmt_id)];
782 struct mds_idmap_entry *e;
784 list_for_each_entry(e, rmt_head, rmt_hash) {
785 if (e->rmt_id == rmt_id && e->lcl_id == lcl_id) {
786 if (atomic_dec_and_test(&e->refcount)) {
787 list_del(&e->rmt_hash);
788 list_del(&e->lcl_hash);
789 OBD_FREE(e, sizeof(*e));
798 int mds_idmap_add(struct mds_idmap_table *tbl,
799 uid_t rmt_uid, uid_t lcl_uid,
800 gid_t rmt_gid, gid_t lcl_gid)
802 struct mds_idmap_entry *ue, *ge;
808 ue = idmap_alloc_entry(rmt_uid, lcl_uid);
811 ge = idmap_alloc_entry(rmt_gid, lcl_gid);
813 idmap_free_entry(ue);
817 spin_lock(&tbl->mit_lock);
819 if (idmap_insert_entry(tbl->mit_idmaps[MDS_RMT_UIDMAP_IDX],
820 tbl->mit_idmaps[MDS_LCL_UIDMAP_IDX],
821 ue, "UID mapping")) {
822 idmap_free_entry(ue);
825 if (idmap_insert_entry(tbl->mit_idmaps[MDS_RMT_GIDMAP_IDX],
826 tbl->mit_idmaps[MDS_LCL_GIDMAP_IDX],
827 ge, "GID mapping")) {
828 idmap_free_entry(ge);
831 spin_unlock(&tbl->mit_lock);
835 int mds_idmap_del(struct mds_idmap_table *tbl,
836 uid_t rmt_uid, uid_t lcl_uid,
837 gid_t rmt_gid, gid_t lcl_gid)
844 spin_lock(&tbl->mit_lock);
845 idmap_remove_entry(tbl->mit_idmaps[MDS_RMT_UIDMAP_IDX],
846 tbl->mit_idmaps[MDS_LCL_UIDMAP_IDX],
848 idmap_remove_entry(tbl->mit_idmaps[MDS_RMT_GIDMAP_IDX],
849 tbl->mit_idmaps[MDS_LCL_GIDMAP_IDX],
851 spin_unlock(&tbl->mit_lock);
856 __u32 idmap_lookup_id(struct list_head *hash, int reverse, __u32 id)
858 struct list_head *head = &hash[MDS_IDMAP_HASHFUNC(id)];
859 struct mds_idmap_entry *e;
862 list_for_each_entry(e, head, rmt_hash) {
866 return MDS_IDMAP_NOTFOUND;
868 list_for_each_entry(e, head, lcl_hash) {
872 return MDS_IDMAP_NOTFOUND;
876 int mds_idmap_lookup_uid(struct mds_idmap_table *tbl, int reverse, uid_t uid)
878 struct list_head *hash;
881 return MDS_IDMAP_NOTFOUND;
884 hash = tbl->mit_idmaps[MDS_RMT_UIDMAP_IDX];
886 hash = tbl->mit_idmaps[MDS_LCL_UIDMAP_IDX];
888 spin_lock(&tbl->mit_lock);
889 uid = idmap_lookup_id(hash, reverse, uid);
890 spin_unlock(&tbl->mit_lock);
895 int mds_idmap_lookup_gid(struct mds_idmap_table *tbl, int reverse, gid_t gid)
897 struct list_head *hash;
900 return MDS_IDMAP_NOTFOUND;
903 hash = tbl->mit_idmaps[MDS_RMT_GIDMAP_IDX];
905 hash = tbl->mit_idmaps[MDS_LCL_GIDMAP_IDX];
907 spin_lock(&tbl->mit_lock);
908 gid = idmap_lookup_id(hash, reverse, gid);
909 spin_unlock(&tbl->mit_lock);
914 struct mds_idmap_table *mds_idmap_alloc()
916 struct mds_idmap_table *tbl;
919 OBD_ALLOC(tbl, sizeof(*tbl));
923 spin_lock_init(&tbl->mit_lock);
924 for (i = 0; i < MDS_IDMAP_N_HASHES; i++)
925 for (j = 0; j < MDS_IDMAP_HASHSIZE; j++)
926 INIT_LIST_HEAD(&tbl->mit_idmaps[i][j]);
931 static void idmap_clear_rmt_hash(struct list_head *list)
933 struct mds_idmap_entry *e;
936 for (i = 0; i < MDS_IDMAP_HASHSIZE; i++) {
937 while (!list_empty(&list[i])) {
938 e = list_entry(list[i].next, struct mds_idmap_entry,
945 void mds_idmap_free(struct mds_idmap_table *tbl)
949 spin_lock(&tbl->mit_lock);
950 idmap_clear_rmt_hash(tbl->mit_idmaps[MDS_RMT_UIDMAP_IDX]);
951 idmap_clear_rmt_hash(tbl->mit_idmaps[MDS_RMT_GIDMAP_IDX]);
953 /* paranoid checking */
954 for (i = 0; i < MDS_IDMAP_HASHSIZE; i++) {
955 LASSERT(list_empty(&tbl->mit_idmaps[MDS_LCL_UIDMAP_IDX][i]));
956 LASSERT(list_empty(&tbl->mit_idmaps[MDS_LCL_GIDMAP_IDX][i]));
958 spin_unlock(&tbl->mit_lock);
960 OBD_FREE(tbl, sizeof(*tbl));
/*********************************
 * helpers doing mapping for MDS *
 *********************************/
968 * we allow remote setuid/setgid to an "authencated" one,
969 * this policy probably change later.
972 int mds_req_secdesc_do_map(struct mds_export_data *med,
973 struct mds_req_sec_desc *rsd)
975 struct mds_idmap_table *idmap = med->med_idmap;
979 uid = mds_idmap_lookup_uid(idmap, 0, rsd->rsd_uid);
980 if (uid == MDS_IDMAP_NOTFOUND) {
981 CERROR("can't find map for uid %u\n", rsd->rsd_uid);
985 if (rsd->rsd_uid == rsd->rsd_fsuid)
988 fsuid = mds_idmap_lookup_uid(idmap, 0, rsd->rsd_fsuid);
989 if (fsuid == MDS_IDMAP_NOTFOUND) {
990 CERROR("can't find map for fsuid %u\n", rsd->rsd_fsuid);
995 gid = mds_idmap_lookup_gid(idmap, 0, rsd->rsd_gid);
996 if (gid == MDS_IDMAP_NOTFOUND) {
997 CERROR("can't find map for gid %u\n", rsd->rsd_gid);
1001 if (rsd->rsd_gid == rsd->rsd_fsgid)
1004 fsgid = mds_idmap_lookup_gid(idmap, 0, rsd->rsd_fsgid);
1005 if (fsgid == MDS_IDMAP_NOTFOUND) {
1006 CERROR("can't find map for fsgid %u\n", rsd->rsd_fsgid);
1013 rsd->rsd_fsuid = fsuid;
1014 rsd->rsd_fsgid = fsgid;
1019 void mds_body_do_reverse_map(struct mds_export_data *med,
1020 struct mds_body *body)
1025 if (!med->med_remote)
1029 if (body->valid & OBD_MD_FLUID) {
1030 uid = mds_idmap_lookup_uid(med->med_idmap, 1, body->uid);
1031 if (uid == MDS_IDMAP_NOTFOUND) {
1032 uid = med->med_nllu;
1033 if (body->valid & OBD_MD_FLMODE) {
1034 body->mode = (body->mode & ~S_IRWXU) |
1035 ((body->mode & S_IRWXO) << 6);
1040 if (body->valid & OBD_MD_FLGID) {
1041 gid = mds_idmap_lookup_gid(med->med_idmap, 1, body->gid);
1042 if (gid == MDS_IDMAP_NOTFOUND) {
1043 gid = med->med_nllg;
1044 if (body->valid & OBD_MD_FLMODE) {
1045 body->mode = (body->mode & ~S_IRWXG) |
1046 ((body->mode & S_IRWXO) << 3);
/**********************
 * MDS ucred handling *
 **********************/
1059 static inline void drop_ucred_ginfo(struct lvfs_ucred *ucred)
1061 if (ucred->luc_ginfo) {
1062 put_group_info(ucred->luc_ginfo);
1063 ucred->luc_ginfo = NULL;
1067 static inline void drop_ucred_lsd(struct lvfs_ucred *ucred)
1069 if (ucred->luc_lsd) {
1070 mds_put_lsd(ucred->luc_lsd);
1071 ucred->luc_lsd = NULL;
1076 * the heart of the uid/gid handling and security checking.
1078 * root could set any group_info if we allowed setgroups, while
1079 * normal user only could 'reduce' their group members -- which
1080 * is somewhat expensive.
1082 * authenticated as mds user (using mds service credential) could
1083 * bypass all checkings.
1085 int mds_init_ucred(struct lvfs_ucred *ucred,
1086 struct ptlrpc_request *req,
1087 struct mds_req_sec_desc *rsd)
1089 struct mds_obd *mds = &req->rq_export->exp_obd->u.mds;
1090 struct mds_export_data *med = &req->rq_export->u.eu_mds_data;
1091 struct lustre_sec_desc *lsd;
1092 ptl_nid_t peernid = req->rq_peer.peer_id.nid;
1093 struct group_info *gnew;
1094 unsigned int setuid, setgid, strong_sec, root_squashed;
1100 LASSERT(rsd->rsd_ngroups <= LUSTRE_MAX_GROUPS);
1102 if (SEC_FLAVOR_MAJOR(req->rq_req_secflvr) == PTLRPCS_FLVR_MAJOR_GSS &&
1103 (SEC_FLAVOR_SVC(req->rq_req_secflvr) == PTLRPCS_SVC_AUTH ||
1104 SEC_FLAVOR_SVC(req->rq_req_secflvr) == PTLRPCS_SVC_PRIV))
1109 LASSERT(!(req->rq_remote_realm && !strong_sec));
1111 if (strong_sec && req->rq_auth_uid == -1) {
1112 CWARN("user not authenticated, deny access\n");
1116 /* sanity check: if we use strong authentication, we expect the
1117 * uid which client claimed is true.
1118 * not apply to special mds user .
1120 if (!req->rq_auth_usr_mds && strong_sec) {
1121 if (!med->med_remote) {
1122 if (req->rq_auth_uid != rsd->rsd_uid) {
1123 CERROR("local client "LPU64": auth uid %u "
1124 "while client claim %u:%u/%u:%u\n",
1125 peernid, req->rq_auth_uid,
1126 rsd->rsd_uid, rsd->rsd_gid,
1127 rsd->rsd_fsuid, rsd->rsd_fsgid);
1131 if (req->rq_mapped_uid == MDS_IDMAP_NOTFOUND) {
1132 CWARN("no mapping found, deny\n");
1136 if (mds_req_secdesc_do_map(med, rsd))
1139 if (req->rq_mapped_uid != rsd->rsd_uid) {
1140 CERROR("remote client "LPU64": auth uid %u "
1141 "while client claim %u:%u/%u:%u\n",
1142 peernid, req->rq_auth_uid,
1143 rsd->rsd_uid, rsd->rsd_gid,
1144 rsd->rsd_fsuid, rsd->rsd_fsgid);
1150 /* now LSD come into play */
1151 ucred->luc_ginfo = NULL;
1152 ucred->luc_lsd = lsd = mds_get_lsd(rsd->rsd_uid);
1155 CERROR("Deny access without LSD: uid %d\n", rsd->rsd_uid);
1159 lsd_perms = mds_lsd_get_perms(lsd, med->med_remote, 0, peernid);
1161 /* check setuid/setgid permissions.
1162 * again not apply to special mds user.
1164 if (!req->rq_auth_usr_mds) {
1165 /* find out the setuid/setgid attempt */
1166 setuid = (rsd->rsd_uid != rsd->rsd_fsuid);
1167 setgid = (rsd->rsd_gid != rsd->rsd_fsgid ||
1168 rsd->rsd_gid != lsd->lsd_gid);
1170 /* check permission of setuid */
1171 if (setuid && !(lsd_perms & LSD_PERM_SETUID)) {
1172 CWARN("mds blocked setuid attempt (%u -> %u) "
1173 "from "LPU64"\n", rsd->rsd_uid, rsd->rsd_fsuid,
1178 /* check permission of setgid */
1179 if (setgid && !(lsd_perms & LSD_PERM_SETGID)) {
1180 CWARN("mds blocked setgid attempt (%u:%u/%u:%u -> %u) "
1181 "from "LPU64"\n", rsd->rsd_uid, rsd->rsd_gid,
1182 rsd->rsd_fsuid, rsd->rsd_fsgid, lsd->lsd_gid,
1188 root_squashed = mds_squash_root(mds, rsd, &peernid);
1190 /* remove privilege for non-root user */
1192 rsd->rsd_cap &= ~CAP_FS_MASK;
1194 /* by now every fields other than groups in rsd have been granted */
1195 ucred->luc_nid = peernid;
1196 ucred->luc_uid = rsd->rsd_uid;
1197 ucred->luc_gid = rsd->rsd_gid;
1198 ucred->luc_fsuid = rsd->rsd_fsuid;
1199 ucred->luc_fsgid = rsd->rsd_fsgid;
1200 ucred->luc_cap = rsd->rsd_cap;
1202 /* don't use any supplementary group if we squashed root.
1203 * XXX The exact behavior of root_squash is not defined, we just
1204 * keep the reminder here */
1208 /* install groups from LSD */
1209 if (lsd->lsd_ginfo) {
1210 ucred->luc_ginfo = lsd->lsd_ginfo;
1211 get_group_info(ucred->luc_ginfo);
1214 /* everything is done if we don't allow setgroups, or it is
1215 * from remote client (which implies forced to be no-setgroups).
1217 * Note: remote user's supplementary groups sent along the request
1218 * (if any) are all ignored, but we make the mapped local user's
1219 * supplementary groups take effect.
1221 if (med->med_remote || !(lsd_perms & LSD_PERM_SETGRP))
1224 /* root could set any groups as he want (if allowed), normal
1225 * users only could reduce his group array.
1227 if (ucred->luc_uid == 0) {
1228 drop_ucred_ginfo(ucred);
1230 if (rsd->rsd_ngroups == 0)
1233 gnew = groups_alloc(rsd->rsd_ngroups);
1235 CERROR("out of memory\n");
1236 drop_ucred_lsd(ucred);
1239 groups_from_buffer(gnew, rsd->rsd_groups);
1240 groups_sort(gnew); /* don't rely on client doing this */
1242 ucred->luc_ginfo = gnew;
1244 __u32 set = 0, cur = 0;
1245 struct group_info *ginfo = ucred->luc_ginfo;
1250 /* Note: freeing a group_info count on 'nblocks' instead of
1251 * 'ngroups', thus we can safely alloc enough buffer and reduce
1252 * and ngroups number later.
1254 gnew = groups_alloc(rsd->rsd_ngroups);
1256 CERROR("out of memory\n");
1257 drop_ucred_ginfo(ucred);
1258 drop_ucred_lsd(ucred);
1262 while (cur < rsd->rsd_ngroups) {
1263 if (groups_search(ginfo, rsd->rsd_groups[cur])) {
1264 GROUP_AT(gnew, set) = rsd->rsd_groups[cur];
1269 gnew->ngroups = set;
1271 put_group_info(ucred->luc_ginfo);
1272 ucred->luc_ginfo = gnew;
1277 void mds_exit_ucred(struct lvfs_ucred *ucred)
1280 drop_ucred_ginfo(ucred);
1281 drop_ucred_lsd(ucred);