/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * Copyright (c) 2003 Cluster File Systems, Inc.
 *
 * This file is part of Lustre, http://www.lustre.org.
 *
 * Lustre is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * Lustre is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with Lustre; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
22 #define DEBUG_SUBSYSTEM S_MDS
24 #include <linux/config.h>
25 #include <linux/module.h>
26 #include <linux/kernel.h>
28 #include <linux/string.h>
29 #include <linux/stat.h>
30 #include <linux/errno.h>
31 #include <linux/version.h>
32 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
33 # include <linux/locks.h> // for wait_on_buffer
35 # include <linux/buffer_head.h> // for wait_on_buffer
37 #include <linux/unistd.h>
39 #include <asm/system.h>
40 #include <asm/uaccess.h>
43 #include <linux/stat.h>
44 #include <asm/uaccess.h>
45 #include <linux/slab.h>
46 #include <asm/segment.h>
47 #include <linux/random.h>
49 #include <linux/obd_support.h>
50 #include <linux/lustre_lib.h>
51 #include <linux/lustre_sec.h>
52 #include <linux/lustre_ucache.h>
53 #include <linux/lustre_gs.h>
54 #include <linux/lustre_fsfilt.h>
55 #include "mds_internal.h"
57 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,4)
58 struct group_info *groups_alloc(int ngroups)
60 struct group_info *ginfo;
62 LASSERT(ngroups <= NGROUPS_SMALL);
64 OBD_ALLOC(ginfo, sizeof(*ginfo) + 1 * sizeof(gid_t *));
67 ginfo->ngroups = ngroups;
69 ginfo->blocks[0] = ginfo->small_block;
70 atomic_set(&ginfo->usage, 1);
75 void groups_free(struct group_info *ginfo)
77 LASSERT(ginfo->ngroups <= NGROUPS_SMALL);
78 LASSERT(ginfo->nblocks == 1);
79 LASSERT(ginfo->blocks[0] == ginfo->small_block);
81 OBD_FREE(ginfo, sizeof(*ginfo) + 1 * sizeof(gid_t *));
84 /* for 2.4 the group number is small, so simply search the
87 int groups_search(struct group_info *ginfo, gid_t grp)
94 for (i = 0; i < ginfo->ngroups; i++)
95 if (GROUP_AT(ginfo, i) == grp)
102 void groups_sort(struct group_info *ginfo)
104 int base, max, stride;
105 int gidsetsize = ginfo->ngroups;
107 for (stride = 1; stride < gidsetsize; stride = 3 * stride + 1)
112 max = gidsetsize - stride;
113 for (base = 0; base < max; base++) {
115 int right = left + stride;
116 gid_t tmp = GROUP_AT(ginfo, right);
118 while (left >= 0 && GROUP_AT(ginfo, left) > tmp) {
119 GROUP_AT(ginfo, right) =
120 GROUP_AT(ginfo, left);
124 GROUP_AT(ginfo, right) = tmp;
130 int groups_search(struct group_info *ginfo, gid_t grp)
138 right = ginfo->ngroups;
139 while (left < right) {
140 int mid = (left + right) / 2;
141 int cmp = grp - GROUP_AT(ginfo, mid);
153 void groups_from_buffer(struct group_info *ginfo, __u32 *gids)
155 int i, ngroups = ginfo->ngroups;
157 for (i = 0; i < ginfo->nblocks; i++) {
158 int count = min(NGROUPS_PER_BLOCK, ngroups);
160 memcpy(ginfo->blocks[i], gids, count * sizeof(__u32));
161 gids += NGROUPS_PER_BLOCK;
166 void mds_pack_dentry2id(struct obd_device *obd,
167 struct lustre_id *id,
168 struct dentry *dentry,
171 id_ino(id) = dentry->d_inum;
172 id_gen(id) = dentry->d_generation;
175 id_fid(id) = dentry->d_fid;
176 id_group(id) = dentry->d_mdsnum;
/*
 * Fill the validity flags of a reply mds_body from a dentry and
 * delegate the actual id packing to mds_pack_dentry2id().
 *
 * NOTE(review): lines are elided in this listing -- the "struct
 * mds_body *b" parameter line, the tail of the first flag expression,
 * and the "if (fid)" guard before OBD_MD_FID appear to be missing.
 * Restore from the original source before building.
 */
void mds_pack_dentry2body(struct obd_device *obd,
                          struct dentry *dentry,
        /* always valid: inode number and generation */
        b->valid |= OBD_MD_FLID | OBD_MD_FLGENER |
                /* fid-aware clients also get the fid/group pair */
                b->valid |= OBD_MD_FID;
        mds_pack_dentry2id(obd, &b->id1, dentry, fid);
/*
 * Pack a lustre_id from a live inode; for fid-aware callers the
 * stored self-id (EA) is read via mds_read_inode_sid() as well.
 *
 * NOTE(review): lines are elided in this listing -- the remaining
 * parameters (inode/fid), the "if (fid)" branch structure, the
 * up(&inode->i_sem) pairing the successful down_trylock(), and the
 * RETURN path.  Restore from the original source before building.
 */
int mds_pack_inode2id(struct obd_device *obd,
                      struct lustre_id *id,
                /* we have to avoid deadlock. */
                if (!down_trylock(&inode->i_sem)) {
                        rc = mds_read_inode_sid(obd, inode, id);
                        /* presumably: i_sem already held by caller,
                         * so read without taking it -- TODO confirm */
                        rc = mds_read_inode_sid(obd, inode, id);
        id_ino(id) = inode->i_ino;
        id_gen(id) = inode->i_generation;
        id_type(id) = (S_IFMT & inode->i_mode);
220 void mds_inode2id(struct obd_device *obd, struct lustre_id *id,
221 struct inode *inode, __u64 fid)
223 struct mds_obd *mds = &obd->u.mds;
226 LASSERT(inode != NULL);
231 id_ino(id) = inode->i_ino;
232 id_group(id) = mds->mds_num;
233 id_gen(id) = inode->i_generation;
234 id_type(id) = (S_IFMT & inode->i_mode);
/*
 * Pack the inode's GS crypto key into two reply buffers at *offset:
 * first a __u32 holding sizeof(struct crypto_key), then the key
 * itself, read from the inode's EA via fsfilt_get_md().  Sets
 * OBD_MD_FLKEY in @body on success.  *offset is advanced past the
 * consumed buffers.
 *
 * NOTE(review): lines are elided in this listing -- declarations of
 * rc/size/buf, ENTRY, the NULL checks after lustre_msg_buf(), the
 * fsfilt_get_md() trailing arguments, the GOTO targets, and the
 * "out:" label / RETURN.  Restore from the original source.
 */
int mds_pack_gskey(struct obd_device *obd, struct lustre_msg *repmsg,
                   int *offset, struct mds_body *body, struct inode *inode)
        struct crypto_key_md *md_key;
        struct crypto_key *ckey;
        __u32 buflen, *sizep;

        /* first reply buffer: the size of the returned key */
        sizep = lustre_msg_buf(repmsg, (*offset)++, 4);
                CERROR("can't locate returned ckey size buf\n");
        *sizep = cpu_to_le32(sizeof(*ckey));

        OBD_ALLOC(md_key, sizeof(*md_key));

        /* second reply buffer: the key payload itself */
        buflen = repmsg->buflens[*offset];
        buf = lustre_msg_buf(repmsg, (*offset)++, buflen);

        /* read the on-disk key EA into md_key */
        size = fsfilt_get_md(obd, inode, md_key, sizeof(*md_key),
                CERROR("Can not get gskey from MDS ino %lu rc %d\n",
                GOTO(out, rc = size);
        if (le32_to_cpu(md_key->md_magic) != MD_KEY_MAGIC) {
                CDEBUG(D_INFO, "given match %x != magic %x\n",
                       md_key->md_magic, MD_KEY_MAGIC);
        CDEBUG(D_INFO, "get key %s mac %s for ino %lu size %d \n",
               md_key->md_ck.ck_key, md_key->md_ck.ck_mac, inode->i_ino, size);
        ckey=(struct crypto_key*)buf;
        memcpy(ckey, &md_key->md_ck, sizeof(*ckey));
        body->valid |= OBD_MD_FLKEY;
        OBD_FREE(md_key, sizeof(*md_key));
286 static int mds_get_gskey(struct inode *inode, struct crypto_key *ckey)
289 /*tmp create gs key here*/
290 LASSERT(ckey->ck_type == MKS_TYPE);
291 get_random_bytes(ckey->ck_key, KEY_SIZE);
/*
 * Store a crypto key in the inode's EA_KEY extended attribute.  For
 * MKS-type keys a fresh random key is generated locally first; the
 * mac and/or key parts are then taken from the caller-supplied @key
 * according to the valid flags (ATTR_MAC / ATTR_KEY).
 *
 * NOTE(review): lines are elided in this listing -- the tail of the
 * parameter list (the 'valid' flags argument), rc declaration,
 * ENTRY/RETURNs, error handling after fsfilt_get_md(), and several
 * closing braces.  Restore from the original source.
 */
int mds_set_gskey(struct obd_device *obd, void *handle,
                  struct inode *inode, void *key, int key_len,
        struct crypto_key_md *md_key = NULL;
        struct crypto_key *ckey = (struct crypto_key *)key;

        LASSERT(ckey->ck_type == MKS_TYPE || ckey->ck_type == GKS_TYPE);

        OBD_ALLOC(md_key, sizeof(*md_key));
        if (ckey->ck_type == MKS_TYPE) {
                /* MDS-generated key: fill ck_key with random bytes */
                mds_get_gskey(inode, ckey);

        /* read any existing key EA so untouched fields are preserved */
        rc = fsfilt_get_md(obd, inode, md_key, sizeof(*md_key),

        LASSERT(le32_to_cpu(md_key->md_magic) == MD_KEY_MAGIC ||
                md_key->md_magic == 0);

        if (le32_to_cpu(md_key->md_magic) == MD_KEY_MAGIC) {
                CDEBUG(D_INFO, "reset key %s mac %s", md_key->md_ck.ck_mac,
                       md_key->md_ck.ck_key);

        md_key->md_magic = cpu_to_le32(MD_KEY_MAGIC);
        /*get key and mac from request buffer*/
        if (valid & ATTR_MAC) {
                memcpy(md_key->md_ck.ck_mac, ckey->ck_mac, MAC_SIZE);
                CDEBUG(D_INFO, "set mac %s for ino %lu \n",
                       md_key->md_ck.ck_mac, inode->i_ino);
        if (valid & ATTR_KEY) {
                memcpy(md_key->md_ck.ck_key, ckey->ck_key, KEY_SIZE);
                CDEBUG(D_INFO, "set key %s for ino %lu \n",
                       md_key->md_ck.ck_key, inode->i_ino);

        rc = fsfilt_set_md(obd, inode, handle, md_key, sizeof(*md_key), EA_KEY);

        OBD_FREE(md_key, sizeof(*md_key));
345 int mds_set_crypto_type(struct obd_device *obd, void *val, __u32 vallen)
347 struct mds_obd *mds = &obd->u.mds;
349 if (vallen >= strlen("mks") &&
350 memcmp(val, "mks", vallen) == 0) {
351 mds->mds_crypto_type = MKS_TYPE;
352 CDEBUG(D_IOCTL, "mks type\n");
354 if (vallen >= strlen("gks") &&
355 memcmp(val, "gks", vallen) == 0) {
356 mds->mds_crypto_type = GKS_TYPE;
357 CDEBUG(D_IOCTL, "gks type \n");
/* Note that we can copy all of the fields, just some will not be "valid" */
/*
 * Fill a reply mds_body from @inode.
 *
 * NOTE(review): lines are elided in this listing -- the tail of the
 * !S_ISREG flag expression and its closing brace, the orphan and
 * directory nlink assignments, the "if (fid)" guard before
 * OBD_MD_FID, and the function braces.  Restore from the original.
 */
void mds_pack_inode2body(struct obd_device *obd, struct mds_body *b,
                         struct inode *inode, int fid)
        b->valid |= OBD_MD_FLID | OBD_MD_FLCTIME | OBD_MD_FLUID |
                    OBD_MD_FLGID | OBD_MD_FLFLAGS | OBD_MD_FLTYPE |
                    OBD_MD_FLMODE | OBD_MD_FLNLINK | OBD_MD_FLGENER |
                    OBD_MD_FLATIME | OBD_MD_FLMTIME; /* bug 2020 */

        /* for non-regular files size/blocks are authoritative on the
         * MDS; for regular files they live on the OSTs */
        if (!S_ISREG(inode->i_mode)) {
                b->valid |= OBD_MD_FLSIZE | OBD_MD_FLBLOCKS |
                            OBD_MD_FLATIME | OBD_MD_FLMTIME |
        b->atime = LTIME_S(inode->i_atime);
        b->mtime = LTIME_S(inode->i_mtime);
        b->ctime = LTIME_S(inode->i_ctime);
        b->mode = inode->i_mode;
        b->size = inode->i_size;
        b->blocks = inode->i_blocks;
        b->uid = inode->i_uid;
        b->gid = inode->i_gid;
        b->flags = inode->i_flags;
        b->rdev = inode->i_rdev;

        /* Return the correct link count for orphan inodes */
        if (mds_inode_is_orphan(inode)) {
        } else if (S_ISDIR(inode->i_mode)) {
                b->nlink = inode->i_nlink;

                b->valid |= OBD_MD_FID;

        mds_pack_inode2id(obd, &b->id1, inode, fid);
/*
 * Unpack a REINT_SETATTR request into @r: the fixed mds_rec_setattr
 * record at @offset, then up to three optional EA buffers.
 *
 * NOTE(review): lines are elided in this listing -- ENTRY, the
 * rec == NULL check after swabbing, the lustre_msg_buf() trailing
 * arguments, the RETURN(-EFAULT) paths, closing braces, and the
 * final RETURN.  Restore from the original source.
 */
static int mds_setattr_unpack(struct ptlrpc_request *req, int offset,
                              struct mds_update_record *r)
        struct iattr *attr = &r->ur_iattr;
        struct mds_rec_setattr *rec;

        rec = lustre_swab_reqbuf(req, offset, sizeof(*rec),
                                 lustre_swab_mds_rec_setattr);

        r->ur_id1 = &rec->sa_id;
        r->ur_flags = rec->sa_flags;
        /* copy the wire attributes into the kernel iattr */
        attr->ia_valid = rec->sa_valid;
        attr->ia_mode = rec->sa_mode;
        attr->ia_uid = rec->sa_uid;
        attr->ia_gid = rec->sa_gid;
        attr->ia_size = rec->sa_size;
        LTIME_S(attr->ia_atime) = rec->sa_atime;
        LTIME_S(attr->ia_mtime) = rec->sa_mtime;
        LTIME_S(attr->ia_ctime) = rec->sa_ctime;
        attr->ia_attr_flags = rec->sa_attr_flags;

        /* optional EAs follow in up to three extra buffers */
        LASSERT_REQSWAB(req, offset + 1);
        if (req->rq_reqmsg->bufcount > offset + 1) {
                r->ur_eadata = lustre_msg_buf(req->rq_reqmsg,
                if (r->ur_eadata == NULL)
                r->ur_eadatalen = req->rq_reqmsg->buflens[offset + 1];

        if (req->rq_reqmsg->bufcount > offset + 2) {
                r->ur_ea2data = lustre_msg_buf(req->rq_reqmsg, offset + 2, 0);
                if (r->ur_ea2data == NULL)
                r->ur_ea2datalen = req->rq_reqmsg->buflens[offset + 2];

        if (req->rq_reqmsg->bufcount > offset + 3) {
                r->ur_ea3data = lustre_msg_buf(req->rq_reqmsg, offset + 3, 0);
                if (r->ur_ea3data == NULL)
                r->ur_ea3datalen = req->rq_reqmsg->buflens[offset + 3];
/*
 * Unpack a REINT_CREATE request: the fixed mds_rec_create record,
 * then the new name, then an optional third buffer whose meaning
 * depends on the create mode -- symlink target string, mkdir stripe
 * count (a 16-bit integer), or regular-file EA.
 *
 * NOTE(review): lines are elided in this listing -- ENTRY, the
 * rec == NULL check, RETURN paths, several lustre_msg/swab buffer
 * trailing arguments, and closing braces.  Restore from the original.
 */
static int mds_create_unpack(struct ptlrpc_request *req, int offset,
                             struct mds_update_record *r)
        struct mds_rec_create *rec;

        rec = lustre_swab_reqbuf(req, offset, sizeof(*rec),
                                 lustre_swab_mds_rec_create);

        r->ur_id1 = &rec->cr_id;
        r->ur_id2 = &rec->cr_replayid;
        r->ur_mode = rec->cr_mode;
        r->ur_rdev = rec->cr_rdev;
        r->ur_time = rec->cr_time;
        r->ur_flags = rec->cr_flags;

        LASSERT_REQSWAB(req, offset + 1);
        r->ur_name = lustre_msg_string(req->rq_reqmsg, offset + 1, 0);
        if (r->ur_name == NULL)
        r->ur_namelen = req->rq_reqmsg->buflens[offset + 1];

        LASSERT_REQSWAB(req, offset + 2);
        if (req->rq_reqmsg->bufcount > offset + 2) {
                if (S_ISLNK(r->ur_mode)) {
                        r->ur_tgt = lustre_msg_string(req->rq_reqmsg,
                        if (r->ur_tgt == NULL)
                        r->ur_tgtlen = req->rq_reqmsg->buflens[offset + 2];
                } else if (S_ISDIR(r->ur_mode) ) {
                        /* Stripe info for mkdir - just a 16bit integer */
                        if (req->rq_reqmsg->buflens[offset + 2] != 2) {
                                CERROR("mkdir stripe info does not match "
                                       "expected size %d vs 2\n",
                                       req->rq_reqmsg->buflens[offset + 2]);
                        r->ur_eadata = lustre_swab_buf(req->rq_reqmsg,
                        r->ur_eadatalen = req->rq_reqmsg->buflens[offset + 2];
                } else if (S_ISREG(r->ur_mode)){
                        r->ur_eadata = lustre_msg_buf(req->rq_reqmsg,
                        r->ur_eadatalen = req->rq_reqmsg->buflens[offset + 2];
                /* Hm, no other users so far? */
511 static int mds_link_unpack(struct ptlrpc_request *req, int offset,
512 struct mds_update_record *r)
514 struct mds_rec_link *rec;
517 rec = lustre_swab_reqbuf(req, offset, sizeof(*rec),
518 lustre_swab_mds_rec_link);
522 r->ur_id1 = &rec->lk_id1;
523 r->ur_id2 = &rec->lk_id2;
524 r->ur_time = rec->lk_time;
525 r->ur_flags = rec->lk_flags;
527 LASSERT_REQSWAB(req, offset + 1);
528 r->ur_name = lustre_msg_string(req->rq_reqmsg, offset + 1, 0);
529 if (r->ur_name == NULL)
531 r->ur_namelen = req->rq_reqmsg->buflens[offset + 1];
535 static int mds_unlink_unpack(struct ptlrpc_request *req, int offset,
536 struct mds_update_record *r)
538 struct mds_rec_unlink *rec;
541 rec = lustre_swab_reqbuf(req, offset, sizeof (*rec),
542 lustre_swab_mds_rec_unlink);
546 r->ur_mode = rec->ul_mode;
547 r->ur_id1 = &rec->ul_id1;
548 r->ur_id2 = &rec->ul_id2;
549 r->ur_time = rec->ul_time;
550 r->ur_flags = rec->ul_flags;
552 LASSERT_REQSWAB(req, offset + 1);
553 r->ur_name = lustre_msg_string(req->rq_reqmsg, offset + 1, 0);
554 if (r->ur_name == NULL)
556 r->ur_namelen = req->rq_reqmsg->buflens[offset + 1];
560 static int mds_rename_unpack(struct ptlrpc_request *req, int offset,
561 struct mds_update_record *r)
563 struct mds_rec_rename *rec;
566 rec = lustre_swab_reqbuf(req, offset, sizeof (*rec),
567 lustre_swab_mds_rec_rename);
571 r->ur_id1 = &rec->rn_id1;
572 r->ur_id2 = &rec->rn_id2;
573 r->ur_time = rec->rn_time;
574 r->ur_flags = rec->rn_flags;
576 LASSERT_REQSWAB(req, offset + 1);
577 r->ur_name = lustre_msg_string(req->rq_reqmsg, offset + 1, 0);
578 if (r->ur_name == NULL)
580 r->ur_namelen = req->rq_reqmsg->buflens[offset + 1];
582 LASSERT_REQSWAB(req, offset + 2);
583 r->ur_tgt = lustre_msg_string(req->rq_reqmsg, offset + 2, 0);
584 if (r->ur_tgt == NULL)
586 r->ur_tgtlen = req->rq_reqmsg->buflens[offset + 2];
590 static int mds_open_unpack(struct ptlrpc_request *req, int offset,
591 struct mds_update_record *r)
593 struct mds_rec_create *rec;
596 rec = lustre_swab_reqbuf(req, offset, sizeof (*rec),
597 lustre_swab_mds_rec_create);
601 r->ur_id1 = &rec->cr_id;
602 r->ur_id2 = &rec->cr_replayid;
603 r->ur_mode = rec->cr_mode;
604 r->ur_rdev = rec->cr_rdev;
605 r->ur_time = rec->cr_time;
606 r->ur_flags = rec->cr_flags;
608 LASSERT_REQSWAB(req, offset + 1);
609 r->ur_name = lustre_msg_string(req->rq_reqmsg, offset + 1, 0);
611 if (r->ur_name == NULL)
613 r->ur_namelen = req->rq_reqmsg->buflens[offset + 1];
615 LASSERT_REQSWAB(req, offset + 2);
616 if (req->rq_reqmsg->bufcount > offset + 2) {
617 r->ur_eadata = lustre_msg_buf(req->rq_reqmsg, offset + 2, 0);
618 if (r->ur_eadata == NULL)
620 r->ur_eadatalen = req->rq_reqmsg->buflens[offset + 2];
623 if (rec->cr_flags & MDS_OPEN_HAS_KEY) {
624 LASSERT(req->rq_reqmsg->bufcount > offset + 3);
625 r->ur_ea2data = lustre_msg_buf(req->rq_reqmsg, offset + 3, 0);
626 r->ur_ea2datalen = req->rq_reqmsg->buflens[offset + 3];
631 typedef int (*update_unpacker)(struct ptlrpc_request *req, int offset,
632 struct mds_update_record *r);
634 static update_unpacker mds_unpackers[REINT_MAX + 1] = {
635 [REINT_SETATTR] mds_setattr_unpack,
636 [REINT_CREATE] mds_create_unpack,
637 [REINT_LINK] mds_link_unpack,
638 [REINT_UNLINK] mds_unlink_unpack,
639 [REINT_RENAME] mds_rename_unpack,
640 [REINT_OPEN] mds_open_unpack,
/*
 * Peek at the REINT opcode at @offset and dispatch to the matching
 * unpacker from mds_unpackers[].
 *
 * NOTE(review): lines are elided in this listing -- opcodep/opcode/rc
 * declarations, the NULL check on opcodep, the __swab32s() call, the
 * RETURN(-EFAULT) paths, and the final RETURN(rc).  Restore from the
 * original source.
 */
int mds_update_unpack(struct ptlrpc_request *req, int offset,
                      struct mds_update_record *rec)
        /* NB don't lustre_swab_reqbuf() here. We're just taking a peek and we
         * want to leave it to the specific unpacker once we've identified the
         * opcode. */
        opcodep = lustre_msg_buf(req->rq_reqmsg, offset, sizeof(*opcodep));

        /* the peeked value may still be in the client's byte order */
        if (lustre_msg_swabbed(req->rq_reqmsg))

        if (opcode > REINT_MAX ||
            mds_unpackers[opcode] == NULL) {
                CERROR("Unexpected opcode %d\n", opcode);

        rec->ur_opcode = opcode;

        rc = mds_unpackers[opcode](req, offset, rec);

        /* fsuid defaults to the transport-authenticated uid */
        rec->ur_fsuid = req->rq_uid;
681 * here we take simple rule: once uid/fsuid is root, we also squash
682 * the gid/fsgid, don't care setuid/setgid attributes.
685 int mds_squash_root(struct mds_obd *mds, struct mds_req_sec_desc *rsd,
688 if (!mds->mds_squash_uid || *peernid == mds->mds_nosquash_nid)
691 if (rsd->rsd_uid && rsd->rsd_fsuid)
694 CDEBUG(D_SEC, "squash req from "LPX64":"
695 "(%u:%u-%u:%u/%x)=>(%u:%u-%u:%u/%x)\n", *peernid,
696 rsd->rsd_uid, rsd->rsd_gid,
697 rsd->rsd_fsuid, rsd->rsd_fsgid, rsd->rsd_cap,
698 rsd->rsd_uid ? rsd->rsd_uid : mds->mds_squash_uid,
699 rsd->rsd_uid ? rsd->rsd_gid : mds->mds_squash_gid,
700 rsd->rsd_fsuid ? rsd->rsd_fsuid : mds->mds_squash_uid,
701 rsd->rsd_fsuid ? rsd->rsd_fsgid : mds->mds_squash_gid,
702 rsd->rsd_cap & ~CAP_FS_MASK);
704 if (rsd->rsd_uid == 0) {
705 rsd->rsd_uid = mds->mds_squash_uid;
706 rsd->rsd_gid = mds->mds_squash_gid;
708 if (rsd->rsd_fsuid == 0) {
709 rsd->rsd_fsuid = mds->mds_squash_uid;
710 rsd->rsd_fsgid = mds->mds_squash_gid;
712 rsd->rsd_cap &= ~CAP_FS_MASK;
717 /********************************
718 * MDS uid/gid mapping handling *
719 ********************************/
722 struct mds_idmap_entry* idmap_alloc_entry(__u32 rmt_id, __u32 lcl_id)
724 struct mds_idmap_entry *e;
726 OBD_ALLOC(e, sizeof(*e));
730 INIT_LIST_HEAD(&e->rmt_hash);
731 INIT_LIST_HEAD(&e->lcl_hash);
732 atomic_set(&e->refcount, 1);
739 void idmap_free_entry(struct mds_idmap_entry *e)
741 if (!list_empty(&e->rmt_hash))
742 list_del(&e->rmt_hash);
743 if (!list_empty(&e->lcl_hash))
744 list_del(&e->lcl_hash);
745 OBD_FREE(e, sizeof(*e));
749 int idmap_insert_entry(struct list_head *rmt_hash, struct list_head *lcl_hash,
750 struct mds_idmap_entry *new, const char *warn_msg)
752 struct list_head *rmt_head = &rmt_hash[MDS_IDMAP_HASHFUNC(new->rmt_id)];
753 struct list_head *lcl_head = &lcl_hash[MDS_IDMAP_HASHFUNC(new->lcl_id)];
754 struct mds_idmap_entry *e;
756 list_for_each_entry(e, rmt_head, rmt_hash) {
757 if (e->rmt_id == new->rmt_id &&
758 e->lcl_id == new->lcl_id) {
759 atomic_inc(&e->refcount);
762 if (e->rmt_id == new->rmt_id && warn_msg)
763 CWARN("%s: rmt id %u already map to %u (new %u)\n",
764 warn_msg, e->rmt_id, e->lcl_id, new->lcl_id);
765 if (e->lcl_id == new->lcl_id && warn_msg)
766 CWARN("%s: lcl id %u already be mapped from %u "
767 "(new %u)\n", warn_msg,
768 e->lcl_id, e->rmt_id, new->rmt_id);
771 list_add_tail(rmt_head, &new->rmt_hash);
772 list_add_tail(lcl_head, &new->lcl_hash);
/*
 * Drop one reference on the (rmt_id, lcl_id) mapping; the entry is
 * unlinked from both hashes and freed when the last reference goes.
 * Caller holds mit_lock.
 *
 * NOTE(review): the loop-exit / return paths are elided in this
 * listing; restore them from the original source.
 */
int idmap_remove_entry(struct list_head *rmt_hash, struct list_head *lcl_hash,
                       __u32 rmt_id, __u32 lcl_id)
        struct list_head *rmt_head = &rmt_hash[MDS_IDMAP_HASHFUNC(rmt_id)];
        struct mds_idmap_entry *e;

        list_for_each_entry(e, rmt_head, rmt_hash) {
                if (e->rmt_id == rmt_id && e->lcl_id == lcl_id) {
                        if (atomic_dec_and_test(&e->refcount)) {
                                /* last user: unlink from both hashes */
                                list_del(&e->rmt_hash);
                                list_del(&e->lcl_hash);
                                OBD_FREE(e, sizeof(*e));
/*
 * Add one uid mapping and one gid mapping (remote -> local) to @tbl
 * under mit_lock.  A duplicate mapping just gains a reference on the
 * existing entry, and the freshly-allocated one is released.
 *
 * NOTE(review): lines are elided in this listing -- ENTRY, the
 * allocation-failure checks/returns after idmap_alloc_entry(), the
 * closing braces of the insert-failure branches, and the final
 * RETURN.  Restore from the original source.
 */
int mds_idmap_add(struct mds_idmap_table *tbl,
                  uid_t rmt_uid, uid_t lcl_uid,
                  gid_t rmt_gid, gid_t lcl_gid)
        struct mds_idmap_entry *ue, *ge;

        ue = idmap_alloc_entry(rmt_uid, lcl_uid);
        ge = idmap_alloc_entry(rmt_gid, lcl_gid);
                idmap_free_entry(ue);

        spin_lock(&tbl->mit_lock);

        /* non-zero return: an equivalent mapping already existed */
        if (idmap_insert_entry(tbl->mit_idmaps[MDS_RMT_UIDMAP_IDX],
                               tbl->mit_idmaps[MDS_LCL_UIDMAP_IDX],
                               ue, "UID mapping")) {
                idmap_free_entry(ue);
        if (idmap_insert_entry(tbl->mit_idmaps[MDS_RMT_GIDMAP_IDX],
                               tbl->mit_idmaps[MDS_LCL_GIDMAP_IDX],
                               ge, "GID mapping")) {
                idmap_free_entry(ge);
        spin_unlock(&tbl->mit_lock);
834 int mds_idmap_del(struct mds_idmap_table *tbl,
835 uid_t rmt_uid, uid_t lcl_uid,
836 gid_t rmt_gid, gid_t lcl_gid)
843 spin_lock(&tbl->mit_lock);
844 idmap_remove_entry(tbl->mit_idmaps[MDS_RMT_UIDMAP_IDX],
845 tbl->mit_idmaps[MDS_LCL_UIDMAP_IDX],
847 idmap_remove_entry(tbl->mit_idmaps[MDS_RMT_GIDMAP_IDX],
848 tbl->mit_idmaps[MDS_LCL_GIDMAP_IDX],
850 spin_unlock(&tbl->mit_lock);
855 __u32 idmap_lookup_id(struct list_head *hash, int reverse, __u32 id)
857 struct list_head *head = &hash[MDS_IDMAP_HASHFUNC(id)];
858 struct mds_idmap_entry *e;
861 list_for_each_entry(e, head, rmt_hash) {
865 return MDS_IDMAP_NOTFOUND;
867 list_for_each_entry(e, head, lcl_hash) {
871 return MDS_IDMAP_NOTFOUND;
875 int mds_idmap_lookup_uid(struct mds_idmap_table *tbl, int reverse, uid_t uid)
877 struct list_head *hash;
880 return MDS_IDMAP_NOTFOUND;
883 hash = tbl->mit_idmaps[MDS_RMT_UIDMAP_IDX];
885 hash = tbl->mit_idmaps[MDS_LCL_UIDMAP_IDX];
887 spin_lock(&tbl->mit_lock);
888 uid = idmap_lookup_id(hash, reverse, uid);
889 spin_unlock(&tbl->mit_lock);
894 int mds_idmap_lookup_gid(struct mds_idmap_table *tbl, int reverse, gid_t gid)
896 struct list_head *hash;
899 return MDS_IDMAP_NOTFOUND;
902 hash = tbl->mit_idmaps[MDS_RMT_GIDMAP_IDX];
904 hash = tbl->mit_idmaps[MDS_LCL_GIDMAP_IDX];
906 spin_lock(&tbl->mit_lock);
907 gid = idmap_lookup_id(hash, reverse, gid);
908 spin_unlock(&tbl->mit_lock);
913 struct mds_idmap_table *mds_idmap_alloc()
915 struct mds_idmap_table *tbl;
918 OBD_ALLOC(tbl, sizeof(*tbl));
922 spin_lock_init(&tbl->mit_lock);
923 for (i = 0; i < MDS_IDMAP_N_HASHES; i++)
924 for (j = 0; j < MDS_IDMAP_HASHSIZE; j++)
925 INIT_LIST_HEAD(&tbl->mit_idmaps[i][j]);
930 static void idmap_clear_rmt_hash(struct list_head *list)
932 struct mds_idmap_entry *e;
935 for (i = 0; i < MDS_IDMAP_HASHSIZE; i++) {
936 while (!list_empty(&list[i])) {
937 e = list_entry(list[i].next, struct mds_idmap_entry,
944 void mds_idmap_free(struct mds_idmap_table *tbl)
948 spin_lock(&tbl->mit_lock);
949 idmap_clear_rmt_hash(tbl->mit_idmaps[MDS_RMT_UIDMAP_IDX]);
950 idmap_clear_rmt_hash(tbl->mit_idmaps[MDS_RMT_GIDMAP_IDX]);
952 /* paranoid checking */
953 for (i = 0; i < MDS_IDMAP_HASHSIZE; i++) {
954 LASSERT(list_empty(&tbl->mit_idmaps[MDS_LCL_UIDMAP_IDX][i]));
955 LASSERT(list_empty(&tbl->mit_idmaps[MDS_LCL_GIDMAP_IDX][i]));
957 spin_unlock(&tbl->mit_lock);
959 OBD_FREE(tbl, sizeof(*tbl));
962 /*********************************
963 * helpers doing mapping for MDS *
964 *********************************/
/*
 * we allow remote setuid/setgid to an "authenticated" one;
 * this policy may change later.
 */
/*
 * Map the remote uid/gid/fsuid/fsgid quadruple in @rsd to local ids
 * using the export's idmap table; any missing mapping rejects the
 * request.  On success @rsd is rewritten in place with the local ids.
 *
 * NOTE(review): lines are elided in this listing -- declarations of
 * uid/fsuid/gid/fsgid, ENTRY, the RETURN error paths after each
 * CERROR, the shortcut assignments for the uid == fsuid and
 * gid == fsgid cases, and the rsd_uid/rsd_gid writebacks.  Restore
 * from the original source.
 */
int mds_req_secdesc_do_map(struct mds_export_data *med,
                           struct mds_req_sec_desc *rsd)
        struct mds_idmap_table *idmap = med->med_idmap;

        uid = mds_idmap_lookup_uid(idmap, 0, rsd->rsd_uid);
        if (uid == MDS_IDMAP_NOTFOUND) {
                CERROR("can't find map for uid %u\n", rsd->rsd_uid);

        /* fsuid identical to uid: reuse the mapping already found */
        if (rsd->rsd_uid == rsd->rsd_fsuid)

        fsuid = mds_idmap_lookup_uid(idmap, 0, rsd->rsd_fsuid);
        if (fsuid == MDS_IDMAP_NOTFOUND) {
                CERROR("can't find map for fsuid %u\n", rsd->rsd_fsuid);

        gid = mds_idmap_lookup_gid(idmap, 0, rsd->rsd_gid);
        if (gid == MDS_IDMAP_NOTFOUND) {
                CERROR("can't find map for gid %u\n", rsd->rsd_gid);

        /* fsgid identical to gid: reuse the mapping already found */
        if (rsd->rsd_gid == rsd->rsd_fsgid)

        fsgid = mds_idmap_lookup_gid(idmap, 0, rsd->rsd_fsgid);
        if (fsgid == MDS_IDMAP_NOTFOUND) {
                CERROR("can't find map for fsgid %u\n", rsd->rsd_fsgid);

        rsd->rsd_fsuid = fsuid;
        rsd->rsd_fsgid = fsgid;
1018 void mds_body_do_reverse_map(struct mds_export_data *med,
1019 struct mds_body *body)
1024 if (!med->med_remote)
1028 if (body->valid & OBD_MD_FLUID) {
1029 uid = mds_idmap_lookup_uid(med->med_idmap, 1, body->uid);
1030 if (uid == MDS_IDMAP_NOTFOUND) {
1031 uid = med->med_nllu;
1032 if (body->valid & OBD_MD_FLMODE) {
1033 body->mode = (body->mode & ~S_IRWXU) |
1034 ((body->mode & S_IRWXO) << 6);
1039 if (body->valid & OBD_MD_FLGID) {
1040 gid = mds_idmap_lookup_gid(med->med_idmap, 1, body->gid);
1041 if (gid == MDS_IDMAP_NOTFOUND) {
1042 gid = med->med_nllg;
1043 if (body->valid & OBD_MD_FLMODE) {
1044 body->mode = (body->mode & ~S_IRWXG) |
1045 ((body->mode & S_IRWXO) << 3);
1054 /**********************
1055 * MDS ucred handling *
1056 **********************/
1058 static inline void drop_ucred_ginfo(struct lvfs_ucred *ucred)
1060 if (ucred->luc_ginfo) {
1061 put_group_info(ucred->luc_ginfo);
1062 ucred->luc_ginfo = NULL;
1066 static inline void drop_ucred_lsd(struct lvfs_ucred *ucred)
1068 if (ucred->luc_lsd) {
1069 mds_put_lsd(ucred->luc_lsd);
1070 ucred->luc_lsd = NULL;
/*
 * The heart of the uid/gid handling and security checking.
 *
 * Root could set any group_info if we allowed setgroups, while a
 * normal user can only 'reduce' their group members -- which is
 * somewhat expensive.
 *
 * A client authenticated as the mds user (using the mds service
 * credential) bypasses all checks.
 */
/*
 * Build the lvfs_ucred for a request: verify the claimed identities
 * against the transport authentication, apply uid/gid mapping for
 * remote clients, squash root, consult the LSD for setuid / setgid /
 * setgroups permission, and install the supplementary group array.
 *
 * NOTE(review): this listing is missing many lines -- strong_sec /
 * lsd_perms / rc declarations, ENTRY, numerous RETURN error paths,
 * the "} else {" opening the non-root groups branch, and several
 * closing braces.  The visible code is annotated as-is; restore the
 * elided lines from the original source before building.
 */
int mds_init_ucred(struct lvfs_ucred *ucred,
                   struct ptlrpc_request *req,
                   struct mds_req_sec_desc *rsd)
        struct mds_obd *mds = &req->rq_export->exp_obd->u.mds;
        struct mds_export_data *med = &req->rq_export->u.eu_mds_data;
        struct lustre_sec_desc *lsd;
        ptl_nid_t peernid = req->rq_peer.peer_id.nid;
        struct group_info *gnew;
        unsigned int setuid, setgid, strong_sec, root_squashed;

        LASSERT(rsd->rsd_ngroups <= LUSTRE_MAX_GROUPS);

        /* GSS with integrity or privacy service counts as strong
         * authentication */
        if (SEC_FLAVOR_MAJOR(req->rq_req_secflvr) == PTLRPCS_FLVR_MAJOR_GSS &&
            (SEC_FLAVOR_SVC(req->rq_req_secflvr) == PTLRPCS_SVC_AUTH ||
             SEC_FLAVOR_SVC(req->rq_req_secflvr) == PTLRPCS_SVC_PRIV))

        /* cross-realm clients must have come through strong security */
        LASSERT(!(req->rq_remote_realm && !strong_sec));

        if (strong_sec && req->rq_auth_uid == -1) {
                CWARN("user not authenticated, deny access\n");

        /* sanity check: with strong authentication the uid the client
         * claims must match what the transport authenticated.
         * Not applied to the special mds user. */
        if (!req->rq_auth_usr_mds && strong_sec) {
                if (!med->med_remote) {
                        if (req->rq_auth_uid != rsd->rsd_uid) {
                                CERROR("local client "LPU64": auth uid %u "
                                       "while client claim %u:%u/%u:%u\n",
                                       peernid, req->rq_auth_uid,
                                       rsd->rsd_uid, rsd->rsd_gid,
                                       rsd->rsd_fsuid, rsd->rsd_fsgid);
                        if (req->rq_mapped_uid == MDS_IDMAP_NOTFOUND) {
                                CWARN("no mapping found, deny\n");
                        if (mds_req_secdesc_do_map(med, rsd))
                        if (req->rq_mapped_uid != rsd->rsd_uid) {
                                CERROR("remote client "LPU64": auth uid %u "
                                       "while client claim %u:%u/%u:%u\n",
                                       peernid, req->rq_auth_uid,
                                       rsd->rsd_uid, rsd->rsd_gid,
                                       rsd->rsd_fsuid, rsd->rsd_fsgid);

        /* now LSD come into play */
        ucred->luc_ginfo = NULL;
        ucred->luc_lsd = lsd = mds_get_lsd(rsd->rsd_uid);
                CERROR("Deny access without LSD: uid %d\n", rsd->rsd_uid);

        lsd_perms = mds_lsd_get_perms(lsd, med->med_remote, 0, peernid);

        /* check setuid/setgid permissions; again not applied to the
         * special mds user */
        if (!req->rq_auth_usr_mds) {
                /* find out the setuid/setgid attempt */
                setuid = (rsd->rsd_uid != rsd->rsd_fsuid);
                setgid = (rsd->rsd_gid != rsd->rsd_fsgid ||
                          rsd->rsd_gid != lsd->lsd_gid);

                /* check permission of setuid */
                if (setuid && !(lsd_perms & LSD_PERM_SETUID)) {
                        CWARN("mds blocked setuid attempt (%u -> %u) "
                              "from "LPU64"\n", rsd->rsd_uid, rsd->rsd_fsuid,

                /* check permission of setgid */
                if (setgid && !(lsd_perms & LSD_PERM_SETGID)) {
                        CWARN("mds blocked setgid attempt (%u:%u/%u:%u -> %u) "
                              "from "LPU64"\n", rsd->rsd_uid, rsd->rsd_gid,
                              rsd->rsd_fsuid, rsd->rsd_fsgid, lsd->lsd_gid,

        root_squashed = mds_squash_root(mds, rsd, &peernid);

        /* remove privilege for non-root user */
        rsd->rsd_cap &= ~CAP_FS_MASK;

        /* by now every fields other than groups in rsd have been granted */
        ucred->luc_nid = peernid;
        ucred->luc_uid = rsd->rsd_uid;
        ucred->luc_gid = rsd->rsd_gid;
        ucred->luc_fsuid = rsd->rsd_fsuid;
        ucred->luc_fsgid = rsd->rsd_fsgid;
        ucred->luc_cap = rsd->rsd_cap;

        /* don't use any supplementary group if we squashed root.
         * XXX The exact behavior of root_squash is not defined, we just
         * keep the reminder here */

        /* install groups from LSD */
        if (lsd->lsd_ginfo) {
                ucred->luc_ginfo = lsd->lsd_ginfo;
                get_group_info(ucred->luc_ginfo);

        /* everything is done if we don't allow setgroups, or it is
         * from remote client (which implies forced to be no-setgroups).
         *
         * Note: remote user's supplementary groups sent along the request
         * (if any) are all ignored, but we make the mapped local user's
         * supplementary groups take effect. */
        if (med->med_remote || !(lsd_perms & LSD_PERM_SETGRP))

        /* root could set any groups as he wants (if allowed); normal
         * users can only reduce their group array. */
        if (ucred->luc_uid == 0) {
                drop_ucred_ginfo(ucred);

                if (rsd->rsd_ngroups == 0)

                gnew = groups_alloc(rsd->rsd_ngroups);
                        CERROR("out of memory\n");
                        drop_ucred_lsd(ucred);

                groups_from_buffer(gnew, rsd->rsd_groups);
                groups_sort(gnew); /* don't rely on client doing this */

                ucred->luc_ginfo = gnew;
                __u32 set = 0, cur = 0;
                struct group_info *ginfo = ucred->luc_ginfo;

                /* Note: freeing a group_info counts on 'nblocks' instead of
                 * 'ngroups', thus we can safely alloc enough buffer and reduce
                 * the ngroups number later. */
                gnew = groups_alloc(rsd->rsd_ngroups);
                        CERROR("out of memory\n");
                        drop_ucred_ginfo(ucred);
                        drop_ucred_lsd(ucred);

                /* keep only the client-supplied groups that are also in
                 * the LSD's group set */
                while (cur < rsd->rsd_ngroups) {
                        if (groups_search(ginfo, rsd->rsd_groups[cur])) {
                                GROUP_AT(gnew, set) = rsd->rsd_groups[cur];

                gnew->ngroups = set;

                put_group_info(ucred->luc_ginfo);
                ucred->luc_ginfo = gnew;
1276 void mds_exit_ucred(struct lvfs_ucred *ucred)
1279 drop_ucred_ginfo(ucred);
1280 drop_ucred_lsd(ucred);