4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
20 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
21 * CA 95054 USA or visit www.sun.com if you need additional information or
27 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
28 * Use is subject to license terms.
30 * Copyright (c) 2012, 2014, Intel Corporation.
33 * This file is part of Lustre, http://www.lustre.org/
34 * Lustre is a trademark of Sun Microsystems, Inc.
36 * lustre/obdclass/idmap.c
38 * Lustre user identity mapping.
40 * Author: Fan Yong <fanyong@clusterfs.com>
43 #define DEBUG_SUBSYSTEM S_SEC
45 #include <linux/user_namespace.h>
46 #ifdef HAVE_UIDGID_HEADER
47 # include <linux/uidgid.h>
49 #include <lustre_idmap.h>
50 #include <upcall_cache.h>
51 #include <md_object.h>
52 #include <obd_support.h>
/*
 * Take a reference on @group_info so it cannot be freed while we walk it.
 * Paired with lustre_put_group_info().
 */
#define lustre_get_group_info(group_info) do {		\
	atomic_inc(&(group_info)->usage);		\
} while (0)
/*
 * Drop the reference taken by lustre_get_group_info(); frees the
 * group_info when the last reference goes away.
 */
#define lustre_put_group_info(group_info) do {		\
	if (atomic_dec_and_test(&(group_info)->usage))	\
		groups_free(group_info);		\
} while (0)
64 * groups_search() is copied from linux kernel!
67 static int lustre_groups_search(struct group_info *group_info,
76 right = group_info->ngroups;
77 while (left < right) {
78 int mid = (left + right) / 2;
80 from_kgid(&init_user_ns, CFS_GROUP_AT(group_info, mid));
92 void lustre_groups_from_list(struct group_info *ginfo, gid_t *glist)
95 int count = ginfo->ngroups;
97 /* fill group_info from gid array */
98 for (i = 0; i < ginfo->nblocks && count > 0; i++) {
99 int cp_count = min(CFS_NGROUPS_PER_BLOCK, count);
100 int off = i * CFS_NGROUPS_PER_BLOCK;
101 int len = cp_count * sizeof(*glist);
103 memcpy(ginfo->blocks[i], glist + off, len);
107 EXPORT_SYMBOL(lustre_groups_from_list);
109 /* groups_sort() is copied from linux kernel! */
110 /* a simple shell-metzner sort */
111 void lustre_groups_sort(struct group_info *group_info)
113 int base, max, stride;
114 int gidsetsize = group_info->ngroups;
116 for (stride = 1; stride < gidsetsize; stride = 3 * stride + 1)
121 max = gidsetsize - stride;
122 for (base = 0; base < max; base++) {
124 int right = left + stride;
125 gid_t tmp = from_kgid(&init_user_ns,
126 CFS_GROUP_AT(group_info, right));
129 tmp < from_kgid(&init_user_ns,
130 CFS_GROUP_AT(group_info, left))) {
131 CFS_GROUP_AT(group_info, right) =
132 CFS_GROUP_AT(group_info, left);
136 CFS_GROUP_AT(group_info, right) =
137 make_kgid(&init_user_ns, tmp);
142 EXPORT_SYMBOL(lustre_groups_sort);
144 int lustre_in_group_p(struct lu_ucred *mu, gid_t grp)
148 if (grp != mu->uc_fsgid) {
149 struct group_info *group_info = NULL;
151 if (mu->uc_ginfo || !mu->uc_identity ||
152 mu->uc_valid == UCRED_OLD)
153 if (grp == mu->uc_suppgids[0] ||
154 grp == mu->uc_suppgids[1])
158 group_info = mu->uc_ginfo;
159 else if (mu->uc_identity)
160 group_info = mu->uc_identity->mi_ginfo;
165 lustre_get_group_info(group_info);
166 rc = lustre_groups_search(group_info, grp);
167 lustre_put_group_info(group_info);
171 EXPORT_SYMBOL(lustre_in_group_p);
173 struct lustre_idmap_entry {
174 struct list_head lie_rmt_uid_hash; /* hashed as lie_rmt_uid; */
175 struct list_head lie_lcl_uid_hash; /* hashed as lie_lcl_uid; */
176 struct list_head lie_rmt_gid_hash; /* hashed as lie_rmt_gid; */
177 struct list_head lie_lcl_gid_hash; /* hashed as lie_lcl_gid; */
178 uid_t lie_rmt_uid; /* remote uid */
179 uid_t lie_lcl_uid; /* local uid */
180 gid_t lie_rmt_gid; /* remote gid */
181 gid_t lie_lcl_gid; /* local gid */
184 static inline __u32 lustre_idmap_hashfunc(__u32 id)
186 return id & (CFS_IDMAP_HASHSIZE - 1);
190 struct lustre_idmap_entry *idmap_entry_alloc(uid_t rmt_uid, uid_t lcl_uid,
191 gid_t rmt_gid, gid_t lcl_gid)
193 struct lustre_idmap_entry *e;
199 INIT_LIST_HEAD(&e->lie_rmt_uid_hash);
200 INIT_LIST_HEAD(&e->lie_lcl_uid_hash);
201 INIT_LIST_HEAD(&e->lie_rmt_gid_hash);
202 INIT_LIST_HEAD(&e->lie_lcl_gid_hash);
203 e->lie_rmt_uid = rmt_uid;
204 e->lie_lcl_uid = lcl_uid;
205 e->lie_rmt_gid = rmt_gid;
206 e->lie_lcl_gid = lcl_gid;
211 static void idmap_entry_free(struct lustre_idmap_entry *e)
213 list_del(&e->lie_rmt_uid_hash);
214 list_del(&e->lie_lcl_uid_hash);
215 list_del(&e->lie_rmt_gid_hash);
216 list_del(&e->lie_lcl_gid_hash);
222 * NULL: not found entry
223 * ERR_PTR(-EACCES): found 1(remote):N(local) mapped entry
224 * others: found normal entry
227 struct lustre_idmap_entry *idmap_search_entry(struct lustre_idmap_table *t,
228 uid_t rmt_uid, uid_t lcl_uid,
229 gid_t rmt_gid, gid_t lcl_gid)
231 struct list_head *head;
232 struct lustre_idmap_entry *e;
234 head = &t->lit_idmaps[RMT_UIDMAP_IDX][lustre_idmap_hashfunc(rmt_uid)];
235 list_for_each_entry(e, head, lie_rmt_uid_hash)
236 if (e->lie_rmt_uid == rmt_uid) {
237 if (e->lie_lcl_uid == lcl_uid) {
238 if (e->lie_rmt_gid == rmt_gid &&
239 e->lie_lcl_gid == lcl_gid)
240 /* must be quaternion match */
243 /* 1:N uid mapping */
244 CERROR("rmt uid %u already be mapped to %u"
245 " (new %u)\n", e->lie_rmt_uid,
246 e->lie_lcl_uid, lcl_uid);
247 return ERR_PTR(-EACCES);
251 head = &t->lit_idmaps[RMT_GIDMAP_IDX][lustre_idmap_hashfunc(rmt_gid)];
252 list_for_each_entry(e, head, lie_rmt_gid_hash)
253 if (e->lie_rmt_gid == rmt_gid) {
254 if (e->lie_lcl_gid == lcl_gid) {
255 if (unlikely(e->lie_rmt_uid == rmt_uid &&
256 e->lie_lcl_uid == lcl_uid))
257 /* after uid mapping search above,
258 * we should never come here */
261 /* 1:N gid mapping */
262 CERROR("rmt gid %u already be mapped to %u"
263 " (new %u)\n", e->lie_rmt_gid,
264 e->lie_lcl_gid, lcl_gid);
265 return ERR_PTR(-EACCES);
272 static __u32 idmap_lookup_uid(struct list_head *hash, int reverse,
275 struct list_head *head = &hash[lustre_idmap_hashfunc(uid)];
276 struct lustre_idmap_entry *e;
279 list_for_each_entry(e, head, lie_rmt_uid_hash)
280 if (e->lie_rmt_uid == uid)
281 return e->lie_lcl_uid;
283 list_for_each_entry(e, head, lie_lcl_uid_hash)
284 if (e->lie_lcl_uid == uid)
285 return e->lie_rmt_uid;
288 return CFS_IDMAP_NOTFOUND;
291 static __u32 idmap_lookup_gid(struct list_head *hash, int reverse, __u32 gid)
293 struct list_head *head = &hash[lustre_idmap_hashfunc(gid)];
294 struct lustre_idmap_entry *e;
297 list_for_each_entry(e, head, lie_rmt_gid_hash)
298 if (e->lie_rmt_gid == gid)
299 return e->lie_lcl_gid;
301 list_for_each_entry(e, head, lie_lcl_gid_hash)
302 if (e->lie_lcl_gid == gid)
303 return e->lie_rmt_gid;
306 return CFS_IDMAP_NOTFOUND;
309 int lustre_idmap_add(struct lustre_idmap_table *t,
310 uid_t ruid, uid_t luid,
311 gid_t rgid, gid_t lgid)
313 struct lustre_idmap_entry *e0, *e1;
317 spin_lock(&t->lit_lock);
318 e0 = idmap_search_entry(t, ruid, luid, rgid, lgid);
319 spin_unlock(&t->lit_lock);
321 e0 = idmap_entry_alloc(ruid, luid, rgid, lgid);
325 spin_lock(&t->lit_lock);
326 e1 = idmap_search_entry(t, ruid, luid, rgid, lgid);
328 list_add_tail(&e0->lie_rmt_uid_hash,
329 &t->lit_idmaps[RMT_UIDMAP_IDX]
330 [lustre_idmap_hashfunc(ruid)]);
331 list_add_tail(&e0->lie_lcl_uid_hash,
332 &t->lit_idmaps[LCL_UIDMAP_IDX]
333 [lustre_idmap_hashfunc(luid)]);
334 list_add_tail(&e0->lie_rmt_gid_hash,
335 &t->lit_idmaps[RMT_GIDMAP_IDX]
336 [lustre_idmap_hashfunc(rgid)]);
337 list_add_tail(&e0->lie_lcl_gid_hash,
338 &t->lit_idmaps[LCL_GIDMAP_IDX]
339 [lustre_idmap_hashfunc(lgid)]);
341 spin_unlock(&t->lit_lock);
343 idmap_entry_free(e0);
347 } else if (IS_ERR(e0)) {
353 EXPORT_SYMBOL(lustre_idmap_add);
355 int lustre_idmap_del(struct lustre_idmap_table *t,
356 uid_t ruid, uid_t luid,
357 gid_t rgid, gid_t lgid)
359 struct lustre_idmap_entry *e;
364 spin_lock(&t->lit_lock);
365 e = idmap_search_entry(t, ruid, luid, rgid, lgid);
370 spin_unlock(&t->lit_lock);
374 EXPORT_SYMBOL(lustre_idmap_del);
376 int lustre_idmap_lookup_uid(struct lu_ucred *mu,
377 struct lustre_idmap_table *t,
378 int reverse, uid_t uid)
380 struct list_head *hash;
382 if (mu && (mu->uc_valid == UCRED_OLD || mu->uc_valid == UCRED_NEW)) {
384 if (uid == mu->uc_o_uid)
386 else if (uid == mu->uc_o_fsuid)
389 if (uid == mu->uc_uid)
391 else if (uid == mu->uc_fsuid)
392 return mu->uc_o_fsuid;
397 return CFS_IDMAP_NOTFOUND;
399 hash = t->lit_idmaps[reverse ? LCL_UIDMAP_IDX : RMT_UIDMAP_IDX];
401 spin_lock(&t->lit_lock);
402 uid = idmap_lookup_uid(hash, reverse, uid);
403 spin_unlock(&t->lit_lock);
407 EXPORT_SYMBOL(lustre_idmap_lookup_uid);
409 int lustre_idmap_lookup_gid(struct lu_ucred *mu, struct lustre_idmap_table *t,
410 int reverse, gid_t gid)
412 struct list_head *hash;
414 if (mu && (mu->uc_valid == UCRED_OLD || mu->uc_valid == UCRED_NEW)) {
416 if (gid == mu->uc_o_gid)
418 else if (gid == mu->uc_o_fsgid)
421 if (gid == mu->uc_gid)
423 else if (gid == mu->uc_fsgid)
424 return mu->uc_o_fsgid;
429 return CFS_IDMAP_NOTFOUND;
431 hash = t->lit_idmaps[reverse ? LCL_GIDMAP_IDX : RMT_GIDMAP_IDX];
433 spin_lock(&t->lit_lock);
434 gid = idmap_lookup_gid(hash, reverse, gid);
435 spin_unlock(&t->lit_lock);
439 EXPORT_SYMBOL(lustre_idmap_lookup_gid);
441 struct lustre_idmap_table *lustre_idmap_init(void)
443 struct lustre_idmap_table *t;
447 if(unlikely(t == NULL))
448 return (ERR_PTR(-ENOMEM));
450 spin_lock_init(&t->lit_lock);
451 for (i = 0; i < ARRAY_SIZE(t->lit_idmaps); i++)
452 for (j = 0; j < ARRAY_SIZE(t->lit_idmaps[i]); j++)
453 INIT_LIST_HEAD(&t->lit_idmaps[i][j]);
457 EXPORT_SYMBOL(lustre_idmap_init);
459 void lustre_idmap_fini(struct lustre_idmap_table *t)
461 struct list_head *list;
462 struct lustre_idmap_entry *e;
466 list = t->lit_idmaps[RMT_UIDMAP_IDX];
467 spin_lock(&t->lit_lock);
468 for (i = 0; i < CFS_IDMAP_HASHSIZE; i++)
469 while (!list_empty(&list[i])) {
470 e = list_entry(list[i].next,
471 struct lustre_idmap_entry,
475 spin_unlock(&t->lit_lock);
479 EXPORT_SYMBOL(lustre_idmap_fini);