/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc. All rights reserved
 * Use is subject to license terms.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lustre/obdclass/idmap.c
 *
 * Lustre user identity mapping.
 *
 * Author: Fan Yong <fanyong@clusterfs.com>
 */

#ifndef EXPORT_SYMTAB
# define EXPORT_SYMTAB
#endif

#define DEBUG_SUBSYSTEM S_SEC

#include <lustre_idmap.h>
#include <obd_support.h>

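/*
 * Helpers to take and drop a reference on a kernel struct group_info,
 * mirroring get_group_info()/put_group_info(): the final put releases the
 * structure through groups_free().
 */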
#define lustre_get_group_info(group_info) do {          \
        atomic_inc(&(group_info)->usage);               \
} while (0)

#define lustre_put_group_info(group_info) do {          \
        if (atomic_dec_and_test(&(group_info)->usage))  \
                groups_free(group_info);                \
} while (0)

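/*
 * Note that lustre_groups_search() below is a binary search, so it relies
 * on the gid array inside the group_info being sorted in ascending order
 * (see lustre_groups_sort() further down).
 */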
/*
 * groups_search() is copied from linux kernel!
 * A simple bsearch.
 */
static int lustre_groups_search(struct group_info *group_info, gid_t grp)
{
        int left, right;

        if (!group_info)
                return 0;

        left = 0;
        right = group_info->ngroups;
        while (left < right) {
                int mid = (left + right) / 2;
                int cmp = grp - CFS_GROUP_AT(group_info, mid);

                if (cmp > 0)
                        left = mid + 1;
                else if (cmp < 0)
                        right = mid;
                else
                        return 1;
        }
        return 0;
}

void lustre_groups_from_list(struct group_info *ginfo, gid_t *glist)
{
        int i;
        int count = ginfo->ngroups;

        /* fill group_info from gid array */
        for (i = 0; i < ginfo->nblocks && count > 0; i++) {
                int cp_count = min(CFS_NGROUPS_PER_BLOCK, count);
                int off = i * CFS_NGROUPS_PER_BLOCK;
                int len = cp_count * sizeof(*glist);

                memcpy(ginfo->blocks[i], glist + off, len);
                count -= cp_count;
        }
}
EXPORT_SYMBOL(lustre_groups_from_list);

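/*
 * Note: lustre_groups_from_list() copies the gids as-is and does not sort
 * them; if the group_info is going to be searched with
 * lustre_groups_search(), run lustre_groups_sort() on it afterwards.
 */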
/* groups_sort() is copied from linux kernel! */
/* a simple shell-metzner sort */
void lustre_groups_sort(struct group_info *group_info)
{
        int base, max, stride;
        int gidsetsize = group_info->ngroups;

        for (stride = 1; stride < gidsetsize; stride = 3 * stride + 1)
                ; /* nothing */
        stride /= 3;

        while (stride) {
                max = gidsetsize - stride;
                for (base = 0; base < max; base++) {
                        int left = base;
                        int right = left + stride;
                        gid_t tmp = CFS_GROUP_AT(group_info, right);

                        while (left >= 0 &&
                               CFS_GROUP_AT(group_info, left) > tmp) {
                                CFS_GROUP_AT(group_info, right) =
                                        CFS_GROUP_AT(group_info, left);
                                right = left;
                                left -= stride;
                        }
                        CFS_GROUP_AT(group_info, right) = tmp;
                }
                stride /= 3;
        }
}
EXPORT_SYMBOL(lustre_groups_sort);

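/*
 * Check whether 'grp' is one of the groups of the credential: the primary
 * fsgid first, then the supplementary gids carried directly in the ucred
 * (mu_suppgids), and finally the full group_info array via the binary
 * search above.
 */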
int lustre_in_group_p(struct md_ucred *mu, gid_t grp)
{
        int rc = 1;

        if (grp != mu->mu_fsgid) {
                struct group_info *group_info = NULL;

                if (mu->mu_ginfo || !mu->mu_identity ||
                    mu->mu_valid == UCRED_OLD)
                        if (grp == mu->mu_suppgids[0] ||
                            grp == mu->mu_suppgids[1])
                                return 1;

                if (mu->mu_ginfo)
                        group_info = mu->mu_ginfo;
                else if (mu->mu_identity)
                        group_info = mu->mu_identity->mi_ginfo;

                if (!group_info)
                        return 0;

                lustre_get_group_info(group_info);
                rc = lustre_groups_search(group_info, grp);
                lustre_put_group_info(group_info);
        }
        return rc;
}
EXPORT_SYMBOL(lustre_in_group_p);

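/*
 * Remote <-> local identity mapping. Each entry records one
 * (remote uid, local uid, remote gid, local gid) quadruple and is linked
 * into four hash chains at once, so it can be found from either the remote
 * or the local side of both the uid and the gid mapping.
 */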
struct lustre_idmap_entry {
        struct list_head lie_rmt_uid_hash; /* hashed as lie_rmt_uid; */
        struct list_head lie_lcl_uid_hash; /* hashed as lie_lcl_uid; */
        struct list_head lie_rmt_gid_hash; /* hashed as lie_rmt_gid; */
        struct list_head lie_lcl_gid_hash; /* hashed as lie_lcl_gid; */
        uid_t            lie_rmt_uid;      /* remote uid */
        uid_t            lie_lcl_uid;      /* local uid */
        gid_t            lie_rmt_gid;      /* remote gid */
        gid_t            lie_lcl_gid;      /* local gid */
};

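/*
 * Hash an id into a bucket index; this relies on CFS_IDMAP_HASHSIZE being
 * a power of two, otherwise the mask below is not a valid modulo.
 */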
static inline __u32 lustre_idmap_hashfunc(__u32 id)
{
        return id & (CFS_IDMAP_HASHSIZE - 1);
}

static
struct lustre_idmap_entry *idmap_entry_alloc(uid_t rmt_uid, uid_t lcl_uid,
                                             gid_t rmt_gid, gid_t lcl_gid)
{
        struct lustre_idmap_entry *e;

        OBD_ALLOC_PTR(e);
        if (e == NULL)
                return NULL;

        CFS_INIT_LIST_HEAD(&e->lie_rmt_uid_hash);
        CFS_INIT_LIST_HEAD(&e->lie_lcl_uid_hash);
        CFS_INIT_LIST_HEAD(&e->lie_rmt_gid_hash);
        CFS_INIT_LIST_HEAD(&e->lie_lcl_gid_hash);
        e->lie_rmt_uid = rmt_uid;
        e->lie_lcl_uid = lcl_uid;
        e->lie_rmt_gid = rmt_gid;
        e->lie_lcl_gid = lcl_gid;

        return e;
}

static void idmap_entry_free(struct lustre_idmap_entry *e)
{
        if (!list_empty(&e->lie_rmt_uid_hash))
                list_del(&e->lie_rmt_uid_hash);
        if (!list_empty(&e->lie_lcl_uid_hash))
                list_del(&e->lie_lcl_uid_hash);
        if (!list_empty(&e->lie_rmt_gid_hash))
                list_del(&e->lie_rmt_gid_hash);
        if (!list_empty(&e->lie_lcl_gid_hash))
                list_del(&e->lie_lcl_gid_hash);
        OBD_FREE_PTR(e);
}

/*
 * return value
 * NULL: entry not found
 * ERR_PTR(-EACCES): found 1(remote):N(local) mapped entry
 * others: found normal entry
 */
static
struct lustre_idmap_entry *idmap_search_entry(struct lustre_idmap_table *t,
                                              uid_t rmt_uid, uid_t lcl_uid,
                                              gid_t rmt_gid, gid_t lcl_gid)
{
        struct list_head *head;
        struct lustre_idmap_entry *e;

        head = &t->lit_idmaps[RMT_UIDMAP_IDX][lustre_idmap_hashfunc(rmt_uid)];
        list_for_each_entry(e, head, lie_rmt_uid_hash)
                if (e->lie_rmt_uid == rmt_uid) {
                        if (e->lie_lcl_uid == lcl_uid) {
                                if (e->lie_rmt_gid == rmt_gid &&
                                    e->lie_lcl_gid == lcl_gid)
                                        /* exact quadruple match */
                                        return e;
                        } else {
                                /* 1:N uid mapping */
                                CERROR("rmt uid %u has already been mapped "
                                       "to %u (new %u)\n", e->lie_rmt_uid,
                                       e->lie_lcl_uid, lcl_uid);
                                return ERR_PTR(-EACCES);
                        }
                }

        head = &t->lit_idmaps[RMT_GIDMAP_IDX][lustre_idmap_hashfunc(rmt_gid)];
        list_for_each_entry(e, head, lie_rmt_gid_hash)
                if (e->lie_rmt_gid == rmt_gid) {
                        if (e->lie_lcl_gid == lcl_gid) {
                                if (unlikely(e->lie_rmt_uid == rmt_uid &&
                                    e->lie_lcl_uid == lcl_uid))
                                        /* after the uid mapping search
                                         * above, we should never get here */
                                        LBUG();
                        } else {
                                /* 1:N gid mapping */
                                CERROR("rmt gid %u has already been mapped "
                                       "to %u (new %u)\n", e->lie_rmt_gid,
                                       e->lie_lcl_gid, lcl_gid);
                                return ERR_PTR(-EACCES);
                        }
                }

        return NULL;
}

static __u32 idmap_lookup_uid(struct list_head *hash, int reverse, __u32 uid)
{
        struct list_head *head = &hash[lustre_idmap_hashfunc(uid)];
        struct lustre_idmap_entry *e;

        if (!reverse) {
                list_for_each_entry(e, head, lie_rmt_uid_hash)
                        if (e->lie_rmt_uid == uid)
                                return e->lie_lcl_uid;
        } else {
                list_for_each_entry(e, head, lie_lcl_uid_hash)
                        if (e->lie_lcl_uid == uid)
                                return e->lie_rmt_uid;
        }

        return CFS_IDMAP_NOTFOUND;
}

static __u32 idmap_lookup_gid(struct list_head *hash, int reverse, __u32 gid)
{
        struct list_head *head = &hash[lustre_idmap_hashfunc(gid)];
        struct lustre_idmap_entry *e;

        if (!reverse) {
                list_for_each_entry(e, head, lie_rmt_gid_hash)
                        if (e->lie_rmt_gid == gid)
                                return e->lie_lcl_gid;
        } else {
                list_for_each_entry(e, head, lie_lcl_gid_hash)
                        if (e->lie_lcl_gid == gid)
                                return e->lie_rmt_gid;
        }

        return CFS_IDMAP_NOTFOUND;
}

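/*
 * Add a mapping: search under lit_lock first; if nothing was found,
 * allocate a new entry outside the lock, then re-take the lock and search
 * again before inserting, so that a racing add cannot create duplicates.
 * The loser of such a race frees its redundant entry.
 */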
int lustre_idmap_add(struct lustre_idmap_table *t,
                     uid_t ruid, uid_t luid,
                     gid_t rgid, gid_t lgid)
{
        struct lustre_idmap_entry *e0, *e1;

        LASSERT(t);

        spin_lock(&t->lit_lock);
        e0 = idmap_search_entry(t, ruid, luid, rgid, lgid);
        spin_unlock(&t->lit_lock);
        if (!e0) {
                e0 = idmap_entry_alloc(ruid, luid, rgid, lgid);
                if (!e0)
                        return -ENOMEM;

                spin_lock(&t->lit_lock);
                e1 = idmap_search_entry(t, ruid, luid, rgid, lgid);
                if (e1 == NULL) {
                        list_add_tail(&e0->lie_rmt_uid_hash,
                                      &t->lit_idmaps[RMT_UIDMAP_IDX]
                                      [lustre_idmap_hashfunc(ruid)]);
                        list_add_tail(&e0->lie_lcl_uid_hash,
                                      &t->lit_idmaps[LCL_UIDMAP_IDX]
                                      [lustre_idmap_hashfunc(luid)]);
                        list_add_tail(&e0->lie_rmt_gid_hash,
                                      &t->lit_idmaps[RMT_GIDMAP_IDX]
                                      [lustre_idmap_hashfunc(rgid)]);
                        list_add_tail(&e0->lie_lcl_gid_hash,
                                      &t->lit_idmaps[LCL_GIDMAP_IDX]
                                      [lustre_idmap_hashfunc(lgid)]);
                }
                spin_unlock(&t->lit_lock);
                if (e1 != NULL) {
                        idmap_entry_free(e0);
                        if (IS_ERR(e1))
                                return PTR_ERR(e1);
                }
        } else if (IS_ERR(e0)) {
                return PTR_ERR(e0);
        }

        return 0;
}
EXPORT_SYMBOL(lustre_idmap_add);

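/*
 * Delete the entry matching the exact quadruple, if any; a conflicting
 * 1:N mapping makes idmap_search_entry() return ERR_PTR(-EACCES), which is
 * passed back to the caller.
 */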
int lustre_idmap_del(struct lustre_idmap_table *t,
                     uid_t ruid, uid_t luid,
                     gid_t rgid, gid_t lgid)
{
        struct lustre_idmap_entry *e;
        int rc = 0;

        LASSERT(t);

        spin_lock(&t->lit_lock);
        e = idmap_search_entry(t, ruid, luid, rgid, lgid);
        if (IS_ERR(e))
                rc = PTR_ERR(e);
        else if (e)
                idmap_entry_free(e);
        spin_unlock(&t->lit_lock);

        return rc;
}
EXPORT_SYMBOL(lustre_idmap_del);

int lustre_idmap_lookup_uid(struct md_ucred *mu,
                            struct lustre_idmap_table *t,
                            int reverse, uid_t uid)
{
        struct list_head *hash;

        if (mu && (mu->mu_valid == UCRED_OLD || mu->mu_valid == UCRED_NEW)) {
                if (!reverse) {
                        if (uid == mu->mu_o_uid)
                                return mu->mu_uid;
                        else if (uid == mu->mu_o_fsuid)
                                return mu->mu_fsuid;
                } else {
                        if (uid == mu->mu_uid)
                                return mu->mu_o_uid;
                        else if (uid == mu->mu_fsuid)
                                return mu->mu_o_fsuid;
                }
        }

        if (t == NULL)
                return CFS_IDMAP_NOTFOUND;

        hash = t->lit_idmaps[reverse ? LCL_UIDMAP_IDX : RMT_UIDMAP_IDX];

        spin_lock(&t->lit_lock);
        uid = idmap_lookup_uid(hash, reverse, uid);
        spin_unlock(&t->lit_lock);

        return uid;
}
EXPORT_SYMBOL(lustre_idmap_lookup_uid);

int lustre_idmap_lookup_gid(struct md_ucred *mu, struct lustre_idmap_table *t,
                            int reverse, gid_t gid)
{
        struct list_head *hash;

        if (mu && (mu->mu_valid == UCRED_OLD || mu->mu_valid == UCRED_NEW)) {
                if (!reverse) {
                        if (gid == mu->mu_o_gid)
                                return mu->mu_gid;
                        else if (gid == mu->mu_o_fsgid)
                                return mu->mu_fsgid;
                } else {
                        if (gid == mu->mu_gid)
                                return mu->mu_o_gid;
                        else if (gid == mu->mu_fsgid)
                                return mu->mu_o_fsgid;
                }
        }

        if (t == NULL)
                return CFS_IDMAP_NOTFOUND;

        hash = t->lit_idmaps[reverse ? LCL_GIDMAP_IDX : RMT_GIDMAP_IDX];

        spin_lock(&t->lit_lock);
        gid = idmap_lookup_gid(hash, reverse, gid);
        spin_unlock(&t->lit_lock);

        return gid;
}
EXPORT_SYMBOL(lustre_idmap_lookup_gid);

struct lustre_idmap_table *lustre_idmap_init(void)
{
        struct lustre_idmap_table *t;
        int i, j;

        OBD_ALLOC_PTR(t);
        if (unlikely(t == NULL))
                return ERR_PTR(-ENOMEM);

        spin_lock_init(&t->lit_lock);
        for (i = 0; i < ARRAY_SIZE(t->lit_idmaps); i++)
                for (j = 0; j < ARRAY_SIZE(t->lit_idmaps[i]); j++)
                        CFS_INIT_LIST_HEAD(&t->lit_idmaps[i][j]);

        return t;
}
EXPORT_SYMBOL(lustre_idmap_init);

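/*
 * Tear down the whole table. Only the remote-uid chains are walked: every
 * entry is hashed there exactly once, and idmap_entry_free() unlinks it
 * from all four chains.
 */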
void lustre_idmap_fini(struct lustre_idmap_table *t)
{
        struct list_head *list;
        struct lustre_idmap_entry *e;
        int i;

        LASSERT(t);

        list = t->lit_idmaps[RMT_UIDMAP_IDX];
        spin_lock(&t->lit_lock);
        for (i = 0; i < CFS_IDMAP_HASHSIZE; i++)
                while (!list_empty(&list[i])) {
                        e = list_entry(list[i].next, struct lustre_idmap_entry,
                                       lie_rmt_uid_hash);
                        idmap_entry_free(e);
                }
        spin_unlock(&t->lit_lock);

        OBD_FREE_PTR(t);
}
EXPORT_SYMBOL(lustre_idmap_fini);