1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
4 * lustre/obdclass/idmap.c
5 * Lustre user identity mapping.
6 * Author: Fan Yong <fanyong@clusterfs.com>
8 * Copyright (C) 2004-2007 Cluster File Systems, Inc.
11 * This file is part of Lustre, http://www.lustre.org.
13 * Lustre is free software; you can redistribute it and/or
14 * modify it under the terms of version 2 of the GNU General Public
15 * License as published by the Free Software Foundation.
17 * Lustre is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
22 * You should have received a copy of the GNU General Public License
23 * along with Lustre; if not, write to the Free Software
24 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
28 # define EXPORT_SYMTAB
31 #define DEBUG_SUBSYSTEM S_SEC
33 #include <lustre_idmap.h>
34 #include <obd_support.h>
/*
 * Reference counting helpers for struct group_info: _get takes a
 * reference via atomic_inc(), _put drops one and calls groups_free()
 * when the last reference is released.
 */
36 #define lustre_get_group_info(group_info) do { \
37         atomic_inc(&(group_info)->usage); \
40 #define lustre_put_group_info(group_info) do { \
41         if (atomic_dec_and_test(&(group_info)->usage)) \
42                 groups_free(group_info); \
46  * groups_search() is copied from linux kernel!
/*
 * Binary search for gid @grp in the sorted gid set of @group_info.
 * NOTE(review): the return statements fall outside the visible lines;
 * presumably non-zero on hit and 0 on miss, matching the kernel's
 * groups_search() -- confirm against the upstream implementation.
 */
49 static int lustre_groups_search(struct group_info *group_info, gid_t grp)
57         right = group_info->ngroups;
58         while (left < right) {
59                 int mid = (left + right) / 2;
                /* subtraction used as a three-way comparator on gids */
60                 int cmp = grp - CFS_GROUP_AT(group_info, mid);
/*
 * Populate @ginfo's per-block gid storage from the flat array @glist,
 * copying CFS_NGROUPS_PER_BLOCK entries per block until all
 * ginfo->ngroups gids have been transferred.
 */
72 void lustre_groups_from_list(struct group_info *ginfo, gid_t *glist)
75         int count = ginfo->ngroups;
77         /* fill group_info from gid array */
78         for (i = 0; i < ginfo->nblocks && count > 0; i++) {
79                 int cp_count = min(CFS_NGROUPS_PER_BLOCK, count);
80                 int off = i * CFS_NGROUPS_PER_BLOCK;
81                 int len = cp_count * sizeof(*glist);
83                 memcpy(ginfo->blocks[i], glist + off, len);
87 EXPORT_SYMBOL(lustre_groups_from_list);
89 /* groups_sort() is copied from linux kernel! */
90 /* a simple shell-metzner sort */
/*
 * Sort the gid set of @group_info in ascending order so that
 * lustre_groups_search() can binary-search it.  Shell sort with the
 * Knuth gap sequence (stride = 3*stride + 1).
 */
91 void lustre_groups_sort(struct group_info *group_info)
93         int base, max, stride;
94         int gidsetsize = group_info->ngroups;
96         for (stride = 1; stride < gidsetsize; stride = 3 * stride + 1)
101                 max = gidsetsize - stride;
102                 for (base = 0; base < max; base++) {
104                         int right = left + stride;
105                         gid_t tmp = CFS_GROUP_AT(group_info, right);
                        /* sift tmp left by whole strides until in place */
108                             CFS_GROUP_AT(group_info, left) > tmp) {
109                                 CFS_GROUP_AT(group_info, right) =
110                                         CFS_GROUP_AT(group_info, left);
114                         CFS_GROUP_AT(group_info, right) = tmp;
119 EXPORT_SYMBOL(lustre_groups_sort);
/*
 * Test whether @grp is one of the credential @mu's groups.
 * Fast paths: the fsgid itself, then the two cached supplementary gids
 * (mu_suppgids[0/1]).  Otherwise search the full group_info -- taken
 * from mu_ginfo if set, else from the cached identity -- under a
 * reference so it cannot be freed mid-search.
 * NOTE(review): the return statements are outside the visible lines;
 * presumably rc from lustre_groups_search(), i.e. non-zero if member.
 */
121 int lustre_in_group_p(struct md_ucred *mu, gid_t grp)
125         if (grp != mu->mu_fsgid) {
126                 struct group_info *group_info = NULL;
128                 if (mu->mu_ginfo || !mu->mu_identity ||
129                     mu->mu_valid == UCRED_OLD)
130                         if (grp == mu->mu_suppgids[0] ||
131                             grp == mu->mu_suppgids[1])
135                 group_info = mu->mu_ginfo;
136                 else if (mu->mu_identity)
137                         group_info = mu->mu_identity->mi_ginfo;
142                 lustre_get_group_info(group_info);
143                 rc = lustre_groups_search(group_info, grp);
144                 lustre_put_group_info(group_info);
148 EXPORT_SYMBOL(lustre_in_group_p);
/*
 * One remote<->local uid/gid mapping quadruple.  Each entry is linked
 * into four hash chains at once, one per lookup direction, so a single
 * entry can be found by remote uid, local uid, remote gid or local gid.
 */
150 struct lustre_idmap_entry {
151         struct list_head lie_rmt_uid_hash; /* hashed as lie_rmt_uid; */
152         struct list_head lie_lcl_uid_hash; /* hashed as lie_lcl_uid; */
153         struct list_head lie_rmt_gid_hash; /* hashed as lie_rmt_gid; */
154         struct list_head lie_lcl_gid_hash; /* hashed as lie_lcl_gid; */
155         uid_t            lie_rmt_uid;      /* remote uid */
156         uid_t            lie_lcl_uid;      /* local uid */
157         gid_t            lie_rmt_gid;      /* remote gid */
158         gid_t            lie_lcl_gid;      /* local gid */
/*
 * Map an id to a hash bucket index.  The mask form requires
 * CFS_IDMAP_HASHSIZE to be a power of two.
 */
161 static inline __u32 lustre_idmap_hashfunc(__u32 id)
163         return id & (CFS_IDMAP_HASHSIZE - 1);
/*
 * Allocate and initialize a mapping entry for the given
 * (rmt_uid, lcl_uid, rmt_gid, lcl_gid) quadruple.  All four hash-chain
 * links start empty; the caller inserts the entry into the table.
 * NOTE(review): the allocation call and its failure return are outside
 * the visible lines -- presumably returns NULL on allocation failure.
 */
167 struct lustre_idmap_entry *idmap_entry_alloc(uid_t rmt_uid, uid_t lcl_uid,
168                                              gid_t rmt_gid, gid_t lcl_gid)
170         struct lustre_idmap_entry *e;
176         INIT_LIST_HEAD(&e->lie_rmt_uid_hash);
177         INIT_LIST_HEAD(&e->lie_lcl_uid_hash);
178         INIT_LIST_HEAD(&e->lie_rmt_gid_hash);
179         INIT_LIST_HEAD(&e->lie_lcl_gid_hash);
180         e->lie_rmt_uid = rmt_uid;
181         e->lie_lcl_uid = lcl_uid;
182         e->lie_rmt_gid = rmt_gid;
183         e->lie_lcl_gid = lcl_gid;
/*
 * Unlink @e from every hash chain it is on (a just-allocated entry has
 * all links empty, so each list_del is guarded) and release it.
 * NOTE(review): the actual memory free is outside the visible lines.
 */
188 static void idmap_entry_free(struct lustre_idmap_entry *e)
190         if (!list_empty(&e->lie_rmt_uid_hash))
191                 list_del(&e->lie_rmt_uid_hash);
192         if (!list_empty(&e->lie_lcl_uid_hash))
193                 list_del(&e->lie_lcl_uid_hash);
194         if (!list_empty(&e->lie_rmt_gid_hash))
195                 list_del(&e->lie_rmt_gid_hash);
196         if (!list_empty(&e->lie_lcl_gid_hash))
197                 list_del(&e->lie_lcl_gid_hash);
/*
 * Look up an entry matching the full quadruple; called with lit_lock
 * held by the callers below.  Return values:
 */
203  * NULL: not found entry
204  * ERR_PTR(-EACCES): found 1(remote):N(local) mapped entry
205  * others: found normal entry
208 struct lustre_idmap_entry *idmap_search_entry(struct lustre_idmap_table *t,
209                                               uid_t rmt_uid, uid_t lcl_uid,
210                                               gid_t rmt_gid, gid_t lcl_gid)
212         struct list_head *head;
213         struct lustre_idmap_entry *e;
        /* pass 1: scan the remote-uid chain for rmt_uid */
215         head = &t->lit_idmaps[RMT_UIDMAP_IDX][lustre_idmap_hashfunc(rmt_uid)];
216         list_for_each_entry(e, head, lie_rmt_uid_hash)
217                 if (e->lie_rmt_uid == rmt_uid) {
218                         if (e->lie_lcl_uid == lcl_uid) {
219                                 if (e->lie_rmt_gid == rmt_gid &&
220                                     e->lie_lcl_gid == lcl_gid)
221                                         /* must be quaternion match */
224                         /* 1:N uid mapping */
225                         CERROR("rmt uid %u already be mapped to %u"
226                                " (new %u)\n", e->lie_rmt_uid,
227                                e->lie_lcl_uid, lcl_uid);
228                         return ERR_PTR(-EACCES);
        /* pass 2: same check on the remote-gid chain */
232         head = &t->lit_idmaps[RMT_GIDMAP_IDX][lustre_idmap_hashfunc(rmt_gid)];
233         list_for_each_entry(e, head, lie_rmt_gid_hash)
234                 if (e->lie_rmt_gid == rmt_gid) {
235                         if (e->lie_lcl_gid == lcl_gid) {
236                                 if (unlikely(e->lie_rmt_uid == rmt_uid &&
237                                              e->lie_lcl_uid == lcl_uid))
238                                         /* after uid mapping search above,
239                                          * we should never come here */
242                         /* 1:N gid mapping */
243                         CERROR("rmt gid %u already be mapped to %u"
244                                " (new %u)\n", e->lie_rmt_gid,
245                                e->lie_lcl_gid, lcl_gid);
246                         return ERR_PTR(-EACCES);
/*
 * Translate @uid through one uid hash table (caller holds lit_lock and
 * picked the table to match @reverse): remote->local in the forward
 * direction, local->remote in reverse.  Returns CFS_IDMAP_NOTFOUND if
 * no entry maps the id.
 * NOTE(review): the branch selecting forward vs. reverse scan on
 * @reverse is outside the visible lines -- confirm only one of the two
 * loops runs per call.
 */
253 static __u32 idmap_lookup_uid(struct list_head *hash, int reverse, __u32 uid)
255         struct list_head *head = &hash[lustre_idmap_hashfunc(uid)];
256         struct lustre_idmap_entry *e;
259                 list_for_each_entry(e, head, lie_rmt_uid_hash)
260                         if (e->lie_rmt_uid == uid)
261                                 return e->lie_lcl_uid;
263                 list_for_each_entry(e, head, lie_lcl_uid_hash)
264                         if (e->lie_lcl_uid == uid)
265                                 return e->lie_rmt_uid;
268         return CFS_IDMAP_NOTFOUND;
/*
 * gid counterpart of idmap_lookup_uid(): translate @gid through one gid
 * hash table under lit_lock, remote->local forward or local->remote in
 * reverse; CFS_IDMAP_NOTFOUND when unmapped.
 * NOTE(review): the @reverse branch is outside the visible lines, as in
 * idmap_lookup_uid().
 */
271 static __u32 idmap_lookup_gid(struct list_head *hash, int reverse, __u32 gid)
273         struct list_head *head = &hash[lustre_idmap_hashfunc(gid)];
274         struct lustre_idmap_entry *e;
277                 list_for_each_entry(e, head, lie_rmt_gid_hash)
278                         if (e->lie_rmt_gid == gid)
279                                 return e->lie_lcl_gid;
281                 list_for_each_entry(e, head, lie_lcl_gid_hash)
282                         if (e->lie_lcl_gid == gid)
283                                 return e->lie_rmt_gid;
286         return CFS_IDMAP_NOTFOUND;
/*
 * Insert the (ruid<->luid, rgid<->lgid) mapping into table @t.
 * The entry is allocated outside the spinlock, then the table is
 * searched again under lit_lock: if a racing thread inserted the same
 * quadruple first, the duplicate allocation is freed; otherwise the new
 * entry is linked into all four directional hash tables.  A 1:N
 * conflict reported by idmap_search_entry() as ERR_PTR(-EACCES) is
 * propagated to the caller.
 */
289 int lustre_idmap_add(struct lustre_idmap_table *t,
290                      uid_t ruid, uid_t luid,
291                      gid_t rgid, gid_t lgid)
293         struct lustre_idmap_entry *e0, *e1;
        /* first probe: bail out early on an existing or conflicting map */
297         spin_lock(&t->lit_lock);
298         e0 = idmap_search_entry(t, ruid, luid, rgid, lgid);
299         spin_unlock(&t->lit_lock);
301                 e0 = idmap_entry_alloc(ruid, luid, rgid, lgid);
        /* re-check under the lock before inserting (alloc dropped it) */
305                 spin_lock(&t->lit_lock);
306                 e1 = idmap_search_entry(t, ruid, luid, rgid, lgid);
308                         list_add_tail(&e0->lie_rmt_uid_hash,
309                                       &t->lit_idmaps[RMT_UIDMAP_IDX]
310                                       [lustre_idmap_hashfunc(ruid)]);
311                         list_add_tail(&e0->lie_lcl_uid_hash,
312                                       &t->lit_idmaps[LCL_UIDMAP_IDX]
313                                       [lustre_idmap_hashfunc(luid)]);
314                         list_add_tail(&e0->lie_rmt_gid_hash,
315                                       &t->lit_idmaps[RMT_GIDMAP_IDX]
316                                       [lustre_idmap_hashfunc(rgid)]);
317                         list_add_tail(&e0->lie_lcl_gid_hash,
318                                       &t->lit_idmaps[LCL_GIDMAP_IDX]
319                                       [lustre_idmap_hashfunc(lgid)]);
321                 spin_unlock(&t->lit_lock);
                        /* lost the race: drop our unused allocation */
323                         idmap_entry_free(e0);
327         } else if (IS_ERR(e0)) {
/*
 * Remove the mapping matching the full (ruid, luid, rgid, lgid)
 * quadruple from table @t, under lit_lock.
 * NOTE(review): the lines that free the found entry (and the return
 * value handling) are outside the visible span -- presumably
 * idmap_entry_free(e) when the search returns a normal entry.
 */
335 int lustre_idmap_del(struct lustre_idmap_table *t,
336                      uid_t ruid, uid_t luid,
337                      gid_t rgid, gid_t lgid)
339         struct lustre_idmap_entry *e;
344         spin_lock(&t->lit_lock);
345         e = idmap_search_entry(t, ruid, luid, rgid, lgid);
350         spin_unlock(&t->lit_lock);
/*
 * Translate @uid between the remote and local domains (@reverse picks
 * the direction).  When a valid credential @mu is supplied, its cached
 * original/mapped uid pairs (mu_o_uid/mu_uid, mu_o_fsuid/mu_fsuid) are
 * consulted first as a fast path; otherwise fall through to the hash
 * table under lit_lock.  Returns CFS_IDMAP_NOTFOUND when unmapped.
 */
356 int lustre_idmap_lookup_uid(struct md_ucred *mu,
357                             struct lustre_idmap_table *t,
358                             int reverse, uid_t uid)
360         struct list_head *hash;
362         if (mu && (mu->mu_valid == UCRED_OLD || mu->mu_valid == UCRED_NEW)) {
            /* reverse branch: match local ids, return originals */
364                         if (uid == mu->mu_o_uid)
366                         else if (uid == mu->mu_o_fsuid)
369                         if (uid == mu->mu_uid)
371                         else if (uid == mu->mu_fsuid)
372                                 return mu->mu_o_fsuid;
377                 return CFS_IDMAP_NOTFOUND;
379         hash = t->lit_idmaps[reverse ? LCL_UIDMAP_IDX : RMT_UIDMAP_IDX];
381         spin_lock(&t->lit_lock);
382         uid = idmap_lookup_uid(hash, reverse, uid);
383         spin_unlock(&t->lit_lock);
/*
 * gid counterpart of lustre_idmap_lookup_uid(): try the credential's
 * cached gid pairs first when @mu is valid, then fall back to the gid
 * hash table under lit_lock.  Returns CFS_IDMAP_NOTFOUND when unmapped.
 */
389 int lustre_idmap_lookup_gid(struct md_ucred *mu, struct lustre_idmap_table *t,
390                             int reverse, gid_t gid)
392         struct list_head *hash;
394         if (mu && (mu->mu_valid == UCRED_OLD || mu->mu_valid == UCRED_NEW)) {
            /* reverse branch: match local ids, return originals */
396                         if (gid == mu->mu_o_gid)
398                         else if (gid == mu->mu_o_fsgid)
401                         if (gid == mu->mu_gid)
403                         else if (gid == mu->mu_fsgid)
404                                 return mu->mu_o_fsgid;
409                 return CFS_IDMAP_NOTFOUND;
411         hash = t->lit_idmaps[reverse ? LCL_GIDMAP_IDX : RMT_GIDMAP_IDX];
413         spin_lock(&t->lit_lock);
414         gid = idmap_lookup_gid(hash, reverse, gid);
415         spin_unlock(&t->lit_lock);
/*
 * Allocate and initialize an empty idmap table: spinlock plus every
 * hash bucket list head in all four directional tables.  Returns
 * ERR_PTR(-ENOMEM) on allocation failure.
 * NOTE(review): the allocation call itself is outside the visible
 * lines, just above the NULL check.
 */
421 struct lustre_idmap_table *lustre_idmap_init(void)
423         struct lustre_idmap_table *t;
427         if(unlikely(t == NULL))
428                 return (ERR_PTR(-ENOMEM));
430         spin_lock_init(&t->lit_lock);
431         for (i = 0; i < ARRAY_SIZE(t->lit_idmaps); i++)
432                 for (j = 0; j < ARRAY_SIZE(t->lit_idmaps[i]); j++)
433                         INIT_LIST_HEAD(&t->lit_idmaps[i][j]);
/*
 * Tear down table @t: under lit_lock, drain every bucket of the
 * remote-uid table and free each entry.  One pass over this single
 * table suffices because idmap_entry_free() unlinks the entry from all
 * four hash chains at once.
 * NOTE(review): the table's own deallocation is outside the visible
 * lines, after the unlock.
 */
439 void lustre_idmap_fini(struct lustre_idmap_table *t)
441         struct list_head *list;
442         struct lustre_idmap_entry *e;
446         list = t->lit_idmaps[RMT_UIDMAP_IDX];
447         spin_lock(&t->lit_lock);
448         for (i = 0; i < CFS_IDMAP_HASHSIZE; i++)
449                 while (!list_empty(&list[i])) {
450                         e = list_entry(list[i].next, struct lustre_idmap_entry,
454         spin_unlock(&t->lit_lock);