1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
4 * lustre/obdclass/capa.c
5 * Lustre Capability Cache Management
7 * Copyright (c) 2001-2003 Cluster File Systems, Inc.
8 * Author: Lai Siyao<lsy@clusterfs.com>
10 * This file is part of Lustre, http://www.lustre.org.
12 * Lustre is free software; you can redistribute it and/or
13 * modify it under the terms of version 2 of the GNU General Public
14 * License as published by the Free Software Foundation.
16 * Lustre is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
21 * You should have received a copy of the GNU General Public License
22 * along with Lustre; if not, write to the Free Software
23 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
26 # define EXPORT_SYMTAB
29 #define DEBUG_SUBSYSTEM S_SEC
32 #include <linux/version.h>
34 #include <asm/unistd.h>
35 #include <linux/slab.h>
36 #include <linux/module.h>
37 #include <linux/init.h>
39 #include <linux/obd_class.h>
40 #include <linux/lustre_debug.h>
41 #include <linux/lustre_idl.h>
42 #include <linux/lustre_sec.h>
44 #include <liblustre.h>
47 #include <libcfs/list.h>
48 #include <linux/lustre_sec.h>
50 kmem_cache_t *capa_cachep = NULL;
52 /* capa_lock protect capa hash, list and content. */
53 spinlock_t capa_lock = SPIN_LOCK_UNLOCKED;
54 struct hlist_head *capa_hash;
55 struct list_head capa_list[3];
56 static int capa_count[3] = { 0 };
58 static char *capa_type_name[] = { "client", "mds", "filter" };
60 /* TODO: mdc and llite all need this, so define it here.
61 * in the future it will be moved to ll_sb_info to support multi-
63 struct timer_list ll_capa_timer;
65 EXPORT_SYMBOL(capa_lock);
66 EXPORT_SYMBOL(capa_hash);
67 EXPORT_SYMBOL(capa_list);
68 EXPORT_SYMBOL(ll_capa_timer);
70 static inline int const
71 capa_hashfn(unsigned int uid, __u64 mdsid, unsigned long ino)
73 return (ino ^ uid) * (unsigned long)(mdsid + 1) % NR_CAPAHASH;
76 int capa_op(int flags)
78 if (flags & (FMODE_WRITE|MDS_OPEN_TRUNC))
80 else if (flags & FMODE_READ)
83 LBUG(); /* should be either MAY_READ or MAY_WRITE */
87 static struct obd_capa *
88 find_capa(struct hlist_head *head, uid_t uid, int capa_op, __u64 mdsid,
89 unsigned long ino, __u32 igen, int type)
91 struct hlist_node *pos;
92 struct obd_capa *ocapa;
95 CDEBUG(D_INODE, "find capa for (uid %u, op %d, mdsid "LPU64", ino %lu"
96 " igen %u, type %d\n", (unsigned) uid, capa_op, mdsid, ino, igen, type);
97 hlist_for_each_entry(ocapa, pos, head, c_hash) {
98 if (ocapa->c_capa.lc_ino != ino)
100 if (ocapa->c_capa.lc_igen != igen)
102 if (ocapa->c_capa.lc_mdsid != mdsid)
104 if ((ocapa->c_capa.lc_op & capa_op) != ocapa->c_capa.lc_op)
106 if (ocapa->c_type != type)
109 if (ocapa->c_type == CLIENT_CAPA)
110 ouid = ocapa->c_capa.lc_ruid;
112 ouid = ocapa->c_capa.lc_uid;
117 DEBUG_CAPA(D_INODE, &ocapa->c_capa, "found %s",
118 capa_type_name[ocapa->c_type]);
126 static struct obd_capa *
127 filter_find_capa(struct hlist_head *head, struct lustre_capa *capa)
129 struct hlist_node *pos;
130 struct obd_capa *ocapa;
132 hlist_for_each_entry(ocapa, pos, head, c_hash) {
133 if (ocapa->c_type != FILTER_CAPA)
135 if (!memcmp(&ocapa->c_capa, capa,
136 sizeof(struct lustre_capa_data))) {
138 DEBUG_CAPA(D_INODE, &ocapa->c_capa, "found %s",
139 capa_type_name[ocapa->c_type]);
147 inline void __capa_get(struct obd_capa *ocapa)
149 if (ocapa->c_type != CLIENT_CAPA)
150 atomic_inc(&ocapa->c_refc);
153 static struct obd_capa *
154 find_capa_locked(struct hlist_head *head, uid_t uid, int capa_op, __u64 mdsid,
155 unsigned long ino, __u32 igen, int type)
157 struct obd_capa *ocapa;
159 spin_lock(&capa_lock);
160 ocapa = find_capa(head, uid, capa_op, mdsid, ino, igen, type);
163 spin_unlock(&capa_lock);
168 static struct obd_capa *alloc_capa(void)
170 struct obd_capa *ocapa;
172 OBD_SLAB_ALLOC(ocapa, capa_cachep, SLAB_NOFS, sizeof(*ocapa));
174 INIT_HLIST_NODE(&ocapa->c_hash);
175 INIT_LIST_HEAD(&ocapa->c_list);
181 static void __capa_put(struct obd_capa *ocapa)
183 hlist_del_init(&ocapa->c_hash);
184 list_del_init(&ocapa->c_list);
185 capa_count[ocapa->c_type]--;
188 static void destroy_capa(struct obd_capa *ocapa)
190 OBD_SLAB_FREE(ocapa, capa_cachep, sizeof(*ocapa));
193 int capa_cache_init(void)
197 OBD_ALLOC(capa_hash, PAGE_SIZE);
201 nr_hash = PAGE_SIZE / sizeof(struct hlist_head);
202 LASSERT(nr_hash > NR_CAPAHASH);
204 for (i = 0; i < NR_CAPAHASH; i++)
205 INIT_HLIST_HEAD(capa_hash + i);
207 for (i = 0; i < 3; i++)
208 INIT_LIST_HEAD(&capa_list[i]);
213 void capa_cache_cleanup(void)
215 struct obd_capa *ocapa, *tmp;
218 for (i = MDS_CAPA; i <= FILTER_CAPA; i++) {
219 list_for_each_entry_safe(ocapa, tmp, &capa_list[i], c_list) {
225 OBD_FREE(capa_hash, PAGE_SIZE);
229 static inline void list_add_capa(struct obd_capa *ocapa, struct list_head *head)
231 struct obd_capa *tmp;
233 /* XXX: capa is sorted in client, this could be optimized */
234 if (ocapa->c_type == CLIENT_CAPA) {
235 list_for_each_entry_reverse(tmp, head, c_list) {
236 if (ocapa->c_capa.lc_expiry > tmp->c_capa.lc_expiry) {
237 list_add(&ocapa->c_list, &tmp->c_list);
241 list_add(&ocapa->c_list, head);
245 list_add_tail(&ocapa->c_list, head);
248 static inline void do_update_capa(struct obd_capa *ocapa, struct lustre_capa *capa)
250 memcpy(&ocapa->c_capa, capa, sizeof(*capa));
253 static struct obd_capa *
254 get_new_capa_locked(struct hlist_head *head, int type, struct lustre_capa *capa)
256 uid_t uid = capa->lc_uid;
257 int capa_op = capa->lc_op;
258 __u64 mdsid = capa->lc_mdsid;
259 unsigned long ino = capa->lc_ino;
260 struct obd_capa *ocapa, *old;
262 ocapa = alloc_capa();
266 spin_lock(&capa_lock);
268 if (type == FILTER_CAPA)
269 old = filter_find_capa(head, capa);
271 old = find_capa(head, uid, capa_op, mdsid, ino,
272 capa->lc_igen, type);
275 do_update_capa(ocapa, capa);
276 ocapa->c_type = type;
277 list_add_capa(ocapa, &capa_list[type]);
278 hlist_add_head(&ocapa->c_hash, head);
279 if (type == CLIENT_CAPA)
280 INIT_LIST_HEAD(&ocapa->c_lli_list);
285 DEBUG_CAPA(D_INODE, &ocapa->c_capa, "new %s",
286 capa_type_name[type]);
288 if (type != CLIENT_CAPA && capa_count[type] > CAPA_CACHE_SIZE) {
289 struct list_head *node = capa_list[type].next;
290 struct obd_capa *tcapa;
293 /* free 12 unused capa from head */
294 while (node->next != &capa_list[type] && count < 12) {
295 tcapa = list_entry(node, struct obd_capa,
298 if (atomic_read(&tcapa->c_refc) > 0)
300 DEBUG_CAPA(D_INODE, &tcapa->c_capa,
302 capa_type_name[type]);
309 spin_unlock(&capa_lock);
312 spin_unlock(&capa_lock);
319 capa_get(uid_t uid, int capa_op,__u64 mdsid, unsigned long ino,
320 __u32 igen, int type)
322 struct hlist_head *head = capa_hash + capa_hashfn(uid, mdsid, ino);
323 struct obd_capa *ocapa;
325 ocapa = find_capa_locked(head, uid, capa_op, mdsid, ino, igen, type);
330 struct obd_capa * filter_capa_get(struct lustre_capa *capa)
332 struct hlist_head *head = capa_hash +
333 capa_hashfn(capa->lc_uid, capa->lc_mdsid, capa->lc_ino);
334 struct obd_capa *ocapa;
336 spin_lock(&capa_lock);
337 ocapa = filter_find_capa(head, capa);
340 spin_unlock(&capa_lock);
344 void capa_put(struct obd_capa *ocapa)
349 DEBUG_CAPA(D_INODE, &ocapa->c_capa, "put %s",
350 capa_type_name[ocapa->c_type]);
351 spin_lock(&capa_lock);
352 if (ocapa->c_type == CLIENT_CAPA) {
353 list_del_init(&ocapa->c_lli_list);
357 atomic_dec(&ocapa->c_refc);
359 spin_unlock(&capa_lock);
362 struct obd_capa *capa_renew(struct lustre_capa *capa, int type)
364 uid_t uid = capa->lc_uid;
365 int capa_op = capa->lc_op;
366 __u64 mdsid = capa->lc_mdsid;
367 unsigned long ino = capa->lc_ino;
368 struct hlist_head *head = capa_hash +
369 capa_hashfn(uid, mdsid, ino);
370 struct obd_capa *ocapa;
372 spin_lock(&capa_lock);
374 if (type == FILTER_CAPA)
375 ocapa = filter_find_capa(head, capa);
377 ocapa = find_capa(head, uid, capa_op, mdsid, ino,
378 capa->lc_igen, type);
380 DEBUG_CAPA(D_INFO, capa, "renew %s", capa_type_name[type]);
381 do_update_capa(ocapa, capa);
384 spin_unlock(&capa_lock);
387 ocapa = get_new_capa_locked(head, type, capa);
392 void capa_hmac(__u8 *key, struct lustre_capa *capa)
394 struct crypto_tfm *tfm;
395 int keylen = CAPA_KEY_LEN;
396 struct scatterlist sl = {
397 .page = virt_to_page(capa),
398 .offset = (unsigned long)(capa) % PAGE_SIZE,
399 .length = sizeof(struct lustre_capa_data),
402 tfm = crypto_alloc_tfm(CAPA_HMAC_ALG, 0);
404 crypto_hmac(tfm, key, &keylen, &sl, 1, capa->lc_hmac);
405 crypto_free_tfm(tfm);
408 void capa_dup(void *dst, struct obd_capa *ocapa)
410 spin_lock(&capa_lock);
411 memcpy(dst, &ocapa->c_capa, sizeof(ocapa->c_capa));
412 spin_unlock(&capa_lock);
415 void capa_dup2(void *dst, struct lustre_capa *capa)
417 spin_lock(&capa_lock);
418 memcpy(dst, capa, sizeof(*capa));
419 spin_unlock(&capa_lock);
422 int capa_expired(struct lustre_capa *capa)
426 do_gettimeofday(&tv);
427 return ((unsigned long )capa->lc_expiry <= tv.tv_sec) ? 1 : 0;
430 int __capa_is_to_expire(struct obd_capa *ocapa, struct timeval *tv)
432 int pre_expiry = capa_pre_expiry(&ocapa->c_capa);
434 /* XXX: in case the clock is inaccurate, minus one more
435 * pre_expiry to make sure the expiry won't miss */
436 return ((unsigned long)ocapa->c_capa.lc_expiry -
437 2 * pre_expiry <= tv->tv_sec)? 1 : 0;
440 int capa_is_to_expire(struct obd_capa *ocapa)
445 do_gettimeofday(&tv);
446 spin_lock(&capa_lock);
447 rc = __capa_is_to_expire(ocapa, &tv);
448 spin_unlock(&capa_lock);
453 EXPORT_SYMBOL(capa_op);
454 EXPORT_SYMBOL(capa_get);
455 EXPORT_SYMBOL(filter_capa_get);
456 EXPORT_SYMBOL(capa_put);
457 EXPORT_SYMBOL(capa_renew);
458 EXPORT_SYMBOL(__capa_get);
459 EXPORT_SYMBOL(capa_hmac);
460 EXPORT_SYMBOL(capa_dup);
461 EXPORT_SYMBOL(capa_dup2);
462 EXPORT_SYMBOL(capa_expired);
463 EXPORT_SYMBOL(__capa_is_to_expire);
464 EXPORT_SYMBOL(capa_is_to_expire);