/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * lustre/obdclass/capa.c
 * Lustre Capability Cache Management
 *
 * Copyright (c) 2001-2003 Cluster File Systems, Inc.
 * Author: Lai Siyao <lsy@clusterfs.com>
 *
 * This file is part of Lustre, http://www.lustre.org.
 *
 * Lustre is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * Lustre is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with Lustre; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
26 # define EXPORT_SYMTAB
29 #define DEBUG_SUBSYSTEM S_SEC
31 #include <linux/version.h>
33 #include <asm/unistd.h>
34 #include <linux/slab.h>
35 #include <linux/module.h>
36 #include <linux/init.h>
38 #include <linux/obd_class.h>
39 #include <linux/lustre_debug.h>
40 #include <linux/lustre_idl.h>
41 #include <linux/lustre_sec.h>
42 #include <libcfs/list.h>
44 kmem_cache_t *capa_cachep = NULL;
46 /* capa_lock protect capa hash, list and content. */
47 spinlock_t capa_lock = SPIN_LOCK_UNLOCKED;
48 struct hlist_head *capa_hash;
49 struct list_head capa_list[3];
50 static int capa_count[3] = { 0 };
52 /* TODO: mdc and llite all need this, so define it here.
53 * in the future it will be moved to ll_sb_info to support multi-
55 struct timer_list ll_capa_timer;
57 EXPORT_SYMBOL(capa_lock);
58 EXPORT_SYMBOL(capa_hash);
59 EXPORT_SYMBOL(capa_list);
60 EXPORT_SYMBOL(ll_capa_timer);
62 static inline int const
63 capa_hashfn(unsigned int uid, int capa_op, __u64 mdsid, unsigned long ino)
65 return (ino ^ uid) * (unsigned long)capa_op * (unsigned long)mdsid %
69 int capa_op(int flags)
71 if (flags & (FMODE_WRITE|MDS_OPEN_TRUNC))
73 else if (flags & FMODE_READ)
76 LBUG(); /* should be either MAY_READ or MAY_WRITE */
80 static struct obd_capa *
81 find_capa(struct hlist_head *head, uid_t uid, int capa_op, __u64 mdsid,
82 unsigned long ino, int type)
84 struct hlist_node *pos;
85 struct obd_capa *ocapa;
88 CDEBUG(D_CACHE, "find_capa uid %u op %u mdsid "LPU64" ino %lu "
89 "type %d\n", uid, capa_op, mdsid, ino, type);
91 hlist_for_each_entry(ocapa, pos, head, c_hash) {
92 if (ocapa->c_capa.lc_uid != uid)
94 if (ocapa->c_capa.lc_op != capa_op)
96 if (ocapa->c_capa.lc_mdsid != mdsid)
98 if (ocapa->c_capa.lc_ino != ino)
100 if (ocapa->c_type != type)
108 inline void __capa_get(struct obd_capa *ocapa)
110 atomic_inc(&ocapa->c_refc);
113 static struct obd_capa *
114 find_capa_locked(struct hlist_head *head, uid_t uid, int capa_op, __u64 mdsid,
115 unsigned long ino, int type)
117 struct obd_capa *ocapa;
120 spin_lock(&capa_lock);
121 ocapa = find_capa(head, uid, capa_op, mdsid, ino, type);
124 spin_unlock(&capa_lock);
129 static struct obd_capa *alloc_capa(void)
131 struct obd_capa *ocapa;
134 OBD_SLAB_ALLOC(ocapa, capa_cachep, SLAB_NOFS, sizeof(*ocapa));
136 INIT_HLIST_NODE(&ocapa->c_hash);
137 INIT_LIST_HEAD(&ocapa->c_list);
143 static void destroy_capa(struct obd_capa *ocapa)
145 OBD_SLAB_FREE(ocapa, capa_cachep, sizeof(*ocapa));
148 int capa_cache_init(void)
150 int order = 0, nr_hash, i;
152 capa_hash = (struct hlist_head *)
153 __get_free_pages(GFP_ATOMIC, order);
155 panic("Cannot create capa_hash hash table");
157 nr_hash = (1UL << order) * PAGE_SIZE / sizeof(struct hlist_head);
158 LASSERT(nr_hash > NR_CAPAHASH);
160 for (i = 0; i < NR_CAPAHASH; i++)
161 INIT_HLIST_HEAD(capa_hash + i);
163 for (i =0; i < 3; i++)
164 INIT_LIST_HEAD(&capa_list[i]);
169 void capa_cache_cleanup(void)
171 struct obd_capa *ocapa;
172 struct hlist_node *pos, *n;
174 hlist_for_each_entry_safe(ocapa, pos, n, capa_hash, c_hash) {
175 hlist_del(&ocapa->c_hash);
176 list_del(&ocapa->c_list);
177 OBD_FREE(ocapa, sizeof(*ocapa));
182 static inline void list_add_capa(struct obd_capa *ocapa, struct list_head *head)
184 struct obd_capa *tmp;
186 /* XXX: capa is sorted in client, this could be optimized */
187 if (ocapa->c_type == CLIENT_CAPA) {
188 list_for_each_entry_reverse(tmp, head, c_list) {
189 if (ocapa->c_capa.lc_expiry > tmp->c_capa.lc_expiry) {
190 list_add(&ocapa->c_list, &tmp->c_list);
196 list_add_tail(&ocapa->c_list, head);
/*
 * Log one capability entry at the given debug level.
 * NOTE(review): the do { } while (0) wrapper lines were lost in
 * extraction and are reconstructed here (multi-statement macro idiom).
 */
#define DEBUG_CAPA(level, ocapa, fmt, args...)                                \
do {                                                                          \
        CDEBUG(level, fmt " capa@%p uid %u op %u ino "LPU64" mdsid %d "       \
               "keyid %d expiry "LPU64" flags %u type %d\n",                  \
               ##args, ocapa, ocapa->c_capa.lc_uid, ocapa->c_capa.lc_op,      \
               ocapa->c_capa.lc_ino, ocapa->c_capa.lc_mdsid,                  \
               ocapa->c_capa.lc_keyid, ocapa->c_capa.lc_expiry,               \
               ocapa->c_capa.lc_flags, ocapa->c_type);                        \
} while (0)
208 static struct obd_capa *
209 get_new_capa_locked(struct hlist_head *head, uid_t uid, int capa_op,__u64 mdsid,
210 unsigned long ino, int type, struct lustre_capa *capa,
211 struct inode *inode, struct lustre_handle *handle)
213 struct obd_capa *ocapa, *old;
216 ocapa = alloc_capa();
220 spin_lock(&capa_lock);
221 old = find_capa(head, uid, capa_op, mdsid, ino, type);
223 memcpy(&ocapa->c_capa, capa, sizeof(*capa));
224 ocapa->c_type = type;
225 if (type == CLIENT_CAPA) {
228 ocapa->c_inode = inode;
229 memcpy(&ocapa->c_handle, handle, sizeof(*handle));
231 list_add_capa(ocapa, &capa_list[type]);
232 hlist_add_head(&ocapa->c_hash, capa_hash);
234 DEBUG_CAPA(D_CACHE, ocapa, "get_new_capa_locked");
236 if (type != CLIENT_CAPA && capa_count[type] > CAPA_CACHE_SIZE) {
237 struct list_head *node = capa_list[type].next;
238 struct obd_capa *tcapa;
241 /* free 12 unused capa from head */
242 while (node->next != &capa_list[type] && count < 12) {
243 tcapa = list_entry(node, struct obd_capa, c_list);
245 if (atomic_read(&tcapa->c_refc) > 0)
247 list_del(&tcapa->c_list);
253 spin_unlock(&capa_lock);
258 spin_unlock(&capa_lock);
264 static struct obd_capa *
265 capa_get_locked(uid_t uid, int capa_op,__u64 mdsid, unsigned long ino,
266 int type, struct lustre_capa *capa, struct inode *inode,
267 struct lustre_handle *handle)
269 struct hlist_head *head = capa_hash +
270 capa_hashfn(uid, capa_op, mdsid, ino);
271 struct obd_capa *ocapa;
274 ocapa = find_capa_locked(head, uid, capa_op, mdsid, ino, type);
279 ocapa = get_new_capa_locked(head, uid, capa_op, mdsid, ino,
280 type, capa, inode, handle);
285 capa_get(uid_t uid, int capa_op, __u64 mdsid, unsigned long ino, int type,
286 struct lustre_capa *capa, struct inode *inode,
287 struct lustre_handle *handle)
289 return capa_get_locked(uid, capa_op, mdsid, ino, type, capa, inode,
293 static void __capa_put(struct obd_capa *ocapa, int type)
295 hlist_del_init(&ocapa->c_hash);
296 list_del_init(&ocapa->c_list);
300 void capa_put(struct obd_capa *ocapa, int type)
305 if (atomic_dec_and_lock(&ocapa->c_refc, &capa_lock)) {
306 if (type == CLIENT_CAPA) {
307 iput(ocapa->c_inode);
308 __capa_put(ocapa, type);
311 spin_unlock(&capa_lock);
318 static inline void __update_capa(struct obd_capa *ocapa, struct lustre_capa *capa)
320 memcpy(&ocapa->c_capa, capa, sizeof(*capa));
323 static int update_capa_locked(struct lustre_capa *capa, int type)
325 uid_t uid = capa->lc_uid;
326 int capa_op = capa->lc_op;
327 __u64 mdsid = capa->lc_mdsid;
328 unsigned long ino = capa->lc_ino;
329 struct hlist_head *head = capa_hash +
330 capa_hashfn(uid, capa_op, mdsid, ino);
331 struct obd_capa *ocapa;
334 spin_lock(&capa_lock);
335 ocapa = find_capa(head, uid, capa_op, mdsid, ino, type);
337 __update_capa(ocapa, capa);
338 spin_unlock(&capa_lock);
340 if (ocapa == NULL && type == MDS_CAPA)
341 ocapa = get_new_capa_locked(head, uid, capa_op, mdsid, ino, type,
344 RETURN(ocapa ? 0 : -ENOENT);
/* Renew a capability from a fresh wire copy; see update_capa_locked(). */
int capa_renew(struct lustre_capa *capa, int type)
{
        return update_capa_locked(capa, type);
}
352 void capa_hmac(struct crypto_tfm *tfm, u8 *key, struct lustre_capa *capa)
354 int keylen = CAPA_KEY_LEN;
355 struct scatterlist sl = {
356 .page = virt_to_page(capa),
357 .offset = (unsigned long)(capa) % PAGE_SIZE,
358 .length = sizeof(struct lustre_capa_data),
363 crypto_hmac(tfm, key, &keylen, &sl, 1, capa->lc_hmac);
367 void capa_dup(void *dst, struct obd_capa *ocapa)
369 spin_lock(&capa_lock);
370 memcpy(dst, &ocapa->c_capa, sizeof(ocapa->c_capa));
371 spin_unlock(&capa_lock);
374 void capa_dup2(void *dst, struct lustre_capa *capa)
376 spin_lock(&capa_lock);
377 memcpy(dst, capa, sizeof(*capa));
378 spin_unlock(&capa_lock);
381 int capa_expired(struct lustre_capa *capa)
385 do_gettimeofday(&tv);
386 return (capa->lc_expiry < tv.tv_sec) ? 1 : 0;
389 int __capa_is_to_expire(struct obd_capa *ocapa)
392 int pre_expiry = capa_pre_expiry(&ocapa->c_capa);
394 do_gettimeofday(&tv);
395 return (ocapa->c_capa.lc_expiry - pre_expiry < tv.tv_sec)? 1 : 0;
398 int capa_is_to_expire(struct obd_capa *ocapa)
402 spin_lock(&capa_lock);
403 rc = __capa_is_to_expire(ocapa);
404 spin_unlock(&capa_lock);
EXPORT_SYMBOL(capa_op);
EXPORT_SYMBOL(capa_get);
EXPORT_SYMBOL(capa_put);
EXPORT_SYMBOL(capa_renew);
EXPORT_SYMBOL(__capa_get);
EXPORT_SYMBOL(capa_hmac);
EXPORT_SYMBOL(capa_dup);
EXPORT_SYMBOL(capa_dup2);
EXPORT_SYMBOL(capa_expired);
EXPORT_SYMBOL(__capa_is_to_expire);
EXPORT_SYMBOL(capa_is_to_expire);