lustre/sec/upcall_cache.c
/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * Copyright (C) 2004 Cluster File Systems, Inc.
 *
 *   This file is part of Lustre, http://www.lustre.org.
 *
 *   Lustre is free software; you can redistribute it and/or
 *   modify it under the terms of version 2 of the GNU General Public
 *   License as published by the Free Software Foundation.
 *
 *   Lustre is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with Lustre; if not, write to the Free Software
 *   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#ifndef EXPORT_SYMTAB
# define EXPORT_SYMTAB
#endif
#define DEBUG_SUBSYSTEM S_SEC
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/init.h>

#include <linux/obd_support.h>
#include <linux/lustre_lib.h>
#include <linux/lustre_idl.h>
#include <linux/obd_class.h>
#include <linux/lustre_ucache.h>

/* FIXME
 * The current ucache implementation is taken from the group hash code,
 * almost without any change. It is very simple, has very limited
 * functionality, and is probably only suitable for the group hash use case.
 */

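/*
 * Illustrative only: a minimal sketch of how a consumer might wire up an
 * upcall cache. The wrapper struct and the callback names (example_hash,
 * example_alloc, example_free, example_lookup) are hypothetical; the real
 * callback table and struct upcall_cache layout are defined in
 * <linux/lustre_ucache.h>, and each consumer supplies its own versions.
 */
#if 0
struct example_entry {
        struct upcall_cache_entry ee_base;      /* embedded cache entry */
        __u32                     ee_data;      /* consumer-specific payload */
};

static unsigned int example_hash(struct upcall_cache *cache, __u64 key)
{
        /* assumes uc_hashsize is a power of two */
        return (unsigned int)(key & (cache->uc_hashsize - 1));
}

static struct upcall_cache_entry *
example_alloc(struct upcall_cache *cache, __u64 key)
{
        struct example_entry *ee;

        OBD_ALLOC(ee, sizeof(*ee));
        if (!ee)
                return NULL;
        upcall_cache_init_entry(cache, &ee->ee_base, key);
        return &ee->ee_base;
}

static void example_free(struct upcall_cache *cache,
                         struct upcall_cache_entry *entry)
{
        struct example_entry *ee = container_of(entry, struct example_entry,
                                                ee_base);
        OBD_FREE(ee, sizeof(*ee));
}

/* typical lookup: take a reference, read the payload, drop the reference */
static int example_lookup(struct upcall_cache *cache, __u64 key, __u32 *data)
{
        struct upcall_cache_entry *entry;

        entry = upcall_cache_get_entry(cache, key);
        if (!entry)
                return -ENOENT;
        *data = container_of(entry, struct example_entry, ee_base)->ee_data;
        upcall_cache_put_entry(entry);
        return 0;
}
#endif
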
void upcall_cache_init_entry(struct upcall_cache *cache,
                             struct upcall_cache_entry *entry,
                             __u64 key)
{
        UC_CACHE_SET_NEW(entry);
        INIT_LIST_HEAD(&entry->ue_hash);
        atomic_set(&entry->ue_refcount, 0);
        entry->ue_key = key;
        entry->ue_cache = cache;
        init_waitqueue_head(&entry->ue_waitq);
}
EXPORT_SYMBOL(upcall_cache_init_entry);

static inline struct upcall_cache_entry *
alloc_entry(struct upcall_cache *cache, __u64 key)
{
        LASSERT(cache->alloc_entry);
        return cache->alloc_entry(cache, key);
}

static void free_entry(struct upcall_cache_entry *entry)
{
        struct upcall_cache *cache = entry->ue_cache;

        LASSERT(cache);
        LASSERT(cache->free_entry);
        LASSERT(atomic_read(&entry->ue_refcount) == 0);

        CDEBUG(D_SEC, "destroy %s entry %p for key "LPU64"\n",
               cache->uc_name, entry, entry->ue_key);

        list_del(&entry->ue_hash);
        cache->free_entry(cache, entry);
}

static inline void get_entry(struct upcall_cache_entry *entry)
{
        atomic_inc(&entry->ue_refcount);
}

static inline void put_entry(struct upcall_cache_entry *entry)
{
        if (atomic_dec_and_test(&entry->ue_refcount) &&
            !UC_CACHE_IS_VALID(entry)) {
                free_entry(entry);
        }
}

static inline int refresh_entry(struct upcall_cache_entry *entry)
{
        struct upcall_cache *cache = entry->ue_cache;

        LASSERT(cache);
        LASSERT(cache->make_upcall);

        return cache->make_upcall(cache, entry);
}

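/*
 * Entry life cycle (as implemented below): an entry starts NEW from
 * upcall_cache_init_entry(), turns ACQUIRING when __get_entry() issues
 * the upcall, then becomes VALID (successful downcall), INVALID (upcall
 * or downcall failure) or EXPIRED (acquire timeout, entry expiry, or an
 * explicit flush). Expired/invalid entries are unlinked and freed once
 * their last reference is dropped.
 */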
static int check_unlink_entry(struct upcall_cache_entry *entry)
{
        /* an upcall is issued on new entries immediately
         * after they are created
         */
        LASSERT(!UC_CACHE_IS_NEW(entry));

        if (UC_CACHE_IS_VALID(entry) &&
            time_before(get_seconds(), entry->ue_expire))
                return 0;

        if (UC_CACHE_IS_ACQUIRING(entry)) {
                if (time_before(get_seconds(), entry->ue_acquire_expire))
                        return 0;
                else {
                        UC_CACHE_SET_EXPIRED(entry);
                        wake_up_all(&entry->ue_waitq);
                }
        } else if (!UC_CACHE_IS_INVALID(entry)) {
                UC_CACHE_SET_EXPIRED(entry);
        }

        list_del_init(&entry->ue_hash);
        if (!atomic_read(&entry->ue_refcount))
                free_entry(entry);
        return 1;
}

/* XXX
 * currently we always take the write lock
 */
static struct upcall_cache_entry *
__get_entry(struct upcall_cache *cache, unsigned int hash, __u64 key,
            int create, int async)
{
        struct list_head *head;
        struct upcall_cache_entry *entry, *next, *new = NULL;
        int found = 0, rc;
        ENTRY;

        LASSERT(hash < cache->uc_hashsize);

        head = &cache->uc_hashtable[hash];

find_again:
        write_lock(&cache->uc_hashlock);
        list_for_each_entry_safe(entry, next, head, ue_hash) {
                if (check_unlink_entry(entry))
                        continue;
                if (entry->ue_key == key) {
                        found = 1;
                        break;
                }
        }

        if (!found) {
                if (!create) {
                        write_unlock(&cache->uc_hashlock);
                        RETURN(NULL);
                }
                if (!new) {
                        write_unlock(&cache->uc_hashlock);
                        new = alloc_entry(cache, key);
                        if (!new) {
                                CERROR("failed to alloc entry\n");
                                RETURN(NULL);
                        }
                        goto find_again;
                } else {
                        list_add(&new->ue_hash, head);
                        entry = new;
                }
        } else {
                if (new) {
                        free_entry(new);
                        new = NULL;
                }
                list_move(&entry->ue_hash, head);
        }
        get_entry(entry);

        /* at this point we have found a matching entry and hold a
         * reference on it. if it is NEW (we just created it), we must
         * kick off a refresh (see the illustrative make_upcall sketch
         * after this function)
         */
        if (UC_CACHE_IS_NEW(entry)) {
                LASSERT(entry == new);
                UC_CACHE_SET_ACQUIRING(entry);
                UC_CACHE_CLEAR_NEW(entry);
                entry->ue_acquire_expire = get_seconds() +
                                           cache->uc_acquire_expire;

                write_unlock(&cache->uc_hashlock);
                rc = refresh_entry(entry);
                write_lock(&cache->uc_hashlock);
                if (rc) {
                        UC_CACHE_CLEAR_ACQUIRING(entry);
                        UC_CACHE_SET_INVALID(entry);
                }
        }

        /* the caller doesn't want to wait */
        if (async) {
                write_unlock(&cache->uc_hashlock);
                RETURN(entry);
        }

        /* someone (and only one) is doing an upcall on this entry,
         * just wait for it to complete
         */
        if (UC_CACHE_IS_ACQUIRING(entry)) {
                wait_queue_t wait;

                init_waitqueue_entry(&wait, current);
                add_wait_queue(&entry->ue_waitq, &wait);
                set_current_state(TASK_INTERRUPTIBLE);
                write_unlock(&cache->uc_hashlock);

                schedule_timeout(cache->uc_acquire_expire * HZ);

                write_lock(&cache->uc_hashlock);
                remove_wait_queue(&entry->ue_waitq, &wait);
                if (UC_CACHE_IS_ACQUIRING(entry)) {
                        /* we were interrupted, or the upcall failed
                         * midway
                         */
                        CERROR("entry %p not refreshed: cur %lu, key "LPU64", "
                               "ref %d fl %u, ac %ld, ex %ld\n",
                               entry, get_seconds(), entry->ue_key,
                               atomic_read(&entry->ue_refcount),
                               entry->ue_flags, entry->ue_acquire_expire,
                               entry->ue_expire);
                        put_entry(entry);
                        write_unlock(&cache->uc_hashlock);
                        CERROR("Interrupted? Or check whether %s is in place\n",
                               cache->uc_upcall);
                        RETURN(NULL);
                }
                /* fall through */
        }

        /* invalid means error, no need to try again */
        if (UC_CACHE_IS_INVALID(entry)) {
                put_entry(entry);
                write_unlock(&cache->uc_hashlock);
                RETURN(NULL);
        }

        /* check for expiry.
         * we can't refresh the existing entry in place because its
         * memory might be shared by multiple processes.
         */
        if (check_unlink_entry(entry)) {
                /* if expired, try again. but if this entry was created
                 * by us and expired so quickly without any error, give
                 * it at least one chance to be used.
                 */
                if (entry != new) {
                        put_entry(entry);
                        write_unlock(&cache->uc_hashlock);
                        new = NULL;
                        goto find_again;
                }
        }

        /* now we know it's good */
        LASSERT(UC_CACHE_IS_VALID(entry));
        write_unlock(&cache->uc_hashlock);

        RETURN(entry);
}
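
/*
 * Illustrative only: a minimal sketch of a make_upcall callback that execs
 * a userspace helper with the key as an argument, assuming the 2.6-era
 * call_usermodehelper(path, argv, envp, wait) signature from <linux/kmod.h>.
 * The helper path in cache->uc_upcall and the argument layout are whatever
 * the consumer configured; the helper is expected to resolve the key and
 * reply later through upcall_cache_downcall().
 */
#if 0
#include <linux/kmod.h>

static int example_make_upcall(struct upcall_cache *cache,
                               struct upcall_cache_entry *entry)
{
        char keystr[24];
        char *argv[] = { cache->uc_upcall, cache->uc_name, keystr, NULL };
        char *envp[] = { "HOME=/", "PATH=/sbin:/usr/sbin:/bin:/usr/bin",
                         NULL };

        snprintf(keystr, sizeof(keystr), LPU64, entry->ue_key);
        /* fire and forget; the reply arrives via upcall_cache_downcall() */
        return call_usermodehelper(argv[0], argv, envp, 0);
}
#endif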

struct upcall_cache_entry *
upcall_cache_get_entry(struct upcall_cache *cache, __u64 key)
{
        unsigned int hash;

        LASSERT(cache->hash);

        hash = cache->hash(cache, key);

        return __get_entry(cache, hash, key, 1, 0);
}
EXPORT_SYMBOL(upcall_cache_get_entry);

void upcall_cache_put_entry(struct upcall_cache_entry *entry)
{
        struct upcall_cache *cache = entry->ue_cache;

        write_lock(&cache->uc_hashlock);
        LASSERTF(atomic_read(&entry->ue_refcount) > 0,
                 "entry %p: ref %d\n", entry, atomic_read(&entry->ue_refcount));
        put_entry(entry);
        write_unlock(&cache->uc_hashlock);
}
EXPORT_SYMBOL(upcall_cache_put_entry);

int upcall_cache_downcall(struct upcall_cache *cache, __u64 key, void *args)
{
        struct list_head *head;
        struct upcall_cache_entry *entry;
        int found = 0, rc;
        unsigned int hash;
        ENTRY;

        hash = cache->hash(cache, key);
        LASSERT(hash < cache->uc_hashsize);

        head = &cache->uc_hashtable[hash];

        write_lock(&cache->uc_hashlock);
        list_for_each_entry(entry, head, ue_hash) {
                if (entry->ue_key == key) {
                        found = 1;
                        break;
                }
        }
        if (!found) {
                /* not found; this is possible */
                write_unlock(&cache->uc_hashlock);
                CWARN("no entry found for key "LPU64"\n", key);
                RETURN(-EINVAL);
        }

        if (!UC_CACHE_IS_ACQUIRING(entry)) {
                if (UC_CACHE_IS_VALID(entry)) {
                        /* This should not happen, just give a warning
                         * at this moment.
                         */
                        CWARN("entry %p(key "LPU64", ac %ld, ex %ld): "
                              "already valid???\n", entry, entry->ue_key,
                              entry->ue_acquire_expire, entry->ue_expire);
                        GOTO(out, rc = 0);
                }

                CWARN("stale entry %p: cur %lu, key "LPU64", ref %d, "
                      "fl %u, ac %ld, ex %ld\n",
                       entry, get_seconds(), entry->ue_key,
                       atomic_read(&entry->ue_refcount), entry->ue_flags,
                       entry->ue_acquire_expire, entry->ue_expire);
                GOTO(out, rc = -EINVAL);
        }

        if (!UC_CACHE_IS_ACQUIRING(entry) ||
            UC_CACHE_IS_INVALID(entry) ||
            UC_CACHE_IS_EXPIRED(entry)) {
                CWARN("stale entry %p: cur %lu, key "LPU64", ref %d, "
                      "fl %u, ac %ld, ex %ld\n",
                       entry, get_seconds(), entry->ue_key,
                       atomic_read(&entry->ue_refcount), entry->ue_flags,
                       entry->ue_acquire_expire, entry->ue_expire);
                GOTO(out, rc = -EINVAL);
        }

        atomic_inc(&entry->ue_refcount);
        write_unlock(&cache->uc_hashlock);
        rc = cache->parse_downcall(cache, entry, args);
        write_lock(&cache->uc_hashlock);
        atomic_dec(&entry->ue_refcount);

        if (rc < 0) {
                UC_CACHE_SET_INVALID(entry);
                list_del_init(&entry->ue_hash);
                GOTO(out, rc);
        } else if (rc == 0) {
                entry->ue_expire = get_seconds() + cache->uc_entry_expire;
        } else {
                entry->ue_expire = get_seconds() + cache->uc_err_entry_expire;
        }

        UC_CACHE_SET_VALID(entry);
        CDEBUG(D_SEC, "create ucache entry %p(key "LPU64")\n",
               entry, entry->ue_key);
out:
        wake_up_all(&entry->ue_waitq);
        write_unlock(&cache->uc_hashlock);
        RETURN(rc);
}
EXPORT_SYMBOL(upcall_cache_downcall);
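
/*
 * Illustrative only: a minimal sketch of the downcall half, assuming a
 * hypothetical userspace helper that resolves a key and hands the result
 * back to the kernel (for example through a /proc write handler). The
 * struct example_downcall_data layout and example_parse_downcall callback
 * are hypothetical, and struct example_entry refers to the earlier sketch;
 * the real downcall arguments are defined by each consumer of the cache.
 */
#if 0
struct example_downcall_data {
        __u64 edd_key;
        __u32 edd_data;
};

/* parse_downcall callback: copy the resolved data into the entry */
static int example_parse_downcall(struct upcall_cache *cache,
                                  struct upcall_cache_entry *entry,
                                  void *args)
{
        struct example_downcall_data *edd = args;
        struct example_entry *ee = container_of(entry, struct example_entry,
                                                ee_base);

        LASSERT(edd->edd_key == entry->ue_key);
        ee->ee_data = edd->edd_data;
        return 0;
}

/* called from wherever userspace delivers its reply, e.g. a /proc write */
static int example_handle_reply(struct upcall_cache *cache,
                                struct example_downcall_data *edd)
{
        return upcall_cache_downcall(cache, edd->edd_key, edd);
}
#endif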

void upcall_cache_flush_one(struct upcall_cache *cache, __u64 key)
{
        struct list_head *head;
        struct upcall_cache_entry *entry;
        unsigned int hash;
        int found = 0;
        ENTRY;

        hash = cache->hash(cache, key);
        LASSERT(hash < cache->uc_hashsize);

        head = &cache->uc_hashtable[hash];

        write_lock(&cache->uc_hashlock);
        list_for_each_entry(entry, head, ue_hash) {
                if (entry->ue_key == key) {
                        found = 1;
                        break;
                }
        }

        if (found) {
                UC_CACHE_SET_EXPIRED(entry);
                if (!atomic_read(&entry->ue_refcount))
                        free_entry(entry);
        }
        write_unlock(&cache->uc_hashlock);
}
EXPORT_SYMBOL(upcall_cache_flush_one);

static void cache_flush(struct upcall_cache *cache, int force, int sync)
{
        struct upcall_cache_entry *entry, *next;
        int i;
        ENTRY;

        write_lock(&cache->uc_hashlock);
        for (i = 0; i < cache->uc_hashsize; i++) {
                list_for_each_entry_safe(entry, next,
                                         &cache->uc_hashtable[i], ue_hash) {
                        if (!force && atomic_read(&entry->ue_refcount)) {
                                UC_CACHE_SET_EXPIRED(entry);
                                continue;
                        }
                        LASSERT(!atomic_read(&entry->ue_refcount));
                        free_entry(entry);
                }
        }
        write_unlock(&cache->uc_hashlock);
        EXIT;
}

void upcall_cache_flush_idle(struct upcall_cache *cache)
{
        cache_flush(cache, 0, 0);
}

void upcall_cache_flush_all(struct upcall_cache *cache)
{
        cache_flush(cache, 1, 0);
}
EXPORT_SYMBOL(upcall_cache_flush_idle);
EXPORT_SYMBOL(upcall_cache_flush_all);