/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 *  Supplementary groups cache.
 *
 *  Copyright (c) 2004 Cluster File Systems, Inc.
 *
 *   This file is part of Lustre, http://www.lustre.org.
 *
 *   Lustre is free software; you can redistribute it and/or
 *   modify it under the terms of version 2 of the GNU General Public
 *   License as published by the Free Software Foundation.
 *
 *   Lustre is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with Lustre; if not, write to the Free Software
 *   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#define DEBUG_SUBSYSTEM S_SEC

#ifndef AUTOCONF_INCLUDED
#include <linux/config.h>
#endif
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/kmod.h>
#include <linux/string.h>
#include <linux/stat.h>
#include <linux/errno.h>
#include <linux/version.h>
#include <linux/unistd.h>

#include <asm/system.h>
#include <asm/uaccess.h>

#include <linux/fs.h>
#include <linux/slab.h>
#include <asm/segment.h>

#include <obd_support.h>
#include <lustre_lib.h>

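/*
 * Overview: a small hash-table cache of entries keyed by a __u64 key.
 * An entry moves through the states NEW -> ACQUIRING -> VALID (after a
 * successful downcall) or INVALID/EXPIRED.  Lookups that miss trigger a
 * userspace upcall via uc_ops->do_upcall(); the matching reply arrives
 * through upcall_cache_downcall(), which fills in the entry and wakes
 * any waiters.  Entry state is protected by uc_lock.
 */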
static struct upcall_cache_entry *alloc_entry(struct upcall_cache *cache,
                                              __u64 key, void *args)
{
        struct upcall_cache_entry *entry;

        OBD_ALLOC_PTR(entry);
        if (!entry)
                return NULL;

        UC_CACHE_SET_NEW(entry);
        INIT_LIST_HEAD(&entry->ue_hash);
        entry->ue_key = key;
        atomic_set(&entry->ue_refcount, 0);
        init_waitqueue_head(&entry->ue_waitq);
        if (cache->uc_ops->init_entry)
                cache->uc_ops->init_entry(entry, args);
        return entry;
}

/* protected by cache lock */
static void free_entry(struct upcall_cache *cache,
                       struct upcall_cache_entry *entry)
{
        if (cache->uc_ops->free_entry)
                cache->uc_ops->free_entry(cache, entry);

        list_del(&entry->ue_hash);
        CDEBUG(D_OTHER, "destroy cache entry %p for key "LPU64"\n",
               entry, entry->ue_key);
        OBD_FREE_PTR(entry);
}

static inline int upcall_compare(struct upcall_cache *cache,
                                 struct upcall_cache_entry *entry,
                                 __u64 key, void *args)
{
        if (entry->ue_key != key)
                return -1;

        if (cache->uc_ops->upcall_compare)
                return cache->uc_ops->upcall_compare(cache, entry, key, args);

        return 0;
}

static inline int downcall_compare(struct upcall_cache *cache,
                                   struct upcall_cache_entry *entry,
                                   __u64 key, void *args)
{
        if (entry->ue_key != key)
                return -1;

        if (cache->uc_ops->downcall_compare)
                return cache->uc_ops->downcall_compare(cache, entry, key, args);

        return 0;
}

static inline void get_entry(struct upcall_cache_entry *entry)
{
        atomic_inc(&entry->ue_refcount);
}

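/* drop a reference; called with uc_lock held.  Frees the entry once the
 * last reference is gone and the entry is no longer usable */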
static inline void put_entry(struct upcall_cache *cache,
                             struct upcall_cache_entry *entry)
{
        if (atomic_dec_and_test(&entry->ue_refcount) &&
            (UC_CACHE_IS_INVALID(entry) || UC_CACHE_IS_EXPIRED(entry))) {
                free_entry(cache, entry);
        }
}

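/* Returns 1 and unhashes the entry if it can no longer be used (expired,
 * invalid, or an acquire that ran past its deadline); the entry is freed
 * at once when nobody holds a reference.  Returns 0 if the entry is still
 * valid or an upcall is still in flight. */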
static int check_unlink_entry(struct upcall_cache *cache,
                              struct upcall_cache_entry *entry)
{
        if (UC_CACHE_IS_VALID(entry) &&
            time_before(jiffies, entry->ue_expire))
                return 0;

        if (UC_CACHE_IS_ACQUIRING(entry)) {
                if (time_before(jiffies, entry->ue_acquire_expire))
                        return 0;

                UC_CACHE_SET_EXPIRED(entry);
                wake_up_all(&entry->ue_waitq);
        } else if (!UC_CACHE_IS_INVALID(entry)) {
                UC_CACHE_SET_EXPIRED(entry);
        }

        list_del_init(&entry->ue_hash);
        if (!atomic_read(&entry->ue_refcount))
                free_entry(cache, entry);
        return 1;
}

static inline int refresh_entry(struct upcall_cache *cache,
                                struct upcall_cache_entry *entry)
{
        LASSERT(cache->uc_ops->do_upcall);
        return cache->uc_ops->do_upcall(cache, entry);
}

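/*
 * Look up (or create) the entry for @key.  On a miss a new entry is
 * inserted and a userspace upcall is fired; callers then sleep on
 * ue_waitq until the downcall completes or uc_acquire_expire elapses.
 * Returns the entry with a reference held, or ERR_PTR(-ENOMEM/-EINTR/
 * -ETIMEDOUT/-EIDRM) on failure; drop the reference with
 * upcall_cache_put_entry().
 */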
struct upcall_cache_entry *upcall_cache_get_entry(struct upcall_cache *cache,
                                                  __u64 key, void *args)
{
        struct upcall_cache_entry *entry = NULL, *new = NULL, *next;
        struct list_head *head;
        wait_queue_t wait;
        int rc, found;
        ENTRY;

        LASSERT(cache);

        head = &cache->uc_hashtable[UC_CACHE_HASH_INDEX(key)];
find_again:
        found = 0;
        spin_lock(&cache->uc_lock);
        list_for_each_entry_safe(entry, next, head, ue_hash) {
                /* check invalid & expired items */
                if (check_unlink_entry(cache, entry))
                        continue;
                if (upcall_compare(cache, entry, key, args) == 0) {
                        found = 1;
                        break;
                }
        }

        if (!found) { /* didn't find it */
                if (!new) {
                        spin_unlock(&cache->uc_lock);
                        new = alloc_entry(cache, key, args);
                        if (!new) {
                                CERROR("failed to allocate entry\n");
                                RETURN(ERR_PTR(-ENOMEM));
                        }
                        goto find_again;
                } else {
                        list_add(&new->ue_hash, head);
                        entry = new;
                }
        } else {
                if (new) {
                        free_entry(cache, new);
                        new = NULL;
                }
                list_move(&entry->ue_hash, head);
        }
        get_entry(entry);

        /* acquire for new one */
        if (UC_CACHE_IS_NEW(entry)) {
                UC_CACHE_SET_ACQUIRING(entry);
                UC_CACHE_CLEAR_NEW(entry);
                entry->ue_acquire_expire = jiffies + cache->uc_acquire_expire;
                spin_unlock(&cache->uc_lock);
                rc = refresh_entry(cache, entry);
                spin_lock(&cache->uc_lock);
                if (rc < 0) {
                        UC_CACHE_CLEAR_ACQUIRING(entry);
                        UC_CACHE_SET_INVALID(entry);
                }
                /* fall through */
        }
        /* someone (and only one) is doing an upcall on this item;
         * just wait for it to complete
         */
        if (UC_CACHE_IS_ACQUIRING(entry)) {
                unsigned long expiry = jiffies + cache->uc_acquire_expire;

                init_waitqueue_entry(&wait, current);
                add_wait_queue(&entry->ue_waitq, &wait);
                set_current_state(TASK_INTERRUPTIBLE);
                spin_unlock(&cache->uc_lock);

                schedule_timeout(cache->uc_acquire_expire);

                spin_lock(&cache->uc_lock);
                remove_wait_queue(&entry->ue_waitq, &wait);
                if (UC_CACHE_IS_ACQUIRING(entry)) {
                        /* we were interrupted or the upcall failed
                         * in the middle */
                        rc = time_before(jiffies, expiry) ? -EINTR : -ETIMEDOUT;
                        put_entry(cache, entry);
                        CERROR("acquire for key "LPU64" failed: %d\n",
                               entry->ue_key, rc);
                        GOTO(out, entry = ERR_PTR(rc));
                }
                /* fall through */
        }

        /* invalid means error, don't need to try again */
        if (UC_CACHE_IS_INVALID(entry)) {
                put_entry(cache, entry);
                GOTO(out, entry = ERR_PTR(-EIDRM));
        }

        /* check expired
         * We can't refresh the existing one because some
         * memory might be shared by multiple processes.
         */
        if (check_unlink_entry(cache, entry)) {
                /* If expired, try again.  But if this entry was created
                 * by us and expired too quickly without any error, give
                 * the caller at least one chance to use it.
                 */
                if (entry != new) {
                        put_entry(cache, entry);
                        spin_unlock(&cache->uc_lock);
                        new = NULL;
                        goto find_again;
                }
        }

        /* Now we know it's good */
out:
        spin_unlock(&cache->uc_lock);
        RETURN(entry);
}
EXPORT_SYMBOL(upcall_cache_get_entry);

void upcall_cache_put_entry(struct upcall_cache *cache,
                            struct upcall_cache_entry *entry)
{
        ENTRY;

        if (!entry) {
                EXIT;
                return;
        }

        LASSERT(atomic_read(&entry->ue_refcount) > 0);
        spin_lock(&cache->uc_lock);
        put_entry(cache, entry);
        spin_unlock(&cache->uc_lock);
        EXIT;
}
EXPORT_SYMBOL(upcall_cache_put_entry);

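/*
 * Complete a pending upcall for @key: userspace (typically via an ioctl)
 * reports @err and the result data in @args, which
 * uc_ops->parse_downcall() unpacks into the entry.  On success the entry
 * becomes VALID for uc_entry_expire jiffies; on any failure it is
 * invalidated and unhashed.  Waiters are woken in both cases.
 */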
int upcall_cache_downcall(struct upcall_cache *cache, __u32 err, __u64 key,
                          void *args)
{
        struct upcall_cache_entry *entry = NULL;
        struct list_head *head;
        int found = 0, rc = 0;
        ENTRY;

        LASSERT(cache);

        head = &cache->uc_hashtable[UC_CACHE_HASH_INDEX(key)];

        spin_lock(&cache->uc_lock);
        list_for_each_entry(entry, head, ue_hash) {
                if (downcall_compare(cache, entry, key, args) == 0) {
                        found = 1;
                        get_entry(entry);
                        break;
                }
        }

        if (!found) {
                CDEBUG(D_OTHER, "%s: upcall for key "LPU64" not expected\n",
                       cache->uc_name, key);
                /* not found; this can happen legitimately, e.g. if the
                 * entry expired and was flushed before the reply arrived */
                spin_unlock(&cache->uc_lock);
                RETURN(-EINVAL);
        }

        if (err) {
                CDEBUG(D_OTHER, "%s: upcall for key "LPU64" returned %d\n",
                       cache->uc_name, entry->ue_key, err);
                GOTO(out, rc = -EINVAL);
        }

        if (!UC_CACHE_IS_ACQUIRING(entry)) {
                CDEBUG(D_RPCTRACE, "%s: found uptodate entry %p (key "LPU64")\n",
                       cache->uc_name, entry, entry->ue_key);
                GOTO(out, rc = 0);
        }

        if (UC_CACHE_IS_INVALID(entry) || UC_CACHE_IS_EXPIRED(entry)) {
                CERROR("%s: found a stale entry %p (key "LPU64") in ioctl\n",
                       cache->uc_name, entry, entry->ue_key);
                GOTO(out, rc = -EINVAL);
        }

        spin_unlock(&cache->uc_lock);
        if (cache->uc_ops->parse_downcall)
                rc = cache->uc_ops->parse_downcall(cache, entry, args);
        spin_lock(&cache->uc_lock);
        if (rc)
                GOTO(out, rc);

        entry->ue_expire = jiffies + cache->uc_entry_expire;
        UC_CACHE_SET_VALID(entry);
        CDEBUG(D_OTHER, "%s: created upcall cache entry %p for key "LPU64"\n",
               cache->uc_name, entry, entry->ue_key);
out:
        if (rc) {
                UC_CACHE_SET_INVALID(entry);
                list_del_init(&entry->ue_hash);
        }
        UC_CACHE_CLEAR_ACQUIRING(entry);
        spin_unlock(&cache->uc_lock);
        wake_up_all(&entry->ue_waitq);
        /* free_entry() expects the cache lock, so drop our reference
         * under uc_lock, as upcall_cache_put_entry() does */
        spin_lock(&cache->uc_lock);
        put_entry(cache, entry);
        spin_unlock(&cache->uc_lock);

        RETURN(rc);
}
EXPORT_SYMBOL(upcall_cache_downcall);

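/* walk every hash chain and free idle entries; with @force set, busy
 * entries are not tolerated (LASSERT), otherwise they are only marked
 * EXPIRED and reaped when their last reference is dropped */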
static void cache_flush(struct upcall_cache *cache, int force)
{
        struct upcall_cache_entry *entry, *next;
        int i;
        ENTRY;

        spin_lock(&cache->uc_lock);
        for (i = 0; i < UC_CACHE_HASH_SIZE; i++) {
                list_for_each_entry_safe(entry, next,
                                         &cache->uc_hashtable[i], ue_hash) {
                        if (!force && atomic_read(&entry->ue_refcount)) {
                                UC_CACHE_SET_EXPIRED(entry);
                                continue;
                        }
                        LASSERT(!atomic_read(&entry->ue_refcount));
                        free_entry(cache, entry);
                }
        }
        spin_unlock(&cache->uc_lock);
        EXIT;
}

void upcall_cache_flush_idle(struct upcall_cache *cache)
{
        cache_flush(cache, 0);
}
EXPORT_SYMBOL(upcall_cache_flush_idle);

void upcall_cache_flush_all(struct upcall_cache *cache)
{
        cache_flush(cache, 1);
}
EXPORT_SYMBOL(upcall_cache_flush_all);

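/* expire the single entry matching @key (and @args, via upcall_compare);
 * it is freed at once if idle, or when the last reference goes away */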
void upcall_cache_flush_one(struct upcall_cache *cache, __u64 key, void *args)
{
        struct list_head *head;
        struct upcall_cache_entry *entry;
        int found = 0;
        ENTRY;

        head = &cache->uc_hashtable[UC_CACHE_HASH_INDEX(key)];

        spin_lock(&cache->uc_lock);
        list_for_each_entry(entry, head, ue_hash) {
                if (upcall_compare(cache, entry, key, args) == 0) {
                        found = 1;
                        break;
                }
        }

        if (found) {
                CWARN("%s: flush entry %p: key "LPU64", ref %d, fl %x, "
                      "cur %lu, ex %ld/%ld\n",
                      cache->uc_name, entry, entry->ue_key,
                      atomic_read(&entry->ue_refcount), entry->ue_flags,
                      jiffies, entry->ue_acquire_expire, entry->ue_expire);
                UC_CACHE_SET_EXPIRED(entry);
                if (!atomic_read(&entry->ue_refcount))
                        free_entry(cache, entry);
        }
        spin_unlock(&cache->uc_lock);
}
EXPORT_SYMBOL(upcall_cache_flush_one);

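/* allocate and set up a cache; @upcall is the userspace helper path
 * (a proc tunable).  Entries stay valid for 10 minutes and an upcall
 * may take at most 15 seconds before waiters give up */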
struct upcall_cache *upcall_cache_init(const char *name, const char *upcall,
                                       struct upcall_cache_ops *ops)
{
        struct upcall_cache *cache;
        int i;
        ENTRY;

        OBD_ALLOC(cache, sizeof(*cache));
        if (!cache)
                RETURN(ERR_PTR(-ENOMEM));

        spin_lock_init(&cache->uc_lock);
        for (i = 0; i < UC_CACHE_HASH_SIZE; i++)
                INIT_LIST_HEAD(&cache->uc_hashtable[i]);
        strncpy(cache->uc_name, name, sizeof(cache->uc_name) - 1);
        /* upcall pathname proc tunable */
        strncpy(cache->uc_upcall, upcall, sizeof(cache->uc_upcall) - 1);
        cache->uc_entry_expire = 10 * 60 * HZ;
        cache->uc_acquire_expire = 15 * HZ;
        cache->uc_ops = ops;

        RETURN(cache);
}
EXPORT_SYMBOL(upcall_cache_init);
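
/*
 * Example usage, as a minimal sketch.  The "example" names, the ops
 * table and the upcall path below are hypothetical illustrations, not
 * part of this file; a real consumer (e.g. the MDS supplementary group
 * upcall) supplies its own callbacks:
 *
 *      static int example_do_upcall(struct upcall_cache *cache,
 *                                   struct upcall_cache_entry *entry)
 *      {
 *              char keystr[32];
 *              char *argv[] = { cache->uc_upcall, cache->uc_name,
 *                               keystr, NULL };
 *              char *envp[] = { "HOME=/", "PATH=/sbin:/usr/sbin", NULL };
 *
 *              snprintf(keystr, sizeof(keystr), LPU64, entry->ue_key);
 *              // don't wait for exit: the helper replies asynchronously
 *              // via upcall_cache_downcall()
 *              return call_usermodehelper(argv[0], argv, envp, 0);
 *      }
 *
 *      static struct upcall_cache_ops example_ops = {
 *              .do_upcall = example_do_upcall,
 *      };
 *
 *      cache = upcall_cache_init("example", "/usr/sbin/example_upcall",
 *                                &example_ops);
 *      entry = upcall_cache_get_entry(cache, key, NULL);
 *      if (!IS_ERR(entry)) {
 *              // entry is VALID here; use it, then drop the reference
 *              upcall_cache_put_entry(cache, entry);
 *      }
 *      upcall_cache_cleanup(cache);
 */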

void upcall_cache_cleanup(struct upcall_cache *cache)
{
        if (!cache)
                return;
        upcall_cache_flush_all(cache);
        OBD_FREE(cache, sizeof(*cache));
}
EXPORT_SYMBOL(upcall_cache_cleanup);