/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 *  Supplementary groups cache.
 *
 *  Copyright (c) 2004 Cluster File Systems, Inc.
 *
 *   This file is part of Lustre, http://www.lustre.org.
 *
 *   Lustre is free software; you can redistribute it and/or
 *   modify it under the terms of version 2 of the GNU General Public
 *   License as published by the Free Software Foundation.
 *
 *   Lustre is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with Lustre; if not, write to the Free Software
 *   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#define DEBUG_SUBSYSTEM S_SEC

#ifndef AUTOCONF_INCLUDED
#include <linux/config.h>
#endif
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/kmod.h>
#include <linux/string.h>
#include <linux/stat.h>
#include <linux/errno.h>
#include <linux/version.h>
#include <linux/unistd.h>

#include <asm/system.h>
#include <asm/uaccess.h>

#include <linux/fs.h>
#include <linux/slab.h>

#include <obd_support.h>
#include <lustre_lib.h>

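/*
 * Note: all hash table and entry state manipulation below is serialized
 * by cache->uc_lock; the lock is dropped around the upcall itself
 * (refresh_entry) and around ->parse_downcall(), both of which may sleep.
 */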
static struct upcall_cache_entry *alloc_entry(struct upcall_cache *cache,
                                              __u64 key, void *args)
{
        struct upcall_cache_entry *entry;

        OBD_ALLOC_PTR(entry);
        if (!entry)
                return NULL;

        UC_CACHE_SET_NEW(entry);
        INIT_LIST_HEAD(&entry->ue_hash);
        entry->ue_key = key;
        atomic_set(&entry->ue_refcount, 0);
        init_waitqueue_head(&entry->ue_waitq);
        if (cache->uc_ops->init_entry)
                cache->uc_ops->init_entry(entry, args);
        return entry;
}

/* protected by cache lock */
static void free_entry(struct upcall_cache *cache,
                       struct upcall_cache_entry *entry)
{
        if (cache->uc_ops->free_entry)
                cache->uc_ops->free_entry(cache, entry);

        list_del(&entry->ue_hash);
        CDEBUG(D_OTHER, "destroy cache entry %p for key "LPU64"\n",
               entry, entry->ue_key);
        OBD_FREE_PTR(entry);
}

static inline int upcall_compare(struct upcall_cache *cache,
                                 struct upcall_cache_entry *entry,
                                 __u64 key, void *args)
{
        if (entry->ue_key != key)
                return -1;

        if (cache->uc_ops->upcall_compare)
                return cache->uc_ops->upcall_compare(cache, entry, key, args);

        return 0;
}

static inline int downcall_compare(struct upcall_cache *cache,
                                   struct upcall_cache_entry *entry,
                                   __u64 key, void *args)
{
        if (entry->ue_key != key)
                return -1;

        if (cache->uc_ops->downcall_compare)
                return cache->uc_ops->downcall_compare(cache, entry, key, args);

        return 0;
}

static inline void get_entry(struct upcall_cache_entry *entry)
{
        atomic_inc(&entry->ue_refcount);
}

static inline void put_entry(struct upcall_cache *cache,
                             struct upcall_cache_entry *entry)
{
        if (atomic_dec_and_test(&entry->ue_refcount) &&
            (UC_CACHE_IS_INVALID(entry) || UC_CACHE_IS_EXPIRED(entry))) {
                free_entry(cache, entry);
        }
}

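/*
 * Check whether an entry is still usable.  Returns 0 if the entry is
 * valid and unexpired (or an acquire is still within its deadline);
 * otherwise the entry is unhashed (and freed if unreferenced) and 1 is
 * returned so the caller skips or retries it.
 */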
static int check_unlink_entry(struct upcall_cache *cache,
                              struct upcall_cache_entry *entry)
{
        if (UC_CACHE_IS_VALID(entry) &&
            time_before(jiffies, entry->ue_expire))
                return 0;

        if (UC_CACHE_IS_ACQUIRING(entry)) {
                if (time_before(jiffies, entry->ue_acquire_expire))
                        return 0;

                UC_CACHE_SET_EXPIRED(entry);
                wake_up_all(&entry->ue_waitq);
        } else if (!UC_CACHE_IS_INVALID(entry)) {
                UC_CACHE_SET_EXPIRED(entry);
        }

        list_del_init(&entry->ue_hash);
        if (!atomic_read(&entry->ue_refcount))
                free_entry(cache, entry);
        return 1;
}

static inline int refresh_entry(struct upcall_cache *cache,
                                struct upcall_cache_entry *entry)
{
        LASSERT(cache->uc_ops->do_upcall);
        return cache->uc_ops->do_upcall(cache, entry);
}

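/*
 * Look up (or create) the cache entry for @key.  A new entry triggers
 * the upcall via ->do_upcall(); concurrent callers sleep on ue_waitq
 * until the matching downcall arrives or uc_acquire_expire elapses.
 * Returns an entry with its refcount held, or ERR_PTR() on failure;
 * release it with upcall_cache_put_entry().
 */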
struct upcall_cache_entry *upcall_cache_get_entry(struct upcall_cache *cache,
                                                  __u64 key, void *args)
{
        struct upcall_cache_entry *entry = NULL, *new = NULL, *next;
        struct list_head *head;
        wait_queue_t wait;
        int rc, found;
        ENTRY;

        LASSERT(cache);

        head = &cache->uc_hashtable[UC_CACHE_HASH_INDEX(key)];
find_again:
        found = 0;
        spin_lock(&cache->uc_lock);
        list_for_each_entry_safe(entry, next, head, ue_hash) {
                /* check invalid & expired items */
                if (check_unlink_entry(cache, entry))
                        continue;
                if (upcall_compare(cache, entry, key, args) == 0) {
                        found = 1;
                        break;
                }
        }

        if (!found) { /* didn't find it */
                if (!new) {
                        spin_unlock(&cache->uc_lock);
                        new = alloc_entry(cache, key, args);
                        if (!new) {
                                CERROR("failed to allocate entry\n");
                                RETURN(ERR_PTR(-ENOMEM));
                        }
                        goto find_again;
                } else {
                        list_add(&new->ue_hash, head);
                        entry = new;
                }
        } else {
                if (new) {
                        free_entry(cache, new);
                        new = NULL;
                }
                list_move(&entry->ue_hash, head);
        }
        get_entry(entry);

        /* acquire for a new entry */
        if (UC_CACHE_IS_NEW(entry)) {
                UC_CACHE_SET_ACQUIRING(entry);
                UC_CACHE_CLEAR_NEW(entry);
                entry->ue_acquire_expire = jiffies + cache->uc_acquire_expire;
                spin_unlock(&cache->uc_lock);
                rc = refresh_entry(cache, entry);
                spin_lock(&cache->uc_lock);
                if (rc < 0) {
                        UC_CACHE_CLEAR_ACQUIRING(entry);
                        UC_CACHE_SET_INVALID(entry);
                        if (unlikely(rc == -EREMCHG)) {
                                put_entry(cache, entry);
                                GOTO(out, entry = ERR_PTR(rc));
                        }
                }
                /* fall through */
        }
        /* someone (and only one) is doing the upcall for this entry;
         * just wait for it to complete
         */
        if (UC_CACHE_IS_ACQUIRING(entry)) {
                unsigned long expiry = jiffies + cache->uc_acquire_expire;

                init_waitqueue_entry(&wait, current);
                add_wait_queue(&entry->ue_waitq, &wait);
                set_current_state(TASK_INTERRUPTIBLE);
                spin_unlock(&cache->uc_lock);

                schedule_timeout(cache->uc_acquire_expire);

                spin_lock(&cache->uc_lock);
                remove_wait_queue(&entry->ue_waitq, &wait);
                if (UC_CACHE_IS_ACQUIRING(entry)) {
                        /* we were interrupted or the upcall failed midway */
                        rc = time_before(jiffies, expiry) ? -EINTR : -ETIMEDOUT;
                        CERROR("acquire for key "LPU64" failed: %d\n",
                               entry->ue_key, rc);
                        put_entry(cache, entry);
                        GOTO(out, entry = ERR_PTR(rc));
                }
                /* fall through */
        }

        /* invalid means error, no need to try again */
        if (UC_CACHE_IS_INVALID(entry)) {
                put_entry(cache, entry);
                GOTO(out, entry = ERR_PTR(-EIDRM));
        }

        /* check expired
         * We can't refresh the existing one because some
         * memory might be shared by multiple processes.
         */
        if (check_unlink_entry(cache, entry)) {
                /* if expired, try again.  But if this entry was created
                 * by us and expired too quickly without any error, give
                 * it at least one chance to be used.
                 */
                if (entry != new) {
                        put_entry(cache, entry);
                        spin_unlock(&cache->uc_lock);
                        new = NULL;
                        goto find_again;
                }
        }

        /* Now we know it's good */
out:
        spin_unlock(&cache->uc_lock);
        RETURN(entry);
}
EXPORT_SYMBOL(upcall_cache_get_entry);

void upcall_cache_put_entry(struct upcall_cache *cache,
                            struct upcall_cache_entry *entry)
{
        ENTRY;

        if (!entry) {
                EXIT;
                return;
        }

        LASSERT(atomic_read(&entry->ue_refcount) > 0);
        spin_lock(&cache->uc_lock);
        put_entry(cache, entry);
        spin_unlock(&cache->uc_lock);
        EXIT;
}
EXPORT_SYMBOL(upcall_cache_put_entry);

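/*
 * Complete a pending upcall: called when the userspace handler reports
 * its result back (e.g. via an ioctl).  @err is the handler's status;
 * on success the entry data is filled in via ->parse_downcall() and the
 * entry becomes valid for uc_entry_expire jiffies.
 */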
int upcall_cache_downcall(struct upcall_cache *cache, __u32 err, __u64 key,
                          void *args)
{
        struct upcall_cache_entry *entry = NULL;
        struct list_head *head;
        int found = 0, rc = 0;
        ENTRY;

        LASSERT(cache);

        head = &cache->uc_hashtable[UC_CACHE_HASH_INDEX(key)];

        spin_lock(&cache->uc_lock);
        list_for_each_entry(entry, head, ue_hash) {
                if (downcall_compare(cache, entry, key, args) == 0) {
                        found = 1;
                        get_entry(entry);
                        break;
                }
        }

        if (!found) {
                CDEBUG(D_OTHER, "%s: upcall for key "LPU64" not expected\n",
                       cache->uc_name, key);
                /* not found; that can legitimately happen */
                spin_unlock(&cache->uc_lock);
                RETURN(-EINVAL);
        }

        if (err) {
                CDEBUG(D_OTHER, "%s: upcall for key "LPU64" returned %d\n",
                       cache->uc_name, entry->ue_key, err);
                GOTO(out, rc = -EINVAL);
        }

        if (!UC_CACHE_IS_ACQUIRING(entry)) {
                CDEBUG(D_RPCTRACE, "%s: found uptodate entry %p (key "LPU64")\n",
                       cache->uc_name, entry, entry->ue_key);
                GOTO(out, rc = 0);
        }

        if (UC_CACHE_IS_INVALID(entry) || UC_CACHE_IS_EXPIRED(entry)) {
                CERROR("%s: found a stale entry %p (key "LPU64") in ioctl\n",
                       cache->uc_name, entry, entry->ue_key);
                GOTO(out, rc = -EINVAL);
        }

        spin_unlock(&cache->uc_lock);
        if (cache->uc_ops->parse_downcall)
                rc = cache->uc_ops->parse_downcall(cache, entry, args);
        spin_lock(&cache->uc_lock);
        if (rc)
                GOTO(out, rc);

        entry->ue_expire = jiffies + cache->uc_entry_expire;
        UC_CACHE_SET_VALID(entry);
        CDEBUG(D_OTHER, "%s: created upcall cache entry %p for key "LPU64"\n",
               cache->uc_name, entry, entry->ue_key);
out:
        if (rc) {
                UC_CACHE_SET_INVALID(entry);
                list_del_init(&entry->ue_hash);
        }
        UC_CACHE_CLEAR_ACQUIRING(entry);
        spin_unlock(&cache->uc_lock);
        wake_up_all(&entry->ue_waitq);
        put_entry(cache, entry);

        RETURN(rc);
}
EXPORT_SYMBOL(upcall_cache_downcall);

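/*
 * Flush the whole cache.  With @force, every entry is freed and none
 * may still be referenced (the LASSERT below enforces this); without
 * it, busy entries are only marked expired and reaped on the final put.
 */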
static void cache_flush(struct upcall_cache *cache, int force)
{
        struct upcall_cache_entry *entry, *next;
        int i;
        ENTRY;

        spin_lock(&cache->uc_lock);
        for (i = 0; i < UC_CACHE_HASH_SIZE; i++) {
                list_for_each_entry_safe(entry, next,
                                         &cache->uc_hashtable[i], ue_hash) {
                        if (!force && atomic_read(&entry->ue_refcount)) {
                                UC_CACHE_SET_EXPIRED(entry);
                                continue;
                        }
                        LASSERT(!atomic_read(&entry->ue_refcount));
                        free_entry(cache, entry);
                }
        }
        spin_unlock(&cache->uc_lock);
        EXIT;
}

void upcall_cache_flush_idle(struct upcall_cache *cache)
{
        cache_flush(cache, 0);
}
EXPORT_SYMBOL(upcall_cache_flush_idle);

void upcall_cache_flush_all(struct upcall_cache *cache)
{
        cache_flush(cache, 1);
}
EXPORT_SYMBOL(upcall_cache_flush_all);

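/*
 * Expire a single entry matching @key/@args so the next lookup forces a
 * fresh upcall; the entry is freed immediately if unreferenced.
 */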
void upcall_cache_flush_one(struct upcall_cache *cache, __u64 key, void *args)
{
        struct list_head *head;
        struct upcall_cache_entry *entry;
        int found = 0;
        ENTRY;

        head = &cache->uc_hashtable[UC_CACHE_HASH_INDEX(key)];

        spin_lock(&cache->uc_lock);
        list_for_each_entry(entry, head, ue_hash) {
                if (upcall_compare(cache, entry, key, args) == 0) {
                        found = 1;
                        break;
                }
        }

        if (found) {
                CWARN("%s: flush entry %p: key "LPU64", ref %d, fl %x, "
                      "cur %lu, ex %ld/%ld\n",
                      cache->uc_name, entry, entry->ue_key,
                      atomic_read(&entry->ue_refcount), entry->ue_flags,
                      jiffies, entry->ue_acquire_expire, entry->ue_expire);
                UC_CACHE_SET_EXPIRED(entry);
                if (!atomic_read(&entry->ue_refcount))
                        free_entry(cache, entry);
        }
        spin_unlock(&cache->uc_lock);
}
EXPORT_SYMBOL(upcall_cache_flush_one);

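/*
 * Allocate and set up a cache.  @name and @upcall are copied (and
 * truncated to fit); entries default to a 10 minute lifetime and
 * upcalls to a 15 second acquire timeout.
 */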
struct upcall_cache *upcall_cache_init(const char *name, const char *upcall,
                                       struct upcall_cache_ops *ops)
{
        struct upcall_cache *cache;
        int i;
        ENTRY;

        OBD_ALLOC(cache, sizeof(*cache));
        if (!cache)
                RETURN(ERR_PTR(-ENOMEM));

        spin_lock_init(&cache->uc_lock);
        rwlock_init(&cache->uc_upcall_rwlock);
        for (i = 0; i < UC_CACHE_HASH_SIZE; i++)
                INIT_LIST_HEAD(&cache->uc_hashtable[i]);
        strncpy(cache->uc_name, name, sizeof(cache->uc_name) - 1);
        /* upcall pathname proc tunable */
        strncpy(cache->uc_upcall, upcall, sizeof(cache->uc_upcall) - 1);
        cache->uc_entry_expire = 10 * 60 * HZ;
        cache->uc_acquire_expire = 15 * HZ;
        cache->uc_ops = ops;

        RETURN(cache);
}
EXPORT_SYMBOL(upcall_cache_init);

void upcall_cache_cleanup(struct upcall_cache *cache)
{
        if (!cache)
                return;
        upcall_cache_flush_all(cache);
        OBD_FREE(cache, sizeof(*cache));
}
EXPORT_SYMBOL(upcall_cache_cleanup);
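/*
 * Usage sketch (illustrative only; the "identity_*" handler names and
 * the upcall path below are hypothetical, not defined in this file).
 * A user of this API fills in an upcall_cache_ops table, then pairs
 * upcall_cache_get_entry()/upcall_cache_put_entry() around each access,
 * while the userspace helper reports back via upcall_cache_downcall():
 *
 *      static struct upcall_cache_ops identity_ops = {
 *              .init_entry       = identity_init_entry,
 *              .free_entry       = identity_free_entry,
 *              .upcall_compare   = identity_upcall_compare,
 *              .downcall_compare = identity_downcall_compare,
 *              .do_upcall        = identity_do_upcall,
 *              .parse_downcall   = identity_parse_downcall,
 *      };
 *
 *      cache = upcall_cache_init("identity", "/usr/sbin/identity_upcall",
 *                                &identity_ops);
 *      entry = upcall_cache_get_entry(cache, (__u64)uid, NULL);
 *      if (!IS_ERR(entry)) {
 *              // ... use the cached data attached to entry ...
 *              upcall_cache_put_entry(cache, entry);
 *      }
 *      upcall_cache_cleanup(cache);
 */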