Whamcloud - gitweb
LU-1347 build: remove the vim/emacs modelines
[fs/lustre-release.git] / libcfs / libcfs / upcall_cache.c
1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
19  *
20  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
21  * CA 95054 USA or visit www.sun.com if you need additional information or
22  * have any questions.
23  *
24  * GPL HEADER END
25  */
26 /*
27  * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
28  * Use is subject to license terms.
29  */
30 /*
31  * This file is part of Lustre, http://www.lustre.org/
32  * Lustre is a trademark of Sun Microsystems, Inc.
33  *
34  * libcfs/libcfs/upcall_cache.c
35  *
36  * Supplementary groups cache.
37  */
38 #define DEBUG_SUBSYSTEM S_SEC
39
40 #include <libcfs/lucache.h>
41
42 static struct upcall_cache_entry *alloc_entry(struct upcall_cache *cache,
43                                               __u64 key, void *args)
44 {
45         struct upcall_cache_entry *entry;
46
47         LIBCFS_ALLOC(entry, sizeof(*entry));
48         if (!entry)
49                 return NULL;
50
51         UC_CACHE_SET_NEW(entry);
52         CFS_INIT_LIST_HEAD(&entry->ue_hash);
53         entry->ue_key = key;
54         cfs_atomic_set(&entry->ue_refcount, 0);
55         cfs_waitq_init(&entry->ue_waitq);
56         if (cache->uc_ops->init_entry)
57                 cache->uc_ops->init_entry(entry, args);
58         return entry;
59 }
60
61 /* protected by cache lock */
62 static void free_entry(struct upcall_cache *cache,
63                        struct upcall_cache_entry *entry)
64 {
65         if (cache->uc_ops->free_entry)
66                 cache->uc_ops->free_entry(cache, entry);
67
68         cfs_list_del(&entry->ue_hash);
69         CDEBUG(D_OTHER, "destroy cache entry %p for key "LPU64"\n",
70                entry, entry->ue_key);
71         LIBCFS_FREE(entry, sizeof(*entry));
72 }
73
74 static inline int upcall_compare(struct upcall_cache *cache,
75                                  struct upcall_cache_entry *entry,
76                                  __u64 key, void *args)
77 {
78         if (entry->ue_key != key)
79                 return -1;
80
81         if (cache->uc_ops->upcall_compare)
82                 return cache->uc_ops->upcall_compare(cache, entry, key, args);
83
84         return 0;
85 }
86
87 static inline int downcall_compare(struct upcall_cache *cache,
88                                    struct upcall_cache_entry *entry,
89                                    __u64 key, void *args)
90 {
91         if (entry->ue_key != key)
92                 return -1;
93
94         if (cache->uc_ops->downcall_compare)
95                 return cache->uc_ops->downcall_compare(cache, entry, key, args);
96
97         return 0;
98 }
99
/* Take a reference on \a entry.  All callers in this file invoke it with
 * cache->uc_lock held, which keeps the entry from being freed underneath
 * us while the refcount is bumped. */
static inline void get_entry(struct upcall_cache_entry *entry)
{
        cfs_atomic_inc(&entry->ue_refcount);
}
104
105 static inline void put_entry(struct upcall_cache *cache,
106                              struct upcall_cache_entry *entry)
107 {
108         if (cfs_atomic_dec_and_test(&entry->ue_refcount) &&
109             (UC_CACHE_IS_INVALID(entry) || UC_CACHE_IS_EXPIRED(entry))) {
110                 free_entry(cache, entry);
111         }
112 }
113
/*
 * Check whether \a entry is still usable and unhash it if not.
 *
 * Returns 0 (keep) when the entry is VALID and not past ue_expire, or
 * when an upcall is still in flight and within its acquire window.
 * Otherwise marks the entry EXPIRED (waking any waiters if the upcall
 * timed out), removes it from the hash, frees it when unreferenced,
 * and returns 1 (unlinked).
 *
 * Caller must hold cache->uc_lock.
 */
static int check_unlink_entry(struct upcall_cache *cache,
                              struct upcall_cache_entry *entry)
{
        if (UC_CACHE_IS_VALID(entry) &&
            cfs_time_before(cfs_time_current(), entry->ue_expire))
                return 0;

        if (UC_CACHE_IS_ACQUIRING(entry)) {
                /* upcall still pending: keep the entry while the acquire
                 * deadline has not passed (0 means not yet armed) */
                if (entry->ue_acquire_expire == 0 ||
                    cfs_time_before(cfs_time_current(),
                                    entry->ue_acquire_expire))
                        return 0;

                /* acquire timed out: expire it and release any waiters */
                UC_CACHE_SET_EXPIRED(entry);
                cfs_waitq_broadcast(&entry->ue_waitq);
        } else if (!UC_CACHE_IS_INVALID(entry)) {
                UC_CACHE_SET_EXPIRED(entry);
        }

        cfs_list_del_init(&entry->ue_hash);
        if (!cfs_atomic_read(&entry->ue_refcount))
                free_entry(cache, entry);
        return 1;
}
138
/* Kick off the user-space upcall for \a entry via the mandatory
 * do_upcall hook.  Called without cache->uc_lock held (the caller in
 * upcall_cache_get_entry drops it first).  Returns the hook's rc. */
static inline int refresh_entry(struct upcall_cache *cache,
                         struct upcall_cache_entry *entry)
{
        LASSERT(cache->uc_ops->do_upcall);
        return cache->uc_ops->do_upcall(cache, entry);
}
145
/**
 * Look up (or create) the cache entry for \a key and return it with an
 * extra reference held (drop with upcall_cache_put_entry).
 *
 * If no usable entry exists, one is allocated, inserted and refreshed by
 * firing the upcall (uc_ops->do_upcall); exactly one thread performs the
 * upcall while concurrent callers sleep on ue_waitq until the matching
 * downcall arrives, the acquire window expires, or they are interrupted.
 *
 * \param cache  cache to search
 * \param key    hash key of the entry
 * \param args   opaque data passed through to the cache ops hooks
 *
 * \retval entry pointer on success
 * \retval ERR_PTR(-ENOMEM)     allocation failed
 * \retval ERR_PTR(-EREMCHG)    propagated from do_upcall
 * \retval ERR_PTR(-EINTR)      woken early while waiting on the upcall
 * \retval ERR_PTR(-ETIMEDOUT)  upcall did not complete in time
 * \retval ERR_PTR(-EIDRM)      entry was marked INVALID (upcall failed)
 */
struct upcall_cache_entry *upcall_cache_get_entry(struct upcall_cache *cache,
                                                  __u64 key, void *args)
{
        struct upcall_cache_entry *entry = NULL, *new = NULL, *next;
        cfs_list_t *head;
        cfs_waitlink_t wait;
        int rc, found;
        ENTRY;

        LASSERT(cache);

        head = &cache->uc_hashtable[UC_CACHE_HASH_INDEX(key)];
find_again:
        found = 0;
        cfs_spin_lock(&cache->uc_lock);
        cfs_list_for_each_entry_safe(entry, next, head, ue_hash) {
                /* check invalid & expired items */
                if (check_unlink_entry(cache, entry))
                        continue;
                if (upcall_compare(cache, entry, key, args) == 0) {
                        found = 1;
                        break;
                }
        }

        if (!found) {
                if (!new) {
                        /* must allocate outside the spinlock, then rescan:
                         * another thread may have inserted the key meanwhile */
                        cfs_spin_unlock(&cache->uc_lock);
                        new = alloc_entry(cache, key, args);
                        if (!new) {
                                CERROR("fail to alloc entry\n");
                                RETURN(ERR_PTR(-ENOMEM));
                        }
                        goto find_again;
                } else {
                        cfs_list_add(&new->ue_hash, head);
                        entry = new;
                }
        } else {
                /* raced allocation lost; discard our spare */
                if (new) {
                        free_entry(cache, new);
                        new = NULL;
                }
                /* move to chain head: keeps hot entries cheap to find */
                cfs_list_move(&entry->ue_hash, head);
        }
        get_entry(entry);

        /* acquire for new one */
        if (UC_CACHE_IS_NEW(entry)) {
                UC_CACHE_SET_ACQUIRING(entry);
                UC_CACHE_CLEAR_NEW(entry);
                /* drop the lock across the upcall; ACQUIRING keeps other
                 * threads from starting a second one */
                cfs_spin_unlock(&cache->uc_lock);
                rc = refresh_entry(cache, entry);
                cfs_spin_lock(&cache->uc_lock);
                entry->ue_acquire_expire =
                        cfs_time_shift(cache->uc_acquire_expire);
                if (rc < 0) {
                        UC_CACHE_CLEAR_ACQUIRING(entry);
                        UC_CACHE_SET_INVALID(entry);
                        cfs_waitq_broadcast(&entry->ue_waitq);
                        if (unlikely(rc == -EREMCHG)) {
                                put_entry(cache, entry);
                                GOTO(out, entry = ERR_PTR(rc));
                        }
                }
        }
        /* someone (and only one) is doing upcall upon this item,
         * wait it to complete */
        if (UC_CACHE_IS_ACQUIRING(entry)) {
                /* the creator waits only its own acquire window; others
                 * wait indefinitely and rely on the creator's timeout */
                long expiry = (entry == new) ?
                              cfs_time_seconds(cache->uc_acquire_expire) :
                              CFS_MAX_SCHEDULE_TIMEOUT;
                long left;

                cfs_waitlink_init(&wait);
                cfs_waitq_add(&entry->ue_waitq, &wait);
                cfs_set_current_state(CFS_TASK_INTERRUPTIBLE);
                cfs_spin_unlock(&cache->uc_lock);

                left = cfs_waitq_timedwait(&wait, CFS_TASK_INTERRUPTIBLE,
                                           expiry);

                cfs_spin_lock(&cache->uc_lock);
                cfs_waitq_del(&entry->ue_waitq, &wait);
                if (UC_CACHE_IS_ACQUIRING(entry)) {
                        /* we're interrupted or upcall failed in the middle */
                        rc = left > 0 ? -EINTR : -ETIMEDOUT;
                        CERROR("acquire for key "LPU64": error %d\n",
                               entry->ue_key, rc);
                        put_entry(cache, entry);
                        GOTO(out, entry = ERR_PTR(rc));
                }
        }

        /* invalid means error, don't need to try again */
        if (UC_CACHE_IS_INVALID(entry)) {
                put_entry(cache, entry);
                GOTO(out, entry = ERR_PTR(-EIDRM));
        }

        /* check expired
         * We can't refresh the existing one because some
         * memory might be shared by multiple processes.
         */
        if (check_unlink_entry(cache, entry)) {
                /* if expired, try again. but if this entry is
                 * created by me but too quickly turn to expired
                 * without any error, should at least give a
                 * chance to use it once.
                 */
                if (entry != new) {
                        put_entry(cache, entry);
                        cfs_spin_unlock(&cache->uc_lock);
                        new = NULL;
                        goto find_again;
                }
        }

        /* Now we know it's good */
out:
        cfs_spin_unlock(&cache->uc_lock);
        RETURN(entry);
}
EXPORT_SYMBOL(upcall_cache_get_entry);
270
/**
 * Release a reference obtained from upcall_cache_get_entry.
 * A NULL \a entry is accepted and ignored.  Takes uc_lock because
 * put_entry may free the entry on last reference.
 */
void upcall_cache_put_entry(struct upcall_cache *cache,
                            struct upcall_cache_entry *entry)
{
        ENTRY;

        if (!entry) {
                EXIT;
                return;
        }

        LASSERT(cfs_atomic_read(&entry->ue_refcount) > 0);
        cfs_spin_lock(&cache->uc_lock);
        put_entry(cache, entry);
        cfs_spin_unlock(&cache->uc_lock);
        EXIT;
}
EXPORT_SYMBOL(upcall_cache_put_entry);
288
/**
 * Complete a pending upcall: find the entry matching the downcall's
 * (\a key, \a args), parse the result via uc_ops->parse_downcall, and
 * mark the entry VALID with a fresh expiry.  On any failure the entry
 * is marked INVALID and unhashed.  Waiters on ue_waitq are woken in
 * either case.
 *
 * \param err  non-zero if the upcall itself reported failure
 *
 * \retval 0          entry updated (or already uptodate)
 * \retval -EINVAL    no matching entry, \a err set, or entry stale
 * \retval other      rc propagated from parse_downcall
 */
int upcall_cache_downcall(struct upcall_cache *cache, __u32 err, __u64 key,
                          void *args)
{
        struct upcall_cache_entry *entry = NULL;
        cfs_list_t *head;
        int found = 0, rc = 0;
        ENTRY;

        LASSERT(cache);

        head = &cache->uc_hashtable[UC_CACHE_HASH_INDEX(key)];

        cfs_spin_lock(&cache->uc_lock);
        cfs_list_for_each_entry(entry, head, ue_hash) {
                if (downcall_compare(cache, entry, key, args) == 0) {
                        found = 1;
                        get_entry(entry);
                        break;
                }
        }

        if (!found) {
                CDEBUG(D_OTHER, "%s: upcall for key "LPU64" not expected\n",
                       cache->uc_name, key);
                /* haven't found, it's possible */
                cfs_spin_unlock(&cache->uc_lock);
                RETURN(-EINVAL);
        }

        if (err) {
                CDEBUG(D_OTHER, "%s: upcall for key "LPU64" returned %d\n",
                       cache->uc_name, entry->ue_key, err);
                GOTO(out, rc = -EINVAL);
        }

        if (!UC_CACHE_IS_ACQUIRING(entry)) {
                /* a previous downcall already filled it in; nothing to do */
                CDEBUG(D_RPCTRACE,"%s: found uptodate entry %p (key "LPU64")\n",
                       cache->uc_name, entry, entry->ue_key);
                GOTO(out, rc = 0);
        }

        if (UC_CACHE_IS_INVALID(entry) || UC_CACHE_IS_EXPIRED(entry)) {
                CERROR("%s: found a stale entry %p (key "LPU64") in ioctl\n",
                       cache->uc_name, entry, entry->ue_key);
                GOTO(out, rc = -EINVAL);
        }

        /* parse_downcall may sleep/copy from user space, so drop the lock;
         * our reference pins the entry meanwhile */
        cfs_spin_unlock(&cache->uc_lock);
        if (cache->uc_ops->parse_downcall)
                rc = cache->uc_ops->parse_downcall(cache, entry, args);
        cfs_spin_lock(&cache->uc_lock);
        if (rc)
                GOTO(out, rc);

        entry->ue_expire = cfs_time_shift(cache->uc_entry_expire);
        UC_CACHE_SET_VALID(entry);
        CDEBUG(D_OTHER, "%s: created upcall cache entry %p for key "LPU64"\n",
               cache->uc_name, entry, entry->ue_key);
out:
        if (rc) {
                UC_CACHE_SET_INVALID(entry);
                cfs_list_del_init(&entry->ue_hash);
        }
        UC_CACHE_CLEAR_ACQUIRING(entry);
        cfs_spin_unlock(&cache->uc_lock);
        /* wake everyone blocked in upcall_cache_get_entry */
        cfs_waitq_broadcast(&entry->ue_waitq);
        put_entry(cache, entry);

        RETURN(rc);
}
EXPORT_SYMBOL(upcall_cache_downcall);
360
361 static void cache_flush(struct upcall_cache *cache, int force)
362 {
363         struct upcall_cache_entry *entry, *next;
364         int i;
365         ENTRY;
366
367         cfs_spin_lock(&cache->uc_lock);
368         for (i = 0; i < UC_CACHE_HASH_SIZE; i++) {
369                 cfs_list_for_each_entry_safe(entry, next,
370                                          &cache->uc_hashtable[i], ue_hash) {
371                         if (!force && cfs_atomic_read(&entry->ue_refcount)) {
372                                 UC_CACHE_SET_EXPIRED(entry);
373                                 continue;
374                         }
375                         LASSERT(!cfs_atomic_read(&entry->ue_refcount));
376                         free_entry(cache, entry);
377                 }
378         }
379         cfs_spin_unlock(&cache->uc_lock);
380         EXIT;
381 }
382
/* Free all unreferenced entries; in-use entries are just marked EXPIRED. */
void upcall_cache_flush_idle(struct upcall_cache *cache)
{
        cache_flush(cache, 0);
}
EXPORT_SYMBOL(upcall_cache_flush_idle);
388
/* Free every entry; asserts that none is still referenced (force flush). */
void upcall_cache_flush_all(struct upcall_cache *cache)
{
        cache_flush(cache, 1);
}
EXPORT_SYMBOL(upcall_cache_flush_all);
394
395 void upcall_cache_flush_one(struct upcall_cache *cache, __u64 key, void *args)
396 {
397         cfs_list_t *head;
398         struct upcall_cache_entry *entry;
399         int found = 0;
400         ENTRY;
401
402         head = &cache->uc_hashtable[UC_CACHE_HASH_INDEX(key)];
403
404         cfs_spin_lock(&cache->uc_lock);
405         cfs_list_for_each_entry(entry, head, ue_hash) {
406                 if (upcall_compare(cache, entry, key, args) == 0) {
407                         found = 1;
408                         break;
409                 }
410         }
411
412         if (found) {
413                 CWARN("%s: flush entry %p: key "LPU64", ref %d, fl %x, "
414                       "cur %lu, ex %ld/%ld\n",
415                       cache->uc_name, entry, entry->ue_key,
416                       cfs_atomic_read(&entry->ue_refcount), entry->ue_flags,
417                       cfs_time_current_sec(), entry->ue_acquire_expire,
418                       entry->ue_expire);
419                 UC_CACHE_SET_EXPIRED(entry);
420                 if (!cfs_atomic_read(&entry->ue_refcount))
421                         free_entry(cache, entry);
422         }
423         cfs_spin_unlock(&cache->uc_lock);
424 }
425 EXPORT_SYMBOL(upcall_cache_flush_one);
426
/**
 * Allocate and initialise an upcall cache.
 *
 * \param name    cache name (for logging); truncated to uc_name size
 * \param upcall  initial upcall pathname (proc tunable); truncated likewise
 * \param ops     type-specific hook table
 *
 * \retval new cache, or ERR_PTR(-ENOMEM) on allocation failure.
 *
 * NOTE(review): the strncpy calls copy at most size-1 bytes and rely on
 * LIBCFS_ALLOC zero-filling the struct for NUL termination — confirm
 * LIBCFS_ALLOC zeroes, else long names would leave uc_name unterminated.
 */
struct upcall_cache *upcall_cache_init(const char *name, const char *upcall,
                                       struct upcall_cache_ops *ops)
{
        struct upcall_cache *cache;
        int i;
        ENTRY;

        LIBCFS_ALLOC(cache, sizeof(*cache));
        if (!cache)
                RETURN(ERR_PTR(-ENOMEM));

        cfs_spin_lock_init(&cache->uc_lock);
        cfs_rwlock_init(&cache->uc_upcall_rwlock);
        for (i = 0; i < UC_CACHE_HASH_SIZE; i++)
                CFS_INIT_LIST_HEAD(&cache->uc_hashtable[i]);
        strncpy(cache->uc_name, name, sizeof(cache->uc_name) - 1);
        /* upcall pathname proc tunable */
        strncpy(cache->uc_upcall, upcall, sizeof(cache->uc_upcall) - 1);
        /* default lifetimes: valid entries 20 min, pending upcalls 30 s */
        cache->uc_entry_expire = 20 * 60;
        cache->uc_acquire_expire = 30;
        cache->uc_ops = ops;

        RETURN(cache);
}
EXPORT_SYMBOL(upcall_cache_init);
452
453 void upcall_cache_cleanup(struct upcall_cache *cache)
454 {
455         if (!cache)
456                 return;
457         upcall_cache_flush_all(cache);
458         LIBCFS_FREE(cache, sizeof(*cache));
459 }
460 EXPORT_SYMBOL(upcall_cache_cleanup);