/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2012, 2016, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lustre/obdclass/upcall_cache.c
 *
 * Supplementary groups cache.
 */
#define DEBUG_SUBSYSTEM S_SEC

#include <libcfs/linux/linux-misc.h>
#include <libcfs/libcfs.h>
#include <uapi/linux/lnet/lnet-types.h>
#include <upcall_cache.h>

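/*
 * Each cache entry is hashed by key and moves through the states
 * NEW -> ACQUIRING -> VALID, or to INVALID/EXPIRED on upcall failure
 * or timeout. A lookup miss allocates a NEW entry and fires the
 * uc_ops->do_upcall hook to query user space; waiters sleep on
 * ue_waitq until upcall_cache_downcall() delivers the answer.
 */
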
static struct upcall_cache_entry *alloc_entry(struct upcall_cache *cache,
                                              __u64 key, void *args)
{
        struct upcall_cache_entry *entry;

        LIBCFS_ALLOC(entry, sizeof(*entry));
        if (!entry)
                return NULL;

        UC_CACHE_SET_NEW(entry);
        INIT_LIST_HEAD(&entry->ue_hash);
        entry->ue_key = key;
        atomic_set(&entry->ue_refcount, 0);
        init_waitqueue_head(&entry->ue_waitq);
        if (cache->uc_ops->init_entry)
                cache->uc_ops->init_entry(entry, args);
        return entry;
}

/* protected by cache lock */
static void free_entry(struct upcall_cache *cache,
                       struct upcall_cache_entry *entry)
{
        if (cache->uc_ops->free_entry)
                cache->uc_ops->free_entry(cache, entry);

        list_del(&entry->ue_hash);
        CDEBUG(D_OTHER, "destroy cache entry %p for key %llu\n",
                entry, entry->ue_key);
        LIBCFS_FREE(entry, sizeof(*entry));
}

static inline int upcall_compare(struct upcall_cache *cache,
                                 struct upcall_cache_entry *entry,
                                 __u64 key, void *args)
{
        if (entry->ue_key != key)
                return -1;

        if (cache->uc_ops->upcall_compare)
                return cache->uc_ops->upcall_compare(cache, entry, key, args);

        return 0;
}

static inline int downcall_compare(struct upcall_cache *cache,
                                   struct upcall_cache_entry *entry,
                                   __u64 key, void *args)
{
        if (entry->ue_key != key)
                return -1;

        if (cache->uc_ops->downcall_compare)
                return cache->uc_ops->downcall_compare(cache, entry, key, args);

        return 0;
}

static inline void get_entry(struct upcall_cache_entry *entry)
{
        atomic_inc(&entry->ue_refcount);
}

static inline void put_entry(struct upcall_cache *cache,
                             struct upcall_cache_entry *entry)
{
        if (atomic_dec_and_test(&entry->ue_refcount) &&
            (UC_CACHE_IS_INVALID(entry) || UC_CACHE_IS_EXPIRED(entry))) {
                free_entry(cache, entry);
        }
}

static int check_unlink_entry(struct upcall_cache *cache,
                              struct upcall_cache_entry *entry)
{
        if (UC_CACHE_IS_VALID(entry) &&
            cfs_time_before(cfs_time_current(), entry->ue_expire))
                return 0;

        if (UC_CACHE_IS_ACQUIRING(entry)) {
                if (entry->ue_acquire_expire == 0 ||
                    cfs_time_before(cfs_time_current(),
                                    entry->ue_acquire_expire))
                        return 0;

                UC_CACHE_SET_EXPIRED(entry);
                wake_up_all(&entry->ue_waitq);
        } else if (!UC_CACHE_IS_INVALID(entry)) {
                UC_CACHE_SET_EXPIRED(entry);
        }

        list_del_init(&entry->ue_hash);
        if (!atomic_read(&entry->ue_refcount))
                free_entry(cache, entry);
        return 1;
}

static inline int refresh_entry(struct upcall_cache *cache,
                                struct upcall_cache_entry *entry)
{
        LASSERT(cache->uc_ops->do_upcall);
        return cache->uc_ops->do_upcall(cache, entry);
}

struct upcall_cache_entry *upcall_cache_get_entry(struct upcall_cache *cache,
                                                  __u64 key, void *args)
{
        struct upcall_cache_entry *entry = NULL, *new = NULL, *next;
        struct list_head *head;
        wait_queue_entry_t wait;
        int rc, found;
        ENTRY;

        LASSERT(cache);

        head = &cache->uc_hashtable[UC_CACHE_HASH_INDEX(key)];
find_again:
        found = 0;
        spin_lock(&cache->uc_lock);
        list_for_each_entry_safe(entry, next, head, ue_hash) {
                /* check invalid & expired items */
                if (check_unlink_entry(cache, entry))
                        continue;
                if (upcall_compare(cache, entry, key, args) == 0) {
                        found = 1;
                        break;
                }
        }

        if (!found) {
                if (!new) {
                        spin_unlock(&cache->uc_lock);
                        new = alloc_entry(cache, key, args);
                        if (!new) {
                                CERROR("failed to allocate entry\n");
                                RETURN(ERR_PTR(-ENOMEM));
                        }
                        goto find_again;
                } else {
                        list_add(&new->ue_hash, head);
                        entry = new;
                }
        } else {
                if (new) {
                        free_entry(cache, new);
                        new = NULL;
                }
                list_move(&entry->ue_hash, head);
        }
        get_entry(entry);

        /* acquire for new one */
        if (UC_CACHE_IS_NEW(entry)) {
                UC_CACHE_SET_ACQUIRING(entry);
                UC_CACHE_CLEAR_NEW(entry);
                spin_unlock(&cache->uc_lock);
                rc = refresh_entry(cache, entry);
                spin_lock(&cache->uc_lock);
                entry->ue_acquire_expire =
                        cfs_time_shift(cache->uc_acquire_expire);
                if (rc < 0) {
                        UC_CACHE_CLEAR_ACQUIRING(entry);
                        UC_CACHE_SET_INVALID(entry);
                        wake_up_all(&entry->ue_waitq);
                        if (unlikely(rc == -EREMCHG)) {
                                put_entry(cache, entry);
                                GOTO(out, entry = ERR_PTR(rc));
                        }
                }
        }
        /* someone (and only one) is doing an upcall for this item;
         * wait for it to complete */
        if (UC_CACHE_IS_ACQUIRING(entry)) {
                long expiry = (entry == new) ?
                              cfs_time_seconds(cache->uc_acquire_expire) :
                              MAX_SCHEDULE_TIMEOUT;
                long left;

                init_waitqueue_entry(&wait, current);
                add_wait_queue(&entry->ue_waitq, &wait);
                set_current_state(TASK_INTERRUPTIBLE);
                spin_unlock(&cache->uc_lock);

                left = schedule_timeout(expiry);

                spin_lock(&cache->uc_lock);
                remove_wait_queue(&entry->ue_waitq, &wait);
                if (UC_CACHE_IS_ACQUIRING(entry)) {
                        /* we're interrupted or upcall failed in the middle */
                        rc = left > 0 ? -EINTR : -ETIMEDOUT;
                        CERROR("acquire for key %llu: error %d\n",
                               entry->ue_key, rc);
                        put_entry(cache, entry);
                        GOTO(out, entry = ERR_PTR(rc));
                }
        }

        /* invalid means error, don't need to try again */
        if (UC_CACHE_IS_INVALID(entry)) {
                put_entry(cache, entry);
                GOTO(out, entry = ERR_PTR(-EIDRM));
        }

        /* check expired
         * We can't refresh the existing entry in place because its
         * memory might be shared by multiple processes.
         */
        if (check_unlink_entry(cache, entry)) {
                /* If expired, try again. But if this entry was created
                 * by us and turned expired too quickly, without any
                 * error, give it at least one chance to be used.
                 */
                if (entry != new) {
                        put_entry(cache, entry);
                        spin_unlock(&cache->uc_lock);
                        new = NULL;
                        goto find_again;
                }
        }

        /* Now we know it's good */
out:
        spin_unlock(&cache->uc_lock);
        RETURN(entry);
}
EXPORT_SYMBOL(upcall_cache_get_entry);

void upcall_cache_put_entry(struct upcall_cache *cache,
                            struct upcall_cache_entry *entry)
{
        ENTRY;

        if (!entry) {
                EXIT;
                return;
        }

        LASSERT(atomic_read(&entry->ue_refcount) > 0);
        spin_lock(&cache->uc_lock);
        put_entry(cache, entry);
        spin_unlock(&cache->uc_lock);
        EXIT;
}
EXPORT_SYMBOL(upcall_cache_put_entry);
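
/*
 * Usage sketch (illustrative only, not part of the original file): a
 * typical caller pairs upcall_cache_get_entry() with
 * upcall_cache_put_entry(). The cache pointer, key and args below are
 * hypothetical; real users such as the MDT identity cache wrap this
 * pattern in their own helpers.
 *
 *      struct upcall_cache_entry *e;
 *
 *      e = upcall_cache_get_entry(cache, key, args);
 *      if (IS_ERR(e))
 *              return PTR_ERR(e);      // -ENOMEM, -EIDRM, -EINTR, ...
 *      // ... read the payload the downcall parsed into the entry ...
 *      upcall_cache_put_entry(cache, e);
 */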

int upcall_cache_downcall(struct upcall_cache *cache, __u32 err, __u64 key,
                          void *args)
{
        struct upcall_cache_entry *entry = NULL;
        struct list_head *head;
        int found = 0, rc = 0;
        ENTRY;

        LASSERT(cache);

        head = &cache->uc_hashtable[UC_CACHE_HASH_INDEX(key)];

        spin_lock(&cache->uc_lock);
        list_for_each_entry(entry, head, ue_hash) {
                if (downcall_compare(cache, entry, key, args) == 0) {
                        found = 1;
                        get_entry(entry);
                        break;
                }
        }

        if (!found) {
                CDEBUG(D_OTHER, "%s: upcall for key %llu not expected\n",
                       cache->uc_name, key);
                /* not found; this can legitimately happen */
                spin_unlock(&cache->uc_lock);
                RETURN(-EINVAL);
        }

        if (err) {
                CDEBUG(D_OTHER, "%s: upcall for key %llu returned %d\n",
                       cache->uc_name, entry->ue_key, err);
                GOTO(out, rc = -EINVAL);
        }

        if (!UC_CACHE_IS_ACQUIRING(entry)) {
                CDEBUG(D_RPCTRACE, "%s: found uptodate entry %p (key %llu)\n",
                       cache->uc_name, entry, entry->ue_key);
                GOTO(out, rc = 0);
        }

        if (UC_CACHE_IS_INVALID(entry) || UC_CACHE_IS_EXPIRED(entry)) {
                CERROR("%s: found a stale entry %p (key %llu) in ioctl\n",
                       cache->uc_name, entry, entry->ue_key);
                GOTO(out, rc = -EINVAL);
        }

        spin_unlock(&cache->uc_lock);
        if (cache->uc_ops->parse_downcall)
                rc = cache->uc_ops->parse_downcall(cache, entry, args);
        spin_lock(&cache->uc_lock);
        if (rc)
                GOTO(out, rc);

        entry->ue_expire = cfs_time_shift(cache->uc_entry_expire);
        UC_CACHE_SET_VALID(entry);
        CDEBUG(D_OTHER, "%s: created upcall cache entry %p for key %llu\n",
               cache->uc_name, entry, entry->ue_key);
out:
        if (rc) {
                UC_CACHE_SET_INVALID(entry);
                list_del_init(&entry->ue_hash);
        }
        UC_CACHE_CLEAR_ACQUIRING(entry);
        spin_unlock(&cache->uc_lock);
        wake_up_all(&entry->ue_waitq);
        put_entry(cache, entry);

        RETURN(rc);
}
EXPORT_SYMBOL(upcall_cache_downcall);
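
/*
 * Downcall flow sketch (illustrative; the handler name and the ioctl
 * plumbing that delivers the helper's answer are assumptions, not part
 * of this file): once the user-space helper has resolved the key, the
 * code that receives its result forwards it here so the waiters in
 * upcall_cache_get_entry() can proceed.
 *
 *      static int my_handle_result(struct upcall_cache *cache,
 *                                  struct my_result *res)
 *      {
 *              // a non-zero err invalidates the entry; waiters then
 *              // see it as invalid and get -EIDRM
 *              return upcall_cache_downcall(cache, res->err,
 *                                           res->key, res);
 *      }
 */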

void upcall_cache_flush(struct upcall_cache *cache, int force)
{
        struct upcall_cache_entry *entry, *next;
        int i;
        ENTRY;

        spin_lock(&cache->uc_lock);
        for (i = 0; i < UC_CACHE_HASH_SIZE; i++) {
                list_for_each_entry_safe(entry, next,
                                         &cache->uc_hashtable[i], ue_hash) {
                        if (!force && atomic_read(&entry->ue_refcount)) {
                                UC_CACHE_SET_EXPIRED(entry);
                                continue;
                        }
                        LASSERT(!atomic_read(&entry->ue_refcount));
                        free_entry(cache, entry);
                }
        }
        spin_unlock(&cache->uc_lock);
        EXIT;
}
EXPORT_SYMBOL(upcall_cache_flush);

void upcall_cache_flush_one(struct upcall_cache *cache, __u64 key, void *args)
{
        struct list_head *head;
        struct upcall_cache_entry *entry;
        int found = 0;
        ENTRY;

        head = &cache->uc_hashtable[UC_CACHE_HASH_INDEX(key)];

        spin_lock(&cache->uc_lock);
        list_for_each_entry(entry, head, ue_hash) {
                if (upcall_compare(cache, entry, key, args) == 0) {
                        found = 1;
                        break;
                }
        }

        if (found) {
                CWARN("%s: flush entry %p: key %llu, ref %d, fl %x, "
                      "cur %lu, ex %ld/%ld\n",
                      cache->uc_name, entry, entry->ue_key,
                      atomic_read(&entry->ue_refcount), entry->ue_flags,
                      cfs_time_current_sec(), entry->ue_acquire_expire,
                      entry->ue_expire);
                UC_CACHE_SET_EXPIRED(entry);
                if (!atomic_read(&entry->ue_refcount))
                        free_entry(cache, entry);
        }
        spin_unlock(&cache->uc_lock);
}
EXPORT_SYMBOL(upcall_cache_flush_one);

struct upcall_cache *upcall_cache_init(const char *name, const char *upcall,
                                       struct upcall_cache_ops *ops)
{
        struct upcall_cache *cache;
        int i;
        ENTRY;

        LIBCFS_ALLOC(cache, sizeof(*cache));
        if (!cache)
                RETURN(ERR_PTR(-ENOMEM));

        spin_lock_init(&cache->uc_lock);
        init_rwsem(&cache->uc_upcall_rwsem);
        for (i = 0; i < UC_CACHE_HASH_SIZE; i++)
                INIT_LIST_HEAD(&cache->uc_hashtable[i]);
        strlcpy(cache->uc_name, name, sizeof(cache->uc_name));
        /* upcall pathname proc tunable */
        strlcpy(cache->uc_upcall, upcall, sizeof(cache->uc_upcall));
        cache->uc_entry_expire = 20 * 60;
        cache->uc_acquire_expire = 30;
        cache->uc_ops = ops;

        RETURN(cache);
}
EXPORT_SYMBOL(upcall_cache_init);
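
/*
 * Setup sketch (illustrative; "my_do_upcall", "my_parse_downcall" and
 * the upcall path are hypothetical, but the two hooks shown are the
 * ones this file requires or invokes):
 *
 *      static struct upcall_cache_ops my_ops = {
 *              .do_upcall      = my_do_upcall,      // fork user helper
 *              .parse_downcall = my_parse_downcall, // fill entry payload
 *      };
 *
 *      cache = upcall_cache_init("my_cache", "/usr/sbin/my_upcall",
 *                                &my_ops);
 *      if (IS_ERR(cache))
 *              return PTR_ERR(cache);
 */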

void upcall_cache_cleanup(struct upcall_cache *cache)
{
        if (!cache)
                return;
        upcall_cache_flush_all(cache);
        LIBCFS_FREE(cache, sizeof(*cache));
}
EXPORT_SYMBOL(upcall_cache_cleanup);