/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2012, 2016, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 *
 * lustre/obdclass/upcall_cache.c
 *
 * Supplementary groups cache.
 */
#define DEBUG_SUBSYSTEM S_SEC

#include <libcfs/libcfs.h>
#include <uapi/linux/lnet/lnet-types.h>
#include <upcall_cache.h>

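/*
 * Allocate a cache entry for @key in the NEW state, with a zero refcount
 * and an initialized wait queue.  The type-specific init_entry() hook, if
 * present, consumes @args.  Returns NULL on allocation failure.
 */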
static struct upcall_cache_entry *alloc_entry(struct upcall_cache *cache,
                                              __u64 key, void *args)
{
        struct upcall_cache_entry *entry;

        LIBCFS_ALLOC(entry, sizeof(*entry));
        if (!entry)
                return NULL;

        UC_CACHE_SET_NEW(entry);
        INIT_LIST_HEAD(&entry->ue_hash);
        entry->ue_key = key;
        atomic_set(&entry->ue_refcount, 0);
        init_waitqueue_head(&entry->ue_waitq);
        if (cache->uc_ops->init_entry)
                cache->uc_ops->init_entry(entry, args);
        return entry;
}

/* protected by cache lock */
static void free_entry(struct upcall_cache *cache,
                       struct upcall_cache_entry *entry)
{
        if (cache->uc_ops->free_entry)
                cache->uc_ops->free_entry(cache, entry);

        list_del(&entry->ue_hash);
        CDEBUG(D_OTHER, "destroy cache entry %p for key %llu\n",
               entry, entry->ue_key);
        LIBCFS_FREE(entry, sizeof(*entry));
}

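/*
 * Key comparison helpers: a mismatch on ue_key fails immediately (-1);
 * otherwise the type-specific hook, if present, has the final say.
 * Both return 0 on a match.
 */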
static inline int upcall_compare(struct upcall_cache *cache,
                                 struct upcall_cache_entry *entry,
                                 __u64 key, void *args)
{
        if (entry->ue_key != key)
                return -1;

        if (cache->uc_ops->upcall_compare)
                return cache->uc_ops->upcall_compare(cache, entry, key, args);

        return 0;
}

static inline int downcall_compare(struct upcall_cache *cache,
                                   struct upcall_cache_entry *entry,
                                   __u64 key, void *args)
{
        if (entry->ue_key != key)
                return -1;

        if (cache->uc_ops->downcall_compare)
                return cache->uc_ops->downcall_compare(cache, entry, key, args);

        return 0;
}

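/*
 * Entry reference counting.  put_entry() must be called with uc_lock held
 * since it may call free_entry(); the entry is freed once the last
 * reference drops and it is already marked invalid or expired.
 */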
static inline void get_entry(struct upcall_cache_entry *entry)
{
        atomic_inc(&entry->ue_refcount);
}

static inline void put_entry(struct upcall_cache *cache,
                             struct upcall_cache_entry *entry)
{
        if (atomic_dec_and_test(&entry->ue_refcount) &&
            (UC_CACHE_IS_INVALID(entry) || UC_CACHE_IS_EXPIRED(entry))) {
                free_entry(cache, entry);
        }
}

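/*
 * Unlink @entry from the hash if it is no longer usable: expired, invalid,
 * or stuck in ACQUIRING past its ue_acquire_expire deadline.  Returns 1 if
 * the entry was unlinked (and freed, if unreferenced), 0 if it is still
 * valid or still within its acquire window.  Called under uc_lock.
 */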
static int check_unlink_entry(struct upcall_cache *cache,
                              struct upcall_cache_entry *entry)
{
        time64_t now = ktime_get_seconds();

        if (UC_CACHE_IS_VALID(entry) && now < entry->ue_expire)
                return 0;

        if (UC_CACHE_IS_ACQUIRING(entry)) {
                if (entry->ue_acquire_expire == 0 ||
                    now < entry->ue_acquire_expire)
                        return 0;

                UC_CACHE_SET_EXPIRED(entry);
                wake_up(&entry->ue_waitq);
        } else if (!UC_CACHE_IS_INVALID(entry)) {
                UC_CACHE_SET_EXPIRED(entry);
        }

        list_del_init(&entry->ue_hash);
        if (!atomic_read(&entry->ue_refcount))
                free_entry(cache, entry);
        return 1;
}

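/*
 * Fire the type-specific upcall for @entry; for identity/gss caches this
 * typically execs a usermode helper, but that is entirely up to the ops
 * implementation.
 */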
static inline int refresh_entry(struct upcall_cache *cache,
                                struct upcall_cache_entry *entry)
{
        LASSERT(cache->uc_ops->do_upcall);
        return cache->uc_ops->do_upcall(cache, entry);
}

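/**
 * upcall_cache_get_entry() - look up, and if needed create, the entry
 * for @key.
 *
 * The first caller to miss allocates a NEW entry, fires the upcall via
 * refresh_entry() and marks the entry ACQUIRING; later callers sleep on
 * ue_waitq until the matching downcall completes the entry or the acquire
 * window (uc_acquire_expire) runs out.  A failed or timed-out acquisition
 * is retried once before giving up.
 *
 * Returns a referenced entry (drop it with upcall_cache_put_entry()), or
 * an ERR_PTR(): -ENOMEM on allocation failure, -EIDRM if the entry was
 * invalidated, -EINTR or -ETIMEDOUT if the wait was interrupted or timed
 * out, or an error (e.g. -EREMCHG) passed through from the upcall itself.
 */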
struct upcall_cache_entry *upcall_cache_get_entry(struct upcall_cache *cache,
                                                  __u64 key, void *args)
{
        struct upcall_cache_entry *entry = NULL, *new = NULL, *next;
        bool failedacquiring = false;
        struct list_head *head;
        wait_queue_entry_t wait;
        int rc, found;
        ENTRY;

        LASSERT(cache);

        head = &cache->uc_hashtable[UC_CACHE_HASH_INDEX(key,
                                                        cache->uc_hashsize)];
find_again:
        found = 0;
        spin_lock(&cache->uc_lock);
        list_for_each_entry_safe(entry, next, head, ue_hash) {
                /* check invalid & expired items */
                if (check_unlink_entry(cache, entry))
                        continue;
                if (upcall_compare(cache, entry, key, args) == 0) {
                        found = 1;
                        break;
                }
        }

        if (!found) {
                if (!new) {
                        spin_unlock(&cache->uc_lock);
                        new = alloc_entry(cache, key, args);
                        if (!new) {
                                CERROR("failed to alloc entry\n");
                                RETURN(ERR_PTR(-ENOMEM));
                        }
                        goto find_again;
                } else {
                        list_add(&new->ue_hash, head);
                        entry = new;
                }
        } else {
                if (new) {
                        free_entry(cache, new);
                        new = NULL;
                }
                list_move(&entry->ue_hash, head);
        }
        get_entry(entry);

        /* acquire for new one */
        if (UC_CACHE_IS_NEW(entry)) {
                UC_CACHE_SET_ACQUIRING(entry);
                UC_CACHE_CLEAR_NEW(entry);
                spin_unlock(&cache->uc_lock);
                rc = refresh_entry(cache, entry);
                spin_lock(&cache->uc_lock);
                entry->ue_acquire_expire = ktime_get_seconds() +
                                           cache->uc_acquire_expire;
                if (rc < 0) {
                        UC_CACHE_CLEAR_ACQUIRING(entry);
                        UC_CACHE_SET_INVALID(entry);
                        wake_up(&entry->ue_waitq);
                        if (unlikely(rc == -EREMCHG)) {
                                put_entry(cache, entry);
                                GOTO(out, entry = ERR_PTR(rc));
                        }
                }
        }
        /* someone (and only one) is doing an upcall for this item,
         * wait for it to complete */
        if (UC_CACHE_IS_ACQUIRING(entry)) {
                long expiry = (entry == new) ?
                              cfs_time_seconds(cache->uc_acquire_expire) :
                              MAX_SCHEDULE_TIMEOUT;
                long left;

                init_wait(&wait);
                add_wait_queue(&entry->ue_waitq, &wait);
                set_current_state(TASK_INTERRUPTIBLE);
                spin_unlock(&cache->uc_lock);

                left = schedule_timeout(expiry);

                spin_lock(&cache->uc_lock);
                remove_wait_queue(&entry->ue_waitq, &wait);
                if (UC_CACHE_IS_ACQUIRING(entry)) {
                        /* we were interrupted or the upcall failed midway */
                        rc = left > 0 ? -EINTR : -ETIMEDOUT;
                        put_entry(cache, entry);
                        if (!failedacquiring) {
                                spin_unlock(&cache->uc_lock);
                                failedacquiring = true;
                                new = NULL;
                                CDEBUG(D_OTHER,
                                       "retry acquire for key %llu (got %d)\n",
                                       entry->ue_key, rc);
                                goto find_again;
                        }
                        CERROR("acquire for key %llu: error %d\n",
                               entry->ue_key, rc);
                        GOTO(out, entry = ERR_PTR(rc));
                }
        }

        /* invalid means error, no need to try again */
        if (UC_CACHE_IS_INVALID(entry)) {
                put_entry(cache, entry);
                GOTO(out, entry = ERR_PTR(-EIDRM));
        }

        /* Check for expiry.  We can't refresh the existing entry in place
         * because its memory might be shared by multiple processes.
         */
        if (check_unlink_entry(cache, entry)) {
                /* If expired, try again.  But if this entry was created by
                 * us and expired too quickly without any error, give it at
                 * least one chance to be used.
                 */
                if (entry != new) {
                        put_entry(cache, entry);
                        spin_unlock(&cache->uc_lock);
                        new = NULL;
                        goto find_again;
                }
        }

        /* Now we know it's good */
out:
        spin_unlock(&cache->uc_lock);
        RETURN(entry);
}
EXPORT_SYMBOL(upcall_cache_get_entry);

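/* Take an extra reference on an entry the caller already holds. */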
void upcall_cache_get_entry_raw(struct upcall_cache_entry *entry)
{
        get_entry(entry);
}
EXPORT_SYMBOL(upcall_cache_get_entry_raw);

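/*
 * Update @entry from downcall data: set its expiry, then either mark it
 * VALID (@state == 0) or OR the given @state flag bits into ue_flags.
 */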
void upcall_cache_update_entry(struct upcall_cache *cache,
                               struct upcall_cache_entry *entry,
                               time64_t expire, int state)
{
        spin_lock(&cache->uc_lock);
        entry->ue_expire = expire;
        if (!state)
                UC_CACHE_SET_VALID(entry);
        else
                entry->ue_flags |= state;
        spin_unlock(&cache->uc_lock);
}
EXPORT_SYMBOL(upcall_cache_update_entry);

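/* Drop a reference taken by upcall_cache_get_entry(); NULL is a no-op. */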
void upcall_cache_put_entry(struct upcall_cache *cache,
                            struct upcall_cache_entry *entry)
{
        ENTRY;

        if (!entry) {
                EXIT;
                return;
        }

        LASSERT(atomic_read(&entry->ue_refcount) > 0);
        spin_lock(&cache->uc_lock);
        put_entry(cache, entry);
        spin_unlock(&cache->uc_lock);
        EXIT;
}
EXPORT_SYMBOL(upcall_cache_put_entry);

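/**
 * upcall_cache_downcall() - deliver the userspace result for @key.
 *
 * Called from the downcall (write/ioctl) path of the upcall helper.  On
 * success the type-specific parse_downcall() hook fills in the entry,
 * which is then marked VALID with an expiry of uc_entry_expire seconds
 * from now unless the hook set one already.  A non-zero @err from
 * userspace, or any parse failure, invalidates and unhashes the entry.
 * Waiters on ue_waitq are woken in all cases.
 */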
int upcall_cache_downcall(struct upcall_cache *cache, __u32 err, __u64 key,
                          void *args)
{
        struct upcall_cache_entry *entry = NULL;
        struct list_head *head;
        int found = 0, rc = 0;
        ENTRY;

        LASSERT(cache);

        head = &cache->uc_hashtable[UC_CACHE_HASH_INDEX(key,
                                                        cache->uc_hashsize)];

        spin_lock(&cache->uc_lock);
        list_for_each_entry(entry, head, ue_hash) {
                if (downcall_compare(cache, entry, key, args) == 0) {
                        found = 1;
                        get_entry(entry);
                        break;
                }
        }

        if (!found) {
                CDEBUG(D_OTHER, "%s: upcall for key %llu not expected\n",
                       cache->uc_name, key);
                /* not found is possible: the entry may already be gone */
                spin_unlock(&cache->uc_lock);
                RETURN(-EINVAL);
        }

        if (err) {
                CDEBUG(D_OTHER, "%s: upcall for key %llu returned %d\n",
                       cache->uc_name, entry->ue_key, err);
                GOTO(out, rc = -EINVAL);
        }

        if (!UC_CACHE_IS_ACQUIRING(entry)) {
                CDEBUG(D_RPCTRACE, "%s: found uptodate entry %p (key %llu)\n",
                       cache->uc_name, entry, entry->ue_key);
                GOTO(out, rc = 0);
        }

        if (UC_CACHE_IS_INVALID(entry) || UC_CACHE_IS_EXPIRED(entry)) {
                CERROR("%s: found a stale entry %p (key %llu) in ioctl\n",
                       cache->uc_name, entry, entry->ue_key);
                GOTO(out, rc = -EINVAL);
        }

        spin_unlock(&cache->uc_lock);
        if (cache->uc_ops->parse_downcall)
                rc = cache->uc_ops->parse_downcall(cache, entry, args);
        spin_lock(&cache->uc_lock);
        if (rc)
                GOTO(out, rc);

        if (!entry->ue_expire)
                entry->ue_expire = ktime_get_seconds() + cache->uc_entry_expire;
        UC_CACHE_SET_VALID(entry);
        CDEBUG(D_OTHER, "%s: created upcall cache entry %p for key %llu\n",
               cache->uc_name, entry, entry->ue_key);
out:
        if (rc) {
                UC_CACHE_SET_INVALID(entry);
                list_del_init(&entry->ue_hash);
        }
        UC_CACHE_CLEAR_ACQUIRING(entry);
        spin_unlock(&cache->uc_lock);
        wake_up(&entry->ue_waitq);
        put_entry(cache, entry);

        RETURN(rc);
}
EXPORT_SYMBOL(upcall_cache_downcall);

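/*
 * Flush the whole cache.  Unreferenced entries are freed immediately;
 * with @force clear, busy entries are merely marked EXPIRED, while with
 * @force set every entry is expected to be unreferenced (LASSERT).
 */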
void upcall_cache_flush(struct upcall_cache *cache, int force)
{
        struct upcall_cache_entry *entry, *next;
        int i;
        ENTRY;

        spin_lock(&cache->uc_lock);
        for (i = 0; i < cache->uc_hashsize; i++) {
                list_for_each_entry_safe(entry, next,
                                         &cache->uc_hashtable[i], ue_hash) {
                        if (!force && atomic_read(&entry->ue_refcount)) {
                                UC_CACHE_SET_EXPIRED(entry);
                                continue;
                        }
                        LASSERT(!atomic_read(&entry->ue_refcount));
                        free_entry(cache, entry);
                }
        }
        spin_unlock(&cache->uc_lock);
        EXIT;
}
EXPORT_SYMBOL(upcall_cache_flush);

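/* Expire, and free if unreferenced, the single entry matching @key/@args. */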
void upcall_cache_flush_one(struct upcall_cache *cache, __u64 key, void *args)
{
        struct list_head *head;
        struct upcall_cache_entry *entry;
        int found = 0;
        ENTRY;

        head = &cache->uc_hashtable[UC_CACHE_HASH_INDEX(key,
                                                        cache->uc_hashsize)];

        spin_lock(&cache->uc_lock);
        list_for_each_entry(entry, head, ue_hash) {
                if (upcall_compare(cache, entry, key, args) == 0) {
                        found = 1;
                        break;
                }
        }

        if (found) {
                CWARN("%s: flush entry %p: key %llu, ref %d, fl %x, cur %lld, ex %lld/%lld\n",
                      cache->uc_name, entry, entry->ue_key,
                      atomic_read(&entry->ue_refcount), entry->ue_flags,
                      ktime_get_real_seconds(), entry->ue_acquire_expire,
                      entry->ue_expire);
                UC_CACHE_SET_EXPIRED(entry);
                if (!atomic_read(&entry->ue_refcount))
                        free_entry(cache, entry);
        }
        spin_unlock(&cache->uc_lock);
}
EXPORT_SYMBOL(upcall_cache_flush_one);

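/**
 * upcall_cache_init() - allocate and set up an upcall cache with @hashsz
 * hash buckets.
 *
 * @name and @upcall are copied into the cache (uc_upcall is the
 * proc-tunable upcall pathname).  Entries stay valid for @entry_expire
 * seconds once filled, and an in-flight upcall is given up to
 * @acquire_expire seconds before waiters time out.
 *
 * A minimal caller sketch; the ops table, helper names and sizes below
 * are hypothetical, not taken from this file:
 *
 *	static struct upcall_cache_ops my_ops = {
 *		.do_upcall	= my_do_upcall,
 *		.parse_downcall	= my_parse_downcall,
 *	};
 *
 *	cache = upcall_cache_init("my_cache", "/usr/sbin/my_upcall",
 *				  128, 300, 30, &my_ops);
 *	if (IS_ERR(cache))
 *		RETURN(PTR_ERR(cache));
 */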
struct upcall_cache *upcall_cache_init(const char *name, const char *upcall,
                                       int hashsz, time64_t entry_expire,
                                       time64_t acquire_expire,
                                       struct upcall_cache_ops *ops)
{
        struct upcall_cache *cache;
        int i;
        ENTRY;

        LIBCFS_ALLOC(cache, sizeof(*cache));
        if (!cache)
                RETURN(ERR_PTR(-ENOMEM));

        spin_lock_init(&cache->uc_lock);
        init_rwsem(&cache->uc_upcall_rwsem);
        cache->uc_hashsize = hashsz;
        LIBCFS_ALLOC(cache->uc_hashtable,
                     sizeof(*cache->uc_hashtable) * cache->uc_hashsize);
        if (!cache->uc_hashtable) {
                /* don't leak the cache itself on hash table alloc failure */
                LIBCFS_FREE(cache, sizeof(*cache));
                RETURN(ERR_PTR(-ENOMEM));
        }
        for (i = 0; i < cache->uc_hashsize; i++)
                INIT_LIST_HEAD(&cache->uc_hashtable[i]);
        strlcpy(cache->uc_name, name, sizeof(cache->uc_name));
        /* upcall pathname proc tunable */
        strlcpy(cache->uc_upcall, upcall, sizeof(cache->uc_upcall));
        cache->uc_entry_expire = entry_expire;
        cache->uc_acquire_expire = acquire_expire;
        cache->uc_ops = ops;

        RETURN(cache);
}
EXPORT_SYMBOL(upcall_cache_init);

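/* Flush all entries, then free the hash table and the cache itself. */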
void upcall_cache_cleanup(struct upcall_cache *cache)
{
        if (!cache)
                return;
        upcall_cache_flush_all(cache);
        LIBCFS_FREE(cache->uc_hashtable,
                    sizeof(*cache->uc_hashtable) * cache->uc_hashsize);
        LIBCFS_FREE(cache, sizeof(*cache));
}
EXPORT_SYMBOL(upcall_cache_cleanup);