-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.gnu.org/licenses/gpl-2.0.html
- *
- * GPL HEADER END
- */
+// SPDX-License-Identifier: GPL-2.0
+
/*
* Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
*
* Copyright (c) 2012, 2016, Intel Corporation.
*/
+
/*
* This file is part of Lustre, http://www.lustre.org/
*
- * lustre/obdclass/upcall_cache.c
- *
* Supplementary groups cache.
*/
+
#define DEBUG_SUBSYSTEM S_SEC
#include <libcfs/libcfs.h>
#include <uapi/linux/lnet/lnet-types.h>
#include <upcall_cache.h>
+#include "upcall_cache_internal.h"
static struct upcall_cache_entry *alloc_entry(struct upcall_cache *cache,
__u64 key, void *args)
return entry;
}
-/* protected by cache lock */
-static void free_entry(struct upcall_cache *cache,
- struct upcall_cache_entry *entry)
-{
- if (cache->uc_ops->free_entry)
- cache->uc_ops->free_entry(cache, entry);
-
- list_del(&entry->ue_hash);
- CDEBUG(D_OTHER, "destroy cache entry %p for key %llu\n",
- entry, entry->ue_key);
- LIBCFS_FREE(entry, sizeof(*entry));
-}
-
static inline int upcall_compare(struct upcall_cache *cache,
struct upcall_cache_entry *entry,
__u64 key, void *args)
return 0;
}
-static inline void get_entry(struct upcall_cache_entry *entry)
+static inline int accept_expired(struct upcall_cache *cache,
+ struct upcall_cache_entry *entry)
{
- atomic_inc(&entry->ue_refcount);
-}
+ if (cache->uc_ops->accept_expired)
+ return cache->uc_ops->accept_expired(cache, entry);
-static inline void put_entry(struct upcall_cache *cache,
- struct upcall_cache_entry *entry)
-{
- if (atomic_dec_and_test(&entry->ue_refcount) &&
- (UC_CACHE_IS_INVALID(entry) || UC_CACHE_IS_EXPIRED(entry))) {
- free_entry(cache, entry);
- }
+ return 0;
}
static inline void write_lock_from_read(rwlock_t *lock, bool *writelock)
}
}
+/* Return value:
+ * 0 for suitable entry
+ * 1 for unsuitable entry
+ * -1 for expired entry that may still be used if the cache's
+ *    accept_expired() hook allows it
+ */
static int check_unlink_entry(struct upcall_cache *cache,
struct upcall_cache_entry *entry,
bool writelock)
{
time64_t now = ktime_get_seconds();
+ int accept_exp = 0;
if (UC_CACHE_IS_VALID(entry) && now < entry->ue_expire)
return 0;
UC_CACHE_SET_EXPIRED(entry);
}
- if (writelock) {
+ accept_exp = accept_expired(cache, entry);
+ if (writelock && !accept_exp) {
list_del_init(&entry->ue_hash);
if (!atomic_read(&entry->ue_refcount))
free_entry(cache, entry);
}
- return 1;
+ return accept_exp ? -1 : 1;
}
int upcall_cache_set_upcall(struct upcall_cache *cache, const char *buffer,
size_t count, bool path_only)
{
char *upcall;
+ int rc = 0;
if (count >= UC_CACHE_UPCALL_MAXPATH)
return -E2BIG;
/* Remove any extraneous bits from the upcall (e.g. linefeeds) */
if (sscanf(buffer, "%s", upcall) != 1)
- goto invalid;
-
- if (upcall[0] == '/')
- goto valid;
-
- if (path_only)
- goto invalid;
+ GOTO(out, rc = -EINVAL);
- if (strcasecmp(upcall, "NONE") == 0) {
- snprintf(upcall, count + 1, "NONE");
- goto valid;
+ /* Accepted values are:
+ * - an absolute path to an executable
+ * - if path_only is false: "none", case insensitive
+ */
+ if (upcall[0] != '/') {
+ if (!path_only && strcasecmp(upcall, "NONE") == 0)
+ snprintf(upcall, count + 1, "NONE");
+ else
+ GOTO(out, rc = -EINVAL);
}
-invalid:
- OBD_FREE(upcall, count + 1);
- return -EINVAL;
-
-valid:
down_write(&cache->uc_upcall_rwsem);
- strcpy(cache->uc_upcall, upcall);
+ strncpy(cache->uc_upcall, upcall, count + 1);
up_write(&cache->uc_upcall_rwsem);
+out:
OBD_FREE(upcall, count + 1);
- return 0;
+ return rc;
}
EXPORT_SYMBOL(upcall_cache_set_upcall);
static inline int refresh_entry(struct upcall_cache *cache,
- struct upcall_cache_entry *entry)
+ struct upcall_cache_entry *entry, __u32 fsgid)
{
LASSERT(cache->uc_ops->do_upcall);
return cache->uc_ops->do_upcall(cache, entry);
__u64 key, void *args)
{
struct upcall_cache_entry *entry = NULL, *new = NULL, *next;
+ struct upcall_cache_entry *best_exp;
+ gid_t fsgid = (__u32)__kgid_val(INVALID_GID);
+ struct group_info *ginfo = NULL;
bool failedacquiring = false;
struct list_head *head;
wait_queue_entry_t wait;
bool writelock;
- int rc, found;
+ int rc = 0, rc2, found;
ENTRY;
writelock = false;
}
find_with_lock:
+ best_exp = NULL;
list_for_each_entry_safe(entry, next, head, ue_hash) {
/* check invalid & expired items */
- if (check_unlink_entry(cache, entry, writelock))
+ rc2 = check_unlink_entry(cache, entry, writelock);
+ if (rc2 == -1) {
+ /* look for most recent expired entry */
+ if (upcall_compare(cache, entry, key, args) == 0 &&
+ (!best_exp ||
+ entry->ue_expire > best_exp->ue_expire))
+ best_exp = entry;
+ }
+ if (rc2)
continue;
if (upcall_compare(cache, entry, key, args) == 0) {
found = 1;
}
if (!found) {
+ if (best_exp) {
+ if (!writelock) {
+ /* We found an expired but potentially usable
+ * entry while holding the read lock, so convert
+ * it to a write lock and find again, to check
+ * that entry was not modified/freed in between.
+ */
+ write_lock_from_read(&cache->uc_lock,
+ &writelock);
+ goto find_with_lock;
+ }
+ /* let's use that expired entry */
+ entry = best_exp;
+ get_entry(entry);
+ goto out;
+ }
if (!new) {
if (writelock)
write_unlock(&cache->uc_lock);
found = 0;
goto find_with_lock;
}
+ if (best_exp) {
+ list_del_init(&best_exp->ue_hash);
+ if (!atomic_read(&best_exp->ue_refcount))
+ free_entry(cache, best_exp);
+ }
list_move(&entry->ue_hash, head);
}
/* now we hold a write lock */
get_entry(entry);
+ /* special processing of supp groups for identity upcall */
+ if (strcmp(cache->uc_upcall, IDENTITY_UPCALL_INTERNAL) == 0) {
+ write_unlock(&cache->uc_lock);
+ rc = upcall_cache_get_entry_internal(cache, entry, args,
+ &fsgid, &ginfo);
+ write_lock(&cache->uc_lock);
+ if (rc)
+ GOTO(out, entry = ERR_PTR(rc));
+ }
+
/* acquire for new one */
if (UC_CACHE_IS_NEW(entry)) {
- UC_CACHE_SET_ACQUIRING(entry);
UC_CACHE_CLEAR_NEW(entry);
- write_unlock(&cache->uc_lock);
- rc = refresh_entry(cache, entry);
- write_lock(&cache->uc_lock);
+ if (strcmp(cache->uc_upcall, IDENTITY_UPCALL_INTERNAL) == 0) {
+ refresh_entry_internal(cache, entry, fsgid, &ginfo);
+ } else {
+ UC_CACHE_SET_ACQUIRING(entry);
+ write_unlock(&cache->uc_lock);
+ rc = refresh_entry(cache, entry, fsgid);
+ write_lock(&cache->uc_lock);
+ }
entry->ue_acquire_expire = ktime_get_seconds() +
cache->uc_acquire_expire;
if (rc < 0) {
write_unlock(&cache->uc_lock);
else
read_unlock(&cache->uc_lock);
+ if (ginfo)
+ groups_free(ginfo);
+ if (IS_ERR(entry))
+ CDEBUG(D_OTHER, "no entry found: rc = %ld\n", PTR_ERR(entry));
+ else
+ CDEBUG(D_OTHER, "found entry %p flags 0x%x\n",
+ entry, entry->ue_flags);
RETURN(entry);
}
EXPORT_SYMBOL(upcall_cache_get_entry);
RETURN(ERR_PTR(-ENOMEM));
for (i = 0; i < cache->uc_hashsize; i++)
INIT_LIST_HEAD(&cache->uc_hashtable[i]);
- strlcpy(cache->uc_name, name, sizeof(cache->uc_name));
+ strscpy(cache->uc_name, name, sizeof(cache->uc_name));
/* upcall pathname proc tunable */
- strlcpy(cache->uc_upcall, upcall, sizeof(cache->uc_upcall));
+ strscpy(cache->uc_upcall, upcall, sizeof(cache->uc_upcall));
cache->uc_entry_expire = entry_expire;
cache->uc_acquire_expire = acquire_expire;
cache->uc_acquire_replay = replayable;