X-Git-Url: https://git.whamcloud.com/?p=fs%2Flustre-release.git;a=blobdiff_plain;f=lustre%2Fptlrpc%2Fgss%2Fgss_pipefs.c;h=4a21cc77a6eeaf5f976a7af045879281c69cfff7;hp=dc4bb54a52f0b7cab8a203dcfdc3b5d938a503bb;hb=da1d93513fdff0a70257b13aa5649e478d4f70b6;hpb=cc2ff1bfd66a5c004eb6ed61fc2dac3f1ab49d3a

diff --git a/lustre/ptlrpc/gss/gss_pipefs.c b/lustre/ptlrpc/gss/gss_pipefs.c
index dc4bb54..4a21cc7 100644
--- a/lustre/ptlrpc/gss/gss_pipefs.c
+++ b/lustre/ptlrpc/gss/gss_pipefs.c
@@ -3,7 +3,7 @@
  *
  * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
  *
- * Copyright (c) 2012, Intel Corporation.
+ * Copyright (c) 2012, 2016, Intel Corporation.
  *
  * Author: Eric Mei
  */
@@ -47,7 +47,6 @@
  */

 #define DEBUG_SUBSYSTEM S_SEC
-#ifdef __KERNEL__
 #include
 #include
 #include
@@ -58,14 +57,12 @@
 #include
 struct rpc_clnt; /* for rpc_pipefs */
 #include
-#else
-#include
-#endif
+#include

 #include
 #include
 #include
-#include
+#include
 #include
 #include
 #include
@@ -89,7 +86,7 @@ static void gss_sec_pipe_upcall_fini(struct gss_sec *gsec)
 }

 /****************************************
- * internel context helpers             *
+ * internal context helpers             *
  ****************************************/

 static
@@ -128,31 +125,31 @@ void ctx_destroy_pf(struct ptlrpc_sec *sec, struct ptlrpc_cli_ctx *ctx)
 }

 static
-void ctx_enhash_pf(struct ptlrpc_cli_ctx *ctx, cfs_hlist_head_t *hash)
+void ctx_enhash_pf(struct ptlrpc_cli_ctx *ctx, struct hlist_head *hash)
 {
	set_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags);
	atomic_inc(&ctx->cc_refcount);
-	cfs_hlist_add_head(&ctx->cc_cache, hash);
+	hlist_add_head(&ctx->cc_cache, hash);
 }

 /*
  * caller must hold spinlock
  */
 static
-void ctx_unhash_pf(struct ptlrpc_cli_ctx *ctx, cfs_hlist_head_t *freelist)
+void ctx_unhash_pf(struct ptlrpc_cli_ctx *ctx, struct hlist_head *freelist)
 {
	assert_spin_locked(&ctx->cc_sec->ps_lock);

	LASSERT(atomic_read(&ctx->cc_refcount) > 0);
	LASSERT(test_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags));
-	LASSERT(!cfs_hlist_unhashed(&ctx->cc_cache));
+	LASSERT(!hlist_unhashed(&ctx->cc_cache));

	clear_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags);

	if (atomic_dec_and_test(&ctx->cc_refcount)) {
-		__cfs_hlist_del(&ctx->cc_cache);
-		cfs_hlist_add_head(&ctx->cc_cache, freelist);
+		__hlist_del(&ctx->cc_cache);
+		hlist_add_head(&ctx->cc_cache, freelist);
	} else {
-		cfs_hlist_del_init(&ctx->cc_cache);
+		hlist_del_init(&ctx->cc_cache);
	}
 }
@@ -161,7 +158,7 @@ void ctx_unhash_pf(struct ptlrpc_cli_ctx *ctx, cfs_hlist_head_t *freelist)
  */
 static
 int ctx_check_death_pf(struct ptlrpc_cli_ctx *ctx,
-                       cfs_hlist_head_t *freelist)
+                       struct hlist_head *freelist)
 {
	if (cli_ctx_check_death(ctx)) {
		if (freelist)
@@ -174,7 +171,7 @@ int ctx_check_death_pf(struct ptlrpc_cli_ctx *ctx,

 static inline
 int ctx_check_death_locked_pf(struct ptlrpc_cli_ctx *ctx,
-                              cfs_hlist_head_t *freelist)
+                              struct hlist_head *freelist)
 {
	LASSERT(ctx->cc_sec);
	LASSERT(atomic_read(&ctx->cc_refcount) > 0);
@@ -194,11 +191,11 @@ int ctx_match_pf(struct ptlrpc_cli_ctx *ctx, struct vfs_cred *vcred)
 }

 static
-void ctx_list_destroy_pf(cfs_hlist_head_t *head)
+void ctx_list_destroy_pf(struct hlist_head *head)
 {
	struct ptlrpc_cli_ctx *ctx;

-	while (!cfs_hlist_empty(head)) {
+	while (!hlist_empty(head)) {
		ctx = cfs_hlist_entry(head->first, struct ptlrpc_cli_ctx,
				      cc_cache);
@@ -206,7 +203,7 @@ void ctx_list_destroy_pf(cfs_hlist_head_t *head)
		LASSERT(test_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags) == 0);

-		cfs_hlist_del_init(&ctx->cc_cache);
+		hlist_del_init(&ctx->cc_cache);
		ctx_destroy_pf(ctx->cc_sec, ctx);
	}
 }

@@ -236,10 +233,10 @@ void gss_cli_ctx_die_pf(struct ptlrpc_cli_ctx *ctx, int grace)
	spin_lock(&ctx->cc_sec->ps_lock);
	if (test_and_clear_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags)) {
-		LASSERT(!cfs_hlist_unhashed(&ctx->cc_cache));
+		LASSERT(!hlist_unhashed(&ctx->cc_cache));
		LASSERT(atomic_read(&ctx->cc_refcount) > 1);

-		cfs_hlist_del_init(&ctx->cc_cache);
+		hlist_del_init(&ctx->cc_cache);
		if (atomic_dec_and_test(&ctx->cc_refcount))
			LBUG();
	}

@@ -261,12 +258,12 @@
 static
 void gss_sec_ctx_replace_pf(struct gss_sec *gsec, struct ptlrpc_cli_ctx *new)
 {
-        struct gss_sec_pipefs *gsec_pf;
-        struct ptlrpc_cli_ctx *ctx;
-        cfs_hlist_node_t      *pos, *next;
-        CFS_HLIST_HEAD(freelist);
-        unsigned int           hash;
-        ENTRY;
+	struct hlist_node __maybe_unused *pos, *next;
+	struct gss_sec_pipefs *gsec_pf;
+	struct ptlrpc_cli_ctx *ctx;
+	HLIST_HEAD(freelist);
+	unsigned int hash;
+	ENTRY;

	gsec_pf = container_of(gsec, struct gss_sec_pipefs, gsp_base);

@@ -322,13 +319,14 @@ int gss_install_rvs_cli_ctx_pf(struct gss_sec *gsec,

 static
 void gss_ctx_cache_gc_pf(struct gss_sec_pipefs *gsec_pf,
-                         cfs_hlist_head_t *freelist)
+			 struct hlist_head *freelist)
 {
-        struct ptlrpc_sec     *sec;
-        struct ptlrpc_cli_ctx *ctx;
-        cfs_hlist_node_t      *pos, *next;
-        int                    i;
-        ENTRY;
+	struct ptlrpc_sec *sec;
+	struct ptlrpc_cli_ctx *ctx;
+	struct hlist_node __maybe_unused *pos;
+	struct hlist_node *next;
+	int i;
+	ENTRY;

	sec = &gsec_pf->gsp_base.gs_base;

@@ -340,8 +338,8 @@ void gss_ctx_cache_gc_pf(struct gss_sec_pipefs *gsec_pf,
			ctx_check_death_locked_pf(ctx, freelist);
	}

-	sec->ps_gc_next = cfs_time_current_sec() + sec->ps_gc_interval;
-	EXIT;
+	sec->ps_gc_next = ktime_get_real_seconds() + sec->ps_gc_interval;
+	EXIT;
 }

 static
@@ -362,7 +360,7 @@ struct ptlrpc_sec* gss_sec_create_pf(struct obd_import *imp,
	hash_size = GSS_SEC_PIPEFS_CTX_HASH_SIZE;

	alloc_size = sizeof(*gsec_pf) +
-		     sizeof(cfs_hlist_head_t) * hash_size;
+		     sizeof(struct hlist_head) * hash_size;

	OBD_ALLOC(gsec_pf, alloc_size);
	if (!gsec_pf)
@@ -370,7 +368,7 @@ struct ptlrpc_sec* gss_sec_create_pf(struct obd_import *imp,

	gsec_pf->gsp_chash_size = hash_size;
	for (i = 0; i < hash_size; i++)
-		CFS_INIT_HLIST_HEAD(&gsec_pf->gsp_chash[i]);
+		INIT_HLIST_HEAD(&gsec_pf->gsp_chash[i]);

	if (gss_sec_create_common(&gsec_pf->gsp_base, &gss_policy_pipefs,
				  imp, ctx, sf))
@@ -412,7 +410,7 @@ void gss_sec_destroy_pf(struct ptlrpc_sec *sec)
	gss_sec_destroy_common(gsec);

	OBD_FREE(gsec, sizeof(*gsec_pf) +
-		       sizeof(cfs_hlist_head_t) * gsec_pf->gsp_chash_size);
+		       sizeof(struct hlist_head) * gsec_pf->gsp_chash_size);
 }

 static
@@ -420,13 +418,13 @@ struct ptlrpc_cli_ctx * gss_sec_lookup_ctx_pf(struct ptlrpc_sec *sec,
                                               struct vfs_cred *vcred,
                                               int create, int remove_dead)
 {
-        struct gss_sec        *gsec;
-        struct gss_sec_pipefs *gsec_pf;
-        struct ptlrpc_cli_ctx *ctx = NULL, *new = NULL;
-        cfs_hlist_head_t      *hash_head;
-        cfs_hlist_node_t      *pos, *next;
-        CFS_HLIST_HEAD(freelist);
-        unsigned int           hash, gc = 0, found = 0;
+	struct gss_sec *gsec;
+	struct gss_sec_pipefs *gsec_pf;
+	struct ptlrpc_cli_ctx *ctx = NULL, *new = NULL;
+	struct hlist_head *hash_head;
+	struct hlist_node __maybe_unused *pos, *next;
+	unsigned int hash, gc = 0, found = 0;
+	HLIST_HEAD(freelist);
	ENTRY;

	might_sleep();
@@ -442,12 +440,12 @@ struct ptlrpc_cli_ctx * gss_sec_lookup_ctx_pf(struct ptlrpc_sec *sec,
 retry:
	spin_lock(&sec->ps_lock);

-        /* gc_next == 0 means never do gc */
-        if (remove_dead && sec->ps_gc_next &&
-            cfs_time_after(cfs_time_current_sec(), sec->ps_gc_next)) {
-                gss_ctx_cache_gc_pf(gsec_pf, &freelist);
-                gc = 1;
-        }
+	/* gc_next == 0 means never do gc */
+	if (remove_dead && sec->ps_gc_next &&
+	    (ktime_get_real_seconds() > sec->ps_gc_next)) {
+		gss_ctx_cache_gc_pf(gsec_pf, &freelist);
+		gc = 1;
+	}

	cfs_hlist_for_each_entry_safe(ctx, pos, next, hash_head, cc_cache) {
		if (gc == 0 &&
@@ -464,14 +462,14 @@ retry:
	if (found) {
		if (new && new != ctx) {
			/* lost the race, just free it */
-			cfs_hlist_add_head(&new->cc_cache, &freelist);
+			hlist_add_head(&new->cc_cache, &freelist);
			new = NULL;
		}

		/* hot node, move to head */
		if (hash_head->first != &ctx->cc_cache) {
-			__cfs_hlist_del(&ctx->cc_cache);
-			cfs_hlist_add_head(&ctx->cc_cache, hash_head);
+			__hlist_del(&ctx->cc_cache);
+			hlist_add_head(&ctx->cc_cache, hash_head);
		}
	} else {
		/* don't allocate for reverse sec */
@@ -517,7 +515,7 @@ void gss_sec_release_ctx_pf(struct ptlrpc_sec *sec,
			    int sync)
 {
	LASSERT(test_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags) == 0);
-	LASSERT(cfs_hlist_unhashed(&ctx->cc_cache));
+	LASSERT(hlist_unhashed(&ctx->cc_cache));

	/* if required async, we must clear the UPTODATE bit to prevent extra
	 * rpcs during destroy procedure. */
@@ -543,11 +541,11 @@ int gss_sec_flush_ctx_cache_pf(struct ptlrpc_sec *sec,
                               uid_t uid,
                               int grace, int force)
 {
-        struct gss_sec        *gsec;
-        struct gss_sec_pipefs *gsec_pf;
-        struct ptlrpc_cli_ctx *ctx;
-        cfs_hlist_node_t      *pos, *next;
-        CFS_HLIST_HEAD(freelist);
+	struct gss_sec *gsec;
+	struct gss_sec_pipefs *gsec_pf;
+	struct ptlrpc_cli_ctx *ctx;
+	struct hlist_node __maybe_unused *pos, *next;
+	HLIST_HEAD(freelist);
	int i, busy = 0;
	ENTRY;

@@ -635,7 +633,7 @@ struct gss_upcall_msg_data {
 struct gss_upcall_msg {
	struct rpc_pipe_msg		gum_base;
	atomic_t			gum_refcount;
-	cfs_list_t			gum_list;
+	struct list_head		gum_list;
	__u32				gum_mechidx;
	struct gss_sec			*gum_gsec;
	struct gss_cli_ctx		*gum_gctx;
@@ -665,7 +663,7 @@ __u32 mech_name2idx(const char *name)

 /* pipefs dentries for each mechanisms */
 static struct dentry *de_pipes[MECH_MAX] = { NULL, };
 /* all upcall messgaes linked here */
-static cfs_list_t upcall_lists[MECH_MAX];
+static struct list_head upcall_lists[MECH_MAX];
 /* and protected by this */
 static spinlock_t upcall_locks[MECH_MAX];
@@ -684,21 +682,21 @@ void upcall_list_unlock(int idx)
 static
 void upcall_msg_enlist(struct gss_upcall_msg *msg)
 {
-        __u32 idx = msg->gum_mechidx;
+	__u32 idx = msg->gum_mechidx;

-        upcall_list_lock(idx);
-        cfs_list_add(&msg->gum_list, &upcall_lists[idx]);
-        upcall_list_unlock(idx);
+	upcall_list_lock(idx);
+	list_add(&msg->gum_list, &upcall_lists[idx]);
+	upcall_list_unlock(idx);
 }

 static
 void upcall_msg_delist(struct gss_upcall_msg *msg)
 {
-        __u32 idx = msg->gum_mechidx;
+	__u32 idx = msg->gum_mechidx;

-        upcall_list_lock(idx);
-        cfs_list_del_init(&msg->gum_list);
-        upcall_list_unlock(idx);
+	upcall_list_lock(idx);
+	list_del_init(&msg->gum_list);
+	upcall_list_unlock(idx);
 }

 /****************************************
@@ -722,10 +720,10 @@ void gss_release_msg(struct gss_upcall_msg *gmsg)
		gmsg->gum_gctx = NULL;
	}

-	LASSERT(cfs_list_empty(&gmsg->gum_list));
-	LASSERT(cfs_list_empty(&gmsg->gum_base.list));
-	OBD_FREE_PTR(gmsg);
-	EXIT;
+	LASSERT(list_empty(&gmsg->gum_list));
+	LASSERT(list_empty(&gmsg->gum_base.list));
+	OBD_FREE_PTR(gmsg);
+	EXIT;
 }

 static
@@ -736,10 +734,10 @@ void gss_unhash_msg_nolock(struct gss_upcall_msg *gmsg)
	LASSERT(idx < MECH_MAX);
	assert_spin_locked(&upcall_locks[idx]);

-	if (cfs_list_empty(&gmsg->gum_list))
+	if (list_empty(&gmsg->gum_list))
		return;

-	cfs_list_del_init(&gmsg->gum_list);
+	list_del_init(&gmsg->gum_list);
	LASSERT(atomic_read(&gmsg->gum_refcount) > 1);
	atomic_dec(&gmsg->gum_refcount);
 }

@@ -773,7 +771,7 @@ struct gss_upcall_msg * gss_find_upcall(__u32 mechidx, __u32 seq)
	struct gss_upcall_msg *gmsg;

	upcall_list_lock(mechidx);
-	cfs_list_for_each_entry(gmsg, &upcall_lists[mechidx], gum_list) {
+	list_for_each_entry(gmsg, &upcall_lists[mechidx], gum_list) {
		if (gmsg->gum_data.gum_seq != seq)
			continue;

@@ -791,15 +789,16 @@ struct gss_upcall_msg * gss_find_upcall(__u32 mechidx, __u32 seq)
 static
 int simple_get_bytes(char **buf, __u32 *buflen, void *res, __u32 reslen)
 {
-        if (*buflen < reslen) {
-                CERROR("buflen %u < %u\n", *buflen, reslen);
-                return -EINVAL;
-        }
+	if (*buflen < reslen) {
+		CERROR("shorter buflen than needed: %u < %u\n",
+		       *buflen, reslen);
+		return -EINVAL;
+	}

-        memcpy(res, *buf, reslen);
-        *buf += reslen;
-        *buflen -= reslen;
-        return 0;
+	memcpy(res, *buf, reslen);
+	*buf += reslen;
+	*buflen -= reslen;
+	return 0;
 }

 /****************************************
@@ -831,7 +830,7 @@ ssize_t gss_pipe_upcall(struct file *filp, struct rpc_pipe_msg *msg,
 static
 ssize_t gss_pipe_downcall(struct file *filp, const char *src, size_t mlen)
 {
-	struct rpc_inode *rpci = RPC_I(filp->f_dentry->d_inode);
+	struct rpc_inode *rpci = RPC_I(file_inode(filp));
	struct gss_upcall_msg *gss_msg;
	struct ptlrpc_cli_ctx *ctx;
	struct gss_cli_ctx *gctx = NULL;
@@ -952,10 +951,10 @@ void gss_pipe_destroy_msg(struct rpc_pipe_msg *msg)
 {
	struct gss_upcall_msg *gmsg;
	struct gss_upcall_msg_data *gumd;
-	static cfs_time_t ratelimit = 0;
+	static time64_t ratelimit;
	ENTRY;

-	LASSERT(cfs_list_empty(&msg->list));
+	LASSERT(list_empty(&msg->list));

	/* normally errno is >= 0 */
	if (msg->errno >= 0) {
@@ -967,7 +966,7 @@ void gss_pipe_destroy_msg(struct rpc_pipe_msg *msg)
	gumd = &gmsg->gum_data;
	LASSERT(atomic_read(&gmsg->gum_refcount) > 0);

-	CERROR("failed msg %p (seq %u, uid %u, svc %u, nid "LPX64", obd %.*s): "
+	CERROR("failed msg %p (seq %u, uid %u, svc %u, nid %#llx, obd %.*s): "
	       "errno %d\n", msg, gumd->gum_seq, gumd->gum_uid, gumd->gum_svc,
	       gumd->gum_nid, (int) sizeof(gumd->gum_obd), gumd->gum_obd,
	       msg->errno);
@@ -975,9 +974,9 @@ void gss_pipe_destroy_msg(struct rpc_pipe_msg *msg)
	atomic_inc(&gmsg->gum_refcount);
	gss_unhash_msg(gmsg);
	if (msg->errno == -ETIMEDOUT || msg->errno == -EPIPE) {
-		cfs_time_t now = cfs_time_current_sec();
+		time64_t now = ktime_get_real_seconds();

-		if (cfs_time_after(now, ratelimit)) {
+		if (now > ratelimit) {
			CWARN("upcall timed out, is lgssd running?\n");
			ratelimit = now + 15;
		}
@@ -990,25 +989,25 @@ void gss_pipe_destroy_msg(struct rpc_pipe_msg *msg)
 static
 void gss_pipe_release(struct inode *inode)
 {
-        struct rpc_inode *rpci = RPC_I(inode);
-        __u32             idx;
-        ENTRY;
+	struct rpc_inode *rpci = RPC_I(inode);
+	__u32 idx;
+	ENTRY;

-        idx = (__u32) (long) rpci->private;
-        LASSERT(idx < MECH_MAX);
+	idx = (__u32) (long) rpci->private;
+	LASSERT(idx < MECH_MAX);

-        upcall_list_lock(idx);
-        while (!cfs_list_empty(&upcall_lists[idx])) {
-                struct gss_upcall_msg *gmsg;
-                struct gss_upcall_msg_data *gumd;
+	upcall_list_lock(idx);
+	while (!list_empty(&upcall_lists[idx])) {
+		struct gss_upcall_msg *gmsg;
+		struct gss_upcall_msg_data *gumd;

-                gmsg = cfs_list_entry(upcall_lists[idx].next,
-                                      struct gss_upcall_msg, gum_list);
-                gumd = &gmsg->gum_data;
-                LASSERT(cfs_list_empty(&gmsg->gum_base.list));
+		gmsg = list_entry(upcall_lists[idx].next,
+				  struct gss_upcall_msg, gum_list);
+		gumd = &gmsg->gum_data;
+		LASSERT(list_empty(&gmsg->gum_base.list));

		CERROR("failing remaining msg %p:seq %u, uid %u, svc %u, "
-		       "nid "LPX64", obd %.*s\n", gmsg,
+		       "nid %#llx, obd %.*s\n", gmsg,
"nid %#llx, obd %.*s\n", gmsg, gumd->gum_seq, gumd->gum_uid, gumd->gum_svc, gumd->gum_nid, (int) sizeof(gumd->gum_obd), gumd->gum_obd); @@ -1066,7 +1065,7 @@ int gss_ctx_refresh_pf(struct ptlrpc_cli_ctx *ctx) RETURN(-ENOMEM); /* initialize pipefs base msg */ - CFS_INIT_LIST_HEAD(&gmsg->gum_base.list); + INIT_LIST_HEAD(&gmsg->gum_base.list); gmsg->gum_base.data = &gmsg->gum_data; gmsg->gum_base.len = sizeof(gmsg->gum_data); gmsg->gum_base.copied = 0; @@ -1083,7 +1082,7 @@ int gss_ctx_refresh_pf(struct ptlrpc_cli_ctx *ctx) gmsg->gum_data.gum_gid = 0; /* not used for now */ gmsg->gum_data.gum_svc = import_to_gss_svc(imp); gmsg->gum_data.gum_nid = imp->imp_connection->c_peer.nid; - strncpy(gmsg->gum_data.gum_obd, imp->imp_obd->obd_name, + strlcpy(gmsg->gum_data.gum_obd, imp->imp_obd->obd_name, sizeof(gmsg->gum_data.gum_obd)); /* This only could happen when sysadmin set it dead/expired @@ -1210,7 +1209,7 @@ int __init gss_init_pipefs_upcall(void) } de_pipes[MECH_KRB5] = de; - CFS_INIT_LIST_HEAD(&upcall_lists[MECH_KRB5]); + INIT_LIST_HEAD(&upcall_lists[MECH_KRB5]); spin_lock_init(&upcall_locks[MECH_KRB5]); return 0; @@ -1219,17 +1218,17 @@ int __init gss_init_pipefs_upcall(void) static void __exit gss_exit_pipefs_upcall(void) { - __u32 i; + __u32 i; - for (i = 0; i < MECH_MAX; i++) { - LASSERT(cfs_list_empty(&upcall_lists[i])); + for (i = 0; i < MECH_MAX; i++) { + LASSERT(list_empty(&upcall_lists[i])); - /* dput pipe dentry here might cause lgssd oops. */ - de_pipes[i] = NULL; - } + /* dput pipe dentry here might cause lgssd oops. */ + de_pipes[i] = NULL; + } - rpc_unlink(LUSTRE_PIPE_KRB5); - rpc_rmdir(LUSTRE_PIPE_ROOT); + rpc_unlink(LUSTRE_PIPE_KRB5); + rpc_rmdir(LUSTRE_PIPE_ROOT); } int __init gss_init_pipefs(void) @@ -1251,6 +1250,6 @@ int __init gss_init_pipefs(void) void __exit gss_exit_pipefs(void) { - gss_exit_pipefs_upcall(); - sptlrpc_unregister_policy(&gss_policy_pipefs); + gss_exit_pipefs_upcall(); + sptlrpc_unregister_policy(&gss_policy_pipefs); }