Whamcloud - gitweb
LU-14095 gss: use RCU protection for sunrpc cache
[fs/lustre-release.git] / lustre / ptlrpc / gss / gss_svc_upcall.c
index e266f67..845f0be 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
  *
- * Copyright (c) 2012, Intel Corporation.
+ * Copyright (c) 2012, 2014, Intel Corporation.
  *
  * Author: Eric Mei <ericm@clusterfs.com>
  */
 
 #define DEBUG_SUBSYSTEM S_SEC
-#ifdef __KERNEL__
 #include <linux/types.h>
 #include <linux/init.h>
 #include <linux/module.h>
+#include <linux/random.h>
 #include <linux/slab.h>
 #include <linux/hash.h>
 #include <linux/mutex.h>
 #include <linux/sunrpc/cache.h>
-#else
-#include <liblustre.h>
-#endif
+#include <net/sock.h>
 
 #include <obd.h>
 #include <obd_class.h>
 #include <obd_support.h>
-#include <lustre/lustre_idl.h>
-#include <lustre_net.h>
 #include <lustre_import.h>
+#include <lustre_net.h>
+#include <lustre_nodemap.h>
 #include <lustre_sec.h>
 
 #include "gss_err.h"
 #include "gss_internal.h"
 #include "gss_api.h"
+#include "gss_crypto.h"
 
 #define GSS_SVC_UPCALL_TIMEOUT  (20)
 
-static spinlock_t __ctx_index_lock;
+static DEFINE_SPINLOCK(__ctx_index_lock);
 static __u64 __ctx_index;
 
+unsigned int krb5_allow_old_client_csum;
+
 __u64 gss_get_next_ctx_index(void)
 {
        __u64 idx;
@@ -90,30 +91,30 @@ __u64 gss_get_next_ctx_index(void)
 
 static inline unsigned long hash_mem(char *buf, int length, int bits)
 {
-        unsigned long hash = 0;
-        unsigned long l = 0;
-        int len = 0;
-        unsigned char c;
-
-        do {
-                if (len == length) {
-                        c = (char) len;
-                        len = -1;
-                } else
-                        c = *buf++;
-
-                l = (l << 8) | c;
-                len++;
-
-                if ((len & (BITS_PER_LONG/8-1)) == 0)
-                        hash = cfs_hash_long(hash^l, BITS_PER_LONG);
-        } while (len);
-
-        return hash >> (BITS_PER_LONG - bits);
+       unsigned long hash = 0;
+       unsigned long l = 0;
+       int len = 0;
+       unsigned char c;
+
+       do {
+               if (len == length) {
+                       c = (char) len;
+                       len = -1;
+               } else
+                       c = *buf++;
+
+               l = (l << 8) | c;
+               len++;
+
+               if ((len & (BITS_PER_LONG/8-1)) == 0)
+                       hash = hash_long(hash^l, BITS_PER_LONG);
+       } while (len);
+
+       return hash >> (BITS_PER_LONG - bits);
 }
 
 /****************************************
- * rsi cache                            *
+ * rpc sec init (rsi) cache *
  ****************************************/
 
 #define RSI_HASHBITS    (6)
@@ -121,20 +122,40 @@ static inline unsigned long hash_mem(char *buf, int length, int bits)
 #define RSI_HASHMASK    (RSI_HASHMAX - 1)
 
 struct rsi {
-        struct cache_head       h;
-        __u32                   lustre_svc;
-        __u64                   nid;
-        cfs_waitq_t             waitq;
-        rawobj_t                in_handle, in_token;
-        rawobj_t                out_handle, out_token;
-        int                     major_status, minor_status;
+       struct cache_head       h;
+       __u32                   lustre_svc;
+       __u64                   nid;
+       char                    nm_name[LUSTRE_NODEMAP_NAME_LENGTH + 1];
+       wait_queue_head_t       waitq;
+       rawobj_t                in_handle, in_token;
+       rawobj_t                out_handle, out_token;
+       int                     major_status, minor_status;
+#ifdef HAVE_CACHE_HASH_SPINLOCK
+       struct rcu_head         rcu_head;
+#endif
 };
 
+#ifdef HAVE_CACHE_HEAD_HLIST
+static struct hlist_head rsi_table[RSI_HASHMAX];
+#else
 static struct cache_head *rsi_table[RSI_HASHMAX];
+#endif
 static struct cache_detail rsi_cache;
 static struct rsi *rsi_update(struct rsi *new, struct rsi *old);
 static struct rsi *rsi_lookup(struct rsi *item);
 
+#ifdef HAVE_CACHE_DETAIL_WRITERS
+static inline int channel_users(struct cache_detail *cd)
+{
+       return atomic_read(&cd->writers);
+}
+#else
+static inline int channel_users(struct cache_detail *cd)
+{
+       return atomic_read(&cd->readers);
+}
+#endif
+
 static inline int rsi_hash(struct rsi *item)
 {
         return hash_mem((char *)item->in_handle.data, item->in_handle.len,
@@ -157,46 +178,43 @@ static void rsi_free(struct rsi *rsi)
         rawobj_free(&rsi->out_token);
 }
 
+/* See handle_channel_req() in userspace for where the upcall data is read */
 static void rsi_request(struct cache_detail *cd,
                         struct cache_head *h,
                         char **bpp, int *blen)
 {
-        struct rsi *rsi = container_of(h, struct rsi, h);
-        __u64 index = 0;
-
-        /* if in_handle is null, provide kernel suggestion */
-        if (rsi->in_handle.len == 0)
-                index = gss_get_next_ctx_index();
-
-        qword_addhex(bpp, blen, (char *) &rsi->lustre_svc,
-                     sizeof(rsi->lustre_svc));
-        qword_addhex(bpp, blen, (char *) &rsi->nid, sizeof(rsi->nid));
-        qword_addhex(bpp, blen, (char *) &index, sizeof(index));
-        qword_addhex(bpp, blen, rsi->in_handle.data, rsi->in_handle.len);
-        qword_addhex(bpp, blen, rsi->in_token.data, rsi->in_token.len);
-        (*bpp)[-1] = '\n';
+       struct rsi *rsi = container_of(h, struct rsi, h);
+       __u64 index = 0;
+
+       /* if in_handle is null, provide kernel suggestion */
+       if (rsi->in_handle.len == 0)
+               index = gss_get_next_ctx_index();
+
+       qword_addhex(bpp, blen, (char *) &rsi->lustre_svc,
+                       sizeof(rsi->lustre_svc));
+       qword_addhex(bpp, blen, (char *) &rsi->nid, sizeof(rsi->nid));
+       qword_addhex(bpp, blen, (char *) &index, sizeof(index));
+       qword_addhex(bpp, blen, (char *) rsi->nm_name,
+                    strlen(rsi->nm_name) + 1);
+       qword_addhex(bpp, blen, rsi->in_handle.data, rsi->in_handle.len);
+       qword_addhex(bpp, blen, rsi->in_token.data, rsi->in_token.len);
+       (*bpp)[-1] = '\n';
 }
 
-#ifdef HAVE_CACHE_UPCALL
-static int rsi_upcall(struct cache_detail *cd, struct cache_head *h)
-{
-        return sunrpc_cache_pipe_upcall(cd, h, rsi_request);
-}
-#endif
-
 static inline void __rsi_init(struct rsi *new, struct rsi *item)
 {
-        new->out_handle = RAWOBJ_EMPTY;
-        new->out_token = RAWOBJ_EMPTY;
-
-        new->in_handle = item->in_handle;
-        item->in_handle = RAWOBJ_EMPTY;
-        new->in_token = item->in_token;
-        item->in_token = RAWOBJ_EMPTY;
-
-        new->lustre_svc = item->lustre_svc;
-        new->nid = item->nid;
-        cfs_waitq_init(&new->waitq);
+       new->out_handle = RAWOBJ_EMPTY;
+       new->out_token = RAWOBJ_EMPTY;
+
+       new->in_handle = item->in_handle;
+       item->in_handle = RAWOBJ_EMPTY;
+       new->in_token = item->in_token;
+       item->in_token = RAWOBJ_EMPTY;
+
+       new->lustre_svc = item->lustre_svc;
+       new->nid = item->nid;
+       memcpy(new->nm_name, item->nm_name, sizeof(item->nm_name));
+       init_waitqueue_head(&new->waitq);
 }
 
 static inline void __rsi_update(struct rsi *new, struct rsi *item)
@@ -213,14 +231,40 @@ static inline void __rsi_update(struct rsi *new, struct rsi *item)
         new->minor_status = item->minor_status;
 }
 
+#ifdef HAVE_CACHE_HASH_SPINLOCK
+static void rsi_free_rcu(struct rcu_head *head)
+{
+       struct rsi *rsi = container_of(head, struct rsi, rcu_head);
+
+#ifdef HAVE_CACHE_HEAD_HLIST
+       LASSERT(hlist_unhashed(&rsi->h.cache_list));
+#else
+       LASSERT(rsi->h.next == NULL);
+#endif
+       rsi_free(rsi);
+       OBD_FREE_PTR(rsi);
+}
+
 static void rsi_put(struct kref *ref)
 {
-        struct rsi *rsi = container_of(ref, struct rsi, h.ref);
+       struct rsi *rsi = container_of(ref, struct rsi, h.ref);
 
-        LASSERT(rsi->h.next == NULL);
-        rsi_free(rsi);
-        OBD_FREE_PTR(rsi);
+       call_rcu(&rsi->rcu_head, rsi_free_rcu);
 }
+#else /* !HAVE_CACHE_HASH_SPINLOCK */
+static void rsi_put(struct kref *ref)
+{
+       struct rsi *rsi = container_of(ref, struct rsi, h.ref);
+
+#ifdef HAVE_CACHE_HEAD_HLIST
+       LASSERT(hlist_unhashed(&rsi->h.cache_list));
+#else
+       LASSERT(rsi->h.next == NULL);
+#endif
+       rsi_free(rsi);
+       OBD_FREE_PTR(rsi);
+}
+#endif /* HAVE_CACHE_HASH_SPINLOCK */
 
 static int rsi_match(struct cache_head *a, struct cache_head *b)
 {
@@ -260,10 +304,9 @@ static struct cache_head *rsi_alloc(void)
 static int rsi_parse(struct cache_detail *cd, char *mesg, int mlen)
 {
         char           *buf = mesg;
-        char           *ep;
         int             len;
         struct rsi      rsii, *rsip = NULL;
-        time_t          expiry;
+       time64_t expiry;
         int             status = -EINVAL;
         ENTRY;
 
@@ -302,18 +345,21 @@ static int rsi_parse(struct cache_detail *cd, char *mesg, int mlen)
         if (len <= 0)
                 goto out;
 
-        /* major */
-        rsii.major_status = simple_strtol(buf, &ep, 10);
-        if (*ep)
-                goto out;
+       /* major */
+       status = kstrtoint(buf, 10, &rsii.major_status);
+       if (status)
+               goto out;
 
-        /* minor */
-        len = qword_get(&mesg, buf, mlen);
-        if (len <= 0)
-                goto out;
-        rsii.minor_status = simple_strtol(buf, &ep, 10);
-        if (*ep)
-                goto out;
+       /* minor */
+       len = qword_get(&mesg, buf, mlen);
+       if (len <= 0) {
+               status = -EINVAL;
+               goto out;
+       }
+
+       status = kstrtoint(buf, 10, &rsii.minor_status);
+       if (status)
+               goto out;
 
         /* out_handle */
         len = qword_get(&mesg, buf, mlen);
@@ -337,34 +383,31 @@ static int rsi_parse(struct cache_detail *cd, char *mesg, int mlen)
         rsip = rsi_update(&rsii, rsip);
         status = 0;
 out:
-        rsi_free(&rsii);
-        if (rsip) {
-                cfs_waitq_broadcast(&rsip->waitq);
-                cache_put(&rsip->h, &rsi_cache);
-        } else {
-                status = -ENOMEM;
-        }
-
-        if (status)
-                CERROR("rsi parse error %d\n", status);
-        RETURN(status);
+       rsi_free(&rsii);
+       if (rsip) {
+               wake_up_all(&rsip->waitq);
+               cache_put(&rsip->h, &rsi_cache);
+       } else {
+               status = -ENOMEM;
+       }
+
+       if (status)
+               CERROR("rsi parse error %d\n", status);
+       RETURN(status);
 }
 
 static struct cache_detail rsi_cache = {
-        .hash_size      = RSI_HASHMAX,
-        .hash_table     = rsi_table,
-        .name           = "auth.sptlrpc.init",
-        .cache_put      = rsi_put,
-#ifdef HAVE_CACHE_UPCALL
-        .cache_upcall   = rsi_upcall,
-#else
-        .cache_request  = rsi_request,
-#endif
-        .cache_parse    = rsi_parse,
-        .match          = rsi_match,
-        .init           = rsi_init,
-        .update         = update_rsi,
-        .alloc          = rsi_alloc,
+       .hash_size      = RSI_HASHMAX,
+       .hash_table     = rsi_table,
+       .name           = "auth.sptlrpc.init",
+       .cache_put      = rsi_put,
+       .cache_request  = rsi_request,
+       .cache_upcall   = sunrpc_cache_pipe_upcall,
+       .cache_parse    = rsi_parse,
+       .match          = rsi_match,
+       .init           = rsi_init,
+       .update         = update_rsi,
+       .alloc          = rsi_alloc,
 };
 
 static struct rsi *rsi_lookup(struct rsi *item)
@@ -392,7 +435,7 @@ static struct rsi *rsi_update(struct rsi *new, struct rsi *old)
 }
 
 /****************************************
- * rsc cache                            *
+ * rpc sec context (rsc) cache                            *
  ****************************************/
 
 #define RSC_HASHBITS    (10)
@@ -404,9 +447,16 @@ struct rsc {
         struct obd_device      *target;
         rawobj_t                handle;
         struct gss_svc_ctx      ctx;
+#ifdef HAVE_CACHE_HASH_SPINLOCK
+       struct rcu_head         rcu_head;
+#endif
 };
 
+#ifdef HAVE_CACHE_HEAD_HLIST
+static struct hlist_head rsc_table[RSC_HASHMAX];
+#else
 static struct cache_head *rsc_table[RSC_HASHMAX];
+#endif
 static struct cache_detail rsc_cache;
 static struct rsc *rsc_update(struct rsc *new, struct rsc *old);
 static struct rsc *rsc_lookup(struct rsc *item);
@@ -441,22 +491,52 @@ static inline void __rsc_init(struct rsc *new, struct rsc *tmp)
 
 static inline void __rsc_update(struct rsc *new, struct rsc *tmp)
 {
-        new->ctx = tmp->ctx;
-        tmp->ctx.gsc_rvs_hdl = RAWOBJ_EMPTY;
-        tmp->ctx.gsc_mechctx = NULL;
+       new->ctx = tmp->ctx;
+       memset(&tmp->ctx, 0, sizeof(tmp->ctx));
+       tmp->ctx.gsc_rvs_hdl = RAWOBJ_EMPTY;
+       tmp->ctx.gsc_mechctx = NULL;
+       tmp->target = NULL;
 
-        memset(&new->ctx.gsc_seqdata, 0, sizeof(new->ctx.gsc_seqdata));
+       memset(&new->ctx.gsc_seqdata, 0, sizeof(new->ctx.gsc_seqdata));
        spin_lock_init(&new->ctx.gsc_seqdata.ssd_lock);
 }
 
+#ifdef HAVE_CACHE_HASH_SPINLOCK
+static void rsc_free_rcu(struct rcu_head *head)
+{
+       struct rsc *rsci = container_of(head, struct rsc, rcu_head);
+
+#ifdef HAVE_CACHE_HEAD_HLIST
+       LASSERT(hlist_unhashed(&rsci->h.cache_list));
+#else
+       LASSERT(rsci->h.next == NULL);
+#endif
+       rawobj_free(&rsci->handle);
+       OBD_FREE_PTR(rsci);
+}
+
 static void rsc_put(struct kref *ref)
 {
-        struct rsc *rsci = container_of(ref, struct rsc, h.ref);
+       struct rsc *rsci = container_of(ref, struct rsc, h.ref);
 
-        LASSERT(rsci->h.next == NULL);
-        rsc_free(rsci);
-        OBD_FREE_PTR(rsci);
+       rawobj_free(&rsci->ctx.gsc_rvs_hdl);
+       lgss_delete_sec_context(&rsci->ctx.gsc_mechctx);
+       call_rcu(&rsci->rcu_head, rsc_free_rcu);
 }
+#else /* !HAVE_CACHE_HASH_SPINLOCK */
+static void rsc_put(struct kref *ref)
+{
+       struct rsc *rsci = container_of(ref, struct rsc, h.ref);
+
+#ifdef HAVE_CACHE_HEAD_HLIST
+       LASSERT(hlist_unhashed(&rsci->h.cache_list));
+#else
+       LASSERT(rsci->h.next == NULL);
+#endif
+       rsc_free(rsci);
+       OBD_FREE_PTR(rsci);
+}
+#endif /* HAVE_CACHE_HASH_SPINLOCK */
 
 static int rsc_match(struct cache_head *a, struct cache_head *b)
 {
@@ -498,7 +578,7 @@ static int rsc_parse(struct cache_detail *cd, char *mesg, int mlen)
         char                *buf = mesg;
         int                  len, rv, tmp_int;
         struct rsc           rsci, *rscp = NULL;
-        time_t               expiry;
+       time64_t expiry;
         int                  status = -EINVAL;
         struct gss_api_mech *gm = NULL;
 
@@ -526,13 +606,13 @@ static int rsc_parse(struct cache_detail *cd, char *mesg, int mlen)
         }
         rsci.ctx.gsc_remote = (tmp_int != 0);
 
-        /* root user flag */
-        rv = get_int(&mesg, &tmp_int);
-        if (rv) {
-                CERROR("fail to get oss user flag\n");
-                goto out;
-        }
-        rsci.ctx.gsc_usr_root = (tmp_int != 0);
+       /* root user flag */
+       rv = get_int(&mesg, &tmp_int);
+       if (rv) {
+               CERROR("fail to get root user flag\n");
+               goto out;
+       }
+       rsci.ctx.gsc_usr_root = (tmp_int != 0);
 
         /* mds user flag */
         rv = get_int(&mesg, &tmp_int);
@@ -569,41 +649,49 @@ static int rsc_parse(struct cache_detail *cd, char *mesg, int mlen)
                 CERROR("NOENT? set rsc entry negative\n");
                set_bit(CACHE_NEGATIVE, &rsci.h.flags);
         } else {
-                rawobj_t tmp_buf;
-                unsigned long ctx_expiry;
-
-                /* gid */
-                if (get_int(&mesg, (int *) &rsci.ctx.gsc_gid))
-                        goto out;
-
-                /* mech name */
-                len = qword_get(&mesg, buf, mlen);
-                if (len < 0)
-                        goto out;
-                gm = lgss_name_to_mech(buf);
-                status = -EOPNOTSUPP;
-                if (!gm)
-                        goto out;
-
-                status = -EINVAL;
-                /* mech-specific data: */
-                len = qword_get(&mesg, buf, mlen);
-                if (len < 0)
-                        goto out;
-
-                tmp_buf.len = len;
-                tmp_buf.data = (unsigned char *)buf;
-                if (lgss_import_sec_context(&tmp_buf, gm,
-                                            &rsci.ctx.gsc_mechctx))
-                        goto out;
-
-                /* currently the expiry time passed down from user-space
-                 * is invalid, here we retrive it from mech. */
-                if (lgss_inquire_context(rsci.ctx.gsc_mechctx, &ctx_expiry)) {
-                        CERROR("unable to get expire time, drop it\n");
-                        goto out;
-                }
-                expiry = (time_t) ctx_expiry;
+               rawobj_t tmp_buf;
+               time64_t ctx_expiry;
+
+               /* gid */
+               if (get_int(&mesg, (int *) &rsci.ctx.gsc_gid))
+                       goto out;
+
+               /* mech name */
+               len = qword_get(&mesg, buf, mlen);
+               if (len < 0)
+                       goto out;
+               gm = lgss_name_to_mech(buf);
+               status = -EOPNOTSUPP;
+               if (!gm)
+                       goto out;
+
+               status = -EINVAL;
+               /* mech-specific data: */
+               len = qword_get(&mesg, buf, mlen);
+               if (len < 0)
+                       goto out;
+
+               tmp_buf.len = len;
+               tmp_buf.data = (unsigned char *)buf;
+               if (lgss_import_sec_context(&tmp_buf, gm,
+                                           &rsci.ctx.gsc_mechctx))
+                       goto out;
+
+               /* set to seconds since machine booted */
+               expiry = ktime_get_seconds();
+
+               /* currently the expiry time passed down from user-space
+                * is invalid, here we retrieve it from mech.
+                */
+               if (lgss_inquire_context(rsci.ctx.gsc_mechctx, &ctx_expiry)) {
+                       CERROR("unable to get expire time, drop it\n");
+                       goto out;
+               }
+
+               /* ctx_expiry is the number of seconds since Jan 1 1970.
+                * We want just the number of seconds into the future.
+                */
+               expiry += ctx_expiry - ktime_get_real_seconds();
         }
 
         rsci.h.expiry_time = expiry;
@@ -665,69 +753,6 @@ static struct rsc *rsc_update(struct rsc *new, struct rsc *old)
  * rsc cache flush                      *
  ****************************************/
 
-typedef int rsc_entry_match(struct rsc *rscp, long data);
-
-static void rsc_flush(rsc_entry_match *match, long data)
-{
-        struct cache_head **ch;
-        struct rsc *rscp;
-        int n;
-        ENTRY;
-
-       write_lock(&rsc_cache.hash_lock);
-        for (n = 0; n < RSC_HASHMAX; n++) {
-                for (ch = &rsc_cache.hash_table[n]; *ch;) {
-                        rscp = container_of(*ch, struct rsc, h);
-
-                        if (!match(rscp, data)) {
-                                ch = &((*ch)->next);
-                                continue;
-                        }
-
-                        /* it seems simply set NEGATIVE doesn't work */
-                        *ch = (*ch)->next;
-                        rscp->h.next = NULL;
-                        cache_get(&rscp->h);
-                       set_bit(CACHE_NEGATIVE, &rscp->h.flags);
-                        COMPAT_RSC_PUT(&rscp->h, &rsc_cache);
-                        rsc_cache.entries--;
-                }
-        }
-       write_unlock(&rsc_cache.hash_lock);
-        EXIT;
-}
-
-static int match_uid(struct rsc *rscp, long uid)
-{
-        if ((int) uid == -1)
-                return 1;
-        return ((int) rscp->ctx.gsc_uid == (int) uid);
-}
-
-static int match_target(struct rsc *rscp, long target)
-{
-        return (rscp->target == (struct obd_device *) target);
-}
-
-static inline void rsc_flush_uid(int uid)
-{
-        if (uid == -1)
-                CWARN("flush all gss contexts...\n");
-
-        rsc_flush(match_uid, (long) uid);
-}
-
-static inline void rsc_flush_target(struct obd_device *target)
-{
-        rsc_flush(match_target, (long) target);
-}
-
-void gss_secsvc_flush(struct obd_device *target)
-{
-        rsc_flush_target(target);
-}
-EXPORT_SYMBOL(gss_secsvc_flush);
-
 static struct rsc *gss_svc_searchbyctx(rawobj_t *handle)
 {
         struct rsc  rsci;
@@ -751,7 +776,7 @@ int gss_svc_upcall_install_rvs_ctx(struct obd_import *imp,
                                    struct gss_cli_ctx *gctx)
 {
         struct rsc      rsci, *rscp = NULL;
-        unsigned long   ctx_expiry;
+       time64_t ctx_expiry;
         __u32           major;
         int             rc;
         ENTRY;
@@ -775,14 +800,27 @@ int gss_svc_upcall_install_rvs_ctx(struct obd_import *imp,
                 CERROR("unable to get expire time, drop it\n");
                 GOTO(out, rc = -EINVAL);
         }
-        rsci.h.expiry_time = (time_t) ctx_expiry;
-
-        if (strcmp(imp->imp_obd->obd_type->typ_name, LUSTRE_MDC_NAME) == 0)
-                rsci.ctx.gsc_usr_mds = 1;
-        else if (strcmp(imp->imp_obd->obd_type->typ_name, LUSTRE_OSC_NAME) == 0)
-                rsci.ctx.gsc_usr_oss = 1;
-        else
-                rsci.ctx.gsc_usr_root = 1;
+       rsci.h.expiry_time = ctx_expiry;
+
+       switch (imp->imp_obd->u.cli.cl_sp_to) {
+       case LUSTRE_SP_MDT:
+               rsci.ctx.gsc_usr_mds = 1;
+               break;
+       case LUSTRE_SP_OST:
+               rsci.ctx.gsc_usr_oss = 1;
+               break;
+       case LUSTRE_SP_CLI:
+               rsci.ctx.gsc_usr_root = 1;
+               break;
+       case LUSTRE_SP_MGS:
+               /* by convention, all 3 set to 1 means MGS */
+               rsci.ctx.gsc_usr_mds = 1;
+               rsci.ctx.gsc_usr_oss = 1;
+               rsci.ctx.gsc_usr_root = 1;
+               break;
+       default:
+               break;
+       }
 
         rscp = rsc_update(&rsci, rscp);
         if (rscp == NULL)
@@ -791,7 +829,7 @@ int gss_svc_upcall_install_rvs_ctx(struct obd_import *imp,
         rscp->target = imp->imp_obd;
         rawobj_dup(&gctx->gc_svc_handle, &rscp->handle);
 
-        CWARN("create reverse svc ctx %p to %s: idx "LPX64"\n",
+       CWARN("create reverse svc ctx %p to %s: idx %#llx\n",
               &rscp->ctx, obd2cli_tgt(imp->imp_obd), gsec->gs_rvs_hdl);
         rc = 0;
 out:
@@ -800,22 +838,22 @@ out:
         rsc_free(&rsci);
 
         if (rc)
-                CERROR("create reverse svc ctx: idx "LPX64", rc %d\n",
+               CERROR("create reverse svc ctx: idx %#llx, rc %d\n",
                        gsec->gs_rvs_hdl, rc);
         RETURN(rc);
 }
 
 int gss_svc_upcall_expire_rvs_ctx(rawobj_t *handle)
 {
-        const cfs_time_t        expire = 20;
-        struct rsc             *rscp;
+       const time64_t expire = 20;
+       struct rsc *rscp;
 
         rscp = gss_svc_searchbyctx(handle);
         if (rscp) {
                 CDEBUG(D_SEC, "reverse svcctx %p (rsc %p) expire soon\n",
                        &rscp->ctx, rscp);
 
-                rscp->h.expiry_time = cfs_time_current_sec() + expire;
+               rscp->h.expiry_time = ktime_get_real_seconds() + expire;
                 COMPAT_RSC_PUT(&rscp->h, &rsc_cache);
         }
         return 0;
@@ -850,26 +888,32 @@ static struct cache_deferred_req* cache_upcall_defer(struct cache_req *req)
 static struct cache_req cache_upcall_chandle = { cache_upcall_defer };
 
 int gss_svc_upcall_handle_init(struct ptlrpc_request *req,
-                               struct gss_svc_reqctx *grctx,
-                               struct gss_wire_ctx *gw,
-                               struct obd_device *target,
-                               __u32 lustre_svc,
-                               rawobj_t *rvs_hdl,
-                               rawobj_t *in_token)
+                              struct gss_svc_reqctx *grctx,
+                              struct gss_wire_ctx *gw,
+                              struct obd_device *target,
+                              __u32 lustre_svc,
+                              rawobj_t *rvs_hdl,
+                              rawobj_t *in_token)
 {
-        struct ptlrpc_reply_state *rs;
-        struct rsc                *rsci = NULL;
-        struct rsi                *rsip = NULL, rsikey;
-        cfs_waitlink_t             wait;
-        int                        replen = sizeof(struct ptlrpc_body);
-        struct gss_rep_header     *rephdr;
-        int                        first_check = 1;
-        int                        rc = SECSVC_DROP;
-        ENTRY;
-
-        memset(&rsikey, 0, sizeof(rsikey));
-        rsikey.lustre_svc = lustre_svc;
-        rsikey.nid = (__u64) req->rq_peer.nid;
+       struct ptlrpc_reply_state *rs;
+       struct rsc                *rsci = NULL;
+       struct rsi                *rsip = NULL, rsikey;
+       wait_queue_entry_t wait;
+       int                        replen = sizeof(struct ptlrpc_body);
+       struct gss_rep_header     *rephdr;
+       int                        first_check = 1;
+       int                        rc = SECSVC_DROP;
+       ENTRY;
+
+       memset(&rsikey, 0, sizeof(rsikey));
+       rsikey.lustre_svc = lustre_svc;
+       /* In case of MR, rq_peer is not the NID from which request is received,
+        * but primary NID of peer.
+        * So we need rq_source, which contains the NID actually in use.
+        */
+       rsikey.nid = (__u64) req->rq_source.nid;
+       nodemap_test_nid(req->rq_peer.nid, rsikey.nm_name,
+                        sizeof(rsikey.nm_name));
 
         /* duplicate context handle. for INIT it always 0 */
         if (rawobj_dup(&rsikey.in_handle, &gw->gw_handle)) {
@@ -894,64 +938,69 @@ int gss_svc_upcall_handle_init(struct ptlrpc_request *req,
                 GOTO(out, rc);
         }
 
-        cache_get(&rsip->h); /* take an extra ref */
-        cfs_waitq_init(&rsip->waitq);
-        cfs_waitlink_init(&wait);
-        cfs_waitq_add(&rsip->waitq, &wait);
+       cache_get(&rsip->h); /* take an extra ref */
+       init_wait(&wait);
+       add_wait_queue(&rsip->waitq, &wait);
 
 cache_check:
-        /* Note each time cache_check() will drop a reference if return
-         * non-zero. We hold an extra reference on initial rsip, but must
-         * take care of following calls. */
-        rc = cache_check(&rsi_cache, &rsip->h, &cache_upcall_chandle);
-        switch (rc) {
-        case -EAGAIN: {
-                int valid;
-
-                if (first_check) {
-                        first_check = 0;
-
-                        read_lock(&rsi_cache.hash_lock);
+       /* Note each time cache_check() will drop a reference if return
+        * non-zero. We hold an extra reference on initial rsip, but must
+        * take care of following calls. */
+       rc = cache_check(&rsi_cache, &rsip->h, &cache_upcall_chandle);
+       switch (rc) {
+       case -ETIMEDOUT:
+       case -EAGAIN: {
+               int valid;
+
+               if (first_check) {
+                       first_check = 0;
+
+                       cache_read_lock(&rsi_cache);
                        valid = test_bit(CACHE_VALID, &rsip->h.flags);
-                        if (valid == 0)
-                                cfs_set_current_state(CFS_TASK_INTERRUPTIBLE);
-                        read_unlock(&rsi_cache.hash_lock);
-
-                        if (valid == 0)
-                                cfs_schedule_timeout(GSS_SVC_UPCALL_TIMEOUT *
-                                                     CFS_HZ);
-
-                        cache_get(&rsip->h);
-                        goto cache_check;
-                }
-                CWARN("waited %ds timeout, drop\n", GSS_SVC_UPCALL_TIMEOUT);
-                break;
-        }
-        case -ENOENT:
-                CWARN("cache_check return ENOENT, drop\n");
-                break;
-        case 0:
-                /* if not the first check, we have to release the extra
-                 * reference we just added on it. */
-                if (!first_check)
-                        cache_put(&rsip->h, &rsi_cache);
-                CDEBUG(D_SEC, "cache_check is good\n");
-                break;
-        }
-
-        cfs_waitq_del(&rsip->waitq, &wait);
-        cache_put(&rsip->h, &rsi_cache);
-
-        if (rc)
-                GOTO(out, rc = SECSVC_DROP);
+                       if (valid == 0)
+                               set_current_state(TASK_INTERRUPTIBLE);
+                       cache_read_unlock(&rsi_cache);
+
+                       if (valid == 0) {
+                               unsigned long timeout;
+
+                               timeout = cfs_time_seconds(GSS_SVC_UPCALL_TIMEOUT);
+                               schedule_timeout(timeout);
+                       }
+                       cache_get(&rsip->h);
+                       goto cache_check;
+               }
+               CWARN("waited %ds timeout, drop\n", GSS_SVC_UPCALL_TIMEOUT);
+               break;
+       }
+       case -ENOENT:
+               CDEBUG(D_SEC, "cache_check return ENOENT, drop\n");
+               break;
+       case 0:
+               /* if not the first check, we have to release the extra
+                * reference we just added on it. */
+               if (!first_check)
+                       cache_put(&rsip->h, &rsi_cache);
+               CDEBUG(D_SEC, "cache_check is good\n");
+               break;
+       }
+
+       remove_wait_queue(&rsip->waitq, &wait);
+       cache_put(&rsip->h, &rsi_cache);
+
+       if (rc)
+               GOTO(out, rc = SECSVC_DROP);
 
         rc = SECSVC_DROP;
         rsci = gss_svc_searchbyctx(&rsip->out_handle);
         if (!rsci) {
                 CERROR("authentication failed\n");
 
-                if (!gss_pack_err_notify(req, GSS_S_FAILURE, 0))
-                        rc = SECSVC_COMPLETE;
+               /* gss mechanism returned major and minor code so we return
+                * those in error message */
+               if (!gss_pack_err_notify(req, rsip->major_status,
+                                        rsip->minor_status))
+                       rc = SECSVC_COMPLETE;
 
                 GOTO(out, rc);
         } else {
@@ -959,6 +1008,20 @@ cache_check:
                 grctx->src_ctx = &rsci->ctx;
         }
 
+       if (gw->gw_flags & LUSTRE_GSS_PACK_KCSUM) {
+               grctx->src_ctx->gsc_mechctx->hash_func = gss_digest_hash;
+       } else if (!strcmp(grctx->src_ctx->gsc_mechctx->mech_type->gm_name,
+                          "krb5") &&
+                  !krb5_allow_old_client_csum) {
+               CWARN("%s: deny connection from '%s' due to missing 'krb_csum' feature, set 'sptlrpc.gss.krb5_allow_old_client_csum=1' to allow, but recommend client upgrade: rc = %d\n",
+                     target->obd_name, libcfs_nid2str(req->rq_peer.nid),
+                     -EPROTO);
+               GOTO(out, rc = SECSVC_DROP);
+       } else {
+               grctx->src_ctx->gsc_mechctx->hash_func =
+                       gss_digest_hash_compat;
+       }
+
         if (rawobj_dup(&rsci->ctx.gsc_rvs_hdl, rvs_hdl)) {
                 CERROR("failed duplicate reverse handle\n");
                 GOTO(out, rc);
@@ -1009,24 +1072,23 @@ cache_check:
         rc = SECSVC_OK;
 
 out:
-        /* it looks like here we should put rsip also, but this mess up
-         * with NFS cache mgmt code... FIXME */
-#if 0
-        if (rsip)
-                rsi_put(&rsip->h, &rsi_cache);
-#endif
-
-        if (rsci) {
-                /* if anything went wrong, we don't keep the context too */
-                if (rc != SECSVC_OK)
+       /* it looks like we should put rsip here as well, but that messes up
+        * the NFS cache mgmt code... FIXME
+        * something like:
+        * if (rsip)
+        *     rsi_put(&rsip->h, &rsi_cache); */
+
+       if (rsci) {
+               /* if anything went wrong, we don't keep the context too */
+               if (rc != SECSVC_OK)
                        set_bit(CACHE_NEGATIVE, &rsci->h.flags);
-                else
-                        CDEBUG(D_SEC, "create rsc with idx "LPX64"\n",
-                               gss_handle_to_u64(&rsci->handle));
+               else
+                       CDEBUG(D_SEC, "create rsc with idx %#llx\n",
+                              gss_handle_to_u64(&rsci->handle));
 
-                COMPAT_RSC_PUT(&rsci->h, &rsc_cache);
-        }
-        RETURN(rc);
+               COMPAT_RSC_PUT(&rsci->h, &rsc_cache);
+       }
+       RETURN(rc);
 }
 
 struct gss_svc_ctx *gss_svc_upcall_get_ctx(struct ptlrpc_request *req,
@@ -1036,7 +1098,7 @@ struct gss_svc_ctx *gss_svc_upcall_get_ctx(struct ptlrpc_request *req,
 
         rsc = gss_svc_searchbyctx(&gw->gw_handle);
         if (!rsc) {
-                CWARN("Invalid gss ctx idx "LPX64" from %s\n",
+               CWARN("Invalid gss ctx idx %#llx from %s\n",
                       gss_handle_to_u64(&gw->gw_handle),
                       libcfs_nid2str(req->rq_peer.nid));
                 return NULL;
@@ -1064,46 +1126,58 @@ void gss_svc_upcall_destroy_ctx(struct gss_svc_ctx *ctx)
 
 int __init gss_init_svc_upcall(void)
 {
-       int     i;
-
-       spin_lock_init(&__ctx_index_lock);
-        /*
-         * this helps reducing context index confliction. after server reboot,
-         * conflicting request from clients might be filtered out by initial
-         * sequence number checking, thus no chance to sent error notification
-         * back to clients.
-         */
-        cfs_get_random_bytes(&__ctx_index, sizeof(__ctx_index));
-
-
-        cache_register(&rsi_cache);
-        cache_register(&rsc_cache);
-
-        /* FIXME this looks stupid. we intend to give lsvcgssd a chance to open
-         * the init upcall channel, otherwise there's big chance that the first
-         * upcall issued before the channel be opened thus nfsv4 cache code will
-         * drop the request direclty, thus lead to unnecessary recovery time.
-         * here we wait at miximum 1.5 seconds. */
-        for (i = 0; i < 6; i++) {
-                if (atomic_read(&rsi_cache.readers) > 0)
-                        break;
-                cfs_set_current_state(TASK_UNINTERRUPTIBLE);
-                LASSERT(CFS_HZ >= 4);
-                cfs_schedule_timeout(CFS_HZ / 4);
-        }
-
-        if (atomic_read(&rsi_cache.readers) == 0)
-                CWARN("Init channel is not opened by lsvcgssd, following "
-                      "request might be dropped until lsvcgssd is active\n");
+       int     i, rc;
+
+       /*
+        * this helps reducing context index confliction. after server reboot,
+        * conflicting request from clients might be filtered out by initial
+        * sequence number checking, thus no chance to sent error notification
+        * back to clients.
+        */
+       get_random_bytes(&__ctx_index, sizeof(__ctx_index));
+
+#ifdef HAVE_CACHE_HEAD_HLIST
+       for (i = 0; i < rsi_cache.hash_size; i++)
+               INIT_HLIST_HEAD(&rsi_cache.hash_table[i]);
+#endif
+       rc = cache_register_net(&rsi_cache, &init_net);
+       if (rc != 0)
+               return rc;
 
-        return 0;
+#ifdef HAVE_CACHE_HEAD_HLIST
+       for (i = 0; i < rsc_cache.hash_size; i++)
+               INIT_HLIST_HEAD(&rsc_cache.hash_table[i]);
+#endif
+       rc = cache_register_net(&rsc_cache, &init_net);
+       if (rc != 0) {
+               cache_unregister_net(&rsi_cache, &init_net);
+               return rc;
+       }
+
+       /* FIXME this looks stupid. we intend to give lsvcgssd a chance to
+        * open the init upcall channel, otherwise there's a big chance that
+        * the first upcall is issued before the channel is opened, and the
+        * nfsv4 cache code will drop the request directly, leading to
+        * unnecessary recovery time. Here we wait at most 1.5 seconds.
+        */
+        */
+       for (i = 0; i < 6; i++) {
+               if (channel_users(&rsi_cache) > 0)
+                       break;
+               schedule_timeout_uninterruptible(cfs_time_seconds(1) / 4);
+       }
+
+       if (channel_users(&rsi_cache) == 0)
+               CWARN("Init channel is not opened by lsvcgssd, following "
+                     "request might be dropped until lsvcgssd is active\n");
+
+       return 0;
 }
 
-void __exit gss_exit_svc_upcall(void)
+void gss_exit_svc_upcall(void)
 {
-        cache_purge(&rsi_cache);
-        cache_unregister(&rsi_cache);
+       cache_purge(&rsi_cache);
+       cache_unregister_net(&rsi_cache, &init_net);
 
-        cache_purge(&rsc_cache);
-        cache_unregister(&rsc_cache);
+       cache_purge(&rsc_cache);
+       cache_unregister_net(&rsc_cache, &init_net);
 }