LU-3963 libcfs: remove cfs_hash_long
[fs/lustre-release.git] lustre/ptlrpc/gss/gss_svc_upcall.c
index 4e663cb..3e60767 100644
@@ -3,7 +3,7 @@
  *
  * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
  *
- * Copyright (c) 2012, Whamcloud, Inc.
+ * Copyright (c) 2012, Intel Corporation.
  *
  * Author: Eric Mei <ericm@clusterfs.com>
  */
@@ -90,28 +90,50 @@ __u64 gss_get_next_ctx_index(void)
 
 static inline unsigned long hash_mem(char *buf, int length, int bits)
 {
-        unsigned long hash = 0;
-        unsigned long l = 0;
-        int len = 0;
-        unsigned char c;
-
-        do {
-                if (len == length) {
-                        c = (char) len;
-                        len = -1;
-                } else
-                        c = *buf++;
-
-                l = (l << 8) | c;
-                len++;
-
-                if ((len & (BITS_PER_LONG/8-1)) == 0)
-                        hash = cfs_hash_long(hash^l, BITS_PER_LONG);
-        } while (len);
-
-        return hash >> (BITS_PER_LONG - bits);
+       unsigned long hash = 0;
+       unsigned long l = 0;
+       int len = 0;
+       unsigned char c;
+
+       do {
+               if (len == length) {
+                       c = (char) len;
+                       len = -1;
+               } else
+                       c = *buf++;
+
+               l = (l << 8) | c;
+               len++;
+
+               if ((len & (BITS_PER_LONG/8-1)) == 0)
+                       hash = hash_long(hash^l, BITS_PER_LONG);
+       } while (len);
+
+       return hash >> (BITS_PER_LONG - bits);
 }
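
The only functional change in hash_mem() is the switch from the libcfs wrapper cfs_hash_long() to the kernel's own hash_long() from <linux/hash.h>; the rest of the function is re-indentation from eight spaces to tabs. For reference, a minimal userspace sketch of what hash_long() computes on a 64-bit build; the multiplier is the golden-ratio constant used by recent kernels and is an assumption, not necessarily the exact value in every kernel this patch targets:

    /* Rough stand-in for hash_long(val, bits) with BITS_PER_LONG == 64:
     * multiply by a golden-ratio-derived constant, keep the top 'bits' bits. */
    static inline unsigned long demo_hash_long(unsigned long val, unsigned int bits)
    {
            return (val * 0x61c8864680b583ebUL) >> (64 - bits);
    }

hash_mem() folds the buffer into the hash eight bytes at a time on such a build (the len & (BITS_PER_LONG/8-1) test), calls the function with bits == BITS_PER_LONG, and keeps only the top bits via the explicit shift on the return statement.
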
 
+/* This compatibility can be removed once kernel 3.3 is used,
+ * since cache_register_net/cache_unregister_net are exported.
+ * Note that since kernel 3.4 cache_register and cache_unregister
+ * are removed.
+ */
+static inline int _cache_register_net(struct cache_detail *cd, struct net *net)
+{
+#ifdef HAVE_CACHE_REGISTER
+       return cache_register(cd);
+#else
+       return cache_register_net(cd, net);
+#endif
+}
+static inline void _cache_unregister_net(struct cache_detail *cd,
+                                        struct net *net)
+{
+#ifdef HAVE_CACHE_REGISTER
+       cache_unregister(cd);
+#else
+       cache_unregister_net(cd, net);
+#endif
+}
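
HAVE_CACHE_REGISTER is presumably set by a configure-time probe against the target kernel headers; the actual check lives in Lustre's autoconf machinery and is not part of this patch. Conceptually the probe only needs to discover whether the old, pre-network-namespace cache_register() prototype is still declared, along the lines of this hypothetical test unit, which compiles against kernels that still have the symbol and fails once it has been removed:

    /* Hypothetical feature probe, not actual Lustre configure code. */
    #include <linux/sunrpc/cache.h>

    int probe_cache_register(struct cache_detail *cd)
    {
            return cache_register(cd);
    }
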
 /****************************************
  * rsi cache                            *
  ****************************************/
@@ -121,13 +143,13 @@ static inline unsigned long hash_mem(char *buf, int length, int bits)
 #define RSI_HASHMASK    (RSI_HASHMAX - 1)
 
 struct rsi {
-        struct cache_head       h;
-        __u32                   lustre_svc;
-        __u64                   nid;
-        cfs_waitq_t             waitq;
-        rawobj_t                in_handle, in_token;
-        rawobj_t                out_handle, out_token;
-        int                     major_status, minor_status;
+       struct cache_head       h;
+       __u32                   lustre_svc;
+       __u64                   nid;
+       wait_queue_head_t       waitq;
+       rawobj_t                in_handle, in_token;
+       rawobj_t                out_handle, out_token;
+       int                     major_status, minor_status;
 };
 
 static struct cache_head *rsi_table[RSI_HASHMAX];
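
The waitq member changes from the libcfs cfs_waitq_t wrapper to the kernel's native wait_queue_head_t, and the later hunks convert the matching calls (cfs_waitq_init, cfs_waitq_broadcast, cfs_waitq_add/del) to the stock wait-queue API. A minimal, self-contained sketch of that API as it is used further down in gss_svc_upcall_handle_init(), written with the plain kernel helpers rather than the Lustre porting macro init_waitqueue_entry_current():

    #include <linux/wait.h>
    #include <linux/sched.h>

    /* Sleep for up to 'timeout' jiffies, waking early if another thread
     * signals the queue with wake_up_all(waitq). */
    static void demo_wait(wait_queue_head_t *waitq, long timeout)
    {
            DECLARE_WAITQUEUE(wait, current);       /* entry bound to this task */

            add_wait_queue(waitq, &wait);
            set_current_state(TASK_INTERRUPTIBLE);
            schedule_timeout(timeout);
            remove_wait_queue(waitq, &wait);
    }

In the real code the state is only set to TASK_INTERRUPTIBLE after re-checking CACHE_VALID under rsi_cache.hash_lock, which closes the lost-wakeup race, and it is rsi_parse() that issues the wake_up_all() once the userspace reply has been parsed.
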
@@ -161,42 +183,48 @@ static void rsi_request(struct cache_detail *cd,
                         struct cache_head *h,
                         char **bpp, int *blen)
 {
-        struct rsi *rsi = container_of(h, struct rsi, h);
-        __u64 index = 0;
-
-        /* if in_handle is null, provide kernel suggestion */
-        if (rsi->in_handle.len == 0)
-                index = gss_get_next_ctx_index();
-
-        qword_addhex(bpp, blen, (char *) &rsi->lustre_svc,
-                     sizeof(rsi->lustre_svc));
-        qword_addhex(bpp, blen, (char *) &rsi->nid, sizeof(rsi->nid));
-        qword_addhex(bpp, blen, (char *) &index, sizeof(index));
-        qword_addhex(bpp, blen, rsi->in_handle.data, rsi->in_handle.len);
-        qword_addhex(bpp, blen, rsi->in_token.data, rsi->in_token.len);
-        (*bpp)[-1] = '\n';
+       struct rsi *rsi = container_of(h, struct rsi, h);
+       __u64 index = 0;
+
+       /* if in_handle is null, provide kernel suggestion */
+       if (rsi->in_handle.len == 0)
+               index = gss_get_next_ctx_index();
+
+       qword_addhex(bpp, blen, (char *) &rsi->lustre_svc,
+                       sizeof(rsi->lustre_svc));
+       qword_addhex(bpp, blen, (char *) &rsi->nid, sizeof(rsi->nid));
+       qword_addhex(bpp, blen, (char *) &index, sizeof(index));
+       qword_addhex(bpp, blen, rsi->in_handle.data, rsi->in_handle.len);
+       qword_addhex(bpp, blen, rsi->in_token.data, rsi->in_token.len);
+       (*bpp)[-1] = '\n';
 }
 
-#ifdef HAVE_CACHE_UPCALL
+#ifdef HAVE_SUNRPC_UPCALL_HAS_3ARGS
 static int rsi_upcall(struct cache_detail *cd, struct cache_head *h)
 {
-        return sunrpc_cache_pipe_upcall(cd, h, rsi_request);
+       return sunrpc_cache_pipe_upcall(cd, h, rsi_request);
+}
+#else
+
+static int rsi_upcall(struct cache_detail *cd, struct cache_head *h)
+{
+       return sunrpc_cache_pipe_upcall(cd, h);
 }
 #endif
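
HAVE_SUNRPC_UPCALL_HAS_3ARGS tracks a prototype change in the sunrpc cache code: older kernels pass the message formatter to sunrpc_cache_pipe_upcall() as a third argument, while newer ones expect to find it in cd->cache_request, which is why the cache_detail initializer below only fills in .cache_request for the two-argument case. Reconstructed from the way the helper is called here, the two prototypes being reconciled look roughly as follows (the parameter names are assumptions):

    /* Older kernels (HAVE_SUNRPC_UPCALL_HAS_3ARGS defined): formatter passed in. */
    int sunrpc_cache_pipe_upcall(struct cache_detail *cd, struct cache_head *h,
                                 void (*cache_request)(struct cache_detail *,
                                                       struct cache_head *,
                                                       char **, int *));

    /* Newer kernels: the formatter is taken from cd->cache_request instead. */
    int sunrpc_cache_pipe_upcall(struct cache_detail *cd, struct cache_head *h);
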
 
 static inline void __rsi_init(struct rsi *new, struct rsi *item)
 {
-        new->out_handle = RAWOBJ_EMPTY;
-        new->out_token = RAWOBJ_EMPTY;
+       new->out_handle = RAWOBJ_EMPTY;
+       new->out_token = RAWOBJ_EMPTY;
 
-        new->in_handle = item->in_handle;
-        item->in_handle = RAWOBJ_EMPTY;
-        new->in_token = item->in_token;
-        item->in_token = RAWOBJ_EMPTY;
+       new->in_handle = item->in_handle;
+       item->in_handle = RAWOBJ_EMPTY;
+       new->in_token = item->in_token;
+       item->in_token = RAWOBJ_EMPTY;
 
-        new->lustre_svc = item->lustre_svc;
-        new->nid = item->nid;
-        cfs_waitq_init(&new->waitq);
+       new->lustre_svc = item->lustre_svc;
+       new->nid = item->nid;
+       init_waitqueue_head(&new->waitq);
 }
 
 static inline void __rsi_update(struct rsi *new, struct rsi *item)
@@ -337,34 +365,33 @@ static int rsi_parse(struct cache_detail *cd, char *mesg, int mlen)
         rsip = rsi_update(&rsii, rsip);
         status = 0;
 out:
-        rsi_free(&rsii);
-        if (rsip) {
-                cfs_waitq_broadcast(&rsip->waitq);
-                cache_put(&rsip->h, &rsi_cache);
-        } else {
-                status = -ENOMEM;
-        }
-
-        if (status)
-                CERROR("rsi parse error %d\n", status);
-        RETURN(status);
+       rsi_free(&rsii);
+       if (rsip) {
+               wake_up_all(&rsip->waitq);
+               cache_put(&rsip->h, &rsi_cache);
+       } else {
+               status = -ENOMEM;
+       }
+
+       if (status)
+               CERROR("rsi parse error %d\n", status);
+       RETURN(status);
 }
 
 static struct cache_detail rsi_cache = {
-        .hash_size      = RSI_HASHMAX,
-        .hash_table     = rsi_table,
-        .name           = "auth.sptlrpc.init",
-        .cache_put      = rsi_put,
-#ifdef HAVE_CACHE_UPCALL
-        .cache_upcall   = rsi_upcall,
-#else
-        .cache_request  = rsi_request,
+       .hash_size      = RSI_HASHMAX,
+       .hash_table     = rsi_table,
+       .name           = "auth.sptlrpc.init",
+       .cache_put      = rsi_put,
+#ifndef HAVE_SUNRPC_UPCALL_HAS_3ARGS
+       .cache_request  = rsi_request,
 #endif
-        .cache_parse    = rsi_parse,
-        .match          = rsi_match,
-        .init           = rsi_init,
-        .update         = update_rsi,
-        .alloc          = rsi_alloc,
+       .cache_upcall   = rsi_upcall,
+       .cache_parse    = rsi_parse,
+       .match          = rsi_match,
+       .init           = rsi_init,
+       .update         = update_rsi,
+       .alloc          = rsi_alloc,
 };
 
 static struct rsi *rsi_lookup(struct rsi *item)
@@ -850,22 +877,22 @@ static struct cache_deferred_req* cache_upcall_defer(struct cache_req *req)
 static struct cache_req cache_upcall_chandle = { cache_upcall_defer };
 
 int gss_svc_upcall_handle_init(struct ptlrpc_request *req,
-                               struct gss_svc_reqctx *grctx,
-                               struct gss_wire_ctx *gw,
-                               struct obd_device *target,
-                               __u32 lustre_svc,
-                               rawobj_t *rvs_hdl,
-                               rawobj_t *in_token)
+                              struct gss_svc_reqctx *grctx,
+                              struct gss_wire_ctx *gw,
+                              struct obd_device *target,
+                              __u32 lustre_svc,
+                              rawobj_t *rvs_hdl,
+                              rawobj_t *in_token)
 {
-        struct ptlrpc_reply_state *rs;
-        struct rsc                *rsci = NULL;
-        struct rsi                *rsip = NULL, rsikey;
-        cfs_waitlink_t             wait;
-        int                        replen = sizeof(struct ptlrpc_body);
-        struct gss_rep_header     *rephdr;
-        int                        first_check = 1;
-        int                        rc = SECSVC_DROP;
-        ENTRY;
+       struct ptlrpc_reply_state *rs;
+       struct rsc                *rsci = NULL;
+       struct rsi                *rsip = NULL, rsikey;
+       wait_queue_t               wait;
+       int                        replen = sizeof(struct ptlrpc_body);
+       struct gss_rep_header     *rephdr;
+       int                        first_check = 1;
+       int                        rc = SECSVC_DROP;
+       ENTRY;
 
         memset(&rsikey, 0, sizeof(rsikey));
         rsikey.lustre_svc = lustre_svc;
@@ -894,18 +921,19 @@ int gss_svc_upcall_handle_init(struct ptlrpc_request *req,
                 GOTO(out, rc);
         }
 
-        cache_get(&rsip->h); /* take an extra ref */
-        cfs_waitq_init(&rsip->waitq);
-        cfs_waitlink_init(&wait);
-        cfs_waitq_add(&rsip->waitq, &wait);
+       cache_get(&rsip->h); /* take an extra ref */
+       init_waitqueue_head(&rsip->waitq);
+       init_waitqueue_entry_current(&wait);
+       add_wait_queue(&rsip->waitq, &wait);
 
 cache_check:
-        /* Note each time cache_check() will drop a reference if return
-         * non-zero. We hold an extra reference on initial rsip, but must
-         * take care of following calls. */
-        rc = cache_check(&rsi_cache, &rsip->h, &cache_upcall_chandle);
-        switch (rc) {
-        case -EAGAIN: {
+       /* Note each time cache_check() will drop a reference if return
+        * non-zero. We hold an extra reference on initial rsip, but must
+        * take care of following calls. */
+       rc = cache_check(&rsi_cache, &rsip->h, &cache_upcall_chandle);
+       switch (rc) {
+       case -ETIMEDOUT:
+       case -EAGAIN: {
                 int valid;
 
                 if (first_check) {
@@ -914,15 +942,15 @@ cache_check:
                         read_lock(&rsi_cache.hash_lock);
                        valid = test_bit(CACHE_VALID, &rsip->h.flags);
                         if (valid == 0)
-                                cfs_set_current_state(CFS_TASK_INTERRUPTIBLE);
+                               set_current_state(TASK_INTERRUPTIBLE);
                         read_unlock(&rsi_cache.hash_lock);
 
-                        if (valid == 0)
-                                cfs_schedule_timeout(GSS_SVC_UPCALL_TIMEOUT *
-                                                     CFS_HZ);
+                       if (valid == 0)
+                               schedule_timeout(GSS_SVC_UPCALL_TIMEOUT *
+                                                    HZ);
 
-                        cache_get(&rsip->h);
-                        goto cache_check;
+                       cache_get(&rsip->h);
+                       goto cache_check;
                 }
                 CWARN("waited %ds timeout, drop\n", GSS_SVC_UPCALL_TIMEOUT);
                 break;
@@ -933,17 +961,17 @@ cache_check:
         case 0:
                 /* if not the first check, we have to release the extra
                  * reference we just added on it. */
-                if (!first_check)
-                        cache_put(&rsip->h, &rsi_cache);
-                CDEBUG(D_SEC, "cache_check is good\n");
-                break;
-        }
+               if (!first_check)
+                       cache_put(&rsip->h, &rsi_cache);
+               CDEBUG(D_SEC, "cache_check is good\n");
+               break;
+       }
 
-        cfs_waitq_del(&rsip->waitq, &wait);
-        cache_put(&rsip->h, &rsi_cache);
+       remove_wait_queue(&rsip->waitq, &wait);
+       cache_put(&rsip->h, &rsi_cache);
 
-        if (rc)
-                GOTO(out, rc = SECSVC_DROP);
+       if (rc)
+               GOTO(out, rc = SECSVC_DROP);
 
         rc = SECSVC_DROP;
         rsci = gss_svc_searchbyctx(&rsip->out_handle);
@@ -1064,46 +1092,52 @@ void gss_svc_upcall_destroy_ctx(struct gss_svc_ctx *ctx)
 
 int __init gss_init_svc_upcall(void)
 {
-       int     i;
+       int     i, rc;
 
        spin_lock_init(&__ctx_index_lock);
-        /*
-         * this helps reducing context index confliction. after server reboot,
-         * conflicting request from clients might be filtered out by initial
-         * sequence number checking, thus no chance to sent error notification
-         * back to clients.
-         */
-        cfs_get_random_bytes(&__ctx_index, sizeof(__ctx_index));
-
-
-        cache_register(&rsi_cache);
-        cache_register(&rsc_cache);
-
-        /* FIXME this looks stupid. we intend to give lsvcgssd a chance to open
-         * the init upcall channel, otherwise there's big chance that the first
-         * upcall issued before the channel be opened thus nfsv4 cache code will
-         * drop the request direclty, thus lead to unnecessary recovery time.
-         * here we wait at miximum 1.5 seconds. */
-        for (i = 0; i < 6; i++) {
-                if (atomic_read(&rsi_cache.readers) > 0)
-                        break;
-                cfs_set_current_state(TASK_UNINTERRUPTIBLE);
-                LASSERT(CFS_HZ >= 4);
-                cfs_schedule_timeout(CFS_HZ / 4);
-        }
-
-        if (atomic_read(&rsi_cache.readers) == 0)
-                CWARN("Init channel is not opened by lsvcgssd, following "
-                      "request might be dropped until lsvcgssd is active\n");
-
-        return 0;
+       /*
+        * this helps reducing context index confliction. after server reboot,
+        * conflicting request from clients might be filtered out by initial
+        * sequence number checking, thus no chance to send error notification
+        * back to clients.
+        */
+       cfs_get_random_bytes(&__ctx_index, sizeof(__ctx_index));
+
+       rc = _cache_register_net(&rsi_cache, &init_net);
+       if (rc != 0)
+               return rc;
+
+       rc = _cache_register_net(&rsc_cache, &init_net);
+       if (rc != 0) {
+               _cache_unregister_net(&rsi_cache, &init_net);
+               return rc;
+       }
+
+       /* FIXME this looks stupid. we intend to give lsvcgssd a chance to open
+        * the init upcall channel, otherwise there's big chance that the first
+        * upcall issued before the channel be opened thus nfsv4 cache code will
+        * drop the request directly, thus lead to unnecessary recovery time.
+        * here we wait at maximum 1.5 seconds. */
+       for (i = 0; i < 6; i++) {
+               if (atomic_read(&rsi_cache.readers) > 0)
+                       break;
+               set_current_state(TASK_UNINTERRUPTIBLE);
+               LASSERT(HZ >= 4);
+               schedule_timeout(HZ / 4);
+       }
+
+       if (atomic_read(&rsi_cache.readers) == 0)
+               CWARN("Init channel is not opened by lsvcgssd, following "
+                     "request might be dropped until lsvcgssd is active\n");
+
+       return 0;
 }
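
rsi_cache.readers counts processes that have the cache's upcall channel open, so the loop above simply gives lsvcgssd up to 1.5 seconds (six sleeps of HZ/4 jiffies) to attach before the first init request can arrive. On the daemon side this presumably amounts to opening the channel file that the sunrpc cache code exposes under /proc/net/rpc/ for a cache named "auth.sptlrpc.init"; the exact path below is an assumption based on that convention, not something this patch establishes:

    /* Userspace sketch (hypothetical): what makes rsi_cache.readers non-zero. */
    #include <fcntl.h>

    int open_init_channel(void)
    {
            return open("/proc/net/rpc/auth.sptlrpc.init/channel", O_RDWR);
    }
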
 
-void __exit gss_exit_svc_upcall(void)
+void gss_exit_svc_upcall(void)
 {
-        cache_purge(&rsi_cache);
-        cache_unregister(&rsi_cache);
+       cache_purge(&rsi_cache);
+       _cache_unregister_net(&rsi_cache, &init_net);
 
-        cache_purge(&rsc_cache);
-        cache_unregister(&rsc_cache);
+       cache_purge(&rsc_cache);
+       _cache_unregister_net(&rsc_cache, &init_net);
 }