/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * Modifications for Lustre
 * Copyright 2004 - 2006, Cluster File Systems, Inc.
 * Author: Eric Mei <ericm@clusterfs.com>
 *
 *  Neil Brown <neilb@cse.unsw.edu.au>
 *  J. Bruce Fields <bfields@umich.edu>
 *  Andy Adamson <andros@umich.edu>
 *  Dug Song <dugsong@monkey.org>
 *
 * RPCSEC_GSS server authentication.
 * This implements RPCSEC_GSS as defined in rfc2203 (rpcsec_gss) and rfc2078.
 *
 * RPCSEC_GSS involves three stages:
 *  1/ context creation
 *  2/ data exchange
 *  3/ context destruction
 *
 * Context creation is handled largely by upcalls to user-space.
 *  In particular, GSS_Accept_sec_context is handled by an upcall.
 * Data exchange is handled entirely within the kernel.
 *  In particular, GSS_GetMIC, GSS_VerifyMIC, GSS_Seal and GSS_Unseal are in-kernel.
 * Context destruction is handled in-kernel.
 *  GSS_Delete_sec_context is in-kernel.
 *
 * Context creation is initiated by an RPCSEC_GSS_INIT request arriving.
 * The context handle and gss_token are used as a key into the rpcsec_init cache.
 * The content of this cache includes some of the outputs of GSS_Accept_sec_context,
 * namely major_status, minor_status, context_handle and reply_token.
 * These are sent back to the client.
 * Sequence window management is handled by the kernel.  The window size is
 * currently a compile-time constant.
 *
 * When user-space is happy that a context is established, it places an entry
 * in the rpcsec_context cache.  The key for this cache is the context_handle.
 * The content includes:
 *  uid/gidlist - for determining access rights
 *  mechanism specific information, such as a key
 */
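
/*
 * In this file the two caches described above are built on the sunrpc cache
 * framework: rsi_cache ("auth.ptlrpcs.init") holds in-flight RPCSEC_GSS_INIT
 * upcalls to lsvcgssd, and rsc_cache ("auth.ptlrpcs.context") holds
 * established server-side contexts keyed by context handle.
 */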

#define DEBUG_SUBSYSTEM S_SEC

#include <linux/types.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/hash.h>

#include <liblustre.h>

#include <linux/sunrpc/cache.h>

#include <obd_class.h>
#include <obd_support.h>
#include <lustre/lustre_idl.h>
#include <lustre_net.h>
#include <lustre_import.h>
#include <lustre_sec.h>

#include "gss_internal.h"

#define GSS_SVC_UPCALL_TIMEOUT  (20)

static spinlock_t __ctx_index_lock = SPIN_LOCK_UNLOCKED;
static __u64 __ctx_index = 1ULL;

__u64 gss_get_next_ctx_index(void)
{
        __u64 idx;

        spin_lock(&__ctx_index_lock);
        idx = __ctx_index++;
        spin_unlock(&__ctx_index_lock);

        return idx;
}
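
/*
 * Fold the buffer into the hash one unsigned-long-sized chunk at a time
 * (via hash_long) and return the top @bits bits, suitable for indexing the
 * rsi/rsc hash tables below.
 */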
unsigned long hash_mem(char *buf, int length, int bits)
        unsigned long hash = 0;

                if ((len & (BITS_PER_LONG/8-1)) == 0)
                        hash = hash_long(hash^l, BITS_PER_LONG);

        return hash >> (BITS_PER_LONG - bits);

/****************************************
 * rsi cache                            *
 ****************************************/

#define RSI_HASHBITS    (6)
#define RSI_HASHMAX     (1 << RSI_HASHBITS)
#define RSI_HASHMASK    (RSI_HASHMAX - 1)

struct rsi {
        struct cache_head       h;
        __u32                   lustre_svc;
        __u64                   nid;
        wait_queue_head_t       waitq;
        rawobj_t                in_handle, in_token;
        rawobj_t                out_handle, out_token;
        int                     major_status, minor_status;
};

static struct cache_head *rsi_table[RSI_HASHMAX];
static struct cache_detail rsi_cache;
static struct rsi *rsi_lookup(struct rsi *item, int set);

void rsi_free(struct rsi *rsi)
{
        rawobj_free(&rsi->in_handle);
        rawobj_free(&rsi->in_token);
        rawobj_free(&rsi->out_handle);
        rawobj_free(&rsi->out_token);
}

void rsi_put(struct cache_head *item, struct cache_detail *cd)
{
        struct rsi *rsi = container_of(item, struct rsi, h);

        LASSERT(atomic_read(&item->refcnt) > 0);

        if (cache_put(item, cd)) {
                LASSERT(item->next == NULL);
                rsi_free(rsi);
                kfree(rsi); /* created by cache mgmt using kmalloc */
        }
}

int rsi_hash(struct rsi *item)
{
        return hash_mem((char *)item->in_handle.data, item->in_handle.len,
                        RSI_HASHBITS) ^
               hash_mem((char *)item->in_token.data, item->in_token.len,
                        RSI_HASHBITS);
}

int rsi_match(struct rsi *item, struct rsi *tmp)
{
        return (rawobj_equal(&item->in_handle, &tmp->in_handle) &&
                rawobj_equal(&item->in_token, &tmp->in_token));
}

void rsi_request(struct cache_detail *cd,
                 struct cache_head *h,
                 char **bpp, int *blen)
{
        struct rsi *rsi = container_of(h, struct rsi, h);
        __u64 index = 0;

        /* if in_handle is null, provide kernel suggestion */
        if (rsi->in_handle.len == 0)
                index = gss_get_next_ctx_index();

        qword_addhex(bpp, blen, (char *) &rsi->lustre_svc,
                     sizeof(rsi->lustre_svc));
        qword_addhex(bpp, blen, (char *) &rsi->nid, sizeof(rsi->nid));
        qword_addhex(bpp, blen, (char *) &index, sizeof(index));
        qword_addhex(bpp, blen, rsi->in_handle.data, rsi->in_handle.len);
        qword_addhex(bpp, blen, rsi->in_token.data, rsi->in_token.len);
        (*bpp)[-1] = '\n';
}
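
/*
 * The request written to the "auth.ptlrpcs.init" channel therefore consists
 * of five hex-encoded fields: the lustre service id, the peer NID, a
 * kernel-suggested context index, the client's context handle and the GSS
 * token.  lsvcgssd reads this line, runs GSS_Accept_sec_context(), and
 * writes the result back through rsi_parse() below.
 */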

void rsi_init(struct rsi *new, struct rsi *item)
{
        new->out_handle = RAWOBJ_EMPTY;
        new->out_token = RAWOBJ_EMPTY;

        new->in_handle = item->in_handle;
        item->in_handle = RAWOBJ_EMPTY;
        new->in_token = item->in_token;
        item->in_token = RAWOBJ_EMPTY;

        new->lustre_svc = item->lustre_svc;
        new->nid = item->nid;
        init_waitqueue_head(&new->waitq);
}

void rsi_update(struct rsi *new, struct rsi *item)
{
        LASSERT(new->out_handle.len == 0);
        LASSERT(new->out_token.len == 0);

        new->out_handle = item->out_handle;
        item->out_handle = RAWOBJ_EMPTY;
        new->out_token = item->out_token;
        item->out_token = RAWOBJ_EMPTY;

        new->major_status = item->major_status;
        new->minor_status = item->minor_status;
}

int rsi_parse(struct cache_detail *cd, char *mesg, int mlen)
{
        struct rsi rsii, *rsip = NULL;
        int status = -EINVAL;

        memset(&rsii, 0, sizeof(rsii));

        /* handle */
        len = qword_get(&mesg, buf, mlen);
        if (rawobj_alloc(&rsii.in_handle, buf, len)) {
                status = -ENOMEM;
                goto out;
        }

        /* token */
        len = qword_get(&mesg, buf, mlen);
        if (rawobj_alloc(&rsii.in_token, buf, len)) {
                status = -ENOMEM;
                goto out;
        }

        /* expiry */
        expiry = get_expiry(&mesg);

        /* major status */
        len = qword_get(&mesg, buf, mlen);
        rsii.major_status = simple_strtol(buf, &ep, 10);

        /* minor status */
        len = qword_get(&mesg, buf, mlen);
        rsii.minor_status = simple_strtol(buf, &ep, 10);

        /* out_handle */
        len = qword_get(&mesg, buf, mlen);
        if (rawobj_alloc(&rsii.out_handle, buf, len)) {
                status = -ENOMEM;
                goto out;
        }

        /* out_token */
        len = qword_get(&mesg, buf, mlen);
        if (rawobj_alloc(&rsii.out_token, buf, len)) {
                status = -ENOMEM;
                goto out;
        }

        rsii.h.expiry_time = expiry;
        rsip = rsi_lookup(&rsii, 1);
        if (rsip) {
                status = 0;
                wake_up_all(&rsip->waitq);
                rsi_put(&rsip->h, &rsi_cache);
        }

out:
        if (status)
                CERROR("rsi parse error %d\n", status);

static struct cache_detail rsi_cache = {
        .hash_size      = RSI_HASHMAX,
        .hash_table     = rsi_table,
        .name           = "auth.ptlrpcs.init",
        .cache_put      = rsi_put,
        .cache_request  = rsi_request,
        .cache_parse    = rsi_parse,
};

static DefineSimpleCacheLookup(rsi, 0)
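
/*
 * DefineSimpleCacheLookup() is the sunrpc cache helper macro; here it is
 * expected to expand into the rsi_lookup() routine declared above, using
 * rsi_hash()/rsi_match() to locate an entry and rsi_init()/rsi_update() to
 * fill or update it when 'set' is nonzero.
 */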

/****************************************
 * rsc cache                            *
 ****************************************/

#define RSC_HASHBITS    (10)
#define RSC_HASHMAX     (1 << RSC_HASHBITS)
#define RSC_HASHMASK    (RSC_HASHMAX - 1)

struct rsc {
        struct cache_head       h;
        struct obd_device      *target;
        rawobj_t                handle;
        struct gss_svc_ctx      ctx;
};

static struct cache_head *rsc_table[RSC_HASHMAX];
static struct cache_detail rsc_cache;
static struct rsc *rsc_lookup(struct rsc *item, int set);

void rsc_free(struct rsc *rsci)
{
        rawobj_free(&rsci->handle);
        rawobj_free(&rsci->ctx.gsc_rvs_hdl);
        lgss_delete_sec_context(&rsci->ctx.gsc_mechctx);
}

void rsc_put(struct cache_head *item, struct cache_detail *cd)
{
        struct rsc *rsci = container_of(item, struct rsc, h);

        LASSERT(atomic_read(&item->refcnt) > 0);

        if (cache_put(item, cd)) {
                LASSERT(item->next == NULL);
                rsc_free(rsci);
                kfree(rsci); /* created by cache mgmt using kmalloc */
        }
}

int rsc_hash(struct rsc *rsci)
{
        return hash_mem((char *)rsci->handle.data,
                        rsci->handle.len, RSC_HASHBITS);
}

int rsc_match(struct rsc *new, struct rsc *tmp)
{
        return rawobj_equal(&new->handle, &tmp->handle);
}

void rsc_init(struct rsc *new, struct rsc *tmp)
{
        new->handle = tmp->handle;
        tmp->handle = RAWOBJ_EMPTY;

        memset(&new->ctx, 0, sizeof(new->ctx));
        new->ctx.gsc_rvs_hdl = RAWOBJ_EMPTY;
}

void rsc_update(struct rsc *new, struct rsc *tmp)
{
        new->ctx = tmp->ctx;
        tmp->ctx.gsc_rvs_hdl = RAWOBJ_EMPTY;
        tmp->ctx.gsc_mechctx = NULL;

        memset(&new->ctx.gsc_seqdata, 0, sizeof(new->ctx.gsc_seqdata));
        spin_lock_init(&new->ctx.gsc_seqdata.ssd_lock);
}
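
/*
 * Note that rsc_update() transfers ownership of the mech context and the
 * reverse handle from the temporary entry to the cached one: clearing
 * tmp->ctx.gsc_rvs_hdl and tmp->ctx.gsc_mechctx keeps a later rsc_free() on
 * the temporary entry from releasing what the cached entry now holds, while
 * the sequence-window data is reset for the fresh context.
 */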

int rsc_parse(struct cache_detail *cd, char *mesg, int mlen)
{
        int len, rv, tmp_int;
        struct rsc rsci, *rscp = NULL;
        int status = -EINVAL;

        memset(&rsci, 0, sizeof(rsci));

        /* context handle */
        len = qword_get(&mesg, buf, mlen);
        if (len < 0) goto out;
        if (rawobj_alloc(&rsci.handle, buf, len))
                goto out;

        /* expiry */
        expiry = get_expiry(&mesg);

        /* remote flag */
        rv = get_int(&mesg, &tmp_int);
        if (rv) {
                CERROR("failed to get remote flag\n");
                goto out;
        }
        rsci.ctx.gsc_remote = (tmp_int != 0);

        /* root user flag */
        rv = get_int(&mesg, &tmp_int);
        if (rv) {
                CERROR("failed to get oss user flag\n");
                goto out;
        }
        rsci.ctx.gsc_usr_root = (tmp_int != 0);

        /* mds user flag */
        rv = get_int(&mesg, &tmp_int);
        if (rv) {
                CERROR("failed to get mds user flag\n");
                goto out;
        }
        rsci.ctx.gsc_usr_mds = (tmp_int != 0);

        /* mapped uid */
        rv = get_int(&mesg, (int *) &rsci.ctx.gsc_mapped_uid);
        if (rv) {
                CERROR("failed to get mapped uid\n");
                goto out;
        }

        /* uid, or NEGATIVE */
        rv = get_int(&mesg, (int *) &rsci.ctx.gsc_uid);
        if (rv == -ENOENT) {
                CERROR("NOENT? set rsc entry negative\n");
                set_bit(CACHE_NEGATIVE, &rsci.h.flags);
        } else {
                struct gss_api_mech *gm;
                rawobj_t tmp_buf;
                unsigned long ctx_expiry;

                /* gid */
                if (get_int(&mesg, (int *) &rsci.ctx.gsc_gid))
                        goto out;

                /* mech name */
                len = qword_get(&mesg, buf, mlen);
                gm = lgss_name_to_mech(buf);
                status = -EOPNOTSUPP;
                if (!gm)
                        goto out;

                /* mech-specific data: */
                len = qword_get(&mesg, buf, mlen);

                tmp_buf.len = len;
                tmp_buf.data = (unsigned char *)buf;
                if (lgss_import_sec_context(&tmp_buf, gm,
                                            &rsci.ctx.gsc_mechctx)) {
                        goto out;
                }

                /* currently the expiry time passed down from user-space
                 * is invalid, here we retrieve it from the mech instead. */
                if (lgss_inquire_context(rsci.ctx.gsc_mechctx, &ctx_expiry)) {
                        CERROR("unable to get expiry time, drop it\n");
                        goto out;
                }
                expiry = (time_t) ctx_expiry;
        }

        rsci.h.expiry_time = expiry;
        rscp = rsc_lookup(&rsci, 1);
        if (rscp)
                status = 0;

out:
        if (rscp)
                rsc_put(&rscp->h, &rsc_cache);
        if (status)
                CERROR("parse rsc error %d\n", status);
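
/*
 * The "auth.ptlrpcs.context" downcall parsed above is one text line per
 * context: the context handle, an expiry time, the remote/root/mds flags,
 * the mapped uid, then the uid (absent for a negative entry) followed by the
 * gid, the mechanism name and the mechanism-specific (exported) context data.
 */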

/****************************************
 * rsc cache flush                      *
 ****************************************/

typedef int rsc_entry_match(struct rsc *rscp, long data);

void rsc_flush(rsc_entry_match *match, long data)
{
        struct cache_head **ch;
        struct rsc *rscp;
        int n;

        write_lock(&rsc_cache.hash_lock);
        for (n = 0; n < RSC_HASHMAX; n++) {
                for (ch = &rsc_cache.hash_table[n]; *ch;) {
                        rscp = container_of(*ch, struct rsc, h);

                        if (!match(rscp, data)) {

                        /* it seems that simply setting NEGATIVE doesn't work */
                        set_bit(CACHE_NEGATIVE, &rscp->h.flags);
                        rsc_put(&rscp->h, &rsc_cache);
        write_unlock(&rsc_cache.hash_lock);

int match_uid(struct rsc *rscp, long uid)
{
        return ((int) rscp->ctx.gsc_uid == (int) uid);
}

int match_target(struct rsc *rscp, long target)
{
        return (rscp->target == (struct obd_device *) target);
}

void rsc_flush_uid(int uid)
{
        CWARN("flush all gss contexts...\n");

        rsc_flush(match_uid, (long) uid);
}

void rsc_flush_target(struct obd_device *target)
{
        rsc_flush(match_target, (long) target);
}

void gss_secsvc_flush(struct obd_device *target)
{
        rsc_flush_target(target);
}
EXPORT_SYMBOL(gss_secsvc_flush);
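
/*
 * gss_secsvc_flush() is the exported entry point: a server-side obd uses it
 * (e.g. during target cleanup or forced re-authentication) to drop every
 * cached context bound to that obd_device via the match_target() predicate.
 */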

static struct cache_detail rsc_cache = {
        .hash_size      = RSC_HASHMAX,
        .hash_table     = rsc_table,
        .name           = "auth.ptlrpcs.context",
        .cache_put      = rsc_put,
        .cache_parse    = rsc_parse,
};

static DefineSimpleCacheLookup(rsc, 0);

struct rsc *gss_svc_searchbyctx(rawobj_t *handle)
{
        struct rsc rsci, *found;

        memset(&rsci, 0, sizeof(rsci));
        if (rawobj_dup(&rsci.handle, handle))
                return NULL;

        found = rsc_lookup(&rsci, 0);
        if (!found)
                return NULL;
        if (cache_check(&rsc_cache, &found->h, NULL))
                return NULL;

        return found;
}

int gss_svc_upcall_install_rvs_ctx(struct obd_import *imp,
                                   struct gss_sec *gsec,
                                   struct gss_cli_ctx *gctx)
{
        struct rsc rsci, *rscp;
        unsigned long ctx_expiry;
        __u32 major;

        memset(&rsci, 0, sizeof(rsci));

        if (rawobj_alloc(&rsci.handle, (char *) &gsec->gs_rvs_hdl,
                         sizeof(gsec->gs_rvs_hdl))) {
                CERROR("unable to allocate handle\n");
        }

        major = lgss_copy_reverse_context(gctx->gc_mechctx,
                                          &rsci.ctx.gsc_mechctx);
        if (major != GSS_S_COMPLETE) {
                CERROR("unable to copy reverse context\n");
        }

        if (lgss_inquire_context(rsci.ctx.gsc_mechctx, &ctx_expiry)) {
                CERROR("unable to get expiry time, drop it\n");
        }

        rsci.h.expiry_time = (time_t) ctx_expiry;
        rsci.target = imp->imp_obd;

        rscp = rsc_lookup(&rsci, 1);

        rsc_put(&rscp->h, &rsc_cache);

        CWARN("client installed reverse svc ctx to %s: idx %llx\n",
              imp->imp_obd->u.cli.cl_target_uuid.uuid,

        imp->imp_next_reconnect = gss_round_imp_reconnect(ctx_expiry);
        CWARN("import to %s: set force reconnect at %lu (%ld seconds of valid time)\n",
              imp->imp_obd->u.cli.cl_target_uuid.uuid,
              imp->imp_next_reconnect,
              (long) (imp->imp_next_reconnect - get_seconds()));

        RETURN(0);
}
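
/*
 * The entry installed above is a "reverse" context: the client registers,
 * under its own gs_rvs_hdl, a copy of the negotiated context in the
 * server-side rsc cache, so that requests later initiated by the peer
 * (e.g. callbacks) can be verified against it without a new negotiation.
 */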

static __u32
gss_svc_unseal_request(struct ptlrpc_request *req,
                       struct rsc *rsci,
                       struct gss_wire_cred *gc,
                       __u32 *vp, __u32 vlen)
{
        struct ptlrpcs_wire_hdr *sec_hdr;
        struct gss_ctx *ctx = rsci->mechctx;
        rawobj_t cipher_text, plain_text;
        __u32 major;

        sec_hdr = (struct ptlrpcs_wire_hdr *) req->rq_reqbuf;

        if (vlen < sizeof(__u32)) {
                CERROR("vlen only %u\n", vlen);
                RETURN(GSS_S_CALL_BAD_STRUCTURE);
        }

        cipher_text.len = le32_to_cpu(*vp++);
        cipher_text.data = (__u8 *) vp;
        vlen -= sizeof(__u32);

        if (cipher_text.len > vlen) {
                CERROR("cipher claimed %u while buf only %u\n",
                       cipher_text.len, vlen);
                RETURN(GSS_S_CALL_BAD_STRUCTURE);
        }

        plain_text = cipher_text;

        major = lgss_unwrap(ctx, GSS_C_QOP_DEFAULT, &cipher_text, &plain_text);
        if (major != GSS_S_COMPLETE) {
                CERROR("unwrap error 0x%x\n", major);
                RETURN(major);
        }

        if (gss_check_seq_num(&rsci->seqdata, gc->gc_seq)) {
                CERROR("discard replayed request %p(o%u,x"LPU64",t"LPU64")\n",
                       req, req->rq_reqmsg->opc, req->rq_xid,
                       req->rq_reqmsg->transno);
                RETURN(GSS_S_DUPLICATE_TOKEN);
        }

        req->rq_reqmsg = (struct lustre_msg *) (vp);
        req->rq_reqlen = plain_text.len;

        CDEBUG(D_SEC, "msg len %d\n", req->rq_reqlen);

        RETURN(GSS_S_COMPLETE);
}
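
/*
 * As read above, the sealed section of the request is laid out as a 32-bit
 * little-endian ciphertext length followed by the ciphertext itself; it is
 * unwrapped in place, after which req->rq_reqmsg points at the recovered
 * lustre_msg and the sequence number is checked against the replay window.
 */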

struct cache_deferred_req *cache_upcall_defer(struct cache_req *req)
{
        return NULL;
}

static struct cache_req cache_upcall_chandle = { cache_upcall_defer };
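
/*
 * cache_check() needs a struct cache_req so that it can decide whether to
 * defer the request while the upcall is outstanding.  By supplying a defer
 * method that returns NULL we tell the cache code never to defer: instead,
 * gss_svc_upcall_handle_init() below sleeps on rsip->waitq and re-checks the
 * entry itself until lsvcgssd completes the downcall or we time out.
 */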

int gss_svc_upcall_handle_init(struct ptlrpc_request *req,
                               struct gss_svc_reqctx *grctx,
                               struct gss_wire_ctx *gw,
                               struct obd_device *target,
                               __u32 lustre_svc,
                               rawobj_t *rvs_hdl,
                               rawobj_t *in_token)
{
        struct ptlrpc_reply_state *rs;
        struct rsc *rsci = NULL;
        struct rsi *rsip = NULL, rsikey;
        wait_queue_t wait;
        int replen = sizeof(struct ptlrpc_body);
        struct gss_rep_header *rephdr;
        int rc = SECSVC_DROP;

        memset(&rsikey, 0, sizeof(rsikey));
        rsikey.lustre_svc = lustre_svc;
        rsikey.nid = (__u64) req->rq_peer.nid;

        /* duplicate context handle. for INIT it is always 0 */
        if (rawobj_dup(&rsikey.in_handle, &gw->gw_handle)) {
                CERROR("failed to dup context handle\n");
                GOTO(out, rc);
        }

        if (rawobj_dup(&rsikey.in_token, in_token)) {
                CERROR("can't duplicate token\n");
                rawobj_free(&rsikey.in_handle);
                GOTO(out, rc);
        }

        rsip = rsi_lookup(&rsikey, 0);
        if (!rsip) {
                CERROR("error in rsi_lookup.\n");

                if (!gss_pack_err_notify(req, GSS_S_FAILURE, 0))
                        rc = SECSVC_COMPLETE;

                GOTO(out, rc);
        }

        cache_get(&rsip->h); /* take an extra ref */
        init_waitqueue_head(&rsip->waitq);
        init_waitqueue_entry(&wait, current);
        add_wait_queue(&rsip->waitq, &wait);

        /* Note that each call to cache_check() drops a reference when it
         * returns non-zero. We hold an extra reference on the initial rsip,
         * but must take care of the subsequent calls.
         */
        rc = cache_check(&rsi_cache, &rsip->h, &cache_upcall_chandle);

                read_lock(&rsi_cache.hash_lock);
                valid = test_bit(CACHE_VALID, &rsip->h.flags);
                if (valid == 0)
                        set_current_state(TASK_INTERRUPTIBLE);
                read_unlock(&rsi_cache.hash_lock);

                if (valid == 0) {
                        unsigned long j1, j2;

                        j1 = jiffies;
                        schedule_timeout(GSS_SVC_UPCALL_TIMEOUT * HZ);
                        j2 = jiffies;
                        CWARN("slept %lu ticks for cache refill\n", j2 - j1);
                }

                CWARN("waited %ds timeout, drop\n", GSS_SVC_UPCALL_TIMEOUT);

                CWARN("cache_check return ENOENT, drop\n");

                /* if not the first check, we have to release the extra
                 * reference we just added on it.
                 */
                cache_put(&rsip->h, &rsi_cache);
                CDEBUG(D_SEC, "cache_check is good\n");

        remove_wait_queue(&rsip->waitq, &wait);
        cache_put(&rsip->h, &rsi_cache);

        if (rc)
                GOTO(out, rc = SECSVC_DROP);

        rsci = gss_svc_searchbyctx(&rsip->out_handle);
        if (!rsci) {
                CERROR("authentication failed\n");

                rc = SECSVC_DROP;
                if (!gss_pack_err_notify(req, GSS_S_FAILURE, 0))
                        rc = SECSVC_COMPLETE;

                GOTO(out, rc);
        }

        grctx->src_ctx = &rsci->ctx;

        if (rawobj_dup(&rsci->ctx.gsc_rvs_hdl, rvs_hdl)) {
                CERROR("failed to duplicate reverse handle\n");
                GOTO(out, rc = SECSVC_DROP);
        }

        rsci->target = target;

        CWARN("server create rsc %p(%u->%s)\n",
              rsci, rsci->ctx.gsc_uid, libcfs_nid2str(req->rq_peer.nid));

        if (rsip->out_handle.len > PTLRPC_GSS_MAX_HANDLE_SIZE) {
                CERROR("handle size %u too large\n", rsip->out_handle.len);
                GOTO(out, rc = SECSVC_DROP);
        }

        grctx->src_reserve_len = size_round4(rsip->out_token.len);

        rc = lustre_pack_reply_v2(req, 1, &replen, NULL);
        if (rc) {
                CERROR("failed to pack reply: %d\n", rc);
                GOTO(out, rc = SECSVC_DROP);
        }

        rs = req->rq_reply_state;
        LASSERT(rs->rs_repbuf->lm_bufcount == 3);
        LASSERT(rs->rs_repbuf->lm_buflens[0] >=
                sizeof(*rephdr) + rsip->out_handle.len);
        LASSERT(rs->rs_repbuf->lm_buflens[2] >= rsip->out_token.len);

        rephdr = lustre_msg_buf(rs->rs_repbuf, 0, 0);
        rephdr->gh_version = PTLRPC_GSS_VERSION;
        rephdr->gh_flags = 0;
        rephdr->gh_proc = PTLRPC_GSS_PROC_ERR;
        rephdr->gh_major = rsip->major_status;
        rephdr->gh_minor = rsip->minor_status;
        rephdr->gh_seqwin = GSS_SEQ_WIN;
        rephdr->gh_handle.len = rsip->out_handle.len;
        memcpy(rephdr->gh_handle.data, rsip->out_handle.data,
               rsip->out_handle.len);

        memcpy(lustre_msg_buf(rs->rs_repbuf, 2, 0), rsip->out_token.data,
               rsip->out_token.len);

        rs->rs_repdata_len = lustre_shrink_msg(rs->rs_repbuf, 2,
                                               rsip->out_token.len, 0);

        if (rsci->ctx.gsc_usr_mds)
                CWARN("user from %s authenticated as mds\n",
                      libcfs_nid2str(req->rq_peer.nid));

        rc = SECSVC_OK;

out:
        /* it looks like we should put rsip here as well, but that messes up
         * the NFS cache mgmt code... FIXME
         */
        if (rsip)
                rsi_put(&rsip->h, &rsi_cache);

        if (rsci) {
                /* if anything went wrong, we don't keep the context either */
                if (rc != SECSVC_OK)
                        set_bit(CACHE_NEGATIVE, &rsci->h.flags);

                rsc_put(&rsci->h, &rsc_cache);
        }
        RETURN(rc);
}

struct gss_svc_ctx *gss_svc_upcall_get_ctx(struct ptlrpc_request *req,
                                           struct gss_wire_ctx *gw)
{
        struct rsc *rsc;

        rsc = gss_svc_searchbyctx(&gw->gw_handle);
        if (!rsc) {
                CWARN("Invalid gss context handle from %s\n",
                      libcfs_nid2str(req->rq_peer.nid));
                return NULL;
        }

        return &rsc->ctx;
}

void gss_svc_upcall_put_ctx(struct gss_svc_ctx *ctx)
{
        struct rsc *rsc = container_of(ctx, struct rsc, ctx);

        rsc_put(&rsc->h, &rsc_cache);
}

void gss_svc_upcall_destroy_ctx(struct gss_svc_ctx *ctx)
{
        struct rsc *rsc = container_of(ctx, struct rsc, ctx);

        set_bit(CACHE_NEGATIVE, &rsc->h.flags);
}

int __init gss_svc_init_upcall(void)
{
        int i;

        cache_register(&rsi_cache);
        cache_register(&rsc_cache);

        /* FIXME: this is crude, but we want to give lsvcgssd a chance to
         * open the init upcall channel; otherwise there is a good chance
         * that the first upcall is issued before the channel is opened and
         * the nfsv4 cache code drops the request directly, leading to
         * unnecessary recovery time. Wait at most 1.5 seconds here.
         */
        for (i = 0; i < 6; i++) {
                if (atomic_read(&rsi_cache.readers) > 0)
                        break;
                set_current_state(TASK_UNINTERRUPTIBLE);
                schedule_timeout(HZ / 4);
        }

        if (atomic_read(&rsi_cache.readers) == 0)
                CWARN("Init channel is not opened by lsvcgssd, following "
                      "requests might be dropped until lsvcgssd is active\n");

        RETURN(0);
}

void __exit gss_svc_exit_upcall(void)
{
        int rc;

        cache_purge(&rsi_cache);
        if ((rc = cache_unregister(&rsi_cache)))
                CERROR("unregister rsi cache: %d\n", rc);

        cache_purge(&rsc_cache);
        if ((rc = cache_unregister(&rsc_cache)))
                CERROR("unregister rsc cache: %d\n", rc);
}