1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
4 * Modifications for Lustre
5 * Copyright 2004, Cluster File Systems, Inc.
7 * Author: Eric Mei <ericm@clusterfs.com>
11 * Neil Brown <neilb@cse.unsw.edu.au>
12 * J. Bruce Fields <bfields@umich.edu>
13 * Andy Adamson <andros@umich.edu>
14 * Dug Song <dugsong@monkey.org>
16 * RPCSEC_GSS server authentication.
17 * This implements RPCSEC_GSS as defined in rfc2203 (rpcsec_gss) and rfc2078
20 * The RPCSEC_GSS involves three stages:
23 * 3/ context destruction
25 * Context creation is handled largely by upcalls to user-space.
26 * In particular, GSS_Accept_sec_context is handled by an upcall
27 * Data exchange is handled entirely within the kernel
28 * In particular, GSS_GetMIC, GSS_VerifyMIC, GSS_Seal, GSS_Unseal are in-kernel.
29 * Context destruction is handled in-kernel
30 * GSS_Delete_sec_context is in-kernel
32 * Context creation is initiated by a RPCSEC_GSS_INIT request arriving.
33 * The context handle and gss_token are used as a key into the rpcsec_init cache.
34 * The content of this cache includes some of the outputs of GSS_Accept_sec_context,
35 * being major_status, minor_status, context_handle, reply_token.
36 * These are sent back to the client.
37 * Sequence window management is handled by the kernel. The window size is currently
38 * a compile time constant.
40 * When user-space is happy that a context is established, it places an entry
41 * in the rpcsec_context cache. The key for this cache is the context_handle.
42 * The content includes:
43 * uid/gidlist - for determining access rights
45 * mechanism specific information, such as a key
49 #define DEBUG_SUBSYSTEM S_SEC
51 #include <linux/types.h>
52 #include <linux/init.h>
53 #include <linux/module.h>
54 #include <linux/slab.h>
55 #include <linux/hash.h>
57 #include <liblustre.h>
60 #include <linux/sunrpc/cache.h>
62 #include <libcfs/kp30.h>
63 #include <linux/obd.h>
64 #include <linux/obd_class.h>
65 #include <linux/obd_support.h>
66 #include <linux/lustre_idl.h>
67 #include <linux/lustre_net.h>
68 #include <linux/lustre_import.h>
69 #include <linux/lustre_sec.h>
72 #include "gss_internal.h"
/* Fold @length bytes at @buf into an unsigned long with hash_long() and
 * return the top @bits bits; used as the bucket index for both the rsi
 * and rsc caches below.  (The byte-accumulation loop between these lines
 * is not visible in this view of the file.) */
75 static inline unsigned long hash_mem(char *buf, int length, int bits)
77 unsigned long hash = 0;
83 c = (char)len; len = -1;
88 if ((len & (BITS_PER_LONG/8-1))==0)
89 hash = hash_long(hash^l, BITS_PER_LONG);
91 return hash >> (BITS_PER_LONG - bits);
94 /* The rpcsec_init cache is used for mapping RPCSEC_GSS_{,CONT_}INIT requests
97 * Key is context handle (\x if empty) and gss_token.
98 * Content is major_status minor_status (integers) context_handle, reply_token.
102 #define RSI_HASHBITS 6
103 #define RSI_HASHMAX (1<<RSI_HASHBITS)
104 #define RSI_HASHMASK (RSI_HASHMAX-1)
/* rsi: one in-flight RPCSEC_GSS init request/reply pair.  Keyed by
 * (in_handle, in_token) -- see rsi_match()/rsi_hash(); out_handle,
 * out_token and the status fields carry svcgssd's reply back down.
 * (Struct header and some fields lie outside this view.) */
112 rawobj_t in_handle, in_token, in_srv_type;
113 rawobj_t out_handle, out_token;
114 int major_status, minor_status;
/* bucket array and cache descriptor for the init cache (initializer below) */
117 static struct cache_head *rsi_table[RSI_HASHMAX];
118 static struct cache_detail rsi_cache;
/* Release the four rawobj buffers owned by an rsi entry.  Does not free
 * the rsi structure itself -- that happens on final rsi_put(). */
120 static void rsi_free(struct rsi *rsii)
122 rawobj_free(&rsii->in_handle);
123 rawobj_free(&rsii->in_token);
124 rawobj_free(&rsii->out_handle);
125 rawobj_free(&rsii->out_token);
/* Drop a reference on an rsi cache entry; when cache_put() reports the
 * last reference is gone, the entry must be unhashed (next == NULL) and
 * its memory is released. */
128 static void rsi_put(struct cache_head *item, struct cache_detail *cd)
130 struct rsi *rsii = container_of(item, struct rsi, h);
131 LASSERT(atomic_read(&item->refcnt) > 0);
132 if (cache_put(item, cd)) {
133 LASSERT(item->next == NULL);
135 OBD_FREE(rsii, sizeof(*rsii));
/* Bucket index for an rsi: XOR of the hashes of the two key fields. */
139 static inline int rsi_hash(struct rsi *item)
141 return hash_mem((char *)item->in_handle.data, item->in_handle.len, RSI_HASHBITS)
142 ^ hash_mem((char *)item->in_token.data, item->in_token.len, RSI_HASHBITS);
/* Two rsi entries match iff both in_handle and in_token are equal. */
145 static inline int rsi_match(struct rsi *item, struct rsi *tmp)
147 return (rawobj_equal(&item->in_handle, &tmp->in_handle) &&
148 rawobj_equal(&item->in_token, &tmp->in_token));
/* Format the upcall line handed to user-space svcgssd: hex-encoded
 * lustre_svc, NAL type, netid, nid, then the client's context handle
 * and GSS token. */
151 static void rsi_request(struct cache_detail *cd,
152 struct cache_head *h,
153 char **bpp, int *blen)
155 struct rsi *rsii = container_of(h, struct rsi, h);
157 qword_addhex(bpp, blen, (char *) &rsii->lustre_svc,
158 sizeof(rsii->lustre_svc));
159 qword_addhex(bpp, blen, (char *) &rsii->naltype, sizeof(rsii->naltype));
160 qword_addhex(bpp, blen, (char *) &rsii->netid, sizeof(rsii->netid));
161 qword_addhex(bpp, blen, (char *) &rsii->nid, sizeof(rsii->nid));
162 qword_addhex(bpp, blen, rsii->in_handle.data, rsii->in_handle.len);
163 qword_addhex(bpp, blen, rsii->in_token.data, rsii->in_token.len);
/* Install a reply parsed from svcgssd into the rsi cache: find the
 * pending entry matching @item under the hash write-lock, unhash it,
 * and hash+validate @item in its place.  Finding an already-VALID
 * entry is treated as an error.  (Some statements between these lines
 * are not visible in this view.) */
168 gssd_reply(struct rsi *item)
171 struct cache_head **hp, **head;
174 head = &rsi_cache.hash_table[rsi_hash(item)];
175 write_lock(&rsi_cache.hash_lock);
176 for (hp = head; *hp != NULL; hp = &tmp->h.next) {
177 tmp = container_of(*hp, struct rsi, h);
178 if (rsi_match(tmp, item)) {
180 clear_bit(CACHE_HASHED, &tmp->h.flags);
184 if (test_bit(CACHE_VALID, &tmp->h.flags)) {
185 CERROR("rsi is valid\n");
186 write_unlock(&rsi_cache.hash_lock);
187 rsi_put(&tmp->h, &rsi_cache);
190 set_bit(CACHE_HASHED, &item->h.flags);
194 set_bit(CACHE_VALID, &item->h.flags);
195 item->h.last_refresh = get_seconds();
196 write_unlock(&rsi_cache.hash_lock);
197 cache_fresh(&rsi_cache, &tmp->h, 0);
198 rsi_put(&tmp->h, &rsi_cache);
202 write_unlock(&rsi_cache.hash_lock);
207 * here we just wait for its completion or a timeout. it's a
208 * hack, but it works, and we'll come up with a real fix if we decide
209 * to stick with the NFS4 cache code
/* Synchronous upcall to svcgssd for context establishment: if a VALID
 * matching entry already exists, reuse it; otherwise hash @item, kick
 * the upcall via cache_check(), then poll the cache every HZ/2 in
 * TASK_UNINTERRUPTIBLE until the entry turns VALID or
 * SVCSEC_UPCALL_TIMEOUT seconds elapse.  (Some statements between these
 * lines are not visible in this view.) */
212 gssd_upcall(struct rsi *item, struct cache_req *chandle)
215 struct cache_head **hp, **head;
216 unsigned long starttime;
219 head = &rsi_cache.hash_table[rsi_hash(item)];
220 read_lock(&rsi_cache.hash_lock);
221 for (hp = head; *hp != NULL; hp = &tmp->h.next) {
222 tmp = container_of(*hp, struct rsi, h);
223 if (rsi_match(tmp, item)) {
225 if (!test_bit(CACHE_VALID, &tmp->h.flags)) {
226 CERROR("found rsi without VALID\n");
227 read_unlock(&rsi_cache.hash_lock);
234 read_unlock(&rsi_cache.hash_lock);
239 set_bit(CACHE_HASHED, &item->h.flags);
240 item->h.next = *head;
243 read_unlock(&rsi_cache.hash_lock);
244 //cache_get(&item->h);
246 cache_check(&rsi_cache, &item->h, chandle);
247 starttime = get_seconds();
249 set_current_state(TASK_UNINTERRUPTIBLE);
250 schedule_timeout(HZ/2);
251 read_lock(&rsi_cache.hash_lock);
252 for (hp = head; *hp != NULL; hp = &tmp->h.next) {
253 tmp = container_of(*hp, struct rsi, h);
256 if (rsi_match(tmp, item)) {
257 if (!test_bit(CACHE_VALID, &tmp->h.flags)) {
258 read_unlock(&rsi_cache.hash_lock);
262 clear_bit(CACHE_HASHED, &tmp->h.flags);
266 read_unlock(&rsi_cache.hash_lock);
270 read_unlock(&rsi_cache.hash_lock);
271 } while ((get_seconds() - starttime) <= SVCSEC_UPCALL_TIMEOUT);
272 CERROR("%ds timeout while waiting cache refill\n",
273 SVCSEC_UPCALL_TIMEOUT);
/* Parse a downcall message written by svcgssd:
 *   in_handle in_token expiry major_status minor_status out_handle out_token
 * Build a fresh rsi entry from it and hand it to gssd_reply() to wake
 * the waiting request.  The entry reference is dropped on exit. */
277 static int rsi_parse(struct cache_detail *cd,
278 char *mesg, int mlen)
280 /* context token expiry major minor context token */
286 int status = -EINVAL;
289 OBD_ALLOC(rsii, sizeof(*rsii));
292 cache_init(&rsii->h);
295 len = qword_get(&mesg, buf, mlen);
298 if (rawobj_alloc(&rsii->in_handle, buf, len)) {
304 len = qword_get(&mesg, buf, mlen);
307 if (rawobj_alloc(&rsii->in_token, buf, len)) {
313 expiry = get_expiry(&mesg);
318 len = qword_get(&mesg, buf, mlen);
321 rsii->major_status = simple_strtol(buf, &ep, 10);
326 len = qword_get(&mesg, buf, mlen);
329 rsii->minor_status = simple_strtol(buf, &ep, 10);
334 len = qword_get(&mesg, buf, mlen);
337 if (rawobj_alloc(&rsii->out_handle, buf, len)) {
343 len = qword_get(&mesg, buf, mlen);
346 if (rawobj_alloc(&rsii->out_token, buf, len)) {
351 rsii->h.expiry_time = expiry;
352 status = gssd_reply(rsii);
355 rsi_put(&rsii->h, &rsi_cache);
/* /proc upcall/downcall channel descriptor for the context-init cache;
 * svcgssd reads requests formatted by rsi_request() and writes replies
 * parsed by rsi_parse(). */
359 static struct cache_detail rsi_cache = {
360 .hash_size = RSI_HASHMAX,
361 .hash_table = rsi_table,
362 .name = "auth.ptlrpcs.init",
363 .cache_put = rsi_put,
364 .cache_request = rsi_request,
365 .cache_parse = rsi_parse,
369 * The rpcsec_context cache is used to store a context that is
370 * used in data exchange.
371 * The key is a context handle. The content is:
372 * uid, gidlist, mechanism, service-set, mech-specific-data
375 #define RSC_HASHBITS 10
376 #define RSC_HASHMAX (1<<RSC_HASHBITS)
377 #define RSC_HASHMASK (RSC_HASHMAX-1)
/* Replay-detection window (RFC 2203): GSS_SEQ_WIN sequence numbers are
 * tracked as a bitmap in sd_win, protected by sd_lock (see
 * gss_check_seq_num()). */
379 #define GSS_SEQ_WIN 512
381 struct gss_svc_seq_data {
382 /* highest seq number seen so far: */
384 /* for i such that sd_max-GSS_SEQ_WIN < i <= sd_max, the i-th bit of
385 * sd_win is nonzero iff sequence number i has been seen already: */
386 unsigned long sd_win[GSS_SEQ_WIN/BITS_PER_LONG];
/* rsc: an established security context, keyed by context handle.
 * (Struct header and some fields are outside this view.) */
393 __u32 remote_realm:1,
396 struct vfs_cred cred;
398 struct gss_svc_seq_data seqdata;
399 struct gss_ctx *mechctx;
/* bucket array and cache descriptor for the context cache (initializer below) */
402 static struct cache_head *rsc_table[RSC_HASHMAX];
403 static struct cache_detail rsc_cache;
/* Release everything an rsc entry owns: the handle buffer, the GSS
 * mech context, and the group-info reference (if any). */
405 static void rsc_free(struct rsc *rsci)
407 rawobj_free(&rsci->handle);
409 kgss_delete_sec_context(&rsci->mechctx);
411 if (rsci->cred.vc_ginfo)
412 put_group_info(rsci->cred.vc_ginfo);
/* Drop a reference on an rsc cache entry; on the final cache_put() the
 * entry must be unhashed and its memory is released. */
416 static void rsc_put(struct cache_head *item, struct cache_detail *cd)
418 struct rsc *rsci = container_of(item, struct rsc, h);
420 LASSERT(atomic_read(&item->refcnt) > 0);
421 if (cache_put(item, cd)) {
422 LASSERT(item->next == NULL);
424 OBD_FREE(rsci, sizeof(*rsci));
/* Bucket index for an rsc: hash of its context handle. */
429 rsc_hash(struct rsc *rsci)
431 return hash_mem((char *)rsci->handle.data,
432 rsci->handle.len, RSC_HASHBITS);
/* Two rsc entries match iff their context handles are equal. */
436 rsc_match(struct rsc *new, struct rsc *tmp)
438 return rawobj_equal(&new->handle, &tmp->handle);
/* Look up an rsc by handle.  With @set nonzero (write-locked path) a
 * matching stale entry is unhashed/released and @item is inserted and
 * freshened; with @set zero it is a read-only lookup.  (Branching
 * between the two lock modes and some statements are not visible in
 * this view.) */
441 static struct rsc *rsc_lookup(struct rsc *item, int set)
443 struct rsc *tmp = NULL;
444 struct cache_head **hp, **head;
445 head = &rsc_cache.hash_table[rsc_hash(item)];
449 write_lock(&rsc_cache.hash_lock);
451 read_lock(&rsc_cache.hash_lock);
452 for (hp = head; *hp != NULL; hp = &tmp->h.next) {
453 tmp = container_of(*hp, struct rsc, h);
454 if (!rsc_match(tmp, item))
461 clear_bit(CACHE_HASHED, &tmp->h.flags);
462 rsc_put(&tmp->h, &rsc_cache);
465 /* Didn't find anything */
470 set_bit(CACHE_HASHED, &item->h.flags);
471 item->h.next = *head;
473 write_unlock(&rsc_cache.hash_lock);
474 cache_fresh(&rsc_cache, &item->h, item->h.expiry_time);
480 read_unlock(&rsc_cache.hash_lock);
/* Parse a context downcall from svcgssd and install the resulting
 * established context into rsc_cache via rsc_lookup(..., 1).
 * Wire format: handle, expiry, remote/mds/oss flags, mapped uid, then
 * either NEGATIVE (no uid) or uid gid mechname mechdata.  The real
 * expiry is taken from the mech context, not from user-space. */
484 static int rsc_parse(struct cache_detail *cd,
485 char *mesg, int mlen)
487 /* contexthandle expiry [ uid gid N <n gids> mechname
488 * ...mechdata... ] */
490 int len, rv, tmp_int;
491 struct rsc *rsci, *res = NULL;
493 int status = -EINVAL;
495 OBD_ALLOC(rsci, sizeof(*rsci));
497 CERROR("fail to alloc rsci\n");
500 cache_init(&rsci->h);
503 len = qword_get(&mesg, buf, mlen);
504 if (len < 0) goto out;
506 if (rawobj_alloc(&rsci->handle, buf, len))
510 expiry = get_expiry(&mesg);
516 rv = get_int(&mesg, &tmp_int);
518 CERROR("fail to get remote flag\n");
521 rsci->remote_realm = (tmp_int != 0);
524 rv = get_int(&mesg, &tmp_int);
526 CERROR("fail to get mds user flag\n");
529 rsci->auth_usr_mds = (tmp_int != 0);
532 rv = get_int(&mesg, &tmp_int);
534 CERROR("fail to get oss user flag\n");
537 rsci->auth_usr_oss = (tmp_int != 0);
540 rv = get_int(&mesg, (int *)&rsci->mapped_uid);
542 CERROR("fail to get mapped uid\n");
546 /* uid, or NEGATIVE */
547 rv = get_int(&mesg, (int *)&rsci->cred.vc_uid);
551 CERROR("NOENT? set rsc entry negative\n");
552 set_bit(CACHE_NEGATIVE, &rsci->h.flags);
554 struct gss_api_mech *gm;
559 if (get_int(&mesg, (int *)&rsci->cred.vc_gid))
563 len = qword_get(&mesg, buf, mlen);
566 gm = kgss_name_to_mech(buf);
567 status = -EOPNOTSUPP;
572 /* mech-specific data: */
573 len = qword_get(&mesg, buf, mlen);
579 tmp_buf.data = (unsigned char *)buf;
580 if (kgss_import_sec_context(&tmp_buf, gm, &rsci->mechctx)) {
585 /* currently the expiry time passed down from user-space
586 * is invalid, here we retrieve it from the mech.
588 if (kgss_inquire_context(rsci->mechctx, &ctx_expiry)) {
589 CERROR("unable to get expire time, drop it\n");
590 set_bit(CACHE_NEGATIVE, &rsci->h.flags);
594 expiry = (time_t) gss_roundup_expire_time(ctx_expiry);
598 rsci->h.expiry_time = expiry;
599 spin_lock_init(&rsci->seqdata.sd_lock);
600 res = rsc_lookup(rsci, 1);
601 rsc_put(&res->h, &rsc_cache);
605 rsc_put(&rsci->h, &rsc_cache);
610 * flush all entries with @uid. @uid == -1 will match all.
611 * we only know the uid, maybe netid/nid in the future, in all cases
612 * we must search the whole cache
/* Walk every bucket of rsc_cache and evict entries whose vc_uid matches
 * @uid (uid == -1 evicts all): mark NEGATIVE, unhash, and drop the hash
 * reference. */
614 static void rsc_flush(uid_t uid)
616 struct cache_head **ch;
622 CWARN("flush all gss contexts\n");
624 write_lock(&rsc_cache.hash_lock);
625 for (n = 0; n < RSC_HASHMAX; n++) {
626 for (ch = &rsc_cache.hash_table[n]; *ch;) {
627 rscp = container_of(*ch, struct rsc, h);
629 if (uid != -1 && rscp->cred.vc_uid != uid) {
634 /* it seems simply setting NEGATIVE doesn't work */
638 set_bit(CACHE_NEGATIVE, &rscp->h.flags);
639 clear_bit(CACHE_HASHED, &rscp->h.flags);
641 CWARN("flush rsc %p(%u) for uid %u\n", rscp,
642 *((__u32 *) rscp->handle.data),
644 rsc_put(&rscp->h, &rsc_cache);
648 write_unlock(&rsc_cache.hash_lock);
/* /proc channel descriptor for established contexts; downcalls are
 * parsed by rsc_parse().  No .cache_request: the kernel never issues
 * upcalls on this cache. */
652 static struct cache_detail rsc_cache = {
653 .hash_size = RSC_HASHMAX,
654 .hash_table = rsc_table,
655 .name = "auth.ptlrpcs.context",
656 .cache_put = rsc_put,
657 .cache_parse = rsc_parse,
/* Find an established context by wire handle: read-only rsc_lookup()
 * on a stack key, then validity/expiry check via cache_check().
 * (Return statements are not visible in this view.) */
661 gss_svc_searchbyctx(rawobj_t *handle)
666 rsci.handle = *handle;
667 found = rsc_lookup(&rsci, 0);
671 if (cache_check(&rsc_cache, &found->h, NULL))
678 * again hacking: only try to give the svcgssd a chance to handle
/* Deferral hook installed in my_chandle; passed as the cache_req to
 * cache_check() from gssd_upcall() so the NFS4-style deferral machinery
 * is bypassed (body not visible in this view). */
681 struct cache_deferred_req* my_defer(struct cache_req *req)
686 static struct cache_req my_chandle = {my_defer};
688 /* Implements sequence number algorithm as specified in RFC 2203. */
/* Debug helper: print the replay-window bitmap as a hex string. */
689 static inline void __dbg_dump_seqwin(struct gss_svc_seq_data *sd)
691 char buf[sizeof(sd->sd_win)*2+1];
694 for (i = 0; i < sizeof(sd->sd_win); i++)
695 sprintf(&buf[i+i], "%02x", ((__u8 *) sd->sd_win)[i]);
696 CWARN("dump seqwin: %s\n", buf);
/* Debug helper: log a sequence number that jumped past the window. */
699 static inline void __dbg_seq_jump(struct gss_svc_seq_data *sd, __u32 seq_num)
701 CWARN("seq jump to %u, cur max %u!\n", seq_num, sd->sd_max);
702 __dbg_dump_seqwin(sd);
/* Debug helper: log a within-window advance of the max sequence number
 * (the loop appears to scan the bits that will slide out of the window;
 * some statements are not visible in this view). */
705 static inline void __dbg_seq_increase(struct gss_svc_seq_data *sd, __u32 seq_num)
707 int n = seq_num - sd->sd_max;
710 for (i = 0; i < n; i++) {
711 if (!test_bit(i, sd->sd_win))
717 CWARN("seq increase to %u, cur max %u\n", seq_num, sd->sd_max);
718 __dbg_dump_seqwin(sd);
/* RFC 2203 replay detection over a GSS_SEQ_WIN-wide sliding bitmap,
 * serialized by sd_lock.  A seq_num beyond the window resets the bitmap;
 * one below the window or whose bit is already set is rejected
 * (logged as "too low" / "replay").  Nonzero return appears to signal
 * rejection -- confirm against full source; return paths are not
 * visible in this view. */
722 gss_check_seq_num(struct gss_svc_seq_data *sd, __u32 seq_num)
726 spin_lock(&sd->sd_lock);
727 if (seq_num > sd->sd_max) {
728 if (seq_num >= sd->sd_max + GSS_SEQ_WIN) {
729 __dbg_seq_jump(sd, seq_num);
730 memset(sd->sd_win, 0, sizeof(sd->sd_win));
731 sd->sd_max = seq_num;
733 __dbg_seq_increase(sd, seq_num);
734 while(sd->sd_max < seq_num) {
736 __clear_bit(sd->sd_max % GSS_SEQ_WIN,
740 __set_bit(seq_num % GSS_SEQ_WIN, sd->sd_win);
742 } else if (seq_num + GSS_SEQ_WIN <= sd->sd_max) {
743 CERROR("seq %u too low: max %u, win %d\n",
744 seq_num, sd->sd_max, GSS_SEQ_WIN);
749 if (__test_and_set_bit(seq_num % GSS_SEQ_WIN, sd->sd_win)) {
750 CERROR("seq %u is replay: max %u, win %d\n",
751 seq_num, sd->sd_max, GSS_SEQ_WIN);
755 spin_unlock(&sd->sd_lock);
/* Integrity service: point rq_reqmsg/rq_reqlen at the embedded lustre
 * message, verify the MIC found at @vp (len-prefixed, little-endian)
 * over that message, then run replay detection on gc_seq.  Returns a
 * GSS_S_* major status; GSS_S_COMPLETE on success. */
760 gss_svc_verify_request(struct ptlrpc_request *req,
762 struct rpc_gss_wire_cred *gc,
763 __u32 *vp, __u32 vlen)
765 struct ptlrpcs_wire_hdr *sec_hdr;
766 struct gss_ctx *ctx = rsci->mechctx;
772 sec_hdr = (struct ptlrpcs_wire_hdr *) req->rq_reqbuf;
774 req->rq_reqmsg = (struct lustre_msg *) (req->rq_reqbuf + sizeof(*sec_hdr));
775 req->rq_reqlen = sec_hdr->msg_len;
777 msg.len = sec_hdr->msg_len;
778 msg.data = (__u8 *)req->rq_reqmsg;
780 mic.len = le32_to_cpu(*vp++);
781 mic.data = (unsigned char *)vp;
784 if (mic.len > vlen) {
785 CERROR("checksum len %d, while buffer len %d\n",
787 RETURN(GSS_S_CALL_BAD_STRUCTURE);
791 CERROR("invalid mic len %d\n", mic.len);
792 RETURN(GSS_S_CALL_BAD_STRUCTURE);
795 maj_stat = kgss_verify_mic(ctx, &msg, &mic, NULL);
796 if (maj_stat != GSS_S_COMPLETE) {
797 CERROR("MIC verification error: major %x\n", maj_stat);
801 if (gss_check_seq_num(&rsci->seqdata, gc->gc_seq)) {
802 CERROR("discard replayed request %p(o%u,x"LPU64",t"LPU64")\n",
803 req, req->rq_reqmsg->opc, req->rq_xid,
804 req->rq_reqmsg->transno);
805 RETURN(GSS_S_DUPLICATE_TOKEN);
808 RETURN(GSS_S_COMPLETE);
/* Privacy service: unwrap the len-prefixed ciphertext at @vp in place
 * (plain_text aliases cipher_text), run replay detection on gc_seq,
 * then point rq_reqmsg/rq_reqlen at the decrypted message.  Returns a
 * GSS_S_* major status; GSS_S_COMPLETE on success. */
812 gss_svc_unseal_request(struct ptlrpc_request *req,
814 struct rpc_gss_wire_cred *gc,
815 __u32 *vp, __u32 vlen)
817 struct ptlrpcs_wire_hdr *sec_hdr;
818 struct gss_ctx *ctx = rsci->mechctx;
819 rawobj_t cipher_text, plain_text;
823 sec_hdr = (struct ptlrpcs_wire_hdr *) req->rq_reqbuf;
826 CERROR("vlen only %u\n", vlen);
827 RETURN(GSS_S_CALL_BAD_STRUCTURE);
830 cipher_text.len = le32_to_cpu(*vp++);
831 cipher_text.data = (__u8 *) vp;
834 if (cipher_text.len > vlen) {
835 CERROR("cipher claimed %u while buf only %u\n",
836 cipher_text.len, vlen);
837 RETURN(GSS_S_CALL_BAD_STRUCTURE);
840 plain_text = cipher_text;
842 major = kgss_unwrap(ctx, GSS_C_QOP_DEFAULT, &cipher_text, &plain_text);
844 CERROR("unwrap error 0x%x\n", major);
848 if (gss_check_seq_num(&rsci->seqdata, gc->gc_seq)) {
849 CERROR("discard replayed request %p(o%u,x"LPU64",t"LPU64")\n",
850 req, req->rq_reqmsg->opc, req->rq_xid,
851 req->rq_reqmsg->transno);
852 RETURN(GSS_S_DUPLICATE_TOKEN);
855 req->rq_reqmsg = (struct lustre_msg *) (vp);
856 req->rq_reqlen = plain_text.len;
858 CDEBUG(D_SEC, "msg len %d\n", req->rq_reqlen);
860 RETURN(GSS_S_COMPLETE);
/* Build a minimal GSS error-notify reply: wire header (flavor/svc/
 * msg_len), then seven 32-bit words of sec data -- version, subflavor,
 * PTLRPCS_GSS_PROC_ERR, @major, @minor, and two fake objects.  Marks
 * svcdata->is_err_notify so later stages skip signing/sealing. */
864 gss_pack_err_notify(struct ptlrpc_request *req,
865 __u32 major, __u32 minor)
867 struct gss_svc_data *svcdata = req->rq_svcsec_data;
868 __u32 reslen, *resp, *reslenp;
869 char nidstr[PTL_NALFMT_SIZE];
870 const __u32 secdata_len = 7 * 4;
874 OBD_FAIL_RETURN(OBD_FAIL_SVCGSS_ERR_NOTIFY|OBD_FAIL_ONCE, -EINVAL);
877 svcdata->is_err_notify = 1;
878 svcdata->reserve_len = 7 * 4;
880 rc = lustre_pack_reply(req, 0, NULL, NULL);
882 CERROR("could not pack reply, err %d\n", rc);
886 LASSERT(req->rq_reply_state);
887 LASSERT(req->rq_reply_state->rs_repbuf);
888 LASSERT(req->rq_reply_state->rs_repbuf_len >= secdata_len);
889 resp = (__u32 *) req->rq_reply_state->rs_repbuf;
892 *resp++ = cpu_to_le32(PTLRPCS_FLVR_GSS_NONE);
893 *resp++ = cpu_to_le32(PTLRPCS_SVC_NONE);
894 *resp++ = cpu_to_le32(req->rq_replen);
897 /* skip lustre msg */
898 resp += req->rq_replen / 4;
899 reslen = svcdata->reserve_len;
902 * version, subflavor, notify, major, minor,
903 * obj1(fake), obj2(fake)
905 *resp++ = cpu_to_le32(PTLRPC_SEC_GSS_VERSION);
906 *resp++ = cpu_to_le32(PTLRPCS_FLVR_KRB5I);
907 *resp++ = cpu_to_le32(PTLRPCS_GSS_PROC_ERR);
908 *resp++ = cpu_to_le32(major);
909 *resp++ = cpu_to_le32(minor);
913 /* the actual sec data length */
914 *reslenp = cpu_to_le32(secdata_len);
916 req->rq_reply_state->rs_repdata_len += (secdata_len);
917 CDEBUG(D_SEC, "prepare gss error notify(0x%x/0x%x) to %s\n",
919 portals_nid2str(req->rq_peer.peer_ni->pni_number,
920 req->rq_peer.peer_id.nid, nidstr));
/* Debug helper: dump the generic cache_head bookkeeping fields. */
924 static void dump_cache_head(struct cache_head *h)
926 CWARN("ref %d, fl %lx, n %p, t %ld, %ld\n",
927 atomic_read(&h->refcnt), h->flags, h->next,
928 h->expiry_time, h->last_refresh);
/* Debug helper: dump an rsi entry -- cache head, addressing triple,
 * and the lengths/pointers of the four rawobj buffers. */
930 static void dump_rsi(struct rsi *rsi)
932 CWARN("dump rsi %p\n", rsi);
933 dump_cache_head(&rsi->h);
934 CWARN("%x,%x,%llx\n", rsi->naltype, rsi->netid, rsi->nid);
935 CWARN("len %d, d %p\n", rsi->in_handle.len, rsi->in_handle.data);
936 CWARN("len %d, d %p\n", rsi->in_token.len, rsi->in_token.data);
937 CWARN("len %d, d %p\n", rsi->out_handle.len, rsi->out_handle.data);
938 CWARN("len %d, d %p\n", rsi->out_token.len, rsi->out_token.data);
/* Handle RPC_GSS_PROC_INIT / CONTINUE_INIT: build an rsi key from the
 * wire data and peer identity, run the synchronous gssd_upcall(), find
 * the context created by the downcall, and compose the init reply
 * (seq window, major/minor, out_handle, out_token).  On upcall failure
 * an error notify is sent instead.  All rsi/rsc references taken here
 * are dropped on the exit paths at the bottom. */
942 gss_svcsec_handle_init(struct ptlrpc_request *req,
943 struct rpc_gss_wire_cred *gc,
944 __u32 *secdata, __u32 seclen,
945 enum ptlrpcs_error *res)
947 struct gss_svc_data *svcdata = req->rq_svcsec_data;
949 struct rsi *rsikey, *rsip;
951 __u32 reslen, *resp, *reslenp;
952 char nidstr[PTL_NALFMT_SIZE];
958 CDEBUG(D_SEC, "processing gss init(%d) request from %s\n", gc->gc_proc,
959 portals_nid2str(req->rq_peer.peer_ni->pni_number,
960 req->rq_peer.peer_id.nid, nidstr));
962 *res = PTLRPCS_BADCRED;
963 OBD_FAIL_RETURN(OBD_FAIL_SVCGSS_INIT_REQ|OBD_FAIL_ONCE, SVC_DROP);
965 if (gc->gc_proc == RPC_GSS_PROC_INIT &&
966 gc->gc_ctx.len != 0) {
967 CERROR("proc %d, ctx_len %d: not really init?\n",
/* NOTE(review): the first CERROR argument is the boolean comparison
 * (gc->gc_proc == RPC_GSS_PROC_INIT), so "proc %d" always prints 1
 * here; gc->gc_proc itself was almost certainly intended. */
968 gc->gc_proc == RPC_GSS_PROC_INIT, gc->gc_ctx.len);
972 OBD_ALLOC(rsikey, sizeof(*rsikey));
974 CERROR("out of memory\n");
977 cache_init(&rsikey->h);
979 /* obtain lustre svc type */
981 CERROR("sec size %d too small\n", seclen);
982 GOTO(out_rsikey, rc = SVC_DROP);
984 rsikey->lustre_svc = le32_to_cpu(*secdata++);
987 /* duplicate context handle. currently always 0 */
988 if (rawobj_dup(&rsikey->in_handle, &gc->gc_ctx)) {
989 CERROR("fail to dup context handle\n");
990 GOTO(out_rsikey, rc = SVC_DROP);
994 *res = PTLRPCS_BADVERF;
995 if (rawobj_extract(&tmpobj, &secdata, &seclen)) {
996 CERROR("can't extract token\n");
997 GOTO(out_rsikey, rc = SVC_DROP);
999 if (rawobj_dup(&rsikey->in_token, &tmpobj)) {
1000 CERROR("can't duplicate token\n");
1001 GOTO(out_rsikey, rc = SVC_DROP);
1004 rsikey->naltype = (__u32) req->rq_peer.peer_ni->pni_number;
1006 rsikey->nid = (__u64) req->rq_peer.peer_id.nid;
1008 rsip = gssd_upcall(rsikey, &my_chandle);
1010 CERROR("error in gssd_upcall.\n");
1013 if (gss_pack_err_notify(req, GSS_S_FAILURE, 0))
1016 GOTO(out_rsikey, rc);
1019 rsci = gss_svc_searchbyctx(&rsip->out_handle);
1021 CERROR("rsci still not mature yet?\n");
1024 if (gss_pack_err_notify(req, GSS_S_FAILURE, 0))
1029 CDEBUG(D_SEC, "svcsec create gss context %p(%u@%s)\n",
1030 rsci, rsci->cred.vc_uid,
1031 portals_nid2str(req->rq_peer.peer_ni->pni_number,
1032 req->rq_peer.peer_id.nid, nidstr));
1034 svcdata->is_init = 1;
1035 svcdata->reserve_len = 7 * 4 +
1036 size_round4(rsip->out_handle.len) +
1037 size_round4(rsip->out_token.len);
1039 rc = lustre_pack_reply(req, 0, NULL, NULL);
1041 CERROR("failed to pack reply, rc = %d\n", rc);
1042 set_bit(CACHE_NEGATIVE, &rsci->h.flags);
1043 GOTO(out, rc = SVC_DROP);
1047 resp = (__u32 *) req->rq_reply_state->rs_repbuf;
1048 *resp++ = cpu_to_le32(PTLRPCS_FLVR_GSS_NONE);
1049 *resp++ = cpu_to_le32(PTLRPCS_SVC_NONE);
1050 *resp++ = cpu_to_le32(req->rq_replen);
1053 resp += req->rq_replen / 4;
1054 reslen = svcdata->reserve_len;
1056 /* gss reply: (conform to err notify format)
1057 * x, x, seq, major, minor, handle, token
1061 *resp++ = cpu_to_le32(GSS_SEQ_WIN);
1062 *resp++ = cpu_to_le32(rsip->major_status);
1063 *resp++ = cpu_to_le32(rsip->minor_status);
1065 if (rawobj_serialize(&rsip->out_handle,
1071 if (rawobj_serialize(&rsip->out_token,
1077 /* the actual sec data length */
1078 *reslenp = cpu_to_le32(svcdata->reserve_len - reslen);
1080 req->rq_reply_state->rs_repdata_len += le32_to_cpu(*reslenp);
1081 CDEBUG(D_SEC, "req %p: msgsize %d, authsize %d, "
1082 "total size %d\n", req, req->rq_replen,
1083 le32_to_cpu(*reslenp),
1084 req->rq_reply_state->rs_repdata_len);
1088 req->rq_remote_realm = rsci->remote_realm;
1089 req->rq_auth_usr_mds = rsci->auth_usr_mds;
1090 req->rq_auth_usr_oss = rsci->auth_usr_oss;
1091 req->rq_auth_uid = rsci->cred.vc_uid;
1092 req->rq_mapped_uid = rsci->mapped_uid;
1094 if (req->rq_auth_usr_mds) {
1095 CWARN("usr from %s authenticated as mds svc cred\n",
1096 portals_nid2str(req->rq_peer.peer_ni->pni_number,
1097 req->rq_peer.peer_id.nid, nidstr));
1099 if (req->rq_auth_usr_oss) {
1100 CWARN("usr from %s authenticated as oss svc cred\n",
1101 portals_nid2str(req->rq_peer.peer_ni->pni_number,
1102 req->rq_peer.peer_id.nid, nidstr));
1105 /* This is simplified since right now we don't support
1106 * INIT_CONTINUE yet.
1108 if (gc->gc_proc == RPC_GSS_PROC_INIT) {
1109 struct ptlrpcs_wire_hdr *hdr;
1111 hdr = buf_to_sec_hdr(req->rq_reqbuf);
1112 req->rq_reqmsg = buf_to_lustre_msg(req->rq_reqbuf);
1113 req->rq_reqlen = hdr->msg_len;
1120 rsc_put(&rsci->h, &rsc_cache);
1122 rsi_put(&rsip->h, &rsi_cache);
1124 rsi_put(&rsikey->h, &rsi_cache);
/* Handle RPC_GSS_PROC_DATA: look up the context by handle, dispatch on
 * the requested service (integrity -> verify MIC, privacy -> unseal),
 * and on success copy the context's auth attributes into the request.
 * On GSS failure an error notify is packed; the rsc reference is
 * dropped on exit. */
1130 gss_svcsec_handle_data(struct ptlrpc_request *req,
1131 struct rpc_gss_wire_cred *gc,
1132 __u32 *secdata, __u32 seclen,
1133 enum ptlrpcs_error *res)
1136 char nidstr[PTL_NALFMT_SIZE];
1141 *res = PTLRPCS_GSS_CREDPROBLEM;
1143 rsci = gss_svc_searchbyctx(&gc->gc_ctx);
1145 CWARN("Invalid gss context handle from %s\n",
1146 portals_nid2str(req->rq_peer.peer_ni->pni_number,
1147 req->rq_peer.peer_id.nid, nidstr));
1148 major = GSS_S_NO_CONTEXT;
1152 switch (gc->gc_svc) {
1153 case PTLRPCS_GSS_SVC_INTEGRITY:
1154 major = gss_svc_verify_request(req, rsci, gc, secdata, seclen);
1155 if (major == GSS_S_COMPLETE)
1158 CWARN("fail in verify:0x%x: ctx %p@%s\n", major, rsci,
1159 portals_nid2str(req->rq_peer.peer_ni->pni_number,
1160 req->rq_peer.peer_id.nid, nidstr));
1162 case PTLRPCS_GSS_SVC_PRIVACY:
1163 major = gss_svc_unseal_request(req, rsci, gc, secdata, seclen);
1164 if (major == GSS_S_COMPLETE)
1167 CWARN("fail in decrypt:0x%x: ctx %p@%s\n", major, rsci,
1168 portals_nid2str(req->rq_peer.peer_ni->pni_number,
1169 req->rq_peer.peer_id.nid, nidstr));
1172 CERROR("unsupported gss service %d\n", gc->gc_svc);
1173 GOTO(out, rc = SVC_DROP);
1176 req->rq_remote_realm = rsci->remote_realm;
1177 req->rq_auth_usr_mds = rsci->auth_usr_mds;
1178 req->rq_auth_usr_oss = rsci->auth_usr_oss;
1179 req->rq_auth_uid = rsci->cred.vc_uid;
1180 req->rq_mapped_uid = rsci->mapped_uid;
1183 GOTO(out, rc = SVC_OK);
1186 if (gss_pack_err_notify(req, major, 0))
1192 rsc_put(&rsci->h, &rsc_cache);
/* Handle RPC_GSS_PROC_DESTROY: only the integrity service is accepted;
 * the request is MIC-verified, an empty reply is packed (is_fini), and
 * the context is marked NEGATIVE so it cannot be used again.  The rsc
 * reference is dropped on exit. */
1197 gss_svcsec_handle_destroy(struct ptlrpc_request *req,
1198 struct rpc_gss_wire_cred *gc,
1199 __u32 *secdata, __u32 seclen,
1200 enum ptlrpcs_error *res)
1202 struct gss_svc_data *svcdata = req->rq_svcsec_data;
1204 char nidstr[PTL_NALFMT_SIZE];
1209 *res = PTLRPCS_GSS_CREDPROBLEM;
1211 rsci = gss_svc_searchbyctx(&gc->gc_ctx);
1213 CWARN("invalid gss context handle for destroy.\n");
1217 if (gc->gc_svc != PTLRPCS_GSS_SVC_INTEGRITY) {
1218 CERROR("service %d is not supported in destroy.\n",
1220 GOTO(out, rc = SVC_DROP);
1223 *res = gss_svc_verify_request(req, rsci, gc, secdata, seclen);
1225 GOTO(out, rc = SVC_DROP);
1227 /* compose reply, which is actually nothing */
1228 svcdata->is_fini = 1;
1229 if (lustre_pack_reply(req, 0, NULL, NULL))
1230 GOTO(out, rc = SVC_DROP);
1232 CDEBUG(D_SEC, "svcsec destroy gss context %p(%u@%s)\n",
1233 rsci, rsci->cred.vc_uid,
1234 portals_nid2str(req->rq_peer.peer_ni->pni_number,
1235 req->rq_peer.peer_id.nid, nidstr));
1237 set_bit(CACHE_NEGATIVE, &rsci->h.flags);
1241 rsc_put(&rsci->h, &rsc_cache);
1246 * let incoming requests go through the security check:
1247 * o context establishment: invoke user space helper
1248 * o data exchange: verify/decrypt
1249 * o context destruction: mark context invalid
1251 * in most cases an error results in the packet being dropped silently.
/* Entry point for incoming GSS-flavored requests: validate the wire
 * header and sec-data length, allocate per-request gss_svc_data, parse
 * the five-word GSS header (version/subflavor/proc/seq/svc), copy the
 * context handle into fresh storage (the reqbuf may be released if the
 * request is delayed for recovery), and dispatch on gc_proc to the
 * init/data/destroy handlers.  On SVC_DROP the svcsec data is freed. */
1254 gss_svcsec_accept(struct ptlrpc_request *req, enum ptlrpcs_error *res)
1256 struct gss_svc_data *svcdata;
1257 struct rpc_gss_wire_cred *gc;
1258 struct ptlrpcs_wire_hdr *sec_hdr;
1259 __u32 subflavor, seclen, *secdata, version;
1263 CDEBUG(D_SEC, "request %p\n", req);
1264 LASSERT(req->rq_reqbuf);
1265 LASSERT(req->rq_reqbuf_len);
1267 *res = PTLRPCS_BADCRED;
1269 sec_hdr = buf_to_sec_hdr(req->rq_reqbuf);
1270 LASSERT(SEC_FLAVOR_MAJOR(sec_hdr->flavor) == PTLRPCS_FLVR_MAJOR_GSS);
1272 seclen = req->rq_reqbuf_len - sizeof(*sec_hdr) - sec_hdr->msg_len;
1273 secdata = (__u32 *) buf_to_sec_data(req->rq_reqbuf);
1275 if (sec_hdr->sec_len > seclen) {
1276 CERROR("seclen %d, while max buf %d\n",
1277 sec_hdr->sec_len, seclen);
1281 if (seclen < 6 * 4) {
1282 CERROR("sec size %d too small\n", seclen);
1286 LASSERT(!req->rq_svcsec_data);
1287 OBD_ALLOC(svcdata, sizeof(*svcdata));
1289 CERROR("fail to alloc svcdata\n");
1292 req->rq_svcsec_data = svcdata;
1293 gc = &svcdata->clcred;
1295 /* Now secdata/seclen is what we want to parse
1297 version = le32_to_cpu(*secdata++); /* version */
1298 subflavor = le32_to_cpu(*secdata++); /* subflavor */
1299 gc->gc_proc = le32_to_cpu(*secdata++); /* proc */
1300 gc->gc_seq = le32_to_cpu(*secdata++); /* seq */
1301 gc->gc_svc = le32_to_cpu(*secdata++); /* service */
1304 CDEBUG(D_SEC, "wire gss_hdr: %u/%u/%u/%u/%u\n",
1305 version, subflavor, gc->gc_proc,
1306 gc->gc_seq, gc->gc_svc);
1308 if (version != PTLRPC_SEC_GSS_VERSION) {
1309 CERROR("gss version mismatch: %d - %d\n",
1310 version, PTLRPC_SEC_GSS_VERSION);
1311 GOTO(err_free, rc = SVC_DROP);
1314 /* We _must_ alloc new storage for gc_ctx. In case of recovery
1315 * request will be saved to delayed handling, at that time the
1316 * incoming buffer might have already been released.
1318 if (rawobj_extract_alloc(&gc->gc_ctx, &secdata, &seclen)) {
1319 CERROR("fail to obtain gss context handle\n");
1320 GOTO(err_free, rc = SVC_DROP);
1323 *res = PTLRPCS_BADVERF;
1324 switch(gc->gc_proc) {
1325 case RPC_GSS_PROC_INIT:
1326 case RPC_GSS_PROC_CONTINUE_INIT:
1327 rc = gss_svcsec_handle_init(req, gc, secdata, seclen, res);
1329 case RPC_GSS_PROC_DATA:
1330 rc = gss_svcsec_handle_data(req, gc, secdata, seclen, res);
1332 case RPC_GSS_PROC_DESTROY:
1333 rc = gss_svcsec_handle_destroy(req, gc, secdata, seclen, res);
1341 if (rc == SVC_DROP && req->rq_svcsec_data) {
1342 OBD_FREE(req->rq_svcsec_data, sizeof(struct gss_svc_data));
1343 req->rq_svcsec_data = NULL;
/* Protect the outgoing reply.  Init/fini/error-notify replies were
 * fully composed earlier and pass through untouched.  For DATA replies,
 * look the context up again, emit the wire header and the standard
 * seven-word GSS header, then either append a MIC over the lustre
 * message (integrity) or replace the payload with kgss_wrap() output
 * (privacy), back-patching the reserved length word and sec_len.
 * The rsc reference is dropped on exit. */
1350 gss_svcsec_authorize(struct ptlrpc_request *req)
1352 struct ptlrpc_reply_state *rs = req->rq_reply_state;
1353 struct gss_svc_data *gsd = (struct gss_svc_data *)req->rq_svcsec_data;
1354 struct rpc_gss_wire_cred *gc = &gsd->clcred;
1356 struct ptlrpcs_wire_hdr *sec_hdr;
1357 rawobj_buf_t msg_buf;
1358 rawobj_t cipher_buf;
1359 __u32 *vp, *vpsave, major, vlen, seclen;
1365 LASSERT(rs->rs_repbuf);
1368 if (gsd->is_init || gsd->is_init_continue ||
1369 gsd->is_err_notify || gsd->is_fini) {
1370 /* nothing to do in these cases */
1371 CDEBUG(D_SEC, "req %p: init/fini/err\n", req);
1375 if (gc->gc_proc != RPC_GSS_PROC_DATA) {
1376 CERROR("proc %d not support\n", gc->gc_proc);
1380 rscp = gss_svc_searchbyctx(&gc->gc_ctx);
1382 CERROR("ctx %u disapeared under us\n",
1383 *((__u32 *) gc->gc_ctx.data));
1387 sec_hdr = (struct ptlrpcs_wire_hdr *) rs->rs_repbuf;
1388 switch (gc->gc_svc) {
1389 case PTLRPCS_GSS_SVC_INTEGRITY:
1390 /* prepare various pointers */
1391 lmsg.len = req->rq_replen;
1392 lmsg.data = (__u8 *) (rs->rs_repbuf + sizeof(*sec_hdr));
1393 vp = (__u32 *) (lmsg.data + lmsg.len);
1394 vlen = rs->rs_repbuf_len - sizeof(*sec_hdr) - lmsg.len;
1397 sec_hdr->flavor = cpu_to_le32(PTLRPCS_FLVR_GSS_AUTH);
1398 sec_hdr->msg_len = cpu_to_le32(req->rq_replen);
1400 /* standard gss hdr */
1401 LASSERT(vlen >= 7 * 4);
1402 *vp++ = cpu_to_le32(PTLRPC_SEC_GSS_VERSION);
1403 *vp++ = cpu_to_le32(PTLRPCS_FLVR_KRB5I);
1404 *vp++ = cpu_to_le32(RPC_GSS_PROC_DATA);
1405 *vp++ = cpu_to_le32(gc->gc_seq);
1406 *vp++ = cpu_to_le32(PTLRPCS_GSS_SVC_INTEGRITY);
1407 *vp++ = 0; /* fake ctx handle */
1408 vpsave = vp++; /* reserve size */
1412 mic.data = (unsigned char *)vp;
1414 major = kgss_get_mic(rscp->mechctx, 0, &lmsg, &mic);
1416 CERROR("fail to get MIC: 0x%x\n", major);
1417 GOTO(out, ret = -EINVAL);
1419 *vpsave = cpu_to_le32(mic.len);
1420 seclen = seclen - vlen + mic.len;
1421 sec_hdr->sec_len = cpu_to_le32(seclen);
1422 rs->rs_repdata_len += size_round(seclen);
1424 case PTLRPCS_GSS_SVC_PRIVACY:
1425 vp = (__u32 *) (rs->rs_repbuf + sizeof(*sec_hdr));
1426 vlen = rs->rs_repbuf_len - sizeof(*sec_hdr);
1429 sec_hdr->flavor = cpu_to_le32(PTLRPCS_FLVR_GSS_PRIV);
1430 sec_hdr->msg_len = cpu_to_le32(0);
1432 /* standard gss hdr */
1433 LASSERT(vlen >= 7 * 4);
1434 *vp++ = cpu_to_le32(PTLRPC_SEC_GSS_VERSION);
1435 *vp++ = cpu_to_le32(PTLRPCS_FLVR_KRB5I);
1436 *vp++ = cpu_to_le32(RPC_GSS_PROC_DATA);
1437 *vp++ = cpu_to_le32(gc->gc_seq);
1438 *vp++ = cpu_to_le32(PTLRPCS_GSS_SVC_PRIVACY);
1439 *vp++ = 0; /* fake ctx handle */
1440 vpsave = vp++; /* reserve size */
1443 msg_buf.buf = (__u8 *) rs->rs_msg - GSS_PRIVBUF_PREFIX_LEN;
1444 msg_buf.buflen = req->rq_replen + GSS_PRIVBUF_PREFIX_LEN +
1445 GSS_PRIVBUF_SUFFIX_LEN;
1446 msg_buf.dataoff = GSS_PRIVBUF_PREFIX_LEN;
1447 msg_buf.datalen = req->rq_replen;
1449 cipher_buf.data = (__u8 *) vp;
1450 cipher_buf.len = vlen;
1452 major = kgss_wrap(rscp->mechctx, GSS_C_QOP_DEFAULT,
1453 &msg_buf, &cipher_buf);
1455 CERROR("failed to wrap: 0x%x\n", major);
1456 GOTO(out, ret = -EINVAL);
1459 *vpsave = cpu_to_le32(cipher_buf.len);
1460 seclen = seclen - vlen + cipher_buf.len;
1461 sec_hdr->sec_len = cpu_to_le32(seclen);
1462 rs->rs_repdata_len += size_round(seclen);
1465 CERROR("Unknown service %d\n", gc->gc_svc);
1466 GOTO(out, ret = -EINVAL);
1470 rsc_put(&rscp->h, &rsc_cache);
/* Release the per-request GSS security data that gss_svcsec_accept()
 * attached to the request, then clear the back-pointer so the data
 * cannot be freed twice.
 * NOTE(review): this extract is missing lines (opening brace and,
 * presumably, a NULL check on gsd before the CDEBUG) -- confirm
 * against the full source. */
1476 void gss_svcsec_cleanup_req(struct ptlrpc_svcsec *svcsec,
1477                             struct ptlrpc_request *req)
1479 struct gss_svc_data *gsd = (struct gss_svc_data *) req->rq_svcsec_data;
1482 CDEBUG(D_SEC, "no svc_data present. do nothing\n");
/* Free the raw client context handle first, then the container. */
1486 /* gc_ctx is allocated, see gss_svcsec_accept() */
1487 rawobj_free(&gsd->clcred.gc_ctx);
1489 OBD_FREE(gsd, sizeof(*gsd));
1490 req->rq_svcsec_data = NULL;
/* Estimate how many bytes of the reply buffer must be reserved for the
 * GSS security portion of the reply, based on the request's GSS
 * sub-state recorded in rq_svcsec_data:
 *   - init / error-notify replies return the pre-computed reserve_len
 *     (rounded up to a 4-byte boundary);
 *   - fini replies reserve nothing extra;
 *   - ordinary data replies reserve a fixed authentication payload,
 *     and for the privacy service additionally the message size plus
 *     the GSS_PRIVBUF prefix/suffix padding, since the wrapped message
 *     travels inside the security section.
 * NOTE(review): some lines are missing from this extract (braces,
 * and apparently a RETURN(0) after the is_fini CDEBUG) -- confirm
 * against the full source. */
1495 int gss_svcsec_est_payload(struct ptlrpc_svcsec *svcsec,
1496                            struct ptlrpc_request *req,
1499 struct gss_svc_data *svcdata = req->rq_svcsec_data;
1502 /* just return the pre-set reserve_len for init/fini/err cases.
1505 if (svcdata->is_init) {
1506         CDEBUG(D_SEC, "is_init, reserver size %d(%d)\n",
1507                size_round(svcdata->reserve_len),
1508                svcdata->reserve_len);
1509         LASSERT(svcdata->reserve_len);
/* init reserve_len is expected to already be 4-byte aligned */
1510         LASSERT(svcdata->reserve_len % 4 == 0);
1511         RETURN(size_round(svcdata->reserve_len));
1512 } else if (svcdata->is_err_notify) {
1513         CDEBUG(D_SEC, "is_err_notify, reserver size %d(%d)\n",
1514                size_round(svcdata->reserve_len),
1515                svcdata->reserve_len);
1516         RETURN(size_round(svcdata->reserve_len));
1517 } else if (svcdata->is_fini) {
1518         CDEBUG(D_SEC, "is_fini, reserver size 0\n");
/* none/integrity: message stays in the message section, only gss
 * header + MIC need room in the security section */
1521 if (svcdata->clcred.gc_svc == PTLRPCS_GSS_SVC_NONE ||
1522     svcdata->clcred.gc_svc == PTLRPCS_GSS_SVC_INTEGRITY)
1523         RETURN(size_round(GSS_MAX_AUTH_PAYLOAD));
/* privacy: the whole (padded) message is wrapped into the security
 * section, so reserve room for it as well */
1524 else if (svcdata->clcred.gc_svc == PTLRPCS_GSS_SVC_PRIVACY)
1525         RETURN(size_round16(GSS_MAX_AUTH_PAYLOAD + msgsize +
1526                             GSS_PRIVBUF_PREFIX_LEN +
1527                             GSS_PRIVBUF_SUFFIX_LEN));
1529 CERROR("unknown gss svc %u\n", svcdata->clcred.gc_svc);
/* Allocate the reply state and reply buffer for a request.
 * For the privacy service the cleartext reply message is kept out of
 * the on-wire buffer: it gets its own allocation, padded with
 * GSS_PRIVBUF_PREFIX/SUFFIX bytes so the mech code can wrap it
 * in place.  For all other cases (none/integrity and the
 * init/fini/err control messages) the message is packed directly into
 * the reply buffer.
 * NOTE(review): lines are missing from this extract; the two rs_msg
 * assignments below (rear-of-buffer vs. separate OBD_ALLOC) look like
 * alternatives whose guarding preprocessor/brace lines were dropped --
 * confirm against the full source. */
1537 int gss_svcsec_alloc_repbuf(struct ptlrpc_svcsec *svcsec,
1538                             struct ptlrpc_request *req,
1541 struct gss_svc_data *gsd = (struct gss_svc_data *) req->rq_svcsec_data;
1542 struct ptlrpc_reply_state *rs;
1543 int msg_payload, sec_payload;
1547 /* determine the security type: none/auth or priv, we have
1548  * different pack scheme for them.
1549  * init/fini/err will always be treated as none/auth.
1552 if (!gsd->is_init && !gsd->is_init_continue &&
1553     !gsd->is_fini && !gsd->is_err_notify &&
1554     gsd->clcred.gc_svc == PTLRPCS_GSS_SVC_PRIVACY)
/* privacy carries the message inside the security section, so the
 * plain message payload of the reply buffer is zero */
1559 msg_payload = privacy ? 0 : msgsize;
1560 sec_payload = gss_svcsec_est_payload(svcsec, req, msgsize);
1562 rc = svcsec_alloc_reply_state(req, msg_payload, sec_payload);
1566 rs = req->rq_reply_state;
1568 rs->rs_msg_len = msgsize;
1571 /* we can choose to let msg simply point to the rear of the
1572  * buffer, which lead to buffer overlap when doing encryption.
1573  * usually it's ok and it indeed passed all existing tests.
1574  * but not sure if there will be subtle problems in the future.
1575  * so right now we choose to alloc another new buffer. we'll
/* alternative A: point rs_msg at the rear of the reply buffer
 * (overlapping in-place encryption) */
1579 rs->rs_msg = (struct lustre_msg *)
1580              (rs->rs_repbuf + rs->rs_repbuf_len -
1581               msgsize - GSS_PRIVBUF_SUFFIX_LEN);
/* alternative B (current choice per the comment above): allocate a
 * separate message buffer with wrap padding on both ends */
1585 msgsize += GSS_PRIVBUF_PREFIX_LEN + GSS_PRIVBUF_SUFFIX_LEN;
1586 OBD_ALLOC(msgbuf, msgsize);
1588         CERROR("can't alloc %d\n", msgsize);
/* undo the reply-state allocation on failure */
1589         svcsec_free_reply_state(rs);
1590         req->rq_reply_state = NULL;
1593 rs->rs_msg = (struct lustre_msg *)
1594              (msgbuf + GSS_PRIVBUF_PREFIX_LEN);
1597 req->rq_repmsg = rs->rs_msg;
/* Free a reply buffer allocated by gss_svcsec_alloc_repbuf().
 * If rs_msg does not point inside rs_buf, it is the separately
 * allocated privacy message buffer, so free it -- including the
 * GSS_PRIVBUF prefix/suffix padding that surrounds it -- before
 * releasing the reply state itself. */
1603 void gss_svcsec_free_repbuf(struct ptlrpc_svcsec *svcsec,
1604                             struct ptlrpc_reply_state *rs)
1606 unsigned long p1 = (unsigned long) rs->rs_msg;
1607 unsigned long p2 = (unsigned long) rs->rs_buf;
1609 LASSERT(rs->rs_buf);
1610 LASSERT(rs->rs_msg);
1611 LASSERT(rs->rs_msg_len);
/* rs_msg outside [rs_buf, rs_buf + rs_buf_len) => privacy case,
 * separately allocated in gss_svcsec_alloc_repbuf() */
1613 if (p1 < p2 || p1 >= p2 + rs->rs_buf_len) {
1614         char *start = (char*) rs->rs_msg - GSS_PRIVBUF_PREFIX_LEN;
1615         int size = rs->rs_msg_len + GSS_PRIVBUF_PREFIX_LEN +
1616                    GSS_PRIVBUF_SUFFIX_LEN;
1617         OBD_FREE(start, size);
1620 svcsec_free_reply_state(rs);
/* Server-side GSS security policy operations table, registered with
 * the ptlrpc svcsec framework by gss_svc_init() below.
 * NOTE(review): the closing "};" is missing from this extract. */
1623 struct ptlrpc_svcsec svcsec_gss = {
1624         .pss_owner       = THIS_MODULE,
1625         .pss_name        = "svcsec.gss",
1626         .pss_flavor      = PTLRPCS_FLVR_MAJOR_GSS,
1627         .accept          = gss_svcsec_accept,
1628         .authorize       = gss_svcsec_authorize,
1629         .alloc_repbuf    = gss_svcsec_alloc_repbuf,
1630         .free_repbuf     = gss_svcsec_free_repbuf,
1631         .cleanup_req     = gss_svcsec_cleanup_req,
/* Purge every entry from both the rpcsec init (rsi) and established
 * context (rsc) caches.  Exported so other modules can force a full
 * flush of server-side GSS state. */
1635 void lgss_svc_cache_purge_all(void)
1637 cache_purge(&rsi_cache);
1638 cache_purge(&rsc_cache);
1640 EXPORT_SYMBOL(lgss_svc_cache_purge_all);
/* Flush cached server-side GSS state associated with a single uid.
 * NOTE(review): the function body is not visible in this extract --
 * confirm the actual behavior against the full source. */
1642 void lgss_svc_cache_flush(__u32 uid)
1646 EXPORT_SYMBOL(lgss_svc_cache_flush);
/* Module init for the GSS server side: register the svcsec_gss policy
 * with the ptlrpc framework, then register the context (rsc) and init
 * (rsi) upcall caches with the sunrpc cache layer.
 * NOTE(review): the error handling between these calls is not visible
 * in this extract -- confirm against the full source. */
1648 int gss_svc_init(void)
1652 rc = svcsec_register(&svcsec_gss);
1654 cache_register(&rsc_cache);
1655 cache_register(&rsi_cache);
/* Module cleanup: unregister both caches and the svcsec_gss policy.
 * The rsi cache can still be pinned by an in-flight user-space upcall
 * (it takes no module refcount), so unregistration is retried in a
 * loop -- see the XXX note below. */
1660 void gss_svc_exit(void)
1664 /* XXX rsi didn't take module refcount. without really
1665  * cleanup it we can't simply go, later user-space operations
1666  * will certainly cause oops.
1667  * use space might slow or stuck on something, wait it for
1668  * a bit -- bad hack.
/* retry until the cache layer reports no outstanding references,
 * sleeping 2s and purging between attempts */
1670 while ((rc = cache_unregister(&rsi_cache))) {
1671         CERROR("unregister rsi cache: %d. Try again\n", rc);
1672         schedule_timeout(2 * HZ);
1673         cache_purge(&rsi_cache);
/* rsc and svcsec unregistration are attempted once; failures are
 * only logged */
1676 if ((rc = cache_unregister(&rsc_cache)))
1677         CERROR("unregister rsc cache: %d\n", rc);
1678 if ((rc = svcsec_unregister(&svcsec_gss)))
1679         CERROR("unregister svcsec_gss: %d\n", rc);