Whamcloud - gitweb
branch: HEAD
[fs/lustre-release.git] / lustre / ptlrpc / gss / sec_gss.c
1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2  * vim:expandtab:shiftwidth=8:tabstop=8:
3  *
4  * Modifications for Lustre
5  * Copyright 2004 - 2007, Cluster File Systems, Inc.
6  * All rights reserved
7  * Author: Eric Mei <ericm@clusterfs.com>
8  */
9
10 /*
11  * linux/net/sunrpc/auth_gss.c
12  *
13  * RPCSEC_GSS client authentication.
14  *
15  *  Copyright (c) 2000 The Regents of the University of Michigan.
16  *  All rights reserved.
17  *
18  *  Dug Song       <dugsong@monkey.org>
19  *  Andy Adamson   <andros@umich.edu>
20  *
21  *  Redistribution and use in source and binary forms, with or without
22  *  modification, are permitted provided that the following conditions
23  *  are met:
24  *
25  *  1. Redistributions of source code must retain the above copyright
26  *     notice, this list of conditions and the following disclaimer.
27  *  2. Redistributions in binary form must reproduce the above copyright
28  *     notice, this list of conditions and the following disclaimer in the
29  *     documentation and/or other materials provided with the distribution.
30  *  3. Neither the name of the University nor the names of its
31  *     contributors may be used to endorse or promote products derived
32  *     from this software without specific prior written permission.
33  *
34  *  THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
35  *  WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
36  *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
37  *  DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
38  *  FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
39  *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
40  *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
41  *  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
42  *  LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
43  *  NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
44  *  SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
45  *
46  */
47
48 #ifndef EXPORT_SYMTAB
49 # define EXPORT_SYMTAB
50 #endif
51 #define DEBUG_SUBSYSTEM S_SEC
52 #ifdef __KERNEL__
53 #include <linux/init.h>
54 #include <linux/module.h>
55 #include <linux/slab.h>
56 #include <linux/dcache.h>
57 #include <linux/fs.h>
58 #include <linux/random.h>
59 #include <linux/mutex.h>
60 #include <asm/atomic.h>
61 #else
62 #include <liblustre.h>
63 #endif
64
65 #include <obd.h>
66 #include <obd_class.h>
67 #include <obd_support.h>
68 #include <lustre/lustre_idl.h>
69 #include <lustre_net.h>
70 #include <lustre_import.h>
71 #include <lustre_sec.h>
72
73 #include "gss_err.h"
74 #include "gss_internal.h"
75 #include "gss_api.h"
76
77 #include <linux/crypto.h>
78
79 /********************************************
80  * wire data swabber                        *
81  ********************************************/
82
static
void gss_header_swabber(struct gss_header *ghdr)
{
        /* Byte-swap the fixed 32-bit fields of an on-wire gss header in
         * place.  Only the handle *length* is swabbed; the handle payload
         * that follows is opaque bytes and needs no conversion. */
        __swab32s(&ghdr->gh_version);
        __swab32s(&ghdr->gh_flags);
        __swab32s(&ghdr->gh_proc);
        __swab32s(&ghdr->gh_seq);
        __swab32s(&ghdr->gh_svc);
        __swab32s(&ghdr->gh_pad1);
        __swab32s(&ghdr->gh_pad2);
        __swab32s(&ghdr->gh_pad3);
        __swab32s(&ghdr->gh_handle.len);
}
96
97 struct gss_header *gss_swab_header(struct lustre_msg *msg, int segment)
98 {
99         struct gss_header *ghdr;
100
101         ghdr = lustre_swab_buf(msg, segment, sizeof(*ghdr),
102                                gss_header_swabber);
103
104         if (ghdr &&
105             sizeof(*ghdr) + ghdr->gh_handle.len > msg->lm_buflens[segment]) {
106                 CERROR("gss header require length %u, now %u received\n",
107                        (unsigned int) sizeof(*ghdr) + ghdr->gh_handle.len,
108                        msg->lm_buflens[segment]);
109                 return NULL;
110         }
111
112         return ghdr;
113 }
114
static
void gss_netobj_swabber(netobj_t *obj)
{
        /* only the length word is numeric; the payload is opaque bytes */
        __swab32s(&obj->len);
}
120
121 netobj_t *gss_swab_netobj(struct lustre_msg *msg, int segment)
122 {
123         netobj_t  *obj;
124
125         obj = lustre_swab_buf(msg, segment, sizeof(*obj), gss_netobj_swabber);
126         if (obj && sizeof(*obj) + obj->len > msg->lm_buflens[segment]) {
127                 CERROR("netobj require length %u but only %u received\n",
128                        (unsigned int) sizeof(*obj) + obj->len,
129                        msg->lm_buflens[segment]);
130                 return NULL;
131         }
132
133         return obj;
134 }
135
/*
 * The security payload size should really be obtained from the mechanism
 * layer, but since kerberos 5 is currently the only supported mechanism
 * we simply use its fixed sizes:
 *   krb5 header:         16 bytes
 *   krb5 checksum:       20 bytes
 */
#define GSS_KRB5_INTEG_MAX_PAYLOAD      (40)

/*
 * Estimate the extra wire bytes needed to protect a message of @msgsize
 * bytes.  @mechctx is currently unused (krb5-only assumption above).
 */
static inline
int gss_estimate_payload(struct gss_ctx *mechctx, int msgsize, int privacy)
{
        if (!privacy)
                return GSS_KRB5_INTEG_MAX_PAYLOAD;

        /* privacy mode: assume a cipher block size of at most 16 bytes
         * and reserve 16 bytes each for confounder, padding and block
         * rounding on top of the integrity payload and the message. */
        return GSS_KRB5_INTEG_MAX_PAYLOAD + msgsize + 16 + 16 + 16;
}
156
157 /*
158  * return signature size, otherwise < 0 to indicate error
159  */
static
int gss_sign_msg(struct lustre_msg *msg,
                 struct gss_ctx *mechctx,
                 __u32 proc, __u32 seq,
                 rawobj_t *handle)
{
        /* Fill in the gss header in segment 0 of @msg, compute a MIC over
         * all segments but the last, store it in the last segment, then
         * shrink the message to the actual MIC size.  Returns the new
         * message length, or -EPERM if the mechanism fails. */
        struct gss_header      *ghdr;
        rawobj_t                text[3], mic;
        int                     textcnt, mic_idx = msg->lm_bufcount - 1;
        __u32                   major;

        /* NOTE(review): text[] holds 3 entries, so this assumes
         * lm_bufcount <= 4 (at most 3 signed segments + MIC) — confirm
         * callers never pack more segments. */
        LASSERT(msg->lm_bufcount >= 3);

        /* gss hdr */
        LASSERT(msg->lm_buflens[0] >=
                sizeof(*ghdr) + (handle ? handle->len : 0));
        ghdr = lustre_msg_buf(msg, 0, 0);

        ghdr->gh_version = PTLRPC_GSS_VERSION;
        ghdr->gh_flags = 0;
        ghdr->gh_proc = proc;
        ghdr->gh_seq = seq;
        ghdr->gh_svc = PTLRPC_GSS_SVC_INTEGRITY;
        if (!handle) {
                /* fill in a fake one */
                ghdr->gh_handle.len = 0;
        } else {
                ghdr->gh_handle.len = handle->len;
                memcpy(ghdr->gh_handle.data, handle->data, handle->len);
        }

        /* MIC is computed over every segment except the last, which
         * receives the MIC itself */
        for (textcnt = 0; textcnt < mic_idx; textcnt++) {
                text[textcnt].len = msg->lm_buflens[textcnt];
                text[textcnt].data = lustre_msg_buf(msg, textcnt, 0);
        }

        mic.len = msg->lm_buflens[mic_idx];
        mic.data = lustre_msg_buf(msg, mic_idx, 0);

        major = lgss_get_mic(mechctx, textcnt, text, &mic);
        if (major != GSS_S_COMPLETE) {
                CERROR("fail to generate MIC: %08x\n", major);
                return -EPERM;
        }
        LASSERT(mic.len <= msg->lm_buflens[mic_idx]);

        /* drop the unused tail of the MIC segment from the wire size */
        return lustre_shrink_msg(msg, mic_idx, mic.len, 0);
}
209
210 /*
211  * return gss error
212  */
static
__u32 gss_verify_msg(struct lustre_msg *msg,
                   struct gss_ctx *mechctx)
{
        /* Verify the MIC stored in the last segment of @msg against all
         * preceding segments; mirrors the layout built by gss_sign_msg().
         * Returns the gss major status (GSS_S_COMPLETE on success). */
        rawobj_t         text[3];
        rawobj_t         mic;
        int              textcnt, mic_idx = msg->lm_bufcount - 1;
        __u32            major;

        /* NOTE(review): like gss_sign_msg(), assumes lm_bufcount <= 4 so
         * the 3-entry text[] is never overrun — callers check 3..4. */
        for (textcnt = 0; textcnt < mic_idx; textcnt++) {
                text[textcnt].len = msg->lm_buflens[textcnt];
                text[textcnt].data = lustre_msg_buf(msg, textcnt, 0);
        }

        mic.len = msg->lm_buflens[mic_idx];
        mic.data = lustre_msg_buf(msg, mic_idx, 0);

        major = lgss_verify_mic(mechctx, textcnt, text, &mic);
        if (major != GSS_S_COMPLETE)
                CERROR("mic verify error: %08x\n", major);

        return major;
}
236
237 /*
238  * return gss error code
239  */
static
__u32 gss_unseal_msg(struct gss_ctx *mechctx,
                   struct lustre_msg *msgbuf,
                   int *msg_len, int msgbuf_len)
{
        /* Verify and decrypt a privacy-protected message.  On success the
         * plaintext is copied back over @msgbuf (in place) and its length
         * is returned via @msg_len.  Returns a gss major status code.
         *
         * NOTE(review): @msgbuf_len is never used; the in-place memcpy is
         * bounded because the plaintext cannot exceed lm_buflens[2], which
         * lives inside @msgbuf itself — confirm this is the intent. */
        rawobj_t                 clear_obj, micobj, msgobj, token;
        __u8                    *clear_buf;
        int                      clear_buflen;
        __u32                    major;
        ENTRY;

        /* expected layout: 0 = gss header, 1 = header MIC, 2 = ciphertext */
        if (msgbuf->lm_bufcount != 3) {
                CERROR("invalid bufcount %d\n", msgbuf->lm_bufcount);
                RETURN(GSS_S_FAILURE);
        }

        /* verify gss header */
        msgobj.len = msgbuf->lm_buflens[0];
        msgobj.data = lustre_msg_buf(msgbuf, 0, 0);
        micobj.len = msgbuf->lm_buflens[1];
        micobj.data = lustre_msg_buf(msgbuf, 1, 0);

        major = lgss_verify_mic(mechctx, 1, &msgobj, &micobj);
        if (major != GSS_S_COMPLETE) {
                CERROR("priv: mic verify error: %08x\n", major);
                RETURN(major);
        }

        /* temporary clear text buffer, sized by the ciphertext segment
         * (plaintext is never larger than ciphertext) */
        clear_buflen = msgbuf->lm_buflens[2];
        OBD_ALLOC(clear_buf, clear_buflen);
        if (!clear_buf)
                RETURN(GSS_S_FAILURE);

        token.len = msgbuf->lm_buflens[2];
        token.data = lustre_msg_buf(msgbuf, 2, 0);

        clear_obj.len = clear_buflen;
        clear_obj.data = clear_buf;

        major = lgss_unwrap(mechctx, &token, &clear_obj);
        if (major != GSS_S_COMPLETE) {
                CERROR("priv: unwrap message error: %08x\n", major);
                GOTO(out_free, major = GSS_S_FAILURE);
        }
        LASSERT(clear_obj.len <= clear_buflen);

        /* now the decrypted message: overwrite the wire buffer in place */
        memcpy(msgbuf, clear_obj.data, clear_obj.len);
        *msg_len = clear_obj.len;

        major = GSS_S_COMPLETE;
out_free:
        /* NOTE(review): clear_buf held plaintext; it is freed without
         * being zeroized — consider scrubbing if this is sensitive. */
        OBD_FREE(clear_buf, clear_buflen);
        RETURN(major);
}
296
297 /********************************************
298  * gss client context manipulation helpers  *
299  ********************************************/
300
301 int cli_ctx_expire(struct ptlrpc_cli_ctx *ctx)
302 {
303         LASSERT(atomic_read(&ctx->cc_refcount));
304
305         if (!test_and_set_bit(PTLRPC_CTX_DEAD_BIT, &ctx->cc_flags)) {
306                 cfs_time_t now;
307
308                 clear_bit(PTLRPC_CTX_UPTODATE_BIT, &ctx->cc_flags);
309
310                 now = cfs_time_current_sec();
311                 if (ctx->cc_expire && cfs_time_aftereq(now, ctx->cc_expire))
312                         CWARN("ctx %p(%u->%s): get expired (%lds exceeds)\n",
313                               ctx, ctx->cc_vcred.vc_uid,
314                               sec2target_str(ctx->cc_sec),
315                               cfs_time_sub(now, ctx->cc_expire));
316                 else
317                         CWARN("ctx %p(%u->%s): force to die (%lds remains)\n",
318                               ctx, ctx->cc_vcred.vc_uid,
319                               sec2target_str(ctx->cc_sec),
320                               ctx->cc_expire == 0 ? 0 :
321                               cfs_time_sub(ctx->cc_expire, now));
322
323                 return 1;
324         }
325         return 0;
326 }
327
328 /*
329  * return 1 if the context is dead.
330  */
331 int cli_ctx_check_death(struct ptlrpc_cli_ctx *ctx)
332 {
333         if (unlikely(cli_ctx_is_dead(ctx)))
334                 return 1;
335
336         /* expire is 0 means never expire. a newly created gss context
337          * which during upcall may has 0 expiration
338          */
339         if (ctx->cc_expire == 0)
340                 return 0;
341
342         /* check real expiration */
343         if (cfs_time_after(ctx->cc_expire, cfs_time_current_sec()))
344                 return 0;
345
346         cli_ctx_expire(ctx);
347         return 1;
348 }
349
/*
 * Mark @gctx usable: compute its expiry from the mechanism context and
 * set the UPTODATE bit.
 */
void gss_cli_ctx_uptodate(struct gss_cli_ctx *gctx)
{
        struct ptlrpc_cli_ctx *ctx = &gctx->gc_base;
        unsigned long ctx_expiry;

        if (lgss_inquire_context(gctx->gc_mechctx, &ctx_expiry)) {
                CERROR("ctx %p(%u): unable to inquire, expire it now\n",
                       gctx, ctx->cc_vcred.vc_uid);
                ctx_expiry = 1; /* make it expired now */
        }

        ctx->cc_expire = gss_round_ctx_expiry(ctx_expiry,
                                              ctx->cc_sec->ps_flags);

        /* At this point this ctx might have been marked as dead by
         * someone else, in which case nobody will make further use of it.
         * we don't care; marking it UPTODATE anyway still helps tear down
         * the server-side context when this one is destroyed.
         */
        set_bit(PTLRPC_CTX_UPTODATE_BIT, &ctx->cc_flags);

        CWARN("%s ctx %p(%u->%s), will expire at %lu(%lds lifetime)\n",
              (ctx->cc_sec->ps_flags & PTLRPC_SEC_FL_REVERSE ?
               "server installed reverse" : "client refreshed"),
              ctx, ctx->cc_vcred.vc_uid, sec2target_str(ctx->cc_sec),
              ctx->cc_expire, (long) (ctx->cc_expire - get_seconds()));
}
377
378 static
379 void gss_cli_ctx_finalize(struct gss_cli_ctx *gctx)
380 {
381         if (gctx->gc_mechctx)
382                 lgss_delete_sec_context(&gctx->gc_mechctx);
383
384         rawobj_free(&gctx->gc_handle);
385 }
386
387 /*
388  * Based on sequence number algorithm as specified in RFC 2203.
389  *
390  * modified for our own problem: arriving request has valid sequence number,
391  * but unwrapping request might cost a long time, after that its sequence
392  * are not valid anymore (fall behind the window). It rarely happen, mostly
393  * under extreme load.
394  *
395  * note we should not check sequence before verify the integrity of incoming
396  * request, because just one attacking request with high sequence number might
397  * cause all following request be dropped.
398  *
399  * so here we use a multi-phase approach: prepare 2 sequence windows,
400  * "main window" for normal sequence and "back window" for fall behind sequence.
401  * and 3-phase checking mechanism:
402  *  0 - before integrity verification, perform a initial sequence checking in
403  *      main window, which only try and don't actually set any bits. if the
404  *      sequence is high above the window or fit in the window and the bit
405  *      is 0, then accept and proceed to integrity verification. otherwise
406  *      reject this sequence.
407  *  1 - after integrity verification, check in main window again. if this
408  *      sequence is high above the window or fit in the window and the bit
409  *      is 0, then set the bit and accept; if it fit in the window but bit
410  *      already set, then reject; if it fall behind the window, then proceed
411  *      to phase 2.
412  *  2 - check in back window. if it is high above the window or fit in the
413  *      window and the bit is 0, then set the bit and accept. otherwise reject.
414  *
415  * return value:
416  *   1: looks like a replay
417  *   0: is ok
418  *  -1: is a replay
419  *
420  * note phase 0 is necessary, because otherwise replay attacking request of
421  * sequence which between the 2 windows can't be detected.
422  *
423  * this mechanism can't totally solve the problem, but could help much less
424  * number of valid requests be dropped.
425  */
426 static
427 int gss_do_check_seq(unsigned long *window, __u32 win_size, __u32 *max_seq,
428                      __u32 seq_num, int phase)
429 {
430         LASSERT(phase >= 0 && phase <= 2);
431
432         if (seq_num > *max_seq) {
433                 /*
434                  * 1. high above the window
435                  */
436                 if (phase == 0)
437                         return 0;
438
439                 if (seq_num >= *max_seq + win_size) {
440                         memset(window, 0, win_size / 8);
441                         *max_seq = seq_num;
442                 } else {
443                         while(*max_seq < seq_num) {
444                                 (*max_seq)++;
445                                 __clear_bit((*max_seq) % win_size, window);
446                         }
447                 }
448                 __set_bit(seq_num % win_size, window);
449         } else if (seq_num + win_size <= *max_seq) {
450                 /*
451                  * 2. low behind the window
452                  */
453                 if (phase == 0 || phase == 2)
454                         goto replay;
455
456                 CWARN("seq %u is %u behind (size %d), check backup window\n",
457                       seq_num, *max_seq - win_size - seq_num, win_size);
458                 return 1;
459         } else {
460                 /*
461                  * 3. fit into the window
462                  */
463                 switch (phase) {
464                 case 0:
465                         if (test_bit(seq_num % win_size, window))
466                                 goto replay;
467                         break;
468                 case 1:
469                 case 2:
470                      if (__test_and_set_bit(seq_num % win_size, window))
471                                 goto replay;
472                         break;
473                 }
474         }
475
476         return 0;
477
478 replay:
479         CERROR("seq %u (%s %s window) is a replay: max %u, winsize %d\n",
480                seq_num,
481                seq_num + win_size > *max_seq ? "in" : "behind",
482                phase == 2 ? "backup " : "main",
483                *max_seq, win_size);
484         return -1;
485 }
486
/*
 * Based on sequence number algorithm as specified in RFC 2203.
 *
 * if @set == 0: initial check (phase 0), don't set any bit in window
 * if @set == 1: final check, set bit in window
 *
 * return 0 if the sequence is acceptable, non-zero otherwise.
 */
int gss_check_seq_num(struct gss_svc_seq_data *ssd, __u32 seq_num, int set)
{
        int rc = 0;

        spin_lock(&ssd->ssd_lock);

        if (set == 0) {
                /*
                 * phase 0 testing
                 */
                rc = gss_do_check_seq(ssd->ssd_win_main, GSS_SEQ_WIN_MAIN,
                                      &ssd->ssd_max_main, seq_num, 0);
                if (unlikely(rc))
                        gss_stat_oos_record_svc(0, 1);
        } else {
                /*
                 * phase 1 checking main window
                 */
                rc = gss_do_check_seq(ssd->ssd_win_main, GSS_SEQ_WIN_MAIN,
                                      &ssd->ssd_max_main, seq_num, 1);
                switch (rc) {
                case -1:
                        gss_stat_oos_record_svc(1, 1);
                        /* fall through */
                case 0:
                        goto exit;
                }
                /*
                 * phase 2 checking back window (only reached when phase 1
                 * returned 1, i.e. the sequence fell behind the main window)
                 */
                rc = gss_do_check_seq(ssd->ssd_win_back, GSS_SEQ_WIN_BACK,
                                      &ssd->ssd_max_back, seq_num, 2);
                if (rc)
                        gss_stat_oos_record_svc(2, 1);
                else
                        gss_stat_oos_record_svc(2, 0);
        }
exit:
        spin_unlock(&ssd->ssd_lock);
        return rc;
}
534
535 /***************************************
536  * cred APIs                           *
537  ***************************************/
538
static inline
int gss_cli_payload(struct ptlrpc_cli_ctx *ctx,
                    int msgsize, int privacy)
{
        /* mechanism-independent payload estimate; @ctx is currently
         * unused because gss_estimate_payload() hard-codes krb5 sizes */
        return gss_estimate_payload(NULL, msgsize, privacy);
}
545
546 int gss_cli_ctx_match(struct ptlrpc_cli_ctx *ctx, struct vfs_cred *vcred)
547 {
548         return (ctx->cc_vcred.vc_uid == vcred->vc_uid);
549 }
550
551 static
552 void gss_cli_ctx_flags2str(unsigned long flags, char *buf, int bufsize)
553 {
554         buf[0] = '\0';
555
556         if (flags & PTLRPC_CTX_UPTODATE)
557                 strncat(buf, "uptodate,", bufsize);
558         if (flags & PTLRPC_CTX_DEAD)
559                 strncat(buf, "dead,", bufsize);
560         if (flags & PTLRPC_CTX_ERROR)
561                 strncat(buf, "error,", bufsize);
562         if (flags & PTLRPC_CTX_CACHED)
563                 strncat(buf, "cached,", bufsize);
564         if (flags & PTLRPC_CTX_ETERNAL)
565                 strncat(buf, "eternal,", bufsize);
566         if (buf[0] == '\0')
567                 strncat(buf, "-,", bufsize);
568
569         buf[strlen(buf) - 1] = '\0';
570 }
571
572 int gss_cli_ctx_display(struct ptlrpc_cli_ctx *ctx, char *buf, int bufsize)
573 {
574         struct gss_cli_ctx     *gctx;
575         char                    flags_str[40];
576         int                     written;
577
578         gctx = container_of(ctx, struct gss_cli_ctx, gc_base);
579
580         gss_cli_ctx_flags2str(ctx->cc_flags, flags_str, sizeof(flags_str));
581
582         written = snprintf(buf, bufsize,
583                         "UID %d:\n" 
584                         "  flags:       %s\n"
585                         "  seqwin:      %d\n"
586                         "  sequence:    %d\n",
587                         ctx->cc_vcred.vc_uid,
588                         flags_str,
589                         gctx->gc_win,
590                         atomic_read(&gctx->gc_seq));
591
592         if (gctx->gc_mechctx) {
593                 written += lgss_display(gctx->gc_mechctx,
594                                         buf + written, bufsize - written);
595         }
596
597         return written;
598 }
599
/*
 * Sign an outgoing request with integrity protection.  Re-signs (redo)
 * when too many later sequence numbers were issued while we worked, to
 * avoid the server dropping us for falling behind its replay window.
 */
int gss_cli_ctx_sign(struct ptlrpc_cli_ctx *ctx,
                     struct ptlrpc_request *req)
{
        struct gss_cli_ctx      *gctx;
        __u32                    seq;
        int                      rc;
        ENTRY;

        LASSERT(req->rq_reqbuf);
        LASSERT(req->rq_reqbuf->lm_bufcount >= 3);
        LASSERT(req->rq_cli_ctx == ctx);

        /* nothing to do for context negotiation RPCs */
        if (req->rq_ctx_init)
                RETURN(0);

        gctx = container_of(ctx, struct gss_cli_ctx, gc_base);
redo:
        seq = atomic_inc_return(&gctx->gc_seq);

        rc = gss_sign_msg(req->rq_reqbuf, gctx->gc_mechctx,
                          gctx->gc_proc, seq, &gctx->gc_handle);
        if (rc < 0)
                RETURN(rc);

        /* gss_sign_msg() msg might take long time to finish, in which period
         * more rpcs could be wrapped up and sent out. if we found too many
         * of them we should repack this rpc, because sent it too late might
         * lead to the sequence number fall behind the window on server and
         * be dropped. also applies to gss_cli_ctx_seal().
         */
        if (atomic_read(&gctx->gc_seq) - seq > GSS_SEQ_REPACK_THRESHOLD) {
                int behind = atomic_read(&gctx->gc_seq) - seq;

                gss_stat_oos_record_cli(behind);
                CWARN("req %p: %u behind, retry signing\n", req, behind);
                goto redo;
        }

        /* rc from gss_sign_msg() is the final on-wire request length */
        req->rq_reqdata_len = rc;
        RETURN(0);
}
642
/*
 * Handle a PTLRPC_GSS_PROC_ERR notification from the server: decide
 * whether to fail the request outright or transparently expire/replace
 * the context and resend.  Returns 0 (resend arranged) or -errno.
 */
static
int gss_cli_ctx_handle_err_notify(struct ptlrpc_cli_ctx *ctx,
                                  struct ptlrpc_request *req,
                                  struct gss_header *ghdr)
{
        struct gss_err_header *errhdr;
        int rc;

        LASSERT(ghdr->gh_proc == PTLRPC_GSS_PROC_ERR);

        errhdr = (struct gss_err_header *) ghdr;

        /* server return NO_CONTEXT might be caused by context expire
         * or server reboot/failover. we refresh the cred transparently
         * to upper layer.
         * In some cases, our gss handle is possible to be incidentally
         * identical to another handle since the handle itself is not
         * fully random. In krb5 case, the GSS_S_BAD_SIG will be
         * returned, maybe other gss error for other mechanism.
         *
         * if we add new mechanism, make sure the correct error are
         * returned in this case.
         *
         * but in any cases, don't resend ctx destroying rpc, don't resend
         * reverse rpc.
         */
        if (req->rq_ctx_fini) {
                CWARN("server respond error (%08x/%08x) for ctx fini\n",
                      errhdr->gh_major, errhdr->gh_minor);
                rc = -EINVAL;
        } else if (ctx->cc_sec->ps_flags & PTLRPC_SEC_FL_REVERSE) {
                CWARN("reverse server respond error (%08x/%08x)\n",
                      errhdr->gh_major, errhdr->gh_minor);
                rc = -EINVAL;
        } else if (errhdr->gh_major == GSS_S_NO_CONTEXT ||
                   errhdr->gh_major == GSS_S_BAD_SIG) {
                CWARN("req x"LPU64"/t"LPU64": server respond ctx %p(%u->%s) "
                      "%s, server might lost the context.\n",
                      req->rq_xid, req->rq_transno, ctx, ctx->cc_vcred.vc_uid,
                      sec2target_str(ctx->cc_sec),
                      errhdr->gh_major == GSS_S_NO_CONTEXT ?
                      "NO_CONTEXT" : "BAD_SIG");

                sptlrpc_cli_ctx_expire(ctx);
                /*
                 * we need replace the ctx right here, otherwise during
                 * resent we'll hit the logic in sptlrpc_req_refresh_ctx()
                 * which keep the ctx with RESEND flag, thus we'll never
                 * get rid of this ctx.
                 */
                rc = sptlrpc_req_replace_dead_ctx(req);
                if (rc == 0)
                        req->rq_resend = 1;
        } else {
                /* any other gss error is fatal for this request */
                CERROR("req %p: server report gss error (%x/%x)\n",
                        req, errhdr->gh_major, errhdr->gh_minor);
                rc = -EACCES;
        }

        return rc;
}
704
/*
 * Verify an incoming reply in @req->rq_repbuf: check the gss header
 * against the request, verify the MIC, and locate the embedded reply
 * message.  Returns 0 on success or -errno.
 */
int gss_cli_ctx_verify(struct ptlrpc_cli_ctx *ctx,
                       struct ptlrpc_request *req)
{
        struct gss_cli_ctx     *gctx;
        struct gss_header      *ghdr, *reqhdr;
        struct lustre_msg      *msg = req->rq_repbuf;
        __u32                   major;
        int                     rc = 0;
        ENTRY;

        LASSERT(req->rq_cli_ctx == ctx);
        LASSERT(msg);

        req->rq_repdata_len = req->rq_nob_received;
        gctx = container_of(ctx, struct gss_cli_ctx, gc_base);

        /* special case for context negotiation, rq_repmsg/rq_replen actually
         * are not used currently.
         */
        if (req->rq_ctx_init) {
                req->rq_repmsg = lustre_msg_buf(msg, 1, 0);
                req->rq_replen = msg->lm_buflens[1];
                RETURN(0);
        }

        /* expect: gss header, reply msg, [bulk checksum,] MIC */
        if (msg->lm_bufcount < 3 || msg->lm_bufcount > 4) {
                CERROR("unexpected bufcount %u\n", msg->lm_bufcount);
                RETURN(-EPROTO);
        }

        ghdr = gss_swab_header(msg, 0);
        if (ghdr == NULL) {
                CERROR("can't decode gss header\n");
                RETURN(-EPROTO);
        }

        /* sanity checks
         * NOTE(review): reqhdr is read from segment 0 of the *reply*
         * buffer (msg == rq_repbuf), i.e. the same segment as ghdr, yet
         * it is compared against ghdr below as if it were the request's
         * header — looks like it should come from req->rq_reqbuf;
         * verify against upstream. */
        reqhdr = lustre_msg_buf(msg, 0, sizeof(*reqhdr));
        LASSERT(reqhdr);

        if (ghdr->gh_version != reqhdr->gh_version) {
                CERROR("gss version %u mismatch, expect %u\n",
                       ghdr->gh_version, reqhdr->gh_version);
                RETURN(-EPROTO);
        }

        switch (ghdr->gh_proc) {
        case PTLRPC_GSS_PROC_DATA:
                if (ghdr->gh_seq != reqhdr->gh_seq) {
                        CERROR("seqnum %u mismatch, expect %u\n",
                               ghdr->gh_seq, reqhdr->gh_seq);
                        RETURN(-EPROTO);
                }

                if (ghdr->gh_svc != PTLRPC_GSS_SVC_INTEGRITY) {
                        CERROR("unexpected svc %d\n", ghdr->gh_svc);
                        RETURN(-EPROTO);
                }

                /* NOTE(review): gss_swab_header() above already swabbed
                 * segment 0 via lustre_swab_buf(); swabbing again here
                 * would swap it back to wire order — confirm the intended
                 * semantics before touching either call. */
                if (lustre_msg_swabbed(msg))
                        gss_header_swabber(ghdr);

                major = gss_verify_msg(msg, gctx->gc_mechctx);
                if (major != GSS_S_COMPLETE)
                        RETURN(-EPERM);

                req->rq_repmsg = lustre_msg_buf(msg, 1, 0);
                req->rq_replen = msg->lm_buflens[1];

                if (SEC_FLAVOR_HAS_BULK(req->rq_sec_flavor)) {
                        if (msg->lm_bufcount < 4) {
                                CERROR("Invalid reply bufcount %u\n",
                                       msg->lm_bufcount);
                                RETURN(-EPROTO);
                        }

                        /* bulk checksum is the second last segment */
                        rc = bulk_sec_desc_unpack(msg, msg->lm_bufcount - 2);
                }
                break;
        case PTLRPC_GSS_PROC_ERR:
                rc = gss_cli_ctx_handle_err_notify(ctx, req, ghdr);
                break;
        default:
                CERROR("unknown gss proc %d\n", ghdr->gh_proc);
                rc = -EPROTO;
        }

        RETURN(rc);
}
795
/*
 * Seal (encrypt) an outgoing request for the PRIVACY service level.
 *
 * The already-packed clear request in req->rq_clrbuf is wrapped into
 * req->rq_reqbuf as a 3-segment wire message:
 *   0: gss header
 *   1: MIC (signature) of the gss header
 *   2: cipher text of the whole clear buffer
 *
 * \param ctx  client security context; must equal req->rq_cli_ctx
 * \param req  request with rq_clrbuf packed and rq_reqlen set
 * \retval 0 on success; -ENOMEM on allocation failure; -EPERM on any
 *         gss signing/wrapping error (wire buffer freed unless pooled)
 */
int gss_cli_ctx_seal(struct ptlrpc_cli_ctx *ctx,
                     struct ptlrpc_request *req)
{
        struct gss_cli_ctx      *gctx;
        rawobj_t                 msgobj, cipher_obj, micobj;
        struct gss_header       *ghdr;
        int                      buflens[3], wiresize, rc;
        __u32                    major;
        ENTRY;

        LASSERT(req->rq_clrbuf);
        LASSERT(req->rq_cli_ctx == ctx);
        LASSERT(req->rq_reqlen);

        gctx = container_of(ctx, struct gss_cli_ctx, gc_base);

        /* close clear data length */
        req->rq_clrdata_len = lustre_msg_size_v2(req->rq_clrbuf->lm_bufcount,
                                                 req->rq_clrbuf->lm_buflens);

        /* calculate wire data length */
        buflens[0] = PTLRPC_GSS_HEADER_SIZE;
        buflens[1] = gss_cli_payload(&gctx->gc_base, buflens[0], 0);
        buflens[2] = gss_cli_payload(&gctx->gc_base, req->rq_clrdata_len, 1);
        wiresize = lustre_msg_size_v2(3, buflens);

        /* allocate wire buffer */
        if (req->rq_pool) {
                /* pre-allocated */
                LASSERT(req->rq_reqbuf);
                LASSERT(req->rq_reqbuf != req->rq_clrbuf);
                LASSERT(req->rq_reqbuf_len >= wiresize);
        } else {
                OBD_ALLOC(req->rq_reqbuf, wiresize);
                if (!req->rq_reqbuf)
                        RETURN(-ENOMEM);
                req->rq_reqbuf_len = wiresize;
        }

        lustre_init_msg_v2(req->rq_reqbuf, 3, buflens, NULL);
        req->rq_reqbuf->lm_secflvr = req->rq_sec_flavor;

        /* gss header */
        ghdr = lustre_msg_buf(req->rq_reqbuf, 0, 0);
        ghdr->gh_version = PTLRPC_GSS_VERSION;
        ghdr->gh_flags = 0;
        ghdr->gh_proc = gctx->gc_proc;
        /* sequence number may be re-assigned below if we fall too far
         * behind concurrent requests while sealing */
        ghdr->gh_seq = atomic_inc_return(&gctx->gc_seq);
        ghdr->gh_svc = PTLRPC_GSS_SVC_PRIVACY;
        ghdr->gh_handle.len = gctx->gc_handle.len;
        memcpy(ghdr->gh_handle.data, gctx->gc_handle.data, gctx->gc_handle.len);

redo:
        /* header signature */
        msgobj.len = req->rq_reqbuf->lm_buflens[0];
        msgobj.data = lustre_msg_buf(req->rq_reqbuf, 0, 0);
        micobj.len = req->rq_reqbuf->lm_buflens[1];
        micobj.data = lustre_msg_buf(req->rq_reqbuf, 1, 0);

        major = lgss_get_mic(gctx->gc_mechctx, 1, &msgobj, &micobj);
        if (major != GSS_S_COMPLETE) {
                CERROR("priv: sign message error: %08x\n", major);
                GOTO(err_free, rc = -EPERM);
        }
        /* perhaps shrink msg has potential problem in re-packing???
         * ship a little bit more data is fine.
        lustre_shrink_msg(req->rq_reqbuf, 1, micobj.len, 0);
         */

        /* clear text */
        msgobj.len = req->rq_clrdata_len;
        msgobj.data = (__u8 *) req->rq_clrbuf;

        /* cipher text */
        cipher_obj.len = req->rq_reqbuf->lm_buflens[2];
        cipher_obj.data = lustre_msg_buf(req->rq_reqbuf, 2, 0);

        major = lgss_wrap(gctx->gc_mechctx, &msgobj, req->rq_clrbuf_len,
                          &cipher_obj);
        if (major != GSS_S_COMPLETE) {
                CERROR("priv: wrap message error: %08x\n", major);
                GOTO(err_free, rc = -EPERM);
        }
        LASSERT(cipher_obj.len <= buflens[2]);

        /* see explain in gss_cli_ctx_sign() */
        /* if other requests advanced the context sequence number past
         * the repack threshold while we were signing/encrypting, take a
         * fresh sequence number and redo the whole seal so the server
         * does not reject us as out-of-sequence */
        if (atomic_read(&gctx->gc_seq) - ghdr->gh_seq >
            GSS_SEQ_REPACK_THRESHOLD) {
                int behind = atomic_read(&gctx->gc_seq) - ghdr->gh_seq;

                gss_stat_oos_record_cli(behind);
                CWARN("req %p: %u behind, retry sealing\n", req, behind);

                ghdr->gh_seq = atomic_inc_return(&gctx->gc_seq);
                goto redo;
        }

        /* now set the final wire data length */
        req->rq_reqdata_len = lustre_shrink_msg(req->rq_reqbuf, 2,
                                                cipher_obj.len, 0);

        RETURN(0);

err_free:
        /* pooled buffers are owned by the pool; only free private ones */
        if (!req->rq_pool) {
                OBD_FREE(req->rq_reqbuf, req->rq_reqbuf_len);
                req->rq_reqbuf = NULL;
                req->rq_reqbuf_len = 0;
        }
        RETURN(rc);
}
907
/*
 * Unseal (decrypt and verify) an incoming reply for the PRIVACY service.
 *
 * Decrypts req->rq_repbuf in place, unpacks the recovered lustre message,
 * and points rq_repmsg at its first segment. Also verifies the bulk
 * security descriptor (last segment) when the flavor carries bulk data.
 *
 * \param ctx  client security context; must equal req->rq_cli_ctx
 * \param req  request whose rq_repbuf holds the sealed reply
 * \retval 0 on success; -EPROTO for malformed data; -EPERM for gss
 *         verification failure or unexpected gss proc
 */
int gss_cli_ctx_unseal(struct ptlrpc_cli_ctx *ctx,
                       struct ptlrpc_request *req)
{
        struct gss_cli_ctx      *gctx;
        struct gss_header       *ghdr;
        int                      msglen, rc;
        __u32                    major;
        ENTRY;

        LASSERT(req->rq_repbuf);
        LASSERT(req->rq_cli_ctx == ctx);

        gctx = container_of(ctx, struct gss_cli_ctx, gc_base);

        ghdr = gss_swab_header(req->rq_repbuf, 0);
        if (ghdr == NULL) {
                CERROR("can't decode gss header\n");
                RETURN(-EPROTO);
        }

        /* sanity checks */
        if (ghdr->gh_version != PTLRPC_GSS_VERSION) {
                CERROR("gss version %u mismatch, expect %u\n",
                       ghdr->gh_version, PTLRPC_GSS_VERSION);
                RETURN(-EPROTO);
        }

        switch (ghdr->gh_proc) {
        case PTLRPC_GSS_PROC_DATA:
                if (lustre_msg_swabbed(req->rq_repbuf))
                        gss_header_swabber(ghdr);

                /* decrypt in place; msglen receives the clear-text size */
                major = gss_unseal_msg(gctx->gc_mechctx, req->rq_repbuf,
                                       &msglen, req->rq_repbuf_len);
                if (major != GSS_S_COMPLETE) {
                        rc = -EPERM;
                        break;
                }

                if (lustre_unpack_msg(req->rq_repbuf, msglen)) {
                        CERROR("Failed to unpack after decryption\n");
                        RETURN(-EPROTO);
                }
                req->rq_repdata_len = msglen;

                if (req->rq_repbuf->lm_bufcount < 1) {
                        CERROR("Invalid reply buffer: empty\n");
                        RETURN(-EPROTO);
                }

                if (SEC_FLAVOR_HAS_BULK(req->rq_sec_flavor)) {
                        if (req->rq_repbuf->lm_bufcount < 2) {
                                CERROR("Too few request buffer segments %d\n",
                                       req->rq_repbuf->lm_bufcount);
                                RETURN(-EPROTO);
                        }

                        /* bulk checksum is the last segment */
                        if (bulk_sec_desc_unpack(req->rq_repbuf,
                                                 req->rq_repbuf->lm_bufcount-1))
                                RETURN(-EPROTO);
                }

                /* embedded reply message is segment 0 of the clear text */
                req->rq_repmsg = lustre_msg_buf(req->rq_repbuf, 0, 0);
                req->rq_replen = req->rq_repbuf->lm_buflens[0];

                rc = 0;
                break;
        case PTLRPC_GSS_PROC_ERR:
                rc = gss_cli_ctx_handle_err_notify(ctx, req, ghdr);
                break;
        default:
                CERROR("unexpected proc %d\n", ghdr->gh_proc);
                rc = -EPERM;
        }

        RETURN(rc);
}
986
987 /*********************************************
988  * reverse context installation              *
989  *********************************************/
990
/*
 * Install a reverse service context on this import.
 * Thin wrapper over gss_svc_upcall_install_rvs_ctx(); kept as a separate
 * helper so callers in this file do not depend on the upcall API directly.
 */
static inline
int gss_install_rvs_svc_ctx(struct obd_import *imp,
                            struct gss_sec *gsec,
                            struct gss_cli_ctx *gctx)
{
        return gss_svc_upcall_install_rvs_ctx(imp, gsec, gctx);
}
998
999 /*********************************************
1000  * GSS security APIs                         *
1001  *********************************************/
1002 int gss_sec_create_common(struct gss_sec *gsec,
1003                           struct ptlrpc_sec_policy *policy,
1004                           struct obd_import *imp,
1005                           struct ptlrpc_svc_ctx *ctx,
1006                           __u32 flavor,
1007                           unsigned long flags)
1008 {
1009         struct ptlrpc_sec   *sec;
1010
1011         LASSERT(imp);
1012         LASSERT(SEC_FLAVOR_POLICY(flavor) == SPTLRPC_POLICY_GSS);
1013
1014         gsec->gs_mech = lgss_subflavor_to_mech(SEC_FLAVOR_SUB(flavor));
1015         if (!gsec->gs_mech) {
1016                 CERROR("gss backend 0x%x not found\n", SEC_FLAVOR_SUB(flavor));
1017                 return -EOPNOTSUPP;
1018         }
1019
1020         spin_lock_init(&gsec->gs_lock);
1021         gsec->gs_rvs_hdl = 0ULL;
1022
1023         /* initialize upper ptlrpc_sec */
1024         sec = &gsec->gs_base;
1025         sec->ps_policy = policy;
1026         sec->ps_flavor = flavor;
1027         sec->ps_flags = flags;
1028         sec->ps_import = class_import_get(imp);
1029         sec->ps_lock = SPIN_LOCK_UNLOCKED;
1030         atomic_set(&sec->ps_busy, 0);
1031         INIT_LIST_HEAD(&sec->ps_gc_list);
1032
1033         if (!ctx) {
1034                 sec->ps_gc_interval = GSS_GC_INTERVAL;
1035                 sec->ps_gc_next = cfs_time_current_sec() + sec->ps_gc_interval;
1036         } else {
1037                 LASSERT(sec->ps_flags & PTLRPC_SEC_FL_REVERSE);
1038
1039                 /* never do gc on reverse sec */
1040                 sec->ps_gc_interval = 0;
1041                 sec->ps_gc_next = 0;
1042         }
1043
1044         if (SEC_FLAVOR_SVC(flavor) == SPTLRPC_SVC_PRIV &&
1045             flags & PTLRPC_SEC_FL_BULK)
1046                 sptlrpc_enc_pool_add_user();
1047
1048         CWARN("create %s%s@%p\n", (ctx ? "reverse " : ""),
1049               policy->sp_name, gsec);
1050         return 0;
1051 }
1052
1053 void gss_sec_destroy_common(struct gss_sec *gsec)
1054 {
1055         struct ptlrpc_sec      *sec = &gsec->gs_base;
1056         ENTRY;
1057
1058         LASSERT(sec->ps_import);
1059         LASSERT(atomic_read(&sec->ps_refcount) == 0);
1060         LASSERT(atomic_read(&sec->ps_busy) == 0);
1061
1062         if (gsec->gs_mech) {
1063                 lgss_mech_put(gsec->gs_mech);
1064                 gsec->gs_mech = NULL;
1065         }
1066
1067         class_import_put(sec->ps_import);
1068
1069         if (SEC_FLAVOR_SVC(sec->ps_flavor) == SPTLRPC_SVC_PRIV &&
1070             sec->ps_flags & PTLRPC_SEC_FL_BULK)
1071                 sptlrpc_enc_pool_del_user();
1072
1073         EXIT;
1074 }
1075
1076 int gss_cli_ctx_init_common(struct ptlrpc_sec *sec,
1077                             struct ptlrpc_cli_ctx *ctx,
1078                             struct ptlrpc_ctx_ops *ctxops,
1079                             struct vfs_cred *vcred)
1080 {
1081         struct gss_cli_ctx    *gctx = ctx2gctx(ctx);
1082
1083         gctx->gc_win = 0;
1084         atomic_set(&gctx->gc_seq, 0);
1085
1086         INIT_HLIST_NODE(&ctx->cc_hash);
1087         atomic_set(&ctx->cc_refcount, 0);
1088         ctx->cc_sec = sec;
1089         ctx->cc_ops = ctxops;
1090         ctx->cc_expire = 0;
1091         ctx->cc_flags = PTLRPC_CTX_NEW;
1092         ctx->cc_vcred = *vcred;
1093         spin_lock_init(&ctx->cc_lock);
1094         INIT_LIST_HEAD(&ctx->cc_req_list);
1095
1096         /* take a ref on belonging sec */
1097         atomic_inc(&sec->ps_busy);
1098
1099         CWARN("%s@%p: create ctx %p(%u->%s)\n",
1100               sec->ps_policy->sp_name, ctx->cc_sec,
1101               ctx, ctx->cc_vcred.vc_uid, sec2target_str(ctx->cc_sec));
1102         return 0;
1103 }
1104
1105 /*
1106  * return 1 if the busy count of the sec dropped to zero, then usually caller
1107  * should destroy the sec too; otherwise return 0.
1108  */
int gss_cli_ctx_fini_common(struct ptlrpc_sec *sec,
                            struct ptlrpc_cli_ctx *ctx)
{
        struct gss_cli_ctx *gctx = ctx2gctx(ctx);

        LASSERT(ctx->cc_sec == sec);
        LASSERT(atomic_read(&ctx->cc_refcount) == 0);
        LASSERT(atomic_read(&sec->ps_busy) > 0);

        /* tear down the gss mech context, notifying the peer first */
        if (gctx->gc_mechctx) {
                gss_do_ctx_fini_rpc(gctx);
                gss_cli_ctx_finalize(gctx);
        }

        /* log before dropping the busy count: the sec may be destroyed
         * by the caller as soon as we return 1 */
        CWARN("%s@%p: destroy ctx %p(%u->%s)\n",
              sec->ps_policy->sp_name, ctx->cc_sec,
              ctx, ctx->cc_vcred.vc_uid, sec2target_str(ctx->cc_sec));

        /* drop the busy ref taken in gss_cli_ctx_init_common() */
        if (atomic_dec_and_test(&sec->ps_busy)) {
                LASSERT(atomic_read(&sec->ps_refcount) == 0);
                return 1;
        }

        return 0;
}
1134
/*
 * Allocate and lay out the request buffer for the NONE/AUTH (integrity)
 * service levels. The wire message holds up to 5 segments; the signature
 * is always the last one and its size is computed over all the preceding
 * text. For context-init requests a fixed maximum signature size is
 * reserved instead, since no established context exists yet.
 *
 * \retval 0 on success, -ENOMEM on allocation failure
 */
static
int gss_alloc_reqbuf_auth(struct ptlrpc_sec *sec,
                          struct ptlrpc_request *req,
                          int msgsize)
{
        struct sec_flavor_config *conf;
        int bufsize, txtsize;
        int buflens[5], bufcnt = 2;
        ENTRY;

        /*
         * - gss header
         * - lustre message
         * - user descriptor
         * - bulk sec descriptor
         * - signature
         */
        buflens[0] = PTLRPC_GSS_HEADER_SIZE;
        buflens[1] = msgsize;
        txtsize = buflens[0] + buflens[1];

        if (SEC_FLAVOR_HAS_USER(req->rq_sec_flavor)) {
                buflens[bufcnt] = sptlrpc_current_user_desc_size();
                txtsize += buflens[bufcnt];
                bufcnt++;
        }

        if (SEC_FLAVOR_HAS_BULK(req->rq_sec_flavor)) {
                conf = &req->rq_import->imp_obd->u.cli.cl_sec_conf;
                buflens[bufcnt] = bulk_sec_desc_size(conf->sfc_bulk_csum, 1,
                                                     req->rq_bulk_read);
                txtsize += buflens[bufcnt];
                bufcnt++;
        }

        /* last segment: signature over all preceding text, or a fixed
         * maximum for context-init requests */
        buflens[bufcnt++] = req->rq_ctx_init ? GSS_CTX_INIT_MAX_LEN :
                            gss_cli_payload(req->rq_cli_ctx, txtsize, 0);

        bufsize = lustre_msg_size_v2(bufcnt, buflens);

        if (!req->rq_reqbuf) {
                bufsize = size_roundup_power2(bufsize);

                OBD_ALLOC(req->rq_reqbuf, bufsize);
                if (!req->rq_reqbuf)
                        RETURN(-ENOMEM);

                req->rq_reqbuf_len = bufsize;
        } else {
                /* pooled buffer: must already be large enough */
                LASSERT(req->rq_pool);
                LASSERT(req->rq_reqbuf_len >= bufsize);
                memset(req->rq_reqbuf, 0, bufsize);
        }

        lustre_init_msg_v2(req->rq_reqbuf, bufcnt, buflens, NULL);
        req->rq_reqbuf->lm_secflvr = req->rq_sec_flavor;

        /* embedded lustre message lives at segment 1 */
        req->rq_reqmsg = lustre_msg_buf(req->rq_reqbuf, 1, msgsize);
        LASSERT(req->rq_reqmsg);

        /* pack user desc here, later we might leave current user's process */
        if (SEC_FLAVOR_HAS_USER(req->rq_sec_flavor))
                sptlrpc_pack_user_desc(req->rq_reqbuf, 2);

        RETURN(0);
}
1201
/*
 * Allocate buffers for the PRIV (privacy) service level. Two buffers are
 * needed: an inner clear buffer (rq_clrbuf) holding the plaintext request,
 * and an outer wire buffer (rq_reqbuf) that will carry the gss header, its
 * signature, and the cipher text. For pooled requests, both may share the
 * single pre-allocated buffer when it is large enough.
 *
 * \retval 0 on success, -ENOMEM on allocation failure
 */
static
int gss_alloc_reqbuf_priv(struct ptlrpc_sec *sec,
                          struct ptlrpc_request *req,
                          int msgsize)
{
        struct sec_flavor_config *conf;
        int ibuflens[3], ibufcnt;
        int buflens[3];
        int clearsize, wiresize;
        ENTRY;

        LASSERT(req->rq_clrbuf == NULL);
        LASSERT(req->rq_clrbuf_len == 0);

        /* Inner (clear) buffers
         *  - lustre message
         *  - user descriptor
         *  - bulk checksum
         */
        ibufcnt = 1;
        ibuflens[0] = msgsize;

        if (SEC_FLAVOR_HAS_USER(req->rq_sec_flavor))
                ibuflens[ibufcnt++] = sptlrpc_current_user_desc_size();
        if (SEC_FLAVOR_HAS_BULK(req->rq_sec_flavor)) {
                conf = &req->rq_import->imp_obd->u.cli.cl_sec_conf;
                ibuflens[ibufcnt++] = bulk_sec_desc_size(conf->sfc_bulk_csum, 1,
                                                         req->rq_bulk_read);
        }
        clearsize = lustre_msg_size_v2(ibufcnt, ibuflens);
        /* to allow append padding during encryption */
        clearsize += GSS_MAX_CIPHER_BLOCK;

        /* Wrapper (wire) buffers
         *  - gss header
         *  - signature of gss header
         *  - cipher text
         */
        buflens[0] = PTLRPC_GSS_HEADER_SIZE;
        buflens[1] = gss_cli_payload(req->rq_cli_ctx, buflens[0], 0);
        buflens[2] = gss_cli_payload(req->rq_cli_ctx, clearsize, 1);
        wiresize = lustre_msg_size_v2(3, buflens);

        if (req->rq_pool) {
                /* rq_reqbuf is preallocated */
                LASSERT(req->rq_reqbuf);
                LASSERT(req->rq_reqbuf_len >= wiresize);

                memset(req->rq_reqbuf, 0, req->rq_reqbuf_len);

                /* if the pre-allocated buffer is big enough, we just pack
                 * both clear buf & request buf in it, to avoid more alloc.
                 */
                if (clearsize + wiresize <= req->rq_reqbuf_len) {
                        req->rq_clrbuf =
                                (void *) (((char *) req->rq_reqbuf) + wiresize);
                } else {
                        CWARN("pre-allocated buf size %d is not enough for "
                              "both clear (%d) and cipher (%d) text, proceed "
                              "with extra allocation\n", req->rq_reqbuf_len,
                              clearsize, wiresize);
                }
        }

        /* not pooled, or the pooled buffer was too small: allocate a
         * separate clear buffer */
        if (!req->rq_clrbuf) {
                clearsize = size_roundup_power2(clearsize);

                OBD_ALLOC(req->rq_clrbuf, clearsize);
                if (!req->rq_clrbuf)
                        RETURN(-ENOMEM);
        }
        req->rq_clrbuf_len = clearsize;

        lustre_init_msg_v2(req->rq_clrbuf, ibufcnt, ibuflens, NULL);
        /* embedded message is segment 0 of the clear buffer */
        req->rq_reqmsg = lustre_msg_buf(req->rq_clrbuf, 0, msgsize);

        if (SEC_FLAVOR_HAS_USER(req->rq_sec_flavor))
                sptlrpc_pack_user_desc(req->rq_clrbuf, 1);

        RETURN(0);
}
1283
1284 /*
1285  * NOTE: any change of request buffer allocation should also consider
1286  * changing enlarge_reqbuf() series functions.
1287  */
1288 int gss_alloc_reqbuf(struct ptlrpc_sec *sec,
1289                      struct ptlrpc_request *req,
1290                      int msgsize)
1291 {
1292         LASSERT(!SEC_FLAVOR_HAS_BULK(req->rq_sec_flavor) ||
1293                 (req->rq_bulk_read || req->rq_bulk_write));
1294
1295         switch (SEC_FLAVOR_SVC(req->rq_sec_flavor)) {
1296         case SPTLRPC_SVC_NONE:
1297         case SPTLRPC_SVC_AUTH:
1298                 return gss_alloc_reqbuf_auth(sec, req, msgsize);
1299         case SPTLRPC_SVC_PRIV:
1300                 return gss_alloc_reqbuf_priv(sec, req, msgsize);
1301         default:
1302                 LBUG();
1303         }
1304         return 0;
1305 }
1306
/*
 * Free the request buffers allocated by gss_alloc_reqbuf(). Handles the
 * PRIV case where the clear buffer may be embedded inside the pooled
 * wire buffer (and must then NOT be freed separately).
 */
void gss_free_reqbuf(struct ptlrpc_sec *sec,
                     struct ptlrpc_request *req)
{
        int privacy;
        ENTRY;

        LASSERT(!req->rq_pool || req->rq_reqbuf);
        privacy = SEC_FLAVOR_SVC(req->rq_sec_flavor) == SPTLRPC_SVC_PRIV;

        if (!req->rq_clrbuf)
                goto release_reqbuf;

        /* release clear buffer */
        LASSERT(privacy);
        LASSERT(req->rq_clrbuf_len);

        /* if the clear buffer lives inside the pooled wire buffer, it
         * is freed together with it below */
        if (req->rq_pool &&
            req->rq_clrbuf >= req->rq_reqbuf &&
            (char *) req->rq_clrbuf <
            (char *) req->rq_reqbuf + req->rq_reqbuf_len)
                goto release_reqbuf;

        OBD_FREE(req->rq_clrbuf, req->rq_clrbuf_len);
        req->rq_clrbuf = NULL;
        req->rq_clrbuf_len = 0;

release_reqbuf:
        /* pooled wire buffers are returned to the pool elsewhere */
        if (!req->rq_pool && req->rq_reqbuf) {
                OBD_FREE(req->rq_reqbuf, req->rq_reqbuf_len);
                req->rq_reqbuf = NULL;
                req->rq_reqbuf_len = 0;
        }

        EXIT;
}
1342
/*
 * Allocate the reply buffer, sized for the expected sealed (PRIV) or
 * signed (AUTH/NONE) reply layout, mirroring gss_alloc_reqbuf().
 *
 * \param msgsize  expected size of the embedded reply message
 * \retval 0 on success, -ENOMEM on allocation failure
 */
int gss_alloc_repbuf(struct ptlrpc_sec *sec,
                     struct ptlrpc_request *req,
                     int msgsize)
{
        struct sec_flavor_config *conf;
        int privacy = (SEC_FLAVOR_SVC(req->rq_sec_flavor) == SPTLRPC_SVC_PRIV);
        int bufsize, txtsize;
        int buflens[4], bufcnt;
        ENTRY;

        LASSERT(!SEC_FLAVOR_HAS_BULK(req->rq_sec_flavor) ||
                (req->rq_bulk_read || req->rq_bulk_write));

        if (privacy) {
                /* first compute the clear-text size the server will
                 * encrypt, then size the 3-segment cipher envelope */
                bufcnt = 1;
                buflens[0] = msgsize;
                if (SEC_FLAVOR_HAS_BULK(req->rq_sec_flavor)) {
                        conf = &req->rq_import->imp_obd->u.cli.cl_sec_conf;
                        buflens[bufcnt++] = bulk_sec_desc_size(
                                                        conf->sfc_bulk_csum, 0,
                                                        req->rq_bulk_read);
                }
                txtsize = lustre_msg_size_v2(bufcnt, buflens);
                txtsize += GSS_MAX_CIPHER_BLOCK;

                bufcnt = 3;
                buflens[0] = PTLRPC_GSS_HEADER_SIZE;
                buflens[1] = gss_cli_payload(req->rq_cli_ctx, buflens[0], 0);
                buflens[2] = gss_cli_payload(req->rq_cli_ctx, txtsize, 1);
        } else {
                bufcnt = 2;
                buflens[0] = PTLRPC_GSS_HEADER_SIZE;
                buflens[1] = msgsize;
                txtsize = buflens[0] + buflens[1];

                if (SEC_FLAVOR_HAS_BULK(req->rq_sec_flavor)) {
                        conf = &req->rq_import->imp_obd->u.cli.cl_sec_conf;
                        buflens[bufcnt] = bulk_sec_desc_size(
                                                        conf->sfc_bulk_csum, 0,
                                                        req->rq_bulk_read);
                        txtsize += buflens[bufcnt];
                        bufcnt++;
                }
                /* last segment: server signature over preceding text */
                buflens[bufcnt++] = req->rq_ctx_init ? GSS_CTX_INIT_MAX_LEN :
                                   gss_cli_payload(req->rq_cli_ctx, txtsize, 0);
        }

        bufsize = lustre_msg_size_v2(bufcnt, buflens);
        bufsize = size_roundup_power2(bufsize);

        OBD_ALLOC(req->rq_repbuf, bufsize);
        if (!req->rq_repbuf)
                return -ENOMEM;

        req->rq_repbuf_len = bufsize;
        return 0;
}
1400
/*
 * Free the reply buffer allocated by gss_alloc_repbuf().
 * NOTE(review): frees rq_repbuf unconditionally — assumes callers only
 * invoke this when a reply buffer was allocated; confirm at call sites.
 */
void gss_free_repbuf(struct ptlrpc_sec *sec,
                     struct ptlrpc_request *req)
{
        OBD_FREE(req->rq_repbuf, req->rq_repbuf_len);
        req->rq_repbuf = NULL;
        req->rq_repbuf_len = 0;
}
1408
1409 static int get_enlarged_msgsize(struct lustre_msg *msg,
1410                                 int segment, int newsize)
1411 {
1412         int save, newmsg_size;
1413
1414         LASSERT(newsize >= msg->lm_buflens[segment]);
1415
1416         save = msg->lm_buflens[segment];
1417         msg->lm_buflens[segment] = newsize;
1418         newmsg_size = lustre_msg_size_v2(msg->lm_bufcount, msg->lm_buflens);
1419         msg->lm_buflens[segment] = save;
1420
1421         return newmsg_size;
1422 }
1423
1424 static int get_enlarged_msgsize2(struct lustre_msg *msg,
1425                                  int segment1, int newsize1,
1426                                  int segment2, int newsize2)
1427 {
1428         int save1, save2, newmsg_size;
1429
1430         LASSERT(newsize1 >= msg->lm_buflens[segment1]);
1431         LASSERT(newsize2 >= msg->lm_buflens[segment2]);
1432
1433         save1 = msg->lm_buflens[segment1];
1434         save2 = msg->lm_buflens[segment2];
1435         msg->lm_buflens[segment1] = newsize1;
1436         msg->lm_buflens[segment2] = newsize2;
1437         newmsg_size = lustre_msg_size_v2(msg->lm_bufcount, msg->lm_buflens);
1438         msg->lm_buflens[segment1] = save1;
1439         msg->lm_buflens[segment2] = save2;
1440
1441         return newmsg_size;
1442 }
1443
1444 static inline int msg_last_seglen(struct lustre_msg *msg)
1445 {
1446         return msg->lm_buflens[msg->lm_bufcount - 1];
1447 }
1448
/*
 * Grow segment \a segment of the embedded request message to \a newsize
 * for the NONE/AUTH service levels, reallocating the wire buffer if the
 * current one is too small. The signature segment is enlarged in step,
 * since it is computed over all preceding text.
 *
 * \retval 0 on success, -ENOMEM on allocation failure
 */
static
int gss_enlarge_reqbuf_auth(struct ptlrpc_sec *sec,
                            struct ptlrpc_request *req,
                            int segment, int newsize)
{
        struct lustre_msg      *newbuf;
        int                     txtsize, sigsize, i;
        int                     newmsg_size, newbuf_size;

        /*
         * embedded msg is at seg 1; signature is at the last seg
         */
        LASSERT(req->rq_reqbuf);
        LASSERT(req->rq_reqbuf_len > req->rq_reqlen);
        LASSERT(req->rq_reqbuf->lm_bufcount >= 2);
        LASSERT(lustre_msg_buf(req->rq_reqbuf, 1, 0) == req->rq_reqmsg);

        /* compute new embedded msg size */
        newmsg_size = get_enlarged_msgsize(req->rq_reqmsg, segment, newsize);
        LASSERT(newmsg_size >= req->rq_reqbuf->lm_buflens[1]);

        /* compute new wrapper msg size */
        for (txtsize = 0, i = 0; i < req->rq_reqbuf->lm_bufcount; i++)
                txtsize += req->rq_reqbuf->lm_buflens[i];
        txtsize += newmsg_size - req->rq_reqbuf->lm_buflens[1];

        sigsize = gss_cli_payload(req->rq_cli_ctx, txtsize, 0);
        LASSERT(sigsize >= msg_last_seglen(req->rq_reqbuf));
        newbuf_size = get_enlarged_msgsize2(req->rq_reqbuf, 1, newmsg_size,
                                            req->rq_reqbuf->lm_bufcount - 1,
                                            sigsize);

        /* request from pool should always have enough buffer */
        LASSERT(!req->rq_pool || req->rq_reqbuf_len >= newbuf_size);

        if (req->rq_reqbuf_len < newbuf_size) {
                newbuf_size = size_roundup_power2(newbuf_size);

                OBD_ALLOC(newbuf, newbuf_size);
                if (newbuf == NULL)
                        RETURN(-ENOMEM);

                memcpy(newbuf, req->rq_reqbuf, req->rq_reqbuf_len);

                OBD_FREE(req->rq_reqbuf, req->rq_reqbuf_len);
                req->rq_reqbuf = newbuf;
                req->rq_reqbuf_len = newbuf_size;
                /* rq_reqmsg must be re-derived from the new buffer */
                req->rq_reqmsg = lustre_msg_buf(req->rq_reqbuf, 1, 0);
        }

        /* enlarge outermost-first so inner data is moved before its
         * containing segment is resized */
        _sptlrpc_enlarge_msg_inplace(req->rq_reqbuf,
                                     req->rq_reqbuf->lm_bufcount - 1, sigsize);
        _sptlrpc_enlarge_msg_inplace(req->rq_reqbuf, 1, newmsg_size);
        _sptlrpc_enlarge_msg_inplace(req->rq_reqmsg, segment, newsize);

        req->rq_reqlen = newmsg_size;
        RETURN(0);
}
1507
/*
 * Grow segment \a segment of the embedded request message to \a newsize
 * for the PRIV service level. Only the clear buffer needs to grow; the
 * cipher envelope is re-sized at seal time. Handles the pooled case where
 * clear and cipher buffers share one pre-allocated buffer.
 *
 * \retval 0 on success, -ENOMEM on allocation failure
 */
static
int gss_enlarge_reqbuf_priv(struct ptlrpc_sec *sec,
                            struct ptlrpc_request *req,
                            int segment, int newsize)
{
        struct lustre_msg      *newclrbuf;
        int                     newmsg_size, newclrbuf_size, newcipbuf_size;
        int                     buflens[3];

        /*
         * embedded msg is at seg 0 of clear buffer;
         * cipher text is at seg 2 of cipher buffer;
         */
        LASSERT(req->rq_pool ||
                (req->rq_reqbuf == NULL && req->rq_reqbuf_len == 0));
        LASSERT(req->rq_reqbuf == NULL ||
                (req->rq_pool && req->rq_reqbuf->lm_bufcount == 3));
        LASSERT(req->rq_clrbuf);
        LASSERT(req->rq_clrbuf_len > req->rq_reqlen);
        LASSERT(lustre_msg_buf(req->rq_clrbuf, 0, 0) == req->rq_reqmsg);

        /* compute new embedded msg size */
        newmsg_size = get_enlarged_msgsize(req->rq_reqmsg, segment, newsize);

        /* compute new clear buffer size */
        newclrbuf_size = get_enlarged_msgsize(req->rq_clrbuf, 0, newmsg_size);
        newclrbuf_size += GSS_MAX_CIPHER_BLOCK;

        /* compute new cipher buffer size */
        buflens[0] = PTLRPC_GSS_HEADER_SIZE;
        buflens[1] = gss_cli_payload(req->rq_cli_ctx, buflens[0], 0);
        buflens[2] = gss_cli_payload(req->rq_cli_ctx, newclrbuf_size, 1);
        newcipbuf_size = lustre_msg_size_v2(3, buflens);

        /*
         * handle the case that we put both clear buf and cipher buf into
         * pre-allocated single buffer.
         */
        if (unlikely(req->rq_pool) &&
            req->rq_clrbuf >= req->rq_reqbuf &&
            (char *) req->rq_clrbuf <
            (char *) req->rq_reqbuf + req->rq_reqbuf_len) {
                /*
                 * it couldn't be better we still fit into the
                 * pre-allocated buffer.
                 */
                if (newclrbuf_size + newcipbuf_size <= req->rq_reqbuf_len) {
                        void *src, *dst;

                        /* move clear text backward. */
                        src = req->rq_clrbuf;
                        dst = (char *) req->rq_reqbuf + newcipbuf_size;

                        /* regions may overlap: memmove, not memcpy */
                        memmove(dst, src, req->rq_clrbuf_len);

                        req->rq_clrbuf = (struct lustre_msg *) dst;
                        req->rq_clrbuf_len = newclrbuf_size;
                        req->rq_reqmsg = lustre_msg_buf(req->rq_clrbuf, 0, 0);
                } else {
                        /*
                         * sadly we have to split out the clear buffer
                         */
                        LASSERT(req->rq_reqbuf_len >= newcipbuf_size);
                        LASSERT(req->rq_clrbuf_len < newclrbuf_size);
                }
        }

        /* allocate a bigger stand-alone clear buffer if the current one
         * (pooled or not) cannot hold the enlarged message */
        if (req->rq_clrbuf_len < newclrbuf_size) {
                newclrbuf_size = size_roundup_power2(newclrbuf_size);

                OBD_ALLOC(newclrbuf, newclrbuf_size);
                if (newclrbuf == NULL)
                        RETURN(-ENOMEM);

                memcpy(newclrbuf, req->rq_clrbuf, req->rq_clrbuf_len);

                /* free the old clear buffer only if it was NOT embedded
                 * inside the pooled wire buffer */
                if (req->rq_reqbuf == NULL ||
                    req->rq_clrbuf < req->rq_reqbuf ||
                    (char *) req->rq_clrbuf >=
                    (char *) req->rq_reqbuf + req->rq_reqbuf_len) {
                        OBD_FREE(req->rq_clrbuf, req->rq_clrbuf_len);
                }

                req->rq_clrbuf = newclrbuf;
                req->rq_clrbuf_len = newclrbuf_size;
                req->rq_reqmsg = lustre_msg_buf(req->rq_clrbuf, 0, 0);
        }

        _sptlrpc_enlarge_msg_inplace(req->rq_clrbuf, 0, newmsg_size);
        _sptlrpc_enlarge_msg_inplace(req->rq_reqmsg, segment, newsize);
        req->rq_reqlen = newmsg_size;

        RETURN(0);
}
1602
1603 int gss_enlarge_reqbuf(struct ptlrpc_sec *sec,
1604                        struct ptlrpc_request *req,
1605                        int segment, int newsize)
1606 {
1607         LASSERT(!req->rq_ctx_init && !req->rq_ctx_fini);
1608
1609         switch (SEC_FLAVOR_SVC(req->rq_sec_flavor)) {
1610         case SPTLRPC_SVC_AUTH:
1611                 return gss_enlarge_reqbuf_auth(sec, req, segment, newsize);
1612         case SPTLRPC_SVC_PRIV:
1613                 return gss_enlarge_reqbuf_priv(sec, req, segment, newsize);
1614         default:
1615                 LASSERTF(0, "bad flavor %x\n", req->rq_sec_flavor);
1616                 return 0;
1617         }
1618 }
1619
1620 int gss_sec_install_rctx(struct obd_import *imp,
1621                          struct ptlrpc_sec *sec,
1622                          struct ptlrpc_cli_ctx *ctx)
1623 {
1624         struct gss_sec     *gsec;
1625         struct gss_cli_ctx *gctx;
1626         int                 rc;
1627
1628         gsec = container_of(sec, struct gss_sec, gs_base);
1629         gctx = container_of(ctx, struct gss_cli_ctx, gc_base);
1630
1631         rc = gss_install_rvs_svc_ctx(imp, gsec, gctx);
1632         return rc;
1633 }
1634
1635 /********************************************
1636  * server side API                          *
1637  ********************************************/
1638
1639 static inline
1640 int gss_svc_reqctx_is_special(struct gss_svc_reqctx *grctx)
1641 {
1642         LASSERT(grctx);
1643         return (grctx->src_init || grctx->src_init_continue ||
1644                 grctx->src_err_notify);
1645 }
1646
1647 static
1648 void gss_svc_reqctx_free(struct gss_svc_reqctx *grctx)
1649 {
1650         if (grctx->src_ctx)
1651                 gss_svc_upcall_put_ctx(grctx->src_ctx);
1652
1653         sptlrpc_policy_put(grctx->src_base.sc_policy);
1654         OBD_FREE_PTR(grctx);
1655 }
1656
1657 static inline
1658 void gss_svc_reqctx_addref(struct gss_svc_reqctx *grctx)
1659 {
1660         LASSERT(atomic_read(&grctx->src_base.sc_refcount) > 0);
1661         atomic_inc(&grctx->src_base.sc_refcount);
1662 }
1663
1664 static inline
1665 void gss_svc_reqctx_decref(struct gss_svc_reqctx *grctx)
1666 {
1667         LASSERT(atomic_read(&grctx->src_base.sc_refcount) > 0);
1668
1669         if (atomic_dec_and_test(&grctx->src_base.sc_refcount))
1670                 gss_svc_reqctx_free(grctx);
1671 }
1672
/*
 * Integrity-protect (sign) the reply held in rs->rs_repbuf.
 * Returns 0 on success, a negative errno from gss_sign_msg() on failure.
 */
static
int gss_svc_sign(struct ptlrpc_request *req,
                 struct ptlrpc_reply_state *rs,
                 struct gss_svc_reqctx *grctx)
{
        int     rc;
        ENTRY;

        /* the reply lustre_msg lives in segment 1 of the wire buffer
         * (segment 0 is the gss header) */
        LASSERT(rs->rs_msg == lustre_msg_buf(rs->rs_repbuf, 1, 0));

        /* embedded lustre_msg might have been shrinked */
        if (req->rq_replen != rs->rs_repbuf->lm_buflens[1])
                lustre_shrink_msg(rs->rs_repbuf, 1, req->rq_replen, 1);

        /* sign with the sequence number recorded from the request; a
         * non-negative return is the final reply data length */
        rc = gss_sign_msg(rs->rs_repbuf, grctx->src_ctx->gsc_mechctx,
                          PTLRPC_GSS_PROC_DATA, grctx->src_wirectx.gw_seq,
                          NULL);
        if (rc < 0)
                RETURN(rc);

        rs->rs_repdata_len = rc;
        RETURN(0);
}
1696
/*
 * Build an error-notify reply carrying the given gss major/minor status,
 * e.g. to tell a client its context is stale after a server restart.
 * Returns 0 on success, negative errno on failure.
 */
int gss_pack_err_notify(struct ptlrpc_request *req, __u32 major, __u32 minor)
{
        struct gss_svc_reqctx     *grctx = gss_svc_ctx2reqctx(req->rq_svc_ctx);
        struct ptlrpc_reply_state *rs;
        struct gss_err_header     *ghdr;
        int                        replen = sizeof(struct ptlrpc_body);
        int                        rc;
        ENTRY;

        //OBD_FAIL_RETURN(OBD_FAIL_SVCGSS_ERR_NOTIFY|OBD_FAIL_ONCE, -EINVAL);

        /* mark this request "special" so reply-buffer sizing skips the
         * normal sign/seal payload estimate (see gss_svc_reqctx_is_special) */
        grctx->src_err_notify = 1;
        grctx->src_reserve_len = 0;

        rc = lustre_pack_reply_v2(req, 1, &replen, NULL);
        if (rc) {
                CERROR("could not pack reply, err %d\n", rc);
                RETURN(rc);
        }

        /* gss hdr */
        rs = req->rq_reply_state;
        /* NOTE(review): this asserts on segment 1, but the error header is
         * written into segment 0 below -- looks like it should check
         * lm_buflens[0]; confirm against the repbuf layout built by
         * gss_svc_alloc_rs() */
        LASSERT(rs->rs_repbuf->lm_buflens[1] >= sizeof(*ghdr));
        ghdr = lustre_msg_buf(rs->rs_repbuf, 0, 0);
        ghdr->gh_version = PTLRPC_GSS_VERSION;
        ghdr->gh_flags = 0;
        ghdr->gh_proc = PTLRPC_GSS_PROC_ERR;
        ghdr->gh_major = major;
        ghdr->gh_minor = minor;
        ghdr->gh_handle.len = 0; /* fake context handle */

        rs->rs_repdata_len = lustre_msg_size_v2(rs->rs_repbuf->lm_bufcount,
                                                rs->rs_repbuf->lm_buflens);

        CDEBUG(D_SEC, "prepare gss error notify(0x%x/0x%x) to %s\n",
               major, minor, libcfs_nid2str(req->rq_peer.nid));
        RETURN(0);
}
1735
1736 static
1737 int gss_svc_handle_init(struct ptlrpc_request *req,
1738                         struct gss_wire_ctx *gw)
1739 {
1740         struct gss_svc_reqctx     *grctx = gss_svc_ctx2reqctx(req->rq_svc_ctx);
1741         struct lustre_msg         *reqbuf = req->rq_reqbuf;
1742         struct obd_uuid           *uuid;
1743         struct obd_device         *target;
1744         rawobj_t                   uuid_obj, rvs_hdl, in_token;
1745         __u32                      lustre_svc;
1746         __u32                     *secdata, seclen;
1747         int                        rc;
1748         ENTRY;
1749
1750         CDEBUG(D_SEC, "processing gss init(%d) request from %s\n", gw->gw_proc,
1751                libcfs_nid2str(req->rq_peer.nid));
1752
1753         if (gw->gw_proc == PTLRPC_GSS_PROC_INIT && gw->gw_handle.len != 0) {
1754                 CERROR("proc %u: invalid handle length %u\n",
1755                        gw->gw_proc, gw->gw_handle.len);
1756                 RETURN(SECSVC_DROP);
1757         }
1758
1759         if (reqbuf->lm_bufcount < 3 || reqbuf->lm_bufcount > 4){
1760                 CERROR("Invalid bufcount %d\n", reqbuf->lm_bufcount);
1761                 RETURN(SECSVC_DROP);
1762         }
1763
1764         /* ctx initiate payload is in last segment */
1765         secdata = lustre_msg_buf(reqbuf, reqbuf->lm_bufcount - 1, 0);
1766         seclen = reqbuf->lm_buflens[reqbuf->lm_bufcount - 1];
1767
1768         if (seclen < 4 + 4) {
1769                 CERROR("sec size %d too small\n", seclen);
1770                 RETURN(SECSVC_DROP);
1771         }
1772
1773         /* lustre svc type */
1774         lustre_svc = le32_to_cpu(*secdata++);
1775         seclen -= 4;
1776
1777         /* extract target uuid, note this code is somewhat fragile
1778          * because touched internal structure of obd_uuid
1779          */
1780         if (rawobj_extract(&uuid_obj, &secdata, &seclen)) {
1781                 CERROR("failed to extract target uuid\n");
1782                 RETURN(SECSVC_DROP);
1783         }
1784         uuid_obj.data[uuid_obj.len - 1] = '\0';
1785
1786         uuid = (struct obd_uuid *) uuid_obj.data;
1787         target = class_uuid2obd(uuid);
1788         if (!target || target->obd_stopping || !target->obd_set_up) {
1789                 CERROR("target '%s' is not available for context init (%s)",
1790                        uuid->uuid, target == NULL ? "no target" :
1791                        (target->obd_stopping ? "stopping" : "not set up"));
1792                 RETURN(SECSVC_DROP);
1793         }
1794
1795         /* extract reverse handle */
1796         if (rawobj_extract(&rvs_hdl, &secdata, &seclen)) {
1797                 CERROR("failed extract reverse handle\n");
1798                 RETURN(SECSVC_DROP);
1799         }
1800
1801         /* extract token */
1802         if (rawobj_extract(&in_token, &secdata, &seclen)) {
1803                 CERROR("can't extract token\n");
1804                 RETURN(SECSVC_DROP);
1805         }
1806
1807         rc = gss_svc_upcall_handle_init(req, grctx, gw, target, lustre_svc,
1808                                         &rvs_hdl, &in_token);
1809         if (rc != SECSVC_OK)
1810                 RETURN(rc);
1811
1812         if (SEC_FLAVOR_HAS_USER(req->rq_sec_flavor)) {
1813                 if (reqbuf->lm_bufcount < 4) {
1814                         CERROR("missing user descriptor\n");
1815                         RETURN(SECSVC_DROP);
1816                 }
1817                 if (sptlrpc_unpack_user_desc(reqbuf, 2)) {
1818                         CERROR("Mal-formed user descriptor\n");
1819                         RETURN(SECSVC_DROP);
1820                 }
1821                 req->rq_user_desc = lustre_msg_buf(reqbuf, 2, 0);
1822         }
1823
1824         req->rq_reqmsg = lustre_msg_buf(reqbuf, 1, 0);
1825         req->rq_reqlen = lustre_msg_buflen(reqbuf, 1);
1826
1827         RETURN(rc);
1828 }
1829
/*
 * Verify an integrity-protected (AUTH) request.  Wire layout: segment 0
 * is the gss header, segment 1 the lustre msg, then optional user
 * descriptor and bulk checksum; the last segment must be the gss
 * signature.
 *
 * Returns 0 on success; on failure *major carries the gss status that
 * may be reported back to the client.
 */
static
int gss_svc_verify_request(struct ptlrpc_request *req,
                           struct gss_svc_ctx *gctx,
                           struct gss_wire_ctx *gw,
                           __u32 *major)
{
        struct lustre_msg  *msg = req->rq_reqbuf;
        int                 offset = 2;
        ENTRY;

        *major = GSS_S_COMPLETE;

        if (msg->lm_bufcount < 3) {
                CERROR("Too few segments (%u) in request\n", msg->lm_bufcount);
                RETURN(-EINVAL);
        }

        /* first-pass (phase 0) replay check before the costly signature
         * verification */
        if (gss_check_seq_num(&gctx->gsc_seqdata, gw->gw_seq, 0)) {
                CERROR("phase 0: discard replayed req: seq %u\n", gw->gw_seq);
                *major = GSS_S_DUPLICATE_TOKEN;
                RETURN(-EACCES);
        }

        *major = gss_verify_msg(msg, gctx->gsc_mechctx);
        if (*major != GSS_S_COMPLETE)
                RETURN(-EACCES);

        /* second-pass (phase 1) replay check after verification */
        if (gss_check_seq_num(&gctx->gsc_seqdata, gw->gw_seq, 1)) {
                CERROR("phase 1+: discard replayed req: seq %u\n", gw->gw_seq);
                *major = GSS_S_DUPLICATE_TOKEN;
                RETURN(-EACCES);
        }

        /* user descriptor */
        if (SEC_FLAVOR_HAS_USER(req->rq_sec_flavor)) {
                /* need the descriptor segment plus the trailing signature */
                if (msg->lm_bufcount < (offset + 1 + 1)) {
                        CERROR("no user desc included\n");
                        RETURN(-EINVAL);
                }

                if (sptlrpc_unpack_user_desc(msg, offset)) {
                        CERROR("Mal-formed user descriptor\n");
                        RETURN(-EINVAL);
                }

                req->rq_user_desc = lustre_msg_buf(msg, offset, 0);
                offset++;
        }

        /* check bulk cksum data */
        if (SEC_FLAVOR_HAS_BULK(req->rq_sec_flavor)) {
                if (msg->lm_bufcount < (offset + 1 + 1)) {
                        CERROR("no bulk checksum included\n");
                        RETURN(-EINVAL);
                }

                if (bulk_sec_desc_unpack(msg, offset))
                        RETURN(-EINVAL);
        }

        /* expose the embedded lustre msg to upper layers */
        req->rq_reqmsg = lustre_msg_buf(msg, 1, 0);
        req->rq_reqlen = msg->lm_buflens[1];
        RETURN(0);
}
1897
/*
 * Unwrap a privacy-protected (PRIV) request: decrypt the embedded
 * clear-text lustre_msg in place, unpack it, then locate the optional
 * user descriptor and bulk checksum segments.
 *
 * Returns 0 on success; on failure *major holds the gss status.
 */
static
int gss_svc_unseal_request(struct ptlrpc_request *req,
                           struct gss_svc_ctx *gctx,
                           struct gss_wire_ctx *gw,
                           __u32 *major)
{
        struct lustre_msg  *msg = req->rq_reqbuf;
        int                 msglen, offset = 1;
        ENTRY;

        /* first-pass (phase 0) replay check before the costly decryption */
        if (gss_check_seq_num(&gctx->gsc_seqdata, gw->gw_seq, 0)) {
                CERROR("phase 0: discard replayed req: seq %u\n", gw->gw_seq);
                *major = GSS_S_DUPLICATE_TOKEN;
                RETURN(-EACCES);
        }

        /* decrypt in place; msglen receives the clear-text length */
        *major = gss_unseal_msg(gctx->gsc_mechctx, msg,
                               &msglen, req->rq_reqdata_len);
        if (*major != GSS_S_COMPLETE)
                RETURN(-EACCES);

        /* second-pass (phase 1) replay check after decryption */
        if (gss_check_seq_num(&gctx->gsc_seqdata, gw->gw_seq, 1)) {
                CERROR("phase 1+: discard replayed req: seq %u\n", gw->gw_seq);
                *major = GSS_S_DUPLICATE_TOKEN;
                RETURN(-EACCES);
        }

        if (lustre_unpack_msg(msg, msglen)) {
                CERROR("Failed to unpack after decryption\n");
                RETURN(-EINVAL);
        }
        req->rq_reqdata_len = msglen;

        if (msg->lm_bufcount < 1) {
                CERROR("Invalid buffer: is empty\n");
                RETURN(-EINVAL);
        }

        /* after unsealing, segment 0 is the lustre msg and optional
         * segments (user desc, bulk checksum) follow */
        if (SEC_FLAVOR_HAS_USER(req->rq_sec_flavor)) {
                if (msg->lm_bufcount < offset + 1) {
                        CERROR("no user descriptor included\n");
                        RETURN(-EINVAL);
                }

                if (sptlrpc_unpack_user_desc(msg, offset)) {
                        CERROR("Mal-formed user descriptor\n");
                        RETURN(-EINVAL);
                }

                req->rq_user_desc = lustre_msg_buf(msg, offset, 0);
                offset++;
        }

        if (SEC_FLAVOR_HAS_BULK(req->rq_sec_flavor)) {
                if (msg->lm_bufcount < offset + 1) {
                        CERROR("no bulk checksum included\n");
                        RETURN(-EINVAL);
                }

                if (bulk_sec_desc_unpack(msg, offset))
                        RETURN(-EINVAL);
        }

        /* expose the embedded lustre msg to upper layers */
        req->rq_reqmsg = lustre_msg_buf(req->rq_reqbuf, 0, 0);
        req->rq_reqlen = req->rq_reqbuf->lm_buflens[0];
        RETURN(0);
}
1965
/*
 * Handle a normal PTLRPC_GSS_PROC_DATA request: look up the server-side
 * context referenced by the wire handle and verify or unseal the
 * request according to the negotiated service level.  Returns a
 * SECSVC_* verdict.
 */
static
int gss_svc_handle_data(struct ptlrpc_request *req,
                        struct gss_wire_ctx *gw)
{
        struct gss_svc_reqctx *grctx = gss_svc_ctx2reqctx(req->rq_svc_ctx);
        __u32                  major = 0;
        int                    rc = 0;
        ENTRY;

        /* reference is dropped later via gss_svc_reqctx_free() */
        grctx->src_ctx = gss_svc_upcall_get_ctx(req, gw);
        if (!grctx->src_ctx) {
                major = GSS_S_NO_CONTEXT;
                goto error;
        }

        switch (gw->gw_svc) {
        case PTLRPC_GSS_SVC_INTEGRITY:
                rc = gss_svc_verify_request(req, grctx->src_ctx, gw, &major);
                break;
        case PTLRPC_GSS_SVC_PRIVACY:
                rc = gss_svc_unseal_request(req, grctx->src_ctx, gw, &major);
                break;
        default:
                CERROR("unsupported gss service %d\n", gw->gw_svc);
                rc = -EINVAL;
        }

        if (rc == 0)
                RETURN(SECSVC_OK);

        CERROR("svc %u failed: major 0x%08x: ctx %p(%u->%s)\n",
               gw->gw_svc, major, grctx->src_ctx, grctx->src_ctx->gsc_uid,
               libcfs_nid2str(req->rq_peer.nid));
error:
        /*
         * we only notify client in case of NO_CONTEXT/BAD_SIG, which
         * might happen after server reboot, to allow recovery.
         */
        if ((major == GSS_S_NO_CONTEXT || major == GSS_S_BAD_SIG) &&
            gss_pack_err_notify(req, major, 0) == 0)
                RETURN(SECSVC_COMPLETE);

        RETURN(SECSVC_DROP);
}
2010
/*
 * Handle a PTLRPC_GSS_PROC_DESTROY request: verify the (integrity-only)
 * request, pack a minimal reply, and tear down the server-side context.
 * Returns a SECSVC_* verdict.
 */
static
int gss_svc_handle_destroy(struct ptlrpc_request *req,
                           struct gss_wire_ctx *gw)
{
        struct gss_svc_reqctx  *grctx = gss_svc_ctx2reqctx(req->rq_svc_ctx);
        int                     replen = sizeof(struct ptlrpc_body);
        __u32                   major;
        ENTRY;

        grctx->src_ctx = gss_svc_upcall_get_ctx(req, gw);
        if (!grctx->src_ctx) {
                CWARN("invalid gss context handle for destroy.\n");
                RETURN(SECSVC_DROP);
        }

        /* destroy requests are only accepted with integrity protection */
        if (gw->gw_svc != PTLRPC_GSS_SVC_INTEGRITY) {
                CERROR("svc %u is not supported in destroy.\n", gw->gw_svc);
                RETURN(SECSVC_DROP);
        }

        if (gss_svc_verify_request(req, grctx->src_ctx, gw, &major))
                RETURN(SECSVC_DROP);

        if (lustre_pack_reply_v2(req, 1, &replen, NULL))
                RETURN(SECSVC_DROP);

        CWARN("gss svc destroy ctx %p(%u->%s)\n", grctx->src_ctx,
              grctx->src_ctx->gsc_uid, libcfs_nid2str(req->rq_peer.nid));

        gss_svc_upcall_destroy_ctx(grctx->src_ctx);

        /* a bad user descriptor is not fatal at this point -- the context
         * is already destroyed, so still serve the reply */
        if (SEC_FLAVOR_HAS_USER(req->rq_sec_flavor)) {
                if (req->rq_reqbuf->lm_bufcount < 4) {
                        CERROR("missing user descriptor, ignore it\n");
                        RETURN(SECSVC_OK);
                }
                if (sptlrpc_unpack_user_desc(req->rq_reqbuf, 2)) {
                        CERROR("Mal-formed user descriptor, ignore it\n");
                        RETURN(SECSVC_OK);
                }
                req->rq_user_desc = lustre_msg_buf(req->rq_reqbuf, 2, 0);
        }

        RETURN(SECSVC_OK);
}
2056
/*
 * Server-side entry point for incoming gss-protected requests: parse the
 * gss header, allocate the per-request context, and dispatch by gss proc
 * (init / data / destroy).  Returns SECSVC_OK, SECSVC_COMPLETE (reply
 * already prepared, e.g. error notify) or SECSVC_DROP.
 */
int gss_svc_accept(struct ptlrpc_sec_policy *policy, struct ptlrpc_request *req)
{
        struct gss_header      *ghdr;
        struct gss_svc_reqctx  *grctx;
        struct gss_wire_ctx    *gw;
        int                     rc;
        ENTRY;

        LASSERT(req->rq_reqbuf);
        LASSERT(req->rq_svc_ctx == NULL);

        if (req->rq_reqbuf->lm_bufcount < 2) {
                CERROR("buf count only %d\n", req->rq_reqbuf->lm_bufcount);
                RETURN(SECSVC_DROP);
        }

        /* segment 0 holds the (possibly byte-swapped) gss header */
        ghdr = gss_swab_header(req->rq_reqbuf, 0);
        if (ghdr == NULL) {
                CERROR("can't decode gss header\n");
                RETURN(SECSVC_DROP);
        }

        /* sanity checks */
        if (ghdr->gh_version != PTLRPC_GSS_VERSION) {
                CERROR("gss version %u, expect %u\n", ghdr->gh_version,
                       PTLRPC_GSS_VERSION);
                RETURN(SECSVC_DROP);
        }

        /* alloc grctx data; starts with one reference, owned by the req */
        OBD_ALLOC_PTR(grctx);
        if (!grctx) {
                CERROR("fail to alloc svc reqctx\n");
                RETURN(SECSVC_DROP);
        }
        grctx->src_base.sc_policy = sptlrpc_policy_get(policy);
        atomic_set(&grctx->src_base.sc_refcount, 1);
        req->rq_svc_ctx = &grctx->src_base;
        gw = &grctx->src_wirectx;

        /* save wire context */
        gw->gw_proc = ghdr->gh_proc;
        gw->gw_seq = ghdr->gh_seq;
        gw->gw_svc = ghdr->gh_svc;
        rawobj_from_netobj(&gw->gw_handle, &ghdr->gh_handle);

        /* keep original wire header which subject to checksum verification */
        if (lustre_msg_swabbed(req->rq_reqbuf))
                gss_header_swabber(ghdr);

        switch(ghdr->gh_proc) {
        case PTLRPC_GSS_PROC_INIT:
        case PTLRPC_GSS_PROC_CONTINUE_INIT:
                rc = gss_svc_handle_init(req, gw);
                break;
        case PTLRPC_GSS_PROC_DATA:
                rc = gss_svc_handle_data(req, gw);
                break;
        case PTLRPC_GSS_PROC_DESTROY:
                rc = gss_svc_handle_destroy(req, gw);
                break;
        default:
                CERROR("unknown proc %u\n", gw->gw_proc);
                rc = SECSVC_DROP;
                break;
        }

        switch (rc) {
        case SECSVC_OK:
                LASSERT (grctx->src_ctx);

                /* propagate authentication info to the request */
                req->rq_auth_gss = 1;
                req->rq_auth_remote = grctx->src_ctx->gsc_remote;
                req->rq_auth_usr_mdt = grctx->src_ctx->gsc_usr_mds;
                req->rq_auth_usr_root = grctx->src_ctx->gsc_usr_root;
                req->rq_auth_uid = grctx->src_ctx->gsc_uid;
                req->rq_auth_mapped_uid = grctx->src_ctx->gsc_mapped_uid;
                break;
        case SECSVC_COMPLETE:
                break;
        case SECSVC_DROP:
                /* dropping the request: release the context allocated above */
                gss_svc_reqctx_free(grctx);
                req->rq_svc_ctx = NULL;
                break;
        }

        RETURN(rc);
}
2145
2146 void gss_svc_invalidate_ctx(struct ptlrpc_svc_ctx *svc_ctx)
2147 {
2148         struct gss_svc_reqctx  *grctx;
2149         ENTRY;
2150
2151         if (svc_ctx == NULL) {
2152                 EXIT;
2153                 return;
2154         }
2155
2156         grctx = gss_svc_ctx2reqctx(svc_ctx);
2157
2158         CWARN("gss svc invalidate ctx %p(%u)\n",
2159               grctx->src_ctx, grctx->src_ctx->gsc_uid);
2160         gss_svc_upcall_destroy_ctx(grctx->src_ctx);
2161
2162         EXIT;
2163 }
2164
2165 static inline
2166 int gss_svc_payload(struct gss_svc_reqctx *grctx, int msgsize, int privacy)
2167 {
2168         if (gss_svc_reqctx_is_special(grctx))
2169                 return grctx->src_reserve_len;
2170
2171         return gss_estimate_payload(NULL, msgsize, privacy);
2172 }
2173
/*
 * Allocate (or adopt a pre-allocated) reply state large enough for a
 * reply of msglen bytes plus gss protection overhead.  For privacy, the
 * clear-text segments are laid out first and later wrapped in place by
 * gss_svc_seal(); otherwise the final wire layout is built directly.
 * Returns 0 on success, negative errno on failure.
 */
int gss_svc_alloc_rs(struct ptlrpc_request *req, int msglen)
{
        struct gss_svc_reqctx       *grctx;
        struct ptlrpc_reply_state   *rs;
        struct ptlrpc_bulk_sec_desc *bsd;
        int                          privacy;
        int                          ibuflens[2], ibufcnt = 0;
        int                          buflens[4], bufcnt;
        int                          txtsize, wmsg_size, rs_size;
        ENTRY;

        LASSERT(msglen % 8 == 0);

        if (SEC_FLAVOR_HAS_BULK(req->rq_sec_flavor) &&
            !req->rq_bulk_read && !req->rq_bulk_write) {
                CERROR("client request bulk sec on non-bulk rpc\n");
                RETURN(-EPROTO);
        }

        /* special requests (ctx init / err notify) are never sealed */
        grctx = gss_svc_ctx2reqctx(req->rq_svc_ctx);
        if (gss_svc_reqctx_is_special(grctx))
                privacy = 0;
        else
                privacy = (SEC_FLAVOR_SVC(req->rq_sec_flavor) ==
                           SPTLRPC_SVC_PRIV);

        if (privacy) {
                /* Inner buffer */
                ibufcnt = 1;
                ibuflens[0] = msglen;

                if (SEC_FLAVOR_HAS_BULK(req->rq_sec_flavor)) {
                        /* reply bulk checksum sized from the request's
                         * checksum algorithm */
                        LASSERT(req->rq_reqbuf->lm_bufcount >= 2);
                        bsd = lustre_msg_buf(req->rq_reqbuf,
                                             req->rq_reqbuf->lm_bufcount - 1,
                                             sizeof(*bsd));

                        ibuflens[ibufcnt++] = bulk_sec_desc_size(
                                                        bsd->bsd_csum_alg, 0,
                                                        req->rq_bulk_read);
                }

                /* extra room for cipher block alignment of the wrap */
                txtsize = lustre_msg_size_v2(ibufcnt, ibuflens);
                txtsize += GSS_MAX_CIPHER_BLOCK;

                /* wrapper buffer */
                bufcnt = 3;
                buflens[0] = PTLRPC_GSS_HEADER_SIZE;
                buflens[1] = gss_svc_payload(grctx, buflens[0], 0);
                buflens[2] = gss_svc_payload(grctx, txtsize, 1);
        } else {
                bufcnt = 2;
                buflens[0] = PTLRPC_GSS_HEADER_SIZE;
                buflens[1] = msglen;
                txtsize = buflens[0] + buflens[1];

                if (SEC_FLAVOR_HAS_BULK(req->rq_sec_flavor)) {
                        LASSERT(req->rq_reqbuf->lm_bufcount >= 4);
                        bsd = lustre_msg_buf(req->rq_reqbuf,
                                             req->rq_reqbuf->lm_bufcount - 2,
                                             sizeof(*bsd));

                        buflens[bufcnt] = bulk_sec_desc_size(
                                                        bsd->bsd_csum_alg, 0,
                                                        req->rq_bulk_read);
                        txtsize += buflens[bufcnt];
                        bufcnt++;
                }
                /* trailing segment for the gss signature */
                buflens[bufcnt++] = gss_svc_payload(grctx, txtsize, 0);
        }

        wmsg_size = lustre_msg_size_v2(bufcnt, buflens);

        rs_size = sizeof(*rs) + wmsg_size;
        rs = req->rq_reply_state;

        if (rs) {
                /* pre-allocated */
                LASSERT(rs->rs_size >= rs_size);
        } else {
                OBD_ALLOC(rs, rs_size);
                if (rs == NULL)
                        RETURN(-ENOMEM);

                rs->rs_size = rs_size;
        }

        rs->rs_repbuf = (struct lustre_msg *) (rs + 1);
        rs->rs_repbuf_len = wmsg_size;

        if (privacy) {
                /* rs_repbuf temporarily holds the clear text;
                 * gss_svc_seal() rebuilds the wire format in place later */
                lustre_init_msg_v2(rs->rs_repbuf, ibufcnt, ibuflens, NULL);
                rs->rs_msg = lustre_msg_buf(rs->rs_repbuf, 0, msglen);
        } else {
                lustre_init_msg_v2(rs->rs_repbuf, bufcnt, buflens, NULL);
                rs->rs_repbuf->lm_secflvr = req->rq_sec_flavor;

                rs->rs_msg = (struct lustre_msg *)
                                lustre_msg_buf(rs->rs_repbuf, 1, 0);
        }

        /* the reply state holds a reference on the request context,
         * dropped in gss_svc_free_rs() */
        gss_svc_reqctx_addref(grctx);
        rs->rs_svc_ctx = req->rq_svc_ctx;

        LASSERT(rs->rs_msg);
        req->rq_reply_state = rs;
        RETURN(0);
}
2282
/*
 * Privacy-protect the reply: encrypt the clear-text lustre_msg currently
 * in rs_repbuf into a temporary cipher buffer, then rebuild rs_repbuf as
 * the wire format [gss header | header MIC | cipher text].
 * Returns 0 on success, negative errno on failure.
 */
static
int gss_svc_seal(struct ptlrpc_request *req,
                 struct ptlrpc_reply_state *rs,
                 struct gss_svc_reqctx *grctx)
{
        struct gss_svc_ctx      *gctx = grctx->src_ctx;
        rawobj_t                 msgobj, cipher_obj, micobj;
        struct gss_header       *ghdr;
        __u8                    *cipher_buf;
        int                      cipher_buflen, buflens[3];
        int                      msglen, rc;
        __u32                    major;
        ENTRY;

        /* embedded lustre_msg might have been shrinked */
        if (req->rq_replen != rs->rs_repbuf->lm_buflens[0])
                lustre_shrink_msg(rs->rs_repbuf, 0, req->rq_replen, 1);

        /* clear data length */
        msglen = lustre_msg_size_v2(rs->rs_repbuf->lm_bufcount,
                                    rs->rs_repbuf->lm_buflens);

        /* clear text */
        msgobj.len = msglen;
        msgobj.data = (__u8 *) rs->rs_repbuf;

        /* allocate temporary cipher buffer */
        cipher_buflen = gss_estimate_payload(gctx->gsc_mechctx, msglen, 1);
        OBD_ALLOC(cipher_buf, cipher_buflen);
        if (!cipher_buf)
                RETURN(-ENOMEM);

        cipher_obj.len = cipher_buflen;
        cipher_obj.data = cipher_buf;

        major = lgss_wrap(gctx->gsc_mechctx, &msgobj, rs->rs_repbuf_len,
                          &cipher_obj);
        if (major != GSS_S_COMPLETE) {
                CERROR("priv: wrap message error: %08x\n", major);
                GOTO(out_free, rc = -EPERM);
        }
        LASSERT(cipher_obj.len <= cipher_buflen);

        /* now the real wire data -- re-initializing rs_repbuf clobbers
         * the clear text, which has already been wrapped above */
        buflens[0] = PTLRPC_GSS_HEADER_SIZE;
        buflens[1] = gss_estimate_payload(gctx->gsc_mechctx, buflens[0], 0);
        buflens[2] = cipher_obj.len;

        LASSERT(lustre_msg_size_v2(3, buflens) <= rs->rs_repbuf_len);
        lustre_init_msg_v2(rs->rs_repbuf, 3, buflens, NULL);
        rs->rs_repbuf->lm_secflvr = req->rq_sec_flavor;

        /* gss header */
        ghdr = lustre_msg_buf(rs->rs_repbuf, 0, 0);
        ghdr->gh_version = PTLRPC_GSS_VERSION;
        ghdr->gh_flags = 0;
        ghdr->gh_proc = PTLRPC_GSS_PROC_DATA;
        ghdr->gh_seq = grctx->src_wirectx.gw_seq;
        ghdr->gh_svc = PTLRPC_GSS_SVC_PRIVACY;
        ghdr->gh_handle.len = 0;

        /* header signature */
        msgobj.len = rs->rs_repbuf->lm_buflens[0];
        msgobj.data = lustre_msg_buf(rs->rs_repbuf, 0, 0);
        micobj.len = rs->rs_repbuf->lm_buflens[1];
        micobj.data = lustre_msg_buf(rs->rs_repbuf, 1, 0);

        major = lgss_get_mic(gctx->gsc_mechctx, 1, &msgobj, &micobj);
        if (major != GSS_S_COMPLETE) {
                CERROR("priv: sign message error: %08x\n", major);
                GOTO(out_free, rc = -EPERM);
        }
        /* shrink the MIC segment to its actual length */
        lustre_shrink_msg(rs->rs_repbuf, 1, micobj.len, 0);

        /* cipher token */
        memcpy(lustre_msg_buf(rs->rs_repbuf, 2, 0),
               cipher_obj.data, cipher_obj.len);

        rs->rs_repdata_len = lustre_shrink_msg(rs->rs_repbuf, 2,
                                               cipher_obj.len, 0);

        /* to catch upper layer's further access */
        rs->rs_msg = NULL;
        req->rq_repmsg = NULL;
        req->rq_replen = 0;

        rc = 0;
out_free:
        OBD_FREE(cipher_buf, cipher_buflen);
        RETURN(rc);
}
2374
2375 int gss_svc_authorize(struct ptlrpc_request *req)
2376 {
2377         struct ptlrpc_reply_state *rs = req->rq_reply_state;
2378         struct gss_svc_reqctx     *grctx = gss_svc_ctx2reqctx(req->rq_svc_ctx);
2379         struct gss_wire_ctx       *gw;
2380         int                        rc;
2381         ENTRY;
2382
2383         if (gss_svc_reqctx_is_special(grctx))
2384                 RETURN(0);
2385
2386         gw = &grctx->src_wirectx;
2387         if (gw->gw_proc != PTLRPC_GSS_PROC_DATA &&
2388             gw->gw_proc != PTLRPC_GSS_PROC_DESTROY) {
2389                 CERROR("proc %d not support\n", gw->gw_proc);
2390                 RETURN(-EINVAL);
2391         }
2392
2393         LASSERT(grctx->src_ctx);
2394
2395         switch (gw->gw_svc) {
2396         case  PTLRPC_GSS_SVC_INTEGRITY:
2397                 rc = gss_svc_sign(req, rs, grctx);
2398                 break;
2399         case  PTLRPC_GSS_SVC_PRIVACY:
2400                 rc = gss_svc_seal(req, rs, grctx);
2401                 break;
2402         default:
2403                 CERROR("Unknown service %d\n", gw->gw_svc);
2404                 GOTO(out, rc = -EINVAL);
2405         }
2406         rc = 0;
2407
2408 out:
2409         RETURN(rc);
2410 }
2411
2412 void gss_svc_free_rs(struct ptlrpc_reply_state *rs)
2413 {
2414         struct gss_svc_reqctx *grctx;
2415
2416         LASSERT(rs->rs_svc_ctx);
2417         grctx = container_of(rs->rs_svc_ctx, struct gss_svc_reqctx, src_base);
2418
2419         gss_svc_reqctx_decref(grctx);
2420         rs->rs_svc_ctx = NULL;
2421
2422         if (!rs->rs_prealloc)
2423                 OBD_FREE(rs, rs->rs_size);
2424 }
2425
2426 void gss_svc_free_ctx(struct ptlrpc_svc_ctx *ctx)
2427 {
2428         LASSERT(atomic_read(&ctx->sc_refcount) == 0);
2429         gss_svc_reqctx_free(gss_svc_ctx2reqctx(ctx));
2430 }
2431
2432 int gss_copy_rvc_cli_ctx(struct ptlrpc_cli_ctx *cli_ctx,
2433                          struct ptlrpc_svc_ctx *svc_ctx)
2434 {
2435         struct gss_cli_ctx     *cli_gctx = ctx2gctx(cli_ctx);
2436         struct gss_svc_reqctx  *grctx;
2437         struct gss_ctx         *mechctx = NULL;
2438
2439         cli_gctx->gc_proc = PTLRPC_GSS_PROC_DATA;
2440         cli_gctx->gc_win = GSS_SEQ_WIN;
2441         atomic_set(&cli_gctx->gc_seq, 0);
2442
2443         grctx = container_of(svc_ctx, struct gss_svc_reqctx, src_base);
2444         LASSERT(grctx->src_ctx);
2445         LASSERT(grctx->src_ctx->gsc_mechctx);
2446
2447         if (lgss_copy_reverse_context(grctx->src_ctx->gsc_mechctx, &mechctx) !=
2448             GSS_S_COMPLETE) {
2449                 CERROR("failed to copy mech context\n");
2450                 return -ENOMEM;
2451         }
2452
2453         if (rawobj_dup(&cli_gctx->gc_handle, &grctx->src_ctx->gsc_rvs_hdl)) {
2454                 CERROR("failed to dup reverse handle\n");
2455                 lgss_delete_sec_context(&mechctx);
2456                 return -ENOMEM;
2457         }
2458
2459         cli_gctx->gc_mechctx = mechctx;
2460         gss_cli_ctx_uptodate(cli_gctx);
2461
2462         return 0;
2463 }
2464
/*
 * Module init: bring up lproc, client/server upcalls and the kerberos
 * mechanism, then register the policy front-ends last.  On any failure,
 * tear down everything already initialized (in reverse order) and
 * return the error.
 */
int __init sptlrpc_gss_init(void)
{
        int rc;

        rc = gss_init_lproc();
        if (rc)
                return rc;

        rc = gss_init_cli_upcall();
        if (rc)
                goto out_lproc;

        rc = gss_init_svc_upcall();
        if (rc)
                goto out_cli_upcall;

        rc = init_kerberos_module();
        if (rc)
                goto out_svc_upcall;

        /*
         * register policy after all other stuff is initialized, because it
         * might be in use immediately after the registration.
         */

        rc = gss_init_keyring();
        if (rc)
                goto out_kerberos;

#ifdef HAVE_GSS_PIPEFS
        rc = gss_init_pipefs();
        if (rc)
                goto out_keyring;
#endif

        return 0;

#ifdef HAVE_GSS_PIPEFS
out_keyring:
        gss_exit_keyring();
#endif

out_kerberos:
        cleanup_kerberos_module();
out_svc_upcall:
        gss_exit_svc_upcall();
out_cli_upcall:
        gss_exit_cli_upcall();
out_lproc:
        gss_exit_lproc();
        return rc;
}
2517
/*
 * Module exit: tear down everything brought up by sptlrpc_gss_init().
 * NOTE(review): keyring is torn down before pipefs although it was
 * initialized before it (not strict reverse order) -- confirm this
 * ordering is intentional.
 */
static void __exit sptlrpc_gss_exit(void)
{
        gss_exit_keyring();
#ifdef HAVE_GSS_PIPEFS
        gss_exit_pipefs();
#endif
        cleanup_kerberos_module();
        gss_exit_svc_upcall();
        gss_exit_cli_upcall();
        gss_exit_lproc();
}
2529
MODULE_AUTHOR("Cluster File Systems, Inc. <info@clusterfs.com>");
MODULE_DESCRIPTION("GSS security policy for Lustre");
MODULE_LICENSE("GPL");

/* kernel module entry/exit registration */
module_init(sptlrpc_gss_init);
module_exit(sptlrpc_gss_exit);