/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc. All rights reserved
 * Use is subject to license terms.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lustre/ptlrpc/gss/gss_bulk.c
 *
 * Author: Eric Mei <eric.mei@sun.com>
 */

#ifndef EXPORT_SYMTAB
# define EXPORT_SYMTAB
#endif
#define DEBUG_SUBSYSTEM S_SEC
#ifdef __KERNEL__
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/dcache.h>
#include <linux/fs.h>
#include <linux/random.h>
#include <linux/mutex.h>
#include <linux/crypto.h>
#else
#include <liblustre.h>
#endif

#include <obd.h>
#include <obd_class.h>
#include <obd_support.h>
#include <lustre/lustre_idl.h>
#include <lustre_net.h>
#include <lustre_import.h>
#include <lustre_sec.h>

#include "gss_err.h"
#include "gss_internal.h"
#include "gss_api.h"

static __u8 zero_iv[CIPHER_MAX_BLKSIZE] = { 0, };

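/*
 * Map a kernel virtual buffer into a single scatterlist entry; the
 * buffer must not span a page boundary.
 */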
static void buf_to_sl(struct scatterlist *sl,
                      void *buf, unsigned int len)
{
        sl->page = virt_to_page(buf);
        sl->offset = offset_in_page(buf);
        sl->length = len;
}

/*
 * CTS CBC encryption:
 * 1. X(n-1) = P(n-1)
 * 2. E(n-1) = Encrypt(K, X(n-1))
 * 3. C(n)   = HEAD(E(n-1))
 * 4. P      = P(n) | 0
 * 5. D(n)   = E(n-1) XOR P
 * 6. C(n-1) = Encrypt(K, D(n))
 *
 * CTS encryption using the standard CBC interface:
 * 1. pad the last partial block with 0.
 * 2. do CBC encryption.
 * 3. swap the last two ciphertext blocks.
 * 4. truncate to the original plaintext size.
 */
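/*
 * Worked example (illustrative sizes, not fixed by the code): with a
 * 16-byte block size and a 20-byte buffer, blks = 1 and tail = 4.  CBC
 * encryption of the first 16 bytes yields E(n-1); the 4-byte tail is
 * zero-padded and CBC-encrypted (chained from E(n-1)) into D(n).  The
 * swap then emits the first 4 bytes of E(n-1) as C(n) and D(n) as
 * C(n-1), giving exactly 20 bytes of ciphertext.
 */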
static int cbc_cts_encrypt(struct ll_crypto_cipher *tfm,
                           struct scatterlist      *sld,
                           struct scatterlist      *sls)
{
        struct scatterlist      slst, sldt;
        struct blkcipher_desc   desc;
        void                   *data;
        __u8                    sbuf[CIPHER_MAX_BLKSIZE];
        __u8                    dbuf[CIPHER_MAX_BLKSIZE];
        unsigned int            blksize, blks, tail;
        int                     rc;

        blksize = ll_crypto_blkcipher_blocksize(tfm);
        blks = sls->length / blksize;
        tail = sls->length % blksize;
        LASSERT(blks > 0 && tail > 0);

        /* copy the partial tail block into sbuf and pad it with 0 */
        data = cfs_kmap(sls->page);
        memcpy(sbuf, data + sls->offset + blks * blksize, tail);
        memset(sbuf + tail, 0, blksize - tail);
        cfs_kunmap(sls->page);

        buf_to_sl(&slst, sbuf, blksize);
        buf_to_sl(&sldt, dbuf, blksize);
        desc.tfm   = tfm;
        desc.flags = 0;

        /* encrypt head */
        rc = ll_crypto_blkcipher_encrypt(&desc, sld, sls, sls->length - tail);
        if (unlikely(rc)) {
                CERROR("encrypt head (%u) data: %d\n", sls->length - tail, rc);
                return rc;
        }
        /* encrypt tail */
        rc = ll_crypto_blkcipher_encrypt(&desc, &sldt, &slst, blksize);
        if (unlikely(rc)) {
                CERROR("encrypt tail (%u) data: %d\n", slst.length, rc);
                return rc;
        }

        /* swap C(n) and C(n-1); if n == 1, then C(n-1) is the IV */
        data = cfs_kmap(sld->page);

        memcpy(data + sld->offset + blks * blksize,
               data + sld->offset + (blks - 1) * blksize, tail);
        memcpy(data + sld->offset + (blks - 1) * blksize, dbuf, blksize);
        cfs_kunmap(sld->page);

        return 0;
}

/*
 * CTS CBC decryption:
 * 1. D(n)   = Decrypt(K, C(n-1))
 * 2. C      = C(n) | 0
 * 3. X(n)   = D(n) XOR C
 * 4. P(n)   = HEAD(X(n))
 * 5. E(n-1) = C(n) | TAIL(X(n))
 * 6. X(n-1) = Decrypt(K, E(n-1))
 * 7. P(n-1) = X(n-1) XOR C(n-2)
 *
 * CTS decryption using the standard CBC interface:
 * 1. D(n)   = Decrypt(K, C(n-1))
 * 2. C(n)   = C(n) | TAIL(D(n))
 * 3. swap the last two ciphertext blocks.
 * 4. do CBC decryption.
 * 5. truncate to the original ciphertext size.
 */
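/*
 * Note: step 1 is done below with a zero IV, so that D(n) is a raw
 * block decryption of C(n-1) with no CBC chaining; the real IV is saved
 * beforehand and restored before the final CBC pass.
 */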
static int cbc_cts_decrypt(struct ll_crypto_cipher *tfm,
                           struct scatterlist *sld,
                           struct scatterlist *sls)
{
        struct blkcipher_desc   desc;
        struct scatterlist      slst, sldt;
        void                   *data;
        __u8                    sbuf[CIPHER_MAX_BLKSIZE];
        __u8                    dbuf[CIPHER_MAX_BLKSIZE];
        unsigned int            blksize, blks, tail;
        int                     rc;

        blksize = ll_crypto_blkcipher_blocksize(tfm);
        blks = sls->length / blksize;
        tail = sls->length % blksize;
        LASSERT(blks > 0 && tail > 0);

        /* save the current IV, and set the IV to zero */
        ll_crypto_blkcipher_get_iv(tfm, sbuf, blksize);
        ll_crypto_blkcipher_set_iv(tfm, zero_iv, blksize);

        /* D(n) = Decrypt(K, C(n-1)) */
        slst = *sls;
        slst.offset += (blks - 1) * blksize;
        slst.length = blksize;

        buf_to_sl(&sldt, dbuf, blksize);
        desc.tfm   = tfm;
        desc.flags = 0;

        rc = ll_crypto_blkcipher_decrypt(&desc, &sldt, &slst, blksize);
        if (unlikely(rc)) {
                CERROR("decrypt C(n-1) (%u): %d\n", slst.length, rc);
                return rc;
        }

        /* restore the IV */
        ll_crypto_blkcipher_set_iv(tfm, sbuf, blksize);

        data = cfs_kmap(sls->page);
        /* C(n) = C(n) | TAIL(D(n)) */
        memcpy(dbuf, data + sls->offset + blks * blksize, tail);
        /* swap C(n) and C(n-1) */
        memcpy(sbuf, data + sls->offset + (blks - 1) * blksize, blksize);
        memcpy(data + sls->offset + (blks - 1) * blksize, dbuf, blksize);
        cfs_kunmap(sls->page);

        /* do the CBC decryption */
        buf_to_sl(&slst, sbuf, blksize);
        buf_to_sl(&sldt, dbuf, blksize);

        /* decrypt head */
        rc = ll_crypto_blkcipher_decrypt(&desc, sld, sls, sls->length - tail);
        if (unlikely(rc)) {
                CERROR("decrypt head (%u) data: %d\n", sls->length - tail, rc);
                return rc;
        }
        /* decrypt tail */
        rc = ll_crypto_blkcipher_decrypt(&desc, &sldt, &slst, blksize);
        if (unlikely(rc)) {
                CERROR("decrypt tail (%u) data: %d\n", slst.length, rc);
                return rc;
        }

        /* truncate to the original ciphertext size */
        data = cfs_kmap(sld->page);
        memcpy(data + sld->offset + blks * blksize, dbuf, tail);
        cfs_kunmap(sld->page);

        return 0;
}

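/*
 * Encrypt or decrypt a buffer whose length is not a multiple of the
 * block size, using CBC with ciphertext stealing; the source must hold
 * at least one full block plus a non-empty tail.
 */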
static inline int do_cts_tfm(struct ll_crypto_cipher *tfm,
                             int encrypt,
                             struct scatterlist *sld,
                             struct scatterlist *sls)
{
#ifndef HAVE_ASYNC_BLOCK_CIPHER
        LASSERT(tfm->crt_cipher.cit_mode == CRYPTO_TFM_MODE_CBC);
#endif

        if (encrypt)
                return cbc_cts_encrypt(tfm, sld, sls);
        else
                return cbc_cts_decrypt(tfm, sld, sls);
}

/*
 * normal encrypt/decrypt of data whose length is a multiple of the
 * block size
 */
static inline int do_cipher_tfm(struct ll_crypto_cipher *tfm,
                                int encrypt,
                                struct scatterlist *sld,
                                struct scatterlist *sls)
{
        struct blkcipher_desc desc;
        desc.tfm   = tfm;
        desc.flags = 0;
        if (encrypt)
                return ll_crypto_blkcipher_encrypt(&desc, sld, sls, sls->length);
        else
                return ll_crypto_blkcipher_decrypt(&desc, sld, sls, sls->length);
}

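/*
 * Allocate an ARC4 stream-cipher TFM and key it with (at most
 * sct_keysize bytes of) the given key; returns NULL on failure.
 */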
static struct ll_crypto_cipher *get_stream_cipher(__u8 *key, unsigned int keylen)
{
        const struct sptlrpc_ciph_type *ct;
        struct ll_crypto_cipher        *tfm;
        int                             rc;

        /* use ARC4, currently the only stream cipher in the Linux crypto API */
        ct = sptlrpc_get_ciph_type(BULK_CIPH_ALG_ARC4);
        LASSERT(ct);

        tfm = ll_crypto_alloc_blkcipher(ct->sct_tfm_name, 0, 0);
        if (tfm == NULL) {
                CERROR("Failed to allocate stream TFM %s\n", ct->sct_name);
                return NULL;
        }
        LASSERT(ll_crypto_blkcipher_blocksize(tfm));

        if (keylen > ct->sct_keysize)
                keylen = ct->sct_keysize;

        LASSERT(keylen >= crypto_tfm_alg_min_keysize(tfm));
        LASSERT(keylen <= crypto_tfm_alg_max_keysize(tfm));

        rc = ll_crypto_blkcipher_setkey(tfm, key, keylen);
        if (rc) {
                CERROR("Failed to set key for TFM %s: %d\n", ct->sct_name, rc);
                ll_crypto_free_blkcipher(tfm);
                return NULL;
        }

        return tfm;
}

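/*
 * Encrypt or decrypt all pages of a bulk descriptor in place (or into
 * bd_enc_pages when present).  On encryption a random key seed is
 * generated into bsd->bsd_key; both sides derive the actual cipher key
 * from that seed via lgss_plain_encrypt(), so only the seed travels on
 * the wire.
 */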
static int do_bulk_privacy(struct gss_ctx *gctx,
                           struct ptlrpc_bulk_desc *desc,
                           int encrypt, __u32 alg,
                           struct ptlrpc_bulk_sec_desc *bsd)
{
        const struct sptlrpc_ciph_type *ct = sptlrpc_get_ciph_type(alg);
        struct ll_crypto_cipher  *tfm;
        struct ll_crypto_cipher  *stfm = NULL; /* backup stream cipher */
        struct scatterlist        sls, sld, *sldp;
        unsigned int              blksize, keygen_size;
        int                       i, rc;
        __u8                      key[CIPHER_MAX_KEYSIZE];

        LASSERT(ct);

        if (encrypt)
                bsd->bsd_ciph_alg = BULK_CIPH_ALG_NULL;

        if (alg == BULK_CIPH_ALG_NULL)
                return 0;

        if (desc->bd_iov_count <= 0) {
                if (encrypt)
                        bsd->bsd_ciph_alg = alg;
                return 0;
        }

        tfm = ll_crypto_alloc_blkcipher(ct->sct_tfm_name, 0, 0);
        if (tfm == NULL) {
                CERROR("Failed to allocate TFM %s\n", ct->sct_name);
                return -ENOMEM;
        }
        blksize = ll_crypto_blkcipher_blocksize(tfm);

        LASSERT(crypto_tfm_alg_max_keysize(tfm) >= ct->sct_keysize);
        LASSERT(crypto_tfm_alg_min_keysize(tfm) <= ct->sct_keysize);
        LASSERT(ct->sct_ivsize == 0 ||
                ll_crypto_blkcipher_ivsize(tfm) == ct->sct_ivsize);
        LASSERT(ct->sct_keysize <= CIPHER_MAX_KEYSIZE);
        LASSERT(blksize <= CIPHER_MAX_BLKSIZE);

        /* generate a random key seed and compute the secret key from it.
         * note: depending on the algorithm lgss_plain_encrypt uses, the
         * key size might be required to be a multiple of its block size,
         * so for simplicity we round it up to a multiple of
         * CIPHER_MAX_BLKSIZE by padding with 0 */
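        /* e.g. (illustrative numbers only) a 20-byte sct_keysize with a
         * 16-byte CIPHER_MAX_BLKSIZE rounds up to a 32-byte keygen_size */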
        keygen_size = (ct->sct_keysize + CIPHER_MAX_BLKSIZE - 1) &
                      ~(CIPHER_MAX_BLKSIZE - 1);
        if (encrypt) {
                get_random_bytes(bsd->bsd_key, ct->sct_keysize);
                if (ct->sct_keysize < keygen_size)
                        memset(bsd->bsd_key + ct->sct_keysize, 0,
                               keygen_size - ct->sct_keysize);
        }

        rc = lgss_plain_encrypt(gctx, 0, keygen_size, bsd->bsd_key, key);
        if (rc) {
                CERROR("failed to compute secret key: %d\n", rc);
                goto out;
        }

        rc = ll_crypto_blkcipher_setkey(tfm, key, ct->sct_keysize);
        if (rc) {
                CERROR("Failed to set key for TFM %s: %d\n", ct->sct_name, rc);
                goto out;
        }

        /* a stream cipher doesn't need an IV */
        if (blksize > 1)
                ll_crypto_blkcipher_set_iv(tfm, zero_iv, blksize);

        for (i = 0; i < desc->bd_iov_count; i++) {
                sls.page = desc->bd_iov[i].kiov_page;
                sls.offset = desc->bd_iov[i].kiov_offset;
                sls.length = desc->bd_iov[i].kiov_len;

                if (unlikely(sls.length == 0)) {
                        CWARN("page %d with 0 length data?\n", i);
                        continue;
                }

                if (unlikely(sls.offset % blksize)) {
                        CERROR("page %d with unaligned offset %u, TFM %s\n",
                               i, sls.offset, ct->sct_name);
                        rc = -EINVAL;
                        goto out;
                }

                if (desc->bd_enc_pages) {
                        sld.page = desc->bd_enc_pages[i];
                        sld.offset = desc->bd_iov[i].kiov_offset;
                        sld.length = desc->bd_iov[i].kiov_len;

                        sldp = &sld;
                } else {
                        sldp = &sls;
                }

                if (likely(sls.length % blksize == 0)) {
                        /* data length is n * blocksize, do the normal tfm */
                        rc = do_cipher_tfm(tfm, encrypt, sldp, &sls);
                } else if (sls.length < blksize) {
                        /* odd data length smaller than one block: CTS
                         * doesn't work here because it would require
                         * transferring a modified IV to the peer, so use
                         * a "backup" stream cipher to do the tfm */
                        if (stfm == NULL) {
                                stfm = get_stream_cipher(key, ct->sct_keysize);
                                if (stfm == NULL) {
                                        rc = -ENOMEM;
                                        goto out;
                                }
                        }
                        rc = do_cipher_tfm(stfm, encrypt, sldp, &sls);
                } else {
                        /* odd data length but more than one block: do CTS */
                        rc = do_cts_tfm(tfm, encrypt, sldp, &sls);
                }

                if (unlikely(rc)) {
                        CERROR("error %s page %d/%d: %d\n",
                               encrypt ? "encrypt" : "decrypt",
                               i + 1, desc->bd_iov_count, rc);
                        goto out;
                }

                if (desc->bd_enc_pages)
                        desc->bd_iov[i].kiov_page = desc->bd_enc_pages[i];
        }

        if (encrypt)
                bsd->bsd_ciph_alg = alg;

out:
        if (stfm)
                ll_crypto_free_blkcipher(stfm);

        ll_crypto_free_blkcipher(tfm);
        return rc;
}

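/*
 * Client side: checksum the bulk data and, for a bulk write with a
 * non-null cipher, encrypt the pages before they go on the wire.  For
 * a bulk read only the desired cipher algorithm is recorded here; the
 * server performs the encryption.
 */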
int gss_cli_ctx_wrap_bulk(struct ptlrpc_cli_ctx *ctx,
                          struct ptlrpc_request *req,
                          struct ptlrpc_bulk_desc *desc)
{
        struct gss_cli_ctx              *gctx;
        struct lustre_msg               *msg;
        struct ptlrpc_bulk_sec_desc     *bsdr;
        int                              offset, rc;
        ENTRY;

        LASSERT(req->rq_pack_bulk);
        LASSERT(req->rq_bulk_read || req->rq_bulk_write);

        switch (RPC_FLVR_SVC(req->rq_flvr.sf_rpc)) {
        case SPTLRPC_SVC_NULL:
                LASSERT(req->rq_reqbuf->lm_bufcount >= 3);
                msg = req->rq_reqbuf;
                offset = msg->lm_bufcount - 1;
                break;
        case SPTLRPC_SVC_AUTH:
        case SPTLRPC_SVC_INTG:
                LASSERT(req->rq_reqbuf->lm_bufcount >= 4);
                msg = req->rq_reqbuf;
                offset = msg->lm_bufcount - 2;
                break;
        case SPTLRPC_SVC_PRIV:
                LASSERT(req->rq_clrbuf->lm_bufcount >= 2);
                msg = req->rq_clrbuf;
                offset = msg->lm_bufcount - 1;
                break;
        default:
                LBUG();
        }

        /* make checksum */
        rc = bulk_csum_cli_request(desc, req->rq_bulk_read,
                                   req->rq_flvr.sf_bulk_hash, msg, offset);
        if (rc) {
                CERROR("client bulk %s: failed to generate checksum: %d\n",
                       req->rq_bulk_read ? "read" : "write", rc);
                RETURN(rc);
        }

        if (req->rq_flvr.sf_bulk_ciph == BULK_CIPH_ALG_NULL)
                RETURN(0);

        /* the previous bulk_csum_cli_request() has verified bsdr is good */
        bsdr = lustre_msg_buf(msg, offset, 0);

        if (req->rq_bulk_read) {
                bsdr->bsd_ciph_alg = req->rq_flvr.sf_bulk_ciph;
                RETURN(0);
        }

        /* it turns out to be a bulk write */
        rc = sptlrpc_enc_pool_get_pages(desc);
        if (rc) {
                CERROR("bulk write: failed to allocate encryption pages\n");
                RETURN(rc);
        }

        gctx = container_of(ctx, struct gss_cli_ctx, gc_base);
        LASSERT(gctx->gc_mechctx);

        rc = do_bulk_privacy(gctx->gc_mechctx, desc, 1,
                             req->rq_flvr.sf_bulk_ciph, bsdr);
        if (rc)
                CERROR("bulk write: client failed to encrypt pages\n");

        RETURN(rc);
}

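/*
 * Client side: on a bulk read, decrypt the data received from the
 * server (if it was encrypted), then verify the bulk checksum carried
 * in the reply.
 */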
int gss_cli_ctx_unwrap_bulk(struct ptlrpc_cli_ctx *ctx,
                            struct ptlrpc_request *req,
                            struct ptlrpc_bulk_desc *desc)
{
        struct gss_cli_ctx              *gctx;
        struct lustre_msg               *rmsg, *vmsg;
        struct ptlrpc_bulk_sec_desc     *bsdr, *bsdv;
        int                              roff, voff, rc;
        ENTRY;

        LASSERT(req->rq_pack_bulk);
        LASSERT(req->rq_bulk_read || req->rq_bulk_write);

        switch (RPC_FLVR_SVC(req->rq_flvr.sf_rpc)) {
        case SPTLRPC_SVC_NULL:
                vmsg = req->rq_repdata;
                LASSERT(vmsg && vmsg->lm_bufcount >= 3);
                voff = vmsg->lm_bufcount - 1;

                rmsg = req->rq_reqbuf;
                LASSERT(rmsg && rmsg->lm_bufcount >= 3);
                roff = rmsg->lm_bufcount - 1; /* last segment */
                break;
        case SPTLRPC_SVC_AUTH:
        case SPTLRPC_SVC_INTG:
                vmsg = req->rq_repdata;
                LASSERT(vmsg && vmsg->lm_bufcount >= 4);
                voff = vmsg->lm_bufcount - 2;

                rmsg = req->rq_reqbuf;
                LASSERT(rmsg && rmsg->lm_bufcount >= 4);
                roff = rmsg->lm_bufcount - 2; /* second last segment */
                break;
        case SPTLRPC_SVC_PRIV:
                vmsg = req->rq_repdata;
                LASSERT(vmsg && vmsg->lm_bufcount >= 2);
                voff = vmsg->lm_bufcount - 1;

                rmsg = req->rq_clrbuf;
                LASSERT(rmsg && rmsg->lm_bufcount >= 2);
                roff = rmsg->lm_bufcount - 1; /* last segment */
                break;
        default:
                LBUG();
        }

        if (req->rq_bulk_read) {
                bsdr = lustre_msg_buf(rmsg, roff, 0);
                if (bsdr->bsd_ciph_alg == BULK_CIPH_ALG_NULL)
                        goto verify_csum;

                bsdv = lustre_msg_buf(vmsg, voff, 0);
                if (bsdr->bsd_ciph_alg != bsdv->bsd_ciph_alg) {
                        CERROR("bulk read: cipher algorithm mismatch: client "
                               "requested %s but server replied with %s; using "
                               "the server's algorithm for decryption\n",
                               sptlrpc_get_ciph_name(bsdr->bsd_ciph_alg),
                               sptlrpc_get_ciph_name(bsdv->bsd_ciph_alg));
                }

                gctx = container_of(ctx, struct gss_cli_ctx, gc_base);
                LASSERT(gctx->gc_mechctx);

                rc = do_bulk_privacy(gctx->gc_mechctx, desc, 0,
                                     bsdv->bsd_ciph_alg, bsdv);
                if (rc) {
                        CERROR("bulk read: client failed to decrypt data\n");
                        RETURN(rc);
                }
        }

verify_csum:
        rc = bulk_csum_cli_reply(desc, req->rq_bulk_read,
                                 rmsg, roff, vmsg, voff);
        RETURN(rc);
}

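/*
 * Server side of a bulk write: decrypt the client's data if a cipher
 * was negotiated, then verify the bulk checksum.
 */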
int gss_svc_unwrap_bulk(struct ptlrpc_request *req,
                        struct ptlrpc_bulk_desc *desc)
{
        struct gss_svc_reqctx        *grctx;
        int                           rc;
        ENTRY;

        LASSERT(req->rq_svc_ctx);
        LASSERT(req->rq_pack_bulk);
        LASSERT(req->rq_bulk_write);

        grctx = gss_svc_ctx2reqctx(req->rq_svc_ctx);

        LASSERT(grctx->src_reqbsd);
        LASSERT(grctx->src_repbsd);
        LASSERT(grctx->src_ctx);
        LASSERT(grctx->src_ctx->gsc_mechctx);

        /* decrypt bulk data if it's encrypted */
        if (grctx->src_reqbsd->bsd_ciph_alg != BULK_CIPH_ALG_NULL) {
                rc = do_bulk_privacy(grctx->src_ctx->gsc_mechctx, desc, 0,
                                     grctx->src_reqbsd->bsd_ciph_alg,
                                     grctx->src_reqbsd);
                if (rc) {
                        CERROR("bulk write: server failed to decrypt data\n");
                        RETURN(rc);
                }
        }

        /* verify bulk data checksum */
        rc = bulk_csum_svc(desc, req->rq_bulk_read,
                           grctx->src_reqbsd, grctx->src_reqbsd_size,
                           grctx->src_repbsd, grctx->src_repbsd_size);

        RETURN(rc);
}

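/*
 * Server side of a bulk read: checksum the outgoing data, then encrypt
 * it if a cipher was negotiated.
 */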
int gss_svc_wrap_bulk(struct ptlrpc_request *req,
                      struct ptlrpc_bulk_desc *desc)
{
        struct gss_svc_reqctx        *grctx;
        int                           rc;
        ENTRY;

        LASSERT(req->rq_svc_ctx);
        LASSERT(req->rq_pack_bulk);
        LASSERT(req->rq_bulk_read);

        grctx = gss_svc_ctx2reqctx(req->rq_svc_ctx);

        LASSERT(grctx->src_reqbsd);
        LASSERT(grctx->src_repbsd);
        LASSERT(grctx->src_ctx);
        LASSERT(grctx->src_ctx->gsc_mechctx);

        /* generate bulk data checksum */
        rc = bulk_csum_svc(desc, req->rq_bulk_read,
                           grctx->src_reqbsd, grctx->src_reqbsd_size,
                           grctx->src_repbsd, grctx->src_repbsd_size);
        if (rc)
                RETURN(rc);

        /* encrypt bulk data if required */
        if (grctx->src_reqbsd->bsd_ciph_alg != BULK_CIPH_ALG_NULL) {
                rc = do_bulk_privacy(grctx->src_ctx->gsc_mechctx, desc, 1,
                                     grctx->src_reqbsd->bsd_ciph_alg,
                                     grctx->src_repbsd);
                if (rc)
                        CERROR("bulk read: server failed to encrypt data: "
                               "rc %d\n", rc);
        }

        RETURN(rc);
}