/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * Copyright (C) 2008 Sun Microsystems, Inc.
 *   Author: Eric Mei <eric.mei@sun.com>
 * Copyright (C) 2006,2007 Cluster File Systems, Inc.
 *   Author: Eric Mei <ericm@clusterfs.com>
 *
 *   This file is part of Lustre, http://www.lustre.org.
 *
 *   Lustre is free software; you can redistribute it and/or
 *   modify it under the terms of version 2 of the GNU General Public
 *   License as published by the Free Software Foundation.
 *
 *   Lustre is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with Lustre; if not, write to the Free Software
 *   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#ifndef EXPORT_SYMTAB
# define EXPORT_SYMTAB
#endif
#define DEBUG_SUBSYSTEM S_SEC
#ifdef __KERNEL__
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/dcache.h>
#include <linux/fs.h>
#include <linux/random.h>
#include <linux/mutex.h>
#include <linux/crypto.h>
#else
#include <liblustre.h>
#endif

#include <obd.h>
#include <obd_class.h>
#include <obd_support.h>
#include <lustre/lustre_idl.h>
#include <lustre_net.h>
#include <lustre_import.h>
#include <lustre_sec.h>

#include "gss_err.h"
#include "gss_internal.h"
#include "gss_api.h"

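/* an all-zero IV, used to initialize or reset the CBC chaining state */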
static __u8 zero_iv[CIPHER_MAX_BLKSIZE] = { 0, };

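/* wrap a contiguous kernel buffer in a single-entry scatterlist */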
static void buf_to_sl(struct scatterlist *sl,
                      void *buf, unsigned int len)
{
        sl->page = virt_to_page(buf);
        sl->offset = offset_in_page(buf);
        sl->length = len;
}

/*
 * CTS CBC encryption:
 * 1. X(n-1) = P(n-1)
 * 2. E(n-1) = Encrypt(K, X(n-1))
 * 3. C(n)   = HEAD(E(n-1))
 * 4. P      = P(n) | 0
 * 5. D(n)   = E(n-1) XOR P
 * 6. C(n-1) = Encrypt(K, D(n))
 *
 * CTS encryption using standard CBC interface:
 * 1. pad the last partial block with 0.
 * 2. do CBC encryption.
 * 3. swap the last two ciphertext blocks.
 * 4. truncate to original plaintext size.
 */
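/*
 * Example: with a 16-byte block cipher and a 20-byte payload, blks = 1
 * and tail = 4.  The head CBC pass covers bytes 0-15, the padded tail
 * block holds bytes 16-19 plus 12 zero bytes, and the final swap plus
 * truncation yields a 20-byte ciphertext.
 */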
static int cbc_cts_encrypt(struct ll_crypto_cipher *tfm,
                           struct scatterlist      *sld,
                           struct scatterlist      *sls)
{
        struct scatterlist      slst, sldt;
        struct blkcipher_desc   desc;
        void                   *data;
        __u8                    sbuf[CIPHER_MAX_BLKSIZE];
        __u8                    dbuf[CIPHER_MAX_BLKSIZE];
        unsigned int            blksize, blks, tail;
        int                     rc;

        blksize = ll_crypto_blkcipher_blocksize(tfm);
        blks = sls->length / blksize;
        tail = sls->length % blksize;
        LASSERT(blks > 0 && tail > 0);

        /* pad tail block with 0, copy to sbuf */
        data = cfs_kmap(sls->page);
        memcpy(sbuf, data + sls->offset + blks * blksize, tail);
        memset(sbuf + tail, 0, blksize - tail);
        cfs_kunmap(sls->page);

        buf_to_sl(&slst, sbuf, blksize);
        buf_to_sl(&sldt, dbuf, blksize);
        desc.tfm   = tfm;
        desc.flags = 0;

        /* encrypt head */
        rc = ll_crypto_blkcipher_encrypt(&desc, sld, sls, sls->length - tail);
        if (unlikely(rc)) {
                CERROR("encrypt head (%u) data: %d\n", sls->length - tail, rc);
                return rc;
        }
        /* encrypt tail */
        rc = ll_crypto_blkcipher_encrypt(&desc, &sldt, &slst, blksize);
        if (unlikely(rc)) {
                CERROR("encrypt tail (%u) data: %d\n", slst.length, rc);
                return rc;
        }

        /* swap C(n) and C(n-1); if n == 1, then C(n-1) is the IV */
        data = cfs_kmap(sld->page);

        memcpy(data + sld->offset + blks * blksize,
               data + sld->offset + (blks - 1) * blksize, tail);
        memcpy(data + sld->offset + (blks - 1) * blksize, dbuf, blksize);
        cfs_kunmap(sld->page);

        return 0;
}

/*
 * CTS CBC decryption:
 * 1. D(n)   = Decrypt(K, C(n-1))
 * 2. C      = C(n) | 0
 * 3. X(n)   = D(n) XOR C
 * 4. P(n)   = HEAD(X(n))
 * 5. E(n-1) = C(n) | TAIL(X(n))
 * 6. X(n-1) = Decrypt(K, E(n-1))
 * 7. P(n-1) = X(n-1) XOR C(n-2)
 *
 * CTS decryption using standard CBC interface:
 * 1. D(n)   = Decrypt(K, C(n-1))
 * 2. C(n)   = C(n) | TAIL(D(n))
 * 3. swap the last two ciphertext blocks.
 * 4. do CBC decryption.
 * 5. truncate to original ciphertext size.
 */
static int cbc_cts_decrypt(struct ll_crypto_cipher *tfm,
                           struct scatterlist *sld,
                           struct scatterlist *sls)
{
        struct blkcipher_desc   desc;
        struct scatterlist      slst, sldt;
        void                   *data;
        __u8                    sbuf[CIPHER_MAX_BLKSIZE];
        __u8                    dbuf[CIPHER_MAX_BLKSIZE];
        unsigned int            blksize, blks, tail;
        int                     rc;

        blksize = ll_crypto_blkcipher_blocksize(tfm);
        blks = sls->length / blksize;
        tail = sls->length % blksize;
        LASSERT(blks > 0 && tail > 0);

        /* save current IV, and set IV to zero */
        ll_crypto_blkcipher_get_iv(tfm, sbuf, blksize);
        ll_crypto_blkcipher_set_iv(tfm, zero_iv, blksize);

        /* D(n) = Decrypt(K, C(n-1)) */
        slst = *sls;
        slst.offset += (blks - 1) * blksize;
        slst.length = blksize;

        buf_to_sl(&sldt, dbuf, blksize);
        desc.tfm   = tfm;
        desc.flags = 0;

        rc = ll_crypto_blkcipher_decrypt(&desc, &sldt, &slst, blksize);
        if (unlikely(rc)) {
                CERROR("decrypt C(n-1) (%u): %d\n", slst.length, rc);
                return rc;
        }

        /* restore IV */
        ll_crypto_blkcipher_set_iv(tfm, sbuf, blksize);

        data = cfs_kmap(sls->page);
        /* C(n) = C(n) | TAIL(D(n)) */
        memcpy(dbuf, data + sls->offset + blks * blksize, tail);
        /* swap C(n) and C(n-1) */
        memcpy(sbuf, data + sls->offset + (blks - 1) * blksize, blksize);
        memcpy(data + sls->offset + (blks - 1) * blksize, dbuf, blksize);
        cfs_kunmap(sls->page);

        /* do cbc decrypt */
        buf_to_sl(&slst, sbuf, blksize);
        buf_to_sl(&sldt, dbuf, blksize);

        /* decrypt head */
        rc = ll_crypto_blkcipher_decrypt(&desc, sld, sls, sls->length - tail);
        if (unlikely(rc)) {
                CERROR("decrypt head (%u) data: %d\n", sls->length - tail, rc);
                return rc;
        }
        /* decrypt tail */
        rc = ll_crypto_blkcipher_decrypt(&desc, &sldt, &slst, blksize);
        if (unlikely(rc)) {
                CERROR("decrypt tail (%u) data: %d\n", slst.length, rc);
                return rc;
        }

        /* truncate to original ciphertext size */
        data = cfs_kmap(sld->page);
        memcpy(data + sld->offset + blks * blksize, dbuf, tail);
        cfs_kunmap(sld->page);

        return 0;
}

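/* dispatch a CTS encrypt or decrypt over the given scatterlists */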
static inline int do_cts_tfm(struct ll_crypto_cipher *tfm,
                             int encrypt,
                             struct scatterlist *sld,
                             struct scatterlist *sls)
{
#ifndef HAVE_ASYNC_BLOCK_CIPHER
        LASSERT(tfm->crt_cipher.cit_mode == CRYPTO_TFM_MODE_CBC);
#endif

        if (encrypt)
                return cbc_cts_encrypt(tfm, sld, sls);
        else
                return cbc_cts_decrypt(tfm, sld, sls);
}

/*
 * normal encrypt/decrypt of data whose length is a multiple of the blocksize
 */
static inline int do_cipher_tfm(struct ll_crypto_cipher *tfm,
                                int encrypt,
                                struct scatterlist *sld,
                                struct scatterlist *sls)
{
        struct blkcipher_desc desc;

        desc.tfm   = tfm;
        desc.flags = 0;
        if (encrypt)
                return ll_crypto_blkcipher_encrypt(&desc, sld, sls, sls->length);
        else
                return ll_crypto_blkcipher_decrypt(&desc, sld, sls, sls->length);
}

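/* allocate and key a stream cipher tfm, used below for fragments shorter
 * than one cipher block, where CBC-CTS cannot be applied */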
static struct ll_crypto_cipher *get_stream_cipher(__u8 *key, unsigned int keylen)
{
        const struct sptlrpc_ciph_type *ct;
        struct ll_crypto_cipher        *tfm;
        int                             rc;

        /* using ARC4, the only stream cipher in Linux for now */
        ct = sptlrpc_get_ciph_type(BULK_CIPH_ALG_ARC4);
        LASSERT(ct);

        tfm = ll_crypto_alloc_blkcipher(ct->sct_tfm_name, 0, 0);
        if (tfm == NULL) {
                CERROR("Failed to allocate stream TFM %s\n", ct->sct_name);
                return NULL;
        }
        LASSERT(ll_crypto_blkcipher_blocksize(tfm));

        if (keylen > ct->sct_keysize)
                keylen = ct->sct_keysize;

        LASSERT(keylen >= crypto_tfm_alg_min_keysize(tfm));
        LASSERT(keylen <= crypto_tfm_alg_max_keysize(tfm));

        rc = ll_crypto_blkcipher_setkey(tfm, key, keylen);
        if (rc) {
                CERROR("Failed to set key for TFM %s: %d\n", ct->sct_name, rc);
                ll_crypto_free_blkcipher(tfm);
                return NULL;
        }

        return tfm;
}

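/*
 * encrypt (encrypt != 0) or decrypt all pages of a bulk descriptor with
 * the cipher named by alg.  the random key seed travels in bsd->bsd_key;
 * the actual cipher key is derived from it via lgss_plain_encrypt().
 */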
static int do_bulk_privacy(struct gss_ctx *gctx,
                           struct ptlrpc_bulk_desc *desc,
                           int encrypt, __u32 alg,
                           struct ptlrpc_bulk_sec_desc *bsd)
{
        const struct sptlrpc_ciph_type *ct = sptlrpc_get_ciph_type(alg);
        struct ll_crypto_cipher  *tfm;
        struct ll_crypto_cipher  *stfm = NULL; /* backup stream cipher */
        struct scatterlist        sls, sld, *sldp;
        unsigned int              blksize, keygen_size;
        int                       i, rc;
        __u8                      key[CIPHER_MAX_KEYSIZE];

        LASSERT(ct);

        if (encrypt)
                bsd->bsd_ciph_alg = BULK_CIPH_ALG_NULL;

        if (alg == BULK_CIPH_ALG_NULL)
                return 0;

        if (desc->bd_iov_count <= 0) {
                if (encrypt)
                        bsd->bsd_ciph_alg = alg;
                return 0;
        }

        tfm = ll_crypto_alloc_blkcipher(ct->sct_tfm_name, 0, 0);
        if (tfm == NULL) {
                CERROR("Failed to allocate TFM %s\n", ct->sct_name);
                return -ENOMEM;
        }
        blksize = ll_crypto_blkcipher_blocksize(tfm);

        LASSERT(crypto_tfm_alg_max_keysize(tfm) >= ct->sct_keysize);
        LASSERT(crypto_tfm_alg_min_keysize(tfm) <= ct->sct_keysize);
        LASSERT(ct->sct_ivsize == 0 ||
                ll_crypto_blkcipher_ivsize(tfm) == ct->sct_ivsize);
        LASSERT(ct->sct_keysize <= CIPHER_MAX_KEYSIZE);
        LASSERT(blksize <= CIPHER_MAX_BLKSIZE);

        /* generate a random key seed and compute the secret key from it.
         * note that depending on the algorithm lgss_plain_encrypt() uses,
         * the key size may need to be a multiple of its blocksize, so for
         * simplicity we round it up to n * CIPHER_MAX_BLKSIZE, padding
         * with zeros */
        keygen_size = (ct->sct_keysize + CIPHER_MAX_BLKSIZE - 1) &
                      ~(CIPHER_MAX_BLKSIZE - 1);
        if (encrypt) {
                get_random_bytes(bsd->bsd_key, ct->sct_keysize);
                if (ct->sct_keysize < keygen_size)
                        memset(bsd->bsd_key + ct->sct_keysize, 0,
                               keygen_size - ct->sct_keysize);
        }

        rc = lgss_plain_encrypt(gctx, 0, keygen_size, bsd->bsd_key, key);
        if (rc) {
                CERROR("failed to compute secret key: %d\n", rc);
                goto out;
        }

        rc = ll_crypto_blkcipher_setkey(tfm, key, ct->sct_keysize);
        if (rc) {
                CERROR("Failed to set key for TFM %s: %d\n", ct->sct_name, rc);
                goto out;
        }

        /* stream cipher doesn't need iv */
        if (blksize > 1)
                ll_crypto_blkcipher_set_iv(tfm, zero_iv, blksize);

        for (i = 0; i < desc->bd_iov_count; i++) {
                sls.page = desc->bd_iov[i].kiov_page;
                sls.offset = desc->bd_iov[i].kiov_offset;
                sls.length = desc->bd_iov[i].kiov_len;

                if (unlikely(sls.length == 0)) {
                        CWARN("page %d with 0 length data?\n", i);
                        continue;
                }

                if (unlikely(sls.offset % blksize)) {
                        CERROR("page %d with odd offset %u, TFM %s\n",
                               i, sls.offset, ct->sct_name);
                        rc = -EINVAL;
                        goto out;
                }

                if (desc->bd_enc_pages) {
                        sld.page = desc->bd_enc_pages[i];
                        sld.offset = desc->bd_iov[i].kiov_offset;
                        sld.length = desc->bd_iov[i].kiov_len;

                        sldp = &sld;
                } else {
                        sldp = &sls;
                }

                if (likely(sls.length % blksize == 0)) {
                        /* data length is n * blocksize, do the normal tfm */
                        rc = do_cipher_tfm(tfm, encrypt, sldp, &sls);
                } else if (sls.length < blksize) {
                        /* odd data length smaller than 1 block: CTS doesn't
                         * work in this case because it requires transferring
                         * a modified IV to the peer, so use a "backup" stream
                         * cipher to do the tfm */
                        if (stfm == NULL) {
                                stfm = get_stream_cipher(key, ct->sct_keysize);
                                if (stfm == NULL) {
                                        rc = -ENOMEM;
                                        goto out;
                                }
                        }
                        rc = do_cipher_tfm(stfm, encrypt, sldp, &sls);
                } else {
                        /* odd data length but > 1 block, do CTS tfm */
                        rc = do_cts_tfm(tfm, encrypt, sldp, &sls);
                }

                if (unlikely(rc)) {
                        CERROR("error %s page %d/%d: %d\n",
                               encrypt ? "encrypt" : "decrypt",
                               i + 1, desc->bd_iov_count, rc);
                        goto out;
                }

                if (desc->bd_enc_pages)
                        desc->bd_iov[i].kiov_page = desc->bd_enc_pages[i];
        }

        if (encrypt)
                bsd->bsd_ciph_alg = alg;

out:
        if (stfm)
                ll_crypto_free_blkcipher(stfm);

        ll_crypto_free_blkcipher(tfm);
        return rc;
}

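/*
 * client side: checksum the bulk pages and, for a bulk write with a real
 * cipher configured, encrypt them into the pre-allocated encryption pages.
 */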
int gss_cli_ctx_wrap_bulk(struct ptlrpc_cli_ctx *ctx,
                          struct ptlrpc_request *req,
                          struct ptlrpc_bulk_desc *desc)
{
        struct gss_cli_ctx              *gctx;
        struct lustre_msg               *msg;
        struct ptlrpc_bulk_sec_desc     *bsdr;
        int                              offset, rc;
        ENTRY;

        LASSERT(req->rq_pack_bulk);
        LASSERT(req->rq_bulk_read || req->rq_bulk_write);

        switch (RPC_FLVR_SVC(req->rq_flvr.sf_rpc)) {
        case SPTLRPC_SVC_NULL:
                LASSERT(req->rq_reqbuf->lm_bufcount >= 3);
                msg = req->rq_reqbuf;
                offset = msg->lm_bufcount - 1;
                break;
        case SPTLRPC_SVC_AUTH:
        case SPTLRPC_SVC_INTG:
                LASSERT(req->rq_reqbuf->lm_bufcount >= 4);
                msg = req->rq_reqbuf;
                offset = msg->lm_bufcount - 2;
                break;
        case SPTLRPC_SVC_PRIV:
                LASSERT(req->rq_clrbuf->lm_bufcount >= 2);
                msg = req->rq_clrbuf;
                offset = msg->lm_bufcount - 1;
                break;
        default:
                LBUG();
        }

        /* make checksum */
        rc = bulk_csum_cli_request(desc, req->rq_bulk_read,
                                   req->rq_flvr.sf_bulk_hash, msg, offset);
        if (rc) {
                CERROR("client bulk %s: failed to generate checksum: %d\n",
                       req->rq_bulk_read ? "read" : "write", rc);
                RETURN(rc);
        }

        if (req->rq_flvr.sf_bulk_ciph == BULK_CIPH_ALG_NULL)
                RETURN(0);

        /* previous bulk_csum_cli_request() has verified bsdr is good */
        bsdr = lustre_msg_buf(msg, offset, 0);

        if (req->rq_bulk_read) {
                bsdr->bsd_ciph_alg = req->rq_flvr.sf_bulk_ciph;
                RETURN(0);
        }

        /* it turned out to be a bulk write */
        rc = sptlrpc_enc_pool_get_pages(desc);
        if (rc) {
                CERROR("bulk write: failed to allocate encryption pages\n");
                RETURN(rc);
        }

        gctx = container_of(ctx, struct gss_cli_ctx, gc_base);
        LASSERT(gctx->gc_mechctx);

        rc = do_bulk_privacy(gctx->gc_mechctx, desc, 1,
                             req->rq_flvr.sf_bulk_ciph, bsdr);
        if (rc)
                CERROR("bulk write: client failed to encrypt pages\n");

        RETURN(rc);
}

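/*
 * client side: on reply, decrypt bulk read data if it was encrypted, then
 * verify the bulk checksum against the request and reply descriptors.
 */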
int gss_cli_ctx_unwrap_bulk(struct ptlrpc_cli_ctx *ctx,
                            struct ptlrpc_request *req,
                            struct ptlrpc_bulk_desc *desc)
{
        struct gss_cli_ctx              *gctx;
        struct lustre_msg               *rmsg, *vmsg;
        struct ptlrpc_bulk_sec_desc     *bsdr, *bsdv;
        int                              roff, voff, rc;
        ENTRY;

        LASSERT(req->rq_pack_bulk);
        LASSERT(req->rq_bulk_read || req->rq_bulk_write);

        switch (RPC_FLVR_SVC(req->rq_flvr.sf_rpc)) {
        case SPTLRPC_SVC_NULL:
                vmsg = req->rq_repbuf;
                LASSERT(vmsg && vmsg->lm_bufcount >= 3);
                voff = vmsg->lm_bufcount - 1;

                rmsg = req->rq_reqbuf;
                LASSERT(rmsg && rmsg->lm_bufcount >= 3);
                roff = rmsg->lm_bufcount - 1; /* last segment */
                break;
        case SPTLRPC_SVC_AUTH:
        case SPTLRPC_SVC_INTG:
                vmsg = req->rq_repbuf;
                LASSERT(vmsg && vmsg->lm_bufcount >= 4);
                voff = vmsg->lm_bufcount - 2;

                rmsg = req->rq_reqbuf;
                LASSERT(rmsg && rmsg->lm_bufcount >= 4);
                roff = rmsg->lm_bufcount - 2; /* second last segment */
                break;
        case SPTLRPC_SVC_PRIV:
                vmsg = req->rq_repbuf;
                LASSERT(vmsg && vmsg->lm_bufcount >= 2);
                voff = vmsg->lm_bufcount - 1;

                rmsg = req->rq_clrbuf;
                LASSERT(rmsg && rmsg->lm_bufcount >= 2);
                roff = rmsg->lm_bufcount - 1; /* last segment */
                break;
        default:
                LBUG();
        }

        if (req->rq_bulk_read) {
                bsdr = lustre_msg_buf(rmsg, roff, 0);
                if (bsdr->bsd_ciph_alg == BULK_CIPH_ALG_NULL)
                        goto verify_csum;

                bsdv = lustre_msg_buf(vmsg, voff, 0);
                if (bsdr->bsd_ciph_alg != bsdv->bsd_ciph_alg) {
                        CERROR("bulk read: cipher algorithm mismatch: client "
                               "requested %s but server replied with %s; "
                               "using the server's choice for decryption\n",
                               sptlrpc_get_ciph_name(bsdr->bsd_ciph_alg),
                               sptlrpc_get_ciph_name(bsdv->bsd_ciph_alg));
                }

                gctx = container_of(ctx, struct gss_cli_ctx, gc_base);
                LASSERT(gctx->gc_mechctx);

                rc = do_bulk_privacy(gctx->gc_mechctx, desc, 0,
                                     bsdv->bsd_ciph_alg, bsdv);
                if (rc) {
                        CERROR("bulk read: client failed to decrypt data\n");
                        RETURN(rc);
                }
        }

verify_csum:
        rc = bulk_csum_cli_reply(desc, req->rq_bulk_read,
                                 rmsg, roff, vmsg, voff);
        RETURN(rc);
}

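/*
 * server side: decrypt incoming bulk write data if it was encrypted, then
 * verify the bulk checksum.
 */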
int gss_svc_unwrap_bulk(struct ptlrpc_request *req,
                        struct ptlrpc_bulk_desc *desc)
{
        struct gss_svc_reqctx        *grctx;
        int                           rc;
        ENTRY;

        LASSERT(req->rq_svc_ctx);
        LASSERT(req->rq_pack_bulk);
        LASSERT(req->rq_bulk_write);

        grctx = gss_svc_ctx2reqctx(req->rq_svc_ctx);

        LASSERT(grctx->src_reqbsd);
        LASSERT(grctx->src_repbsd);
        LASSERT(grctx->src_ctx);
        LASSERT(grctx->src_ctx->gsc_mechctx);

        /* decrypt bulk data if it's encrypted */
        if (grctx->src_reqbsd->bsd_ciph_alg != BULK_CIPH_ALG_NULL) {
                rc = do_bulk_privacy(grctx->src_ctx->gsc_mechctx, desc, 0,
                                     grctx->src_reqbsd->bsd_ciph_alg,
                                     grctx->src_reqbsd);
                if (rc) {
                        CERROR("bulk write: server failed to decrypt data\n");
                        RETURN(rc);
                }
        }

        /* verify bulk data checksum */
        rc = bulk_csum_svc(desc, req->rq_bulk_read,
                           grctx->src_reqbsd, grctx->src_reqbsd_size,
                           grctx->src_repbsd, grctx->src_repbsd_size);

        RETURN(rc);
}

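/*
 * server side: checksum outgoing bulk read data, then encrypt it if the
 * client requested a real cipher.
 */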
int gss_svc_wrap_bulk(struct ptlrpc_request *req,
                      struct ptlrpc_bulk_desc *desc)
{
        struct gss_svc_reqctx        *grctx;
        int                           rc;
        ENTRY;

        LASSERT(req->rq_svc_ctx);
        LASSERT(req->rq_pack_bulk);
        LASSERT(req->rq_bulk_read);

        grctx = gss_svc_ctx2reqctx(req->rq_svc_ctx);

        LASSERT(grctx->src_reqbsd);
        LASSERT(grctx->src_repbsd);
        LASSERT(grctx->src_ctx);
        LASSERT(grctx->src_ctx->gsc_mechctx);

        /* generate bulk data checksum */
        rc = bulk_csum_svc(desc, req->rq_bulk_read,
                           grctx->src_reqbsd, grctx->src_reqbsd_size,
                           grctx->src_repbsd, grctx->src_repbsd_size);
        if (rc)
                RETURN(rc);

        /* encrypt bulk data if required */
        if (grctx->src_reqbsd->bsd_ciph_alg != BULK_CIPH_ALG_NULL) {
                rc = do_bulk_privacy(grctx->src_ctx->gsc_mechctx, desc, 1,
                                     grctx->src_reqbsd->bsd_ciph_alg,
                                     grctx->src_repbsd);
                if (rc)
                        CERROR("bulk read: server failed to encrypt data: "
                               "rc %d\n", rc);
        }

        RETURN(rc);
}