Whamcloud - gitweb
branch: HEAD
[fs/lustre-release.git] / lustre / ptlrpc / gss / gss_bulk.c
1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2  * vim:expandtab:shiftwidth=8:tabstop=8:
3  *
4  * Copyright (C) 2008 Sun Microsystems. Inc.
5  *   Author: Eric Mei <eric.mei@sun.com>
6  * Copyright (C) 2006,2007 Cluster File Systems, Inc.
7  *   Author: Eric Mei <ericm@clusterfs.com>
8  *
9  *   This file is part of Lustre, http://www.lustre.org.
10  *
11  *   Lustre is free software; you can redistribute it and/or
12  *   modify it under the terms of version 2 of the GNU General Public
13  *   License as published by the Free Software Foundation.
14  *
15  *   Lustre is distributed in the hope that it will be useful,
16  *   but WITHOUT ANY WARRANTY; without even the implied warranty of
17  *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
18  *   GNU General Public License for more details.
19  *
20  *   You should have received a copy of the GNU General Public License
21  *   along with Lustre; if not, write to the Free Software
22  *   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
23  */
24
25 #ifndef EXPORT_SYMTAB
26 # define EXPORT_SYMTAB
27 #endif
28 #define DEBUG_SUBSYSTEM S_SEC
29 #ifdef __KERNEL__
30 #include <linux/init.h>
31 #include <linux/module.h>
32 #include <linux/slab.h>
33 #include <linux/dcache.h>
34 #include <linux/fs.h>
35 #include <linux/random.h>
36 #include <linux/mutex.h>
37 #include <linux/crypto.h>
38 #else
39 #include <liblustre.h>
40 #endif
41
42 #include <obd.h>
43 #include <obd_class.h>
44 #include <obd_support.h>
45 #include <lustre/lustre_idl.h>
46 #include <lustre_net.h>
47 #include <lustre_import.h>
48 #include <lustre_sec.h>
49
50 #include "gss_err.h"
51 #include "gss_internal.h"
52 #include "gss_api.h"
53
/* all-zero IV, used to (re)initialize a tfm's CBC chaining state */
static __u8 zero_iv[CIPHER_MAX_BLKSIZE] = { 0, };
55
56 static void buf_to_sl(struct scatterlist *sl,
57                       void *buf, unsigned int len)
58 {
59         sl->page = virt_to_page(buf);
60         sl->offset = offset_in_page(buf);
61         sl->length = len;
62 }
63
/*
 * CTS (ciphertext stealing) CBC encryption:
 * 1. X(n-1) = P(n-1)
 * 2. E(n-1) = Encrypt(K, X(n-1))
 * 3. C(n)   = HEAD(E(n-1))
 * 4. P      = P(n) | 0
 * 5. D(n)   = E(n-1) XOR P
 * 6. C(n-1) = Encrypt(K, D(n))
 *
 * CTS encryption using standard CBC interface:
 * 1. pad the last partial block with 0.
 * 2. do CBC encryption.
 * 3. swap the last two ciphertext blocks.
 * 4. truncate to original plaintext size.
 *
 * @sld/@sls each describe one contiguous region; the source length must
 * be blks * blksize + tail with blks >= 1 and 0 < tail < blksize
 * (block-aligned and sub-blocksize data take other paths in the caller).
 * Returns 0 on success, or the error from crypto_cipher_encrypt().
 */
static int cbc_cts_encrypt(struct crypto_tfm *tfm,
                           struct scatterlist *sld,
                           struct scatterlist *sls)
{
        struct scatterlist      slst, sldt;
        void                   *data;
        __u8                    sbuf[CIPHER_MAX_BLKSIZE]; /* zero-padded P(n) */
        __u8                    dbuf[CIPHER_MAX_BLKSIZE]; /* ciphertext of P(n) */
        unsigned int            blksize, blks, tail;
        int                     rc;

        blksize = crypto_tfm_alg_blocksize(tfm);
        blks = sls->length / blksize;
        tail = sls->length % blksize;
        LASSERT(blks > 0 && tail > 0);

        /* pad tail block with 0, copy to sbuf */
        data = cfs_kmap(sls->page);
        memcpy(sbuf, data + sls->offset + blks * blksize, tail);
        memset(sbuf + tail, 0, blksize - tail);
        cfs_kunmap(sls->page);

        buf_to_sl(&slst, sbuf, blksize);
        buf_to_sl(&sldt, dbuf, blksize);

        /* encrypt head (the blks whole blocks); the tail encryption below
         * relies on the tfm's CBC chaining state carrying over from this
         * call, so the two encrypt calls must stay in this order */
        rc = crypto_cipher_encrypt(tfm, sld, sls, sls->length - tail);
        if (unlikely(rc)) {
                CERROR("encrypt head (%u) data: %d\n", sls->length - tail, rc);
                return rc;
        }
        /* encrypt tail (the padded block in sbuf, result into dbuf) */
        rc = crypto_cipher_encrypt(tfm, &sldt, &slst, blksize);
        if (unlikely(rc)) {
                CERROR("encrypt tail (%u) data: %d\n", slst.length, rc);
                return rc;
        }

        /* swab C(n) and C(n-1), if n == 1, then C(n-1) is the IV */
        data = cfs_kmap(sld->page);

        /* first tail bytes of E(n-1) become C(n) ... */
        memcpy(data + sld->offset + blks * blksize,
               data + sld->offset + (blks - 1) * blksize, tail);
        /* ... and the encrypted padded tail becomes C(n-1) */
        memcpy(data + sld->offset + (blks - 1) * blksize, dbuf, blksize);
        cfs_kunmap(sld->page);

        return 0;
}
127
/*
 * CTS (ciphertext stealing) CBC decryption:
 * 1. D(n)   = Decrypt(K, C(n-1))
 * 2. C      = C(n) | 0
 * 3. X(n)   = D(n) XOR C
 * 4. P(n)   = HEAD(X(n))
 * 5. E(n-1) = C(n) | TAIL(X(n))
 * 6. X(n-1) = Decrypt(K, E(n-1))
 * 7. P(n-1) = X(n-1) XOR C(n-2)
 *
 * CTS decryption using standard CBC interface:
 * 1. D(n)   = Decrypt(K, C(n-1))
 * 2. C(n)   = C(n) | TAIL(D(n))
 * 3. swap the last two ciphertext blocks.
 * 4. do CBC decryption.
 * 5. truncate to original ciphertext size.
 *
 * Inverse of cbc_cts_encrypt(); same constraints on @sld/@sls (one
 * whole block minimum plus a non-empty partial tail).  Returns 0 on
 * success, or the error from crypto_cipher_decrypt().
 */
static int cbc_cts_decrypt(struct crypto_tfm *tfm,
                           struct scatterlist *sld,
                           struct scatterlist *sls)
{
        struct scatterlist      slst, sldt;
        void                   *data;
        __u8                    sbuf[CIPHER_MAX_BLKSIZE];
        __u8                    dbuf[CIPHER_MAX_BLKSIZE];
        unsigned int            blksize, blks, tail;
        int                     rc;

        blksize = crypto_tfm_alg_blocksize(tfm);
        blks = sls->length / blksize;
        tail = sls->length % blksize;
        LASSERT(blks > 0 && tail > 0);

        /* save current IV, and set IV to zero: step 1 must decrypt
         * C(n-1) standalone, without chaining against a previous block */
        crypto_cipher_get_iv(tfm, sbuf, blksize);
        crypto_cipher_set_iv(tfm, zero_iv, blksize);

        /* D(n) = Decrypt(K, C(n-1)) */
        slst = *sls;
        slst.offset += (blks - 1) * blksize;
        slst.length = blksize;

        buf_to_sl(&sldt, dbuf, blksize);

        rc = crypto_cipher_decrypt(tfm, &sldt, &slst, blksize);
        if (unlikely(rc)) {
                CERROR("decrypt C(n-1) (%u): %d\n", slst.length, rc);
                return rc;
        }

        /* restore IV so the head CBC decryption chains correctly */
        crypto_cipher_set_iv(tfm, sbuf, blksize);

        data = cfs_kmap(sls->page);
        /* C(n) = C(n) | TAIL(D(n)): dbuf already holds D(n), overwrite
         * its first tail bytes with C(n) from the source */
        memcpy(dbuf, data + sls->offset + blks * blksize, tail);
        /* swab C(n) and C(n-1): save C(n-1) into sbuf, put the rebuilt
         * block into its place (note: sbuf's saved IV is consumed above
         * and may be reused here) */
        memcpy(sbuf, data + sls->offset + (blks - 1) * blksize, blksize);
        memcpy(data + sls->offset + (blks - 1) * blksize, dbuf, blksize);
        cfs_kunmap(sls->page);

        /* do cbc decrypt */
        buf_to_sl(&slst, sbuf, blksize);
        buf_to_sl(&sldt, dbuf, blksize);

        /* decrypt head (the blks blocks, last one being the rebuilt
         * E(n-1)); tail decryption below relies on the chaining state */
        rc = crypto_cipher_decrypt(tfm, sld, sls, sls->length - tail);
        if (unlikely(rc)) {
                CERROR("decrypt head (%u) data: %d\n", sls->length - tail, rc);
                return rc;
        }
        /* decrypt tail (the saved original C(n-1) in sbuf) */
        rc = crypto_cipher_decrypt(tfm, &sldt, &slst, blksize);
        if (unlikely(rc)) {
                CERROR("decrypt tail (%u) data: %d\n", slst.length, rc);
                return rc;
        }

        /* truncate to original ciphertext size */
        data = cfs_kmap(sld->page);
        memcpy(data + sld->offset + blks * blksize, dbuf, tail);
        cfs_kunmap(sld->page);

        return 0;
}
213
214 static inline int do_cts_tfm(struct crypto_tfm *tfm,
215                              int encrypt,
216                              struct scatterlist *sld,
217                              struct scatterlist *sls)
218 {
219         LASSERT(tfm->crt_cipher.cit_mode == CRYPTO_TFM_MODE_CBC);
220
221         if (encrypt)
222                 return cbc_cts_encrypt(tfm, sld, sls);
223         else
224                 return cbc_cts_decrypt(tfm, sld, sls);
225 }
226
227 /*
228  * normal encrypt/decrypt of data of even blocksize
229  */
230 static inline int do_cipher_tfm(struct crypto_tfm *tfm,
231                                 int encrypt,
232                                 struct scatterlist *sld,
233                                 struct scatterlist *sls)
234 {
235         if (encrypt)
236                 return crypto_cipher_encrypt(tfm, sld, sls, sls->length);
237         else
238                 return crypto_cipher_decrypt(tfm, sld, sls, sls->length);
239 }
240
241 static struct crypto_tfm *get_stream_cipher(__u8 *key, unsigned int keylen)
242 {
243         const struct sptlrpc_ciph_type *ct;
244         struct crypto_tfm              *tfm;
245         int                             rc;
246
247         /* using ARC4, the only stream cipher in linux for now */
248         ct = sptlrpc_get_ciph_type(BULK_CIPH_ALG_ARC4);
249         LASSERT(ct);
250
251         tfm = crypto_alloc_tfm(ct->sct_tfm_name, ct->sct_tfm_flags);
252         if (tfm == NULL) {
253                 CERROR("Failed to allocate stream TFM %s\n", ct->sct_name);
254                 return NULL;
255         }
256         LASSERT(crypto_tfm_alg_blocksize(tfm));
257
258         if (keylen > ct->sct_keysize)
259                 keylen = ct->sct_keysize;
260
261         LASSERT(keylen >= crypto_tfm_alg_min_keysize(tfm));
262         LASSERT(keylen <= crypto_tfm_alg_max_keysize(tfm));
263
264         rc = crypto_cipher_setkey(tfm, key, keylen);
265         if (rc) {
266                 CERROR("Failed to set key for TFM %s: %d\n", ct->sct_name, rc);
267                 crypto_free_tfm(tfm);
268                 return NULL;
269         }
270
271         return tfm;
272 }
273
274 static int do_bulk_privacy(struct gss_ctx *gctx,
275                            struct ptlrpc_bulk_desc *desc,
276                            int encrypt, __u32 alg,
277                            struct ptlrpc_bulk_sec_desc *bsd)
278 {
279         const struct sptlrpc_ciph_type *ct = sptlrpc_get_ciph_type(alg);
280         struct crypto_tfm  *tfm;
281         struct crypto_tfm  *stfm = NULL; /* backup stream cipher */
282         struct scatterlist  sls, sld, *sldp;
283         unsigned int        blksize, keygen_size;
284         int                 i, rc;
285         __u8                key[CIPHER_MAX_KEYSIZE];
286
287         LASSERT(ct);
288
289         if (encrypt)
290                 bsd->bsd_ciph_alg = BULK_CIPH_ALG_NULL;
291
292         if (alg == BULK_CIPH_ALG_NULL)
293                 return 0;
294
295         if (desc->bd_iov_count <= 0) {
296                 if (encrypt)
297                         bsd->bsd_ciph_alg = alg;
298                 return 0;
299         }
300
301         tfm = crypto_alloc_tfm(ct->sct_tfm_name, ct->sct_tfm_flags);
302         if (tfm == NULL) {
303                 CERROR("Failed to allocate TFM %s\n", ct->sct_name);
304                 return -ENOMEM;
305         }
306         blksize = crypto_tfm_alg_blocksize(tfm);
307
308         LASSERT(crypto_tfm_alg_max_keysize(tfm) >= ct->sct_keysize);
309         LASSERT(crypto_tfm_alg_min_keysize(tfm) <= ct->sct_keysize);
310         LASSERT(ct->sct_ivsize == 0 ||
311                 crypto_tfm_alg_ivsize(tfm) == ct->sct_ivsize);
312         LASSERT(ct->sct_keysize <= CIPHER_MAX_KEYSIZE);
313         LASSERT(blksize <= CIPHER_MAX_BLKSIZE);
314
315         /* generate ramdom key seed and compute the secret key based on it.
316          * note determined by algorithm which lgss_plain_encrypt use, it
317          * might require the key size be its (blocksize * n). so here for
318          * simplicity, we force it's be n * MAX_BLKSIZE by padding 0 */
319         keygen_size = (ct->sct_keysize + CIPHER_MAX_BLKSIZE - 1) &
320                       ~(CIPHER_MAX_BLKSIZE - 1);
321         if (encrypt) {
322                 get_random_bytes(bsd->bsd_key, ct->sct_keysize);
323                 if (ct->sct_keysize < keygen_size)
324                         memset(bsd->bsd_key + ct->sct_keysize, 0,
325                                keygen_size - ct->sct_keysize);
326         }
327
328         rc = lgss_plain_encrypt(gctx, 0, keygen_size, bsd->bsd_key, key);
329         if (rc) {
330                 CERROR("failed to compute secret key: %d\n", rc);
331                 goto out;
332         }
333
334         rc = crypto_cipher_setkey(tfm, key, ct->sct_keysize);
335         if (rc) {
336                 CERROR("Failed to set key for TFM %s: %d\n", ct->sct_name, rc);
337                 goto out;
338         }
339
340         /* stream cipher doesn't need iv */
341         if (blksize > 1)
342                 crypto_cipher_set_iv(tfm, zero_iv, blksize);
343
344         for (i = 0; i < desc->bd_iov_count; i++) {
345                 sls.page = desc->bd_iov[i].kiov_page;
346                 sls.offset = desc->bd_iov[i].kiov_offset;
347                 sls.length = desc->bd_iov[i].kiov_len;
348
349                 if (unlikely(sls.length == 0)) {
350                         CWARN("page %d with 0 length data?\n", i);
351                         continue;
352                 }
353
354                 if (unlikely(sls.offset % blksize)) {
355                         CERROR("page %d with odd offset %u, TFM %s\n",
356                                i, sls.offset, ct->sct_name);
357                         rc = -EINVAL;
358                         goto out;
359                 }
360
361                 if (desc->bd_enc_pages) {
362                         sld.page = desc->bd_enc_pages[i];
363                         sld.offset = desc->bd_iov[i].kiov_offset;
364                         sld.length = desc->bd_iov[i].kiov_len;
365
366                         sldp = &sld;
367                 } else {
368                         sldp = &sls;
369                 }
370
371                 if (likely(sls.length % blksize == 0)) {
372                         /* data length is n * blocksize, do the normal tfm */
373                         rc = do_cipher_tfm(tfm, encrypt, sldp, &sls);
374                 } else if (sls.length < blksize) {
375                         /* odd data length, and smaller than 1 block, CTS
376                          * doesn't work in this case because it requires
377                          * transfer a modified IV to peer. here we use a
378                          * "backup" stream cipher to do the tfm */
379                         if (stfm == NULL) {
380                                 stfm = get_stream_cipher(key, ct->sct_keysize);
381                                 if (tfm == NULL) {
382                                         rc = -ENOMEM;
383                                         goto out;
384                                 }
385                         }
386                         rc = do_cipher_tfm(stfm, encrypt, sldp, &sls);
387                 } else {
388                         /* odd data length but > 1 block, do CTS tfm */
389                         rc = do_cts_tfm(tfm, encrypt, sldp, &sls);
390                 }
391
392                 if (unlikely(rc)) {
393                         CERROR("error %s page %d/%d: %d\n",
394                                encrypt ? "encrypt" : "decrypt",
395                                i + 1, desc->bd_iov_count, rc);
396                         goto out;
397                 }
398
399                 if (desc->bd_enc_pages)
400                         desc->bd_iov[i].kiov_page = desc->bd_enc_pages[i];
401         }
402
403         if (encrypt)
404                 bsd->bsd_ciph_alg = alg;
405
406 out:
407         if (stfm)
408                 crypto_free_tfm(stfm);
409
410         crypto_free_tfm(tfm);
411         return rc;
412 }
413
/*
 * Client side: secure an outgoing bulk transfer.
 *
 * Locates the bulk security descriptor segment inside the request buffer
 * (its position depends on the rpc service flavor), generates the bulk
 * checksum, then — when a non-null cipher is configured — either records
 * the requested cipher for a bulk read, or allocates encryption pages
 * and encrypts the data for a bulk write.
 *
 * Returns 0 on success, negative errno on failure.
 */
int gss_cli_ctx_wrap_bulk(struct ptlrpc_cli_ctx *ctx,
                          struct ptlrpc_request *req,
                          struct ptlrpc_bulk_desc *desc)
{
        struct gss_cli_ctx              *gctx;
        struct lustre_msg               *msg;
        struct ptlrpc_bulk_sec_desc     *bsdr;
        int                              offset, rc;
        ENTRY;

        LASSERT(req->rq_pack_bulk);
        LASSERT(req->rq_bulk_read || req->rq_bulk_write);

        switch (RPC_FLVR_SVC(req->rq_flvr.sf_rpc)) {
        case SPTLRPC_SVC_NULL:
                LASSERT(req->rq_reqbuf->lm_bufcount >= 3);
                msg = req->rq_reqbuf;
                /* bulk sec desc is the last segment */
                offset = msg->lm_bufcount - 1;
                break;
        case SPTLRPC_SVC_AUTH:
        case SPTLRPC_SVC_INTG:
                LASSERT(req->rq_reqbuf->lm_bufcount >= 4);
                msg = req->rq_reqbuf;
                /* bulk sec desc is the second last segment */
                offset = msg->lm_bufcount - 2;
                break;
        case SPTLRPC_SVC_PRIV:
                LASSERT(req->rq_clrbuf->lm_bufcount >= 2);
                /* for privacy mode it lives in the cleartext buffer */
                msg = req->rq_clrbuf;
                offset = msg->lm_bufcount - 1;
                break;
        default:
                LBUG();
        }

        /* make checksum */
        rc = bulk_csum_cli_request(desc, req->rq_bulk_read,
                                   req->rq_flvr.sf_bulk_hash, msg, offset);
        if (rc) {
                CERROR("client bulk %s: failed to generate checksum: %d\n",
                       req->rq_bulk_read ? "read" : "write", rc);
                RETURN(rc);
        }

        if (req->rq_flvr.sf_bulk_ciph == BULK_CIPH_ALG_NULL)
                RETURN(0);

        /* previous bulk_csum_cli_request() has verified bsdr is good */
        bsdr = lustre_msg_buf(msg, offset, 0);

        if (req->rq_bulk_read) {
                /* read data is encrypted by the server; just tell it
                 * which cipher we expect */
                bsdr->bsd_ciph_alg = req->rq_flvr.sf_bulk_ciph;
                RETURN(0);
        }

        /* it turn out to be bulk write */
        rc = sptlrpc_enc_pool_get_pages(desc);
        if (rc) {
                CERROR("bulk write: failed to allocate encryption pages\n");
                RETURN(rc);
        }

        gctx = container_of(ctx, struct gss_cli_ctx, gc_base);
        LASSERT(gctx->gc_mechctx);

        rc = do_bulk_privacy(gctx->gc_mechctx, desc, 1,
                             req->rq_flvr.sf_bulk_ciph, bsdr);
        if (rc)
                CERROR("bulk write: client failed to encrypt pages\n");

        RETURN(rc);
}
485
486 int gss_cli_ctx_unwrap_bulk(struct ptlrpc_cli_ctx *ctx,
487                             struct ptlrpc_request *req,
488                             struct ptlrpc_bulk_desc *desc)
489 {
490         struct gss_cli_ctx              *gctx;
491         struct lustre_msg               *rmsg, *vmsg;
492         struct ptlrpc_bulk_sec_desc     *bsdr, *bsdv;
493         int                              roff, voff, rc;
494         ENTRY;
495
496         LASSERT(req->rq_pack_bulk);
497         LASSERT(req->rq_bulk_read || req->rq_bulk_write);
498
499         switch (RPC_FLVR_SVC(req->rq_flvr.sf_rpc)) {
500         case SPTLRPC_SVC_NULL:
501                 vmsg = req->rq_repbuf;
502                 voff = vmsg->lm_bufcount - 1;
503                 LASSERT(vmsg && vmsg->lm_bufcount >= 3);
504
505                 rmsg = req->rq_reqbuf;
506                 roff = rmsg->lm_bufcount - 1; /* last segment */
507                 LASSERT(rmsg && rmsg->lm_bufcount >= 3);
508                 break;
509         case SPTLRPC_SVC_AUTH:
510         case SPTLRPC_SVC_INTG:
511                 vmsg = req->rq_repbuf;
512                 voff = vmsg->lm_bufcount - 2;
513                 LASSERT(vmsg && vmsg->lm_bufcount >= 4);
514
515                 rmsg = req->rq_reqbuf;
516                 roff = rmsg->lm_bufcount - 2; /* second last segment */
517                 LASSERT(rmsg && rmsg->lm_bufcount >= 4);
518                 break;
519         case SPTLRPC_SVC_PRIV:
520                 vmsg = req->rq_repbuf;
521                 voff = vmsg->lm_bufcount - 1;
522                 LASSERT(vmsg && vmsg->lm_bufcount >= 2);
523
524                 rmsg = req->rq_clrbuf;
525                 roff = rmsg->lm_bufcount - 1; /* last segment */
526                 LASSERT(rmsg && rmsg->lm_bufcount >= 2);
527                 break;
528         default:
529                 LBUG();
530         }
531
532         if (req->rq_bulk_read) {
533                 bsdr = lustre_msg_buf(rmsg, roff, 0);
534                 if (bsdr->bsd_ciph_alg == BULK_CIPH_ALG_NULL)
535                         goto verify_csum;
536
537                 bsdv = lustre_msg_buf(vmsg, voff, 0);
538                 if (bsdr->bsd_ciph_alg != bsdv->bsd_ciph_alg) {
539                         CERROR("bulk read: cipher algorithm mismatch: client "
540                                "request %s but server reply with %s. try to "
541                                "use the new one for decryption\n",
542                                sptlrpc_get_ciph_name(bsdr->bsd_ciph_alg),
543                                sptlrpc_get_ciph_name(bsdv->bsd_ciph_alg));
544                 }
545
546                 gctx = container_of(ctx, struct gss_cli_ctx, gc_base);
547                 LASSERT(gctx->gc_mechctx);
548
549                 rc = do_bulk_privacy(gctx->gc_mechctx, desc, 0,
550                                      bsdv->bsd_ciph_alg, bsdv);
551                 if (rc) {
552                         CERROR("bulk read: client failed to decrypt data\n");
553                         RETURN(rc);
554                 }
555         }
556
557 verify_csum:
558         rc = bulk_csum_cli_reply(desc, req->rq_bulk_read,
559                                  rmsg, roff, vmsg, voff);
560         RETURN(rc);
561 }
562
563 int gss_svc_unwrap_bulk(struct ptlrpc_request *req,
564                         struct ptlrpc_bulk_desc *desc)
565 {
566         struct gss_svc_reqctx        *grctx;
567         int                           rc;
568         ENTRY;
569
570         LASSERT(req->rq_svc_ctx);
571         LASSERT(req->rq_pack_bulk);
572         LASSERT(req->rq_bulk_write);
573
574         grctx = gss_svc_ctx2reqctx(req->rq_svc_ctx);
575
576         LASSERT(grctx->src_reqbsd);
577         LASSERT(grctx->src_repbsd);
578         LASSERT(grctx->src_ctx);
579         LASSERT(grctx->src_ctx->gsc_mechctx);
580
581         /* decrypt bulk data if it's encrypted */
582         if (grctx->src_reqbsd->bsd_ciph_alg != BULK_CIPH_ALG_NULL) {
583                 rc = do_bulk_privacy(grctx->src_ctx->gsc_mechctx, desc, 0,
584                                      grctx->src_reqbsd->bsd_ciph_alg,
585                                      grctx->src_reqbsd);
586                 if (rc) {
587                         CERROR("bulk write: server failed to decrypt data\n");
588                         RETURN(rc);
589                 }
590         }
591
592         /* verify bulk data checksum */
593         rc = bulk_csum_svc(desc, req->rq_bulk_read,
594                            grctx->src_reqbsd, grctx->src_reqbsd_size,
595                            grctx->src_repbsd, grctx->src_repbsd_size);
596
597         RETURN(rc);
598 }
599
600 int gss_svc_wrap_bulk(struct ptlrpc_request *req,
601                       struct ptlrpc_bulk_desc *desc)
602 {
603         struct gss_svc_reqctx        *grctx;
604         int                           rc;
605         ENTRY;
606
607         LASSERT(req->rq_svc_ctx);
608         LASSERT(req->rq_pack_bulk);
609         LASSERT(req->rq_bulk_read);
610
611         grctx = gss_svc_ctx2reqctx(req->rq_svc_ctx);
612
613         LASSERT(grctx->src_reqbsd);
614         LASSERT(grctx->src_repbsd);
615         LASSERT(grctx->src_ctx);
616         LASSERT(grctx->src_ctx->gsc_mechctx);
617
618         /* generate bulk data checksum */
619         rc = bulk_csum_svc(desc, req->rq_bulk_read,
620                            grctx->src_reqbsd, grctx->src_reqbsd_size,
621                            grctx->src_repbsd, grctx->src_repbsd_size);
622         if (rc)
623                 RETURN(rc);
624
625         /* encrypt bulk data if required */
626         if (grctx->src_reqbsd->bsd_ciph_alg != BULK_CIPH_ALG_NULL) {
627                 rc = do_bulk_privacy(grctx->src_ctx->gsc_mechctx, desc, 1,
628                                      grctx->src_reqbsd->bsd_ciph_alg,
629                                      grctx->src_repbsd);
630                 if (rc)
631                         CERROR("bulk read: server failed to encrypt data: "
632                                "rc %d\n", rc);
633         }
634
635         RETURN(rc);
636 }
637