1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
4 * Copyright (C) 2006 Cluster File Systems, Inc.
6 * This file is part of Lustre, http://www.lustre.org.
8 * Lustre is free software; you can redistribute it and/or
9 * modify it under the terms of version 2 of the GNU General Public
10 * License as published by the Free Software Foundation.
12 * Lustre is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
17 * You should have received a copy of the GNU General Public License
18 * along with Lustre; if not, write to the Free Software
19 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
23 # define EXPORT_SYMTAB
25 #define DEBUG_SUBSYSTEM S_SEC
27 #include <linux/init.h>
28 #include <linux/module.h>
29 #include <linux/slab.h>
30 #include <linux/dcache.h>
32 #include <linux/random.h>
33 #include <linux/crypto.h>
35 #include <liblustre.h>
39 #include <obd_class.h>
40 #include <obd_support.h>
41 #include <lustre/lustre_idl.h>
42 #include <lustre_net.h>
43 #include <lustre_import.h>
44 #include <lustre_sec.h>
47 #include "gss_internal.h"
/*
 * do_bulk_privacy: encrypt (@encrypt != 0) or decrypt the page fragments
 * of a ptlrpc bulk descriptor with the symmetric cipher selected by @alg.
 * The cipher key is a "secret iv": bsd->bsd_iv is filled with random
 * bytes and then transformed by lgss_plain_encrypt() under the GSS
 * context, so only the two holders of the context can derive the key
 * while bsd_iv itself travels in the clear inside @bsd.
 *
 * NOTE(review): this extract is elided (gaps in the embedded original
 * line numbering) -- the opening brace, the declarations of 'rc' and
 * 'i', the sgd selection, error-path cleanup and the returns are not
 * visible here; comments describe only what is shown.
 */
51 int do_bulk_privacy(struct gss_ctx *gctx,
52 struct ptlrpc_bulk_desc *desc,
53 int encrypt, __u32 alg,
54 struct ptlrpc_bulk_sec_desc *bsd)
56 struct crypto_tfm *tfm;
57 struct scatterlist sg, sg2, *sgd;
/* sized from the on-wire iv field so local and wire iv always match */
59 __u8 local_iv[sizeof(bsd->bsd_iv)];
61 LASSERT(alg < BULK_PRIV_ALG_MAX);
/* default to "no privacy applied" until the whole operation succeeds */
64 bsd->bsd_priv_alg = BULK_PRIV_ALG_NULL;
/* null algorithm: nothing to transform (early return elided from view) */
66 if (alg == BULK_PRIV_ALG_NULL)
/* fresh random iv per bulk transfer; sent in the clear via bsd */
70 get_random_bytes(bsd->bsd_iv, sizeof(bsd->bsd_iv));
72 /* compute the secret iv */
73 lgss_plain_encrypt(gctx, sizeof(local_iv), bsd->bsd_iv, local_iv);
/* legacy (pre-2.6.19) kernel crypto API: name + flags lookup */
75 tfm = crypto_alloc_tfm(priv_types[alg].name, priv_types[alg].flags);
77 CERROR("Failed to allocate TFM %s\n", priv_types[alg].name);
/* the secret iv doubles as the cipher key */
81 rc = crypto_cipher_setkey(tfm, local_iv, sizeof(local_iv));
83 CERROR("Failed to set key for TFM %s: %d\n",
84 priv_types[alg].name, rc);
/* walk every kiov fragment of the descriptor and transform it */
89 for (i = 0; i < desc->bd_iov_count; i++) {
90 sg.page = desc->bd_iov[i].kiov_page;
91 sg.offset = desc->bd_iov[i].kiov_offset;
92 sg.length = desc->bd_iov[i].kiov_len;
/* separate encryption iov present: ciphertext goes to different pages.
 * NOTE(review): sgd presumably points at &sg2 here and &sg for the
 * in-place case -- the assignments are elided from this view; confirm. */
94 if (desc->bd_enc_iov) {
95 sg2.page = desc->bd_enc_iov[i].kiov_page;
96 sg2.offset = desc->bd_enc_iov[i].kiov_offset;
97 sg2.length = desc->bd_enc_iov[i].kiov_len;
104 rc = crypto_cipher_encrypt(tfm, sgd, &sg, sg.length);
106 rc = crypto_cipher_decrypt(tfm, sgd, &sg, sg.length);
110 /* although the procedure might be lengthy, the crypto functions
111 * internally called cond_resched() from time to time.
115 crypto_free_tfm(tfm);
/* success: record the algorithm actually used so the peer can reverse it */
118 bsd->bsd_priv_alg = alg;
/*
 * gss_cli_ctx_wrap_bulk: client side, outgoing request with bulk I/O.
 * Generates the bulk checksum into the request's bulk security
 * descriptor, then (if a privacy algorithm is configured) either
 * advertises the algorithm for a bulk read -- the server encrypts --
 * or encrypts the pages locally for a bulk write.
 *
 * NOTE(review): elided extract -- 'rc'/'offset' declarations, switch
 * 'break's/default, early returns and the final return are not visible.
 */
123 int gss_cli_ctx_wrap_bulk(struct ptlrpc_cli_ctx *ctx,
124 struct ptlrpc_request *req,
125 struct ptlrpc_bulk_desc *desc)
127 struct gss_cli_ctx *gctx;
128 struct lustre_msg *msg;
129 struct ptlrpc_bulk_sec_desc *bsdr;
130 struct sec_flavor_config *conf;
134 LASSERT(SEC_FLAVOR_HAS_BULK(req->rq_sec_flavor));
135 LASSERT(req->rq_bulk_read || req->rq_bulk_write);
/* locate the bulk sec desc segment; its position depends on the svc
 * type: AUTH keeps it second-last in rq_reqbuf, PRIV keeps it last in
 * the clear-text buffer rq_clrbuf */
137 switch (SEC_FLAVOR_SVC(req->rq_sec_flavor)) {
138 case SPTLRPC_SVC_AUTH:
139 LASSERT(req->rq_reqbuf->lm_bufcount >= 4);
140 msg = req->rq_reqbuf;
141 offset = msg->lm_bufcount - 2;
143 case SPTLRPC_SVC_PRIV:
144 LASSERT(req->rq_clrbuf->lm_bufcount >= 2);
145 msg = req->rq_clrbuf;
146 offset = msg->lm_bufcount - 1;
/* checksum config comes from the import's obd security configuration */
153 conf = &req->rq_import->imp_obd->u.cli.cl_sec_conf;
154 rc = bulk_csum_cli_request(desc, req->rq_bulk_read, conf->sfc_bulk_csum,
157 CERROR("client bulk %s: failed to generate checksum: %d\n",
158 req->rq_bulk_read ? "read" : "write", rc);
/* no privacy configured: checksum alone is sufficient */
162 if (conf->sfc_bulk_priv == BULK_PRIV_ALG_NULL)
165 /* previous bulk_csum_cli_request() has verified bsdr is good */
166 bsdr = lustre_msg_buf(msg, offset, 0);
/* bulk read: the server does the encryption; just request the alg */
168 if (req->rq_bulk_read) {
169 bsdr->bsd_priv_alg = conf->sfc_bulk_priv;
173 /* it turn out to be bulk write */
/* ciphertext needs its own pages so the cache pages stay plaintext */
174 rc = sptlrpc_enc_pool_get_pages(desc);
176 CERROR("bulk write: failed to allocate encryption pages\n");
180 gctx = container_of(ctx, struct gss_cli_ctx, gc_base);
181 LASSERT(gctx->gc_mechctx);
/* encrypt outgoing pages now, recording iv/alg in bsdr for the server */
183 rc = do_bulk_privacy(gctx->gc_mechctx, desc, 1,
184 conf->sfc_bulk_priv, bsdr);
186 CERROR("bulk write: client failed to encrypt pages\n");
/*
 * gss_cli_ctx_unwrap_bulk: client side, after the bulk transfer and the
 * server reply have arrived.  For a bulk read with privacy enabled,
 * decrypts the received pages using the iv/alg the server recorded in
 * the reply's bulk sec desc; then verifies the bulk checksum by
 * comparing the request's and reply's descriptors.
 *
 * NOTE(review): elided extract -- 'rc'/'voff'/'roff' declarations,
 * switch 'break's/default and returns are not visible here.
 */
191 int gss_cli_ctx_unwrap_bulk(struct ptlrpc_cli_ctx *ctx,
192 struct ptlrpc_request *req,
193 struct ptlrpc_bulk_desc *desc)
195 struct gss_cli_ctx *gctx;
196 struct lustre_msg *rmsg, *vmsg;
197 struct ptlrpc_bulk_sec_desc *bsdr, *bsdv;
201 LASSERT(SEC_FLAVOR_HAS_BULK(req->rq_sec_flavor));
202 LASSERT(req->rq_bulk_read || req->rq_bulk_write);
/* pick the (request, verify) message pair and the segment offsets of
 * the two bulk sec descs, by service type as in gss_cli_ctx_wrap_bulk */
204 switch (SEC_FLAVOR_SVC(req->rq_sec_flavor)) {
205 case SPTLRPC_SVC_AUTH:
206 vmsg = req->rq_repbuf;
/* NOTE(review): vmsg->lm_bufcount is dereferenced on the next line
 * before the LASSERT that checks vmsg non-NULL -- the assert should
 * come first; same pattern for rmsg below and in the PRIV case */
207 voff = vmsg->lm_bufcount - 2;
208 LASSERT(vmsg && vmsg->lm_bufcount >= 4);
210 rmsg = req->rq_reqbuf;
211 roff = rmsg->lm_bufcount - 2; /* second last segment */
212 LASSERT(rmsg && rmsg->lm_bufcount >= 4);
214 case SPTLRPC_SVC_PRIV:
215 vmsg = req->rq_repbuf;
216 voff = vmsg->lm_bufcount - 1;
217 LASSERT(vmsg && vmsg->lm_bufcount >= 2);
219 rmsg = req->rq_clrbuf;
220 roff = rmsg->lm_bufcount - 1; /* last segment */
221 LASSERT(rmsg && rmsg->lm_bufcount >= 2);
/* bulk read: if we asked for privacy, the pages arrived encrypted */
227 if (req->rq_bulk_read) {
228 bsdr = lustre_msg_buf(rmsg, roff, 0);
229 if (bsdr->bsd_priv_alg == BULK_PRIV_ALG_NULL)
232 bsdv = lustre_msg_buf(vmsg, voff, 0);
/* tolerate a server that switched algorithms: warn, then trust the
 * reply's choice for decryption */
233 if (bsdr->bsd_priv_alg != bsdv->bsd_priv_alg) {
234 CERROR("bulk read: cipher algorithm mismatch: client "
235 "request %s but server reply with %s. try to "
236 "use the new one for decryption\n",
237 sptlrpc_bulk_priv_alg2name(bsdr->bsd_priv_alg),
238 sptlrpc_bulk_priv_alg2name(bsdv->bsd_priv_alg));
241 gctx = container_of(ctx, struct gss_cli_ctx, gc_base);
242 LASSERT(gctx->gc_mechctx);
/* decrypt in place using the server-recorded iv/alg in bsdv */
244 rc = do_bulk_privacy(gctx->gc_mechctx, desc, 0,
245 bsdv->bsd_priv_alg, bsdv);
247 CERROR("bulk read: client failed to decrypt data\n");
/* finally verify the bulk checksum against the server's reply */
253 rc = bulk_csum_cli_reply(desc, req->rq_bulk_read,
254 rmsg, roff, vmsg, voff);
/*
 * gss_svc_unwrap_bulk: server side, after receiving the pages of a bulk
 * WRITE from the client.  Decrypts them if the client's bulk sec desc
 * says privacy was applied, then verifies/records the bulk checksum.
 *
 * NOTE(review): elided extract -- 'rc'/'voff'/'roff' declarations, the
 * if/else bracing and the returns are not visible here.
 */
258 int gss_svc_unwrap_bulk(struct ptlrpc_request *req,
259 struct ptlrpc_bulk_desc *desc)
261 struct ptlrpc_reply_state *rs = req->rq_reply_state;
262 struct gss_svc_reqctx *grctx;
263 struct ptlrpc_bulk_sec_desc *bsdv;
268 LASSERT(req->rq_bulk_write);
/* segment offsets of the client's (voff) and our reply's (roff) bulk
 * sec descs: last segment under PRIV, second-last otherwise (AUTH) */
270 if (SEC_FLAVOR_SVC(req->rq_sec_flavor) == SPTLRPC_SVC_PRIV) {
271 LASSERT(req->rq_reqbuf->lm_bufcount >= 2);
272 LASSERT(rs->rs_repbuf->lm_bufcount >= 2);
273 voff = req->rq_reqbuf->lm_bufcount - 1;
274 roff = rs->rs_repbuf->lm_bufcount - 1;
276 LASSERT(req->rq_reqbuf->lm_bufcount >= 4);
277 LASSERT(rs->rs_repbuf->lm_bufcount >= 4);
278 voff = req->rq_reqbuf->lm_bufcount - 2;
279 roff = rs->rs_repbuf->lm_bufcount - 2;
282 bsdv = lustre_msg_buf(req->rq_reqbuf, voff, sizeof(*bsdv));
/* client encrypted the pages: decrypt with the iv/alg it sent in bsdv */
283 if (bsdv->bsd_priv_alg != BULK_PRIV_ALG_NULL) {
284 grctx = gss_svc_ctx2reqctx(req->rq_svc_ctx);
285 LASSERT(grctx->src_ctx);
286 LASSERT(grctx->src_ctx->gsc_mechctx);
288 rc = do_bulk_privacy(grctx->src_ctx->gsc_mechctx, desc, 0,
289 bsdv->bsd_priv_alg, bsdv);
291 CERROR("bulk write: server failed to decrypt data\n");
/* NOTE(review): rq_bulk_read is passed as the read flag although this
 * path LASSERTs rq_bulk_write above -- presumably always 0 here, i.e.
 * equivalent to passing 0 explicitly; confirm against bulk_csum_svc() */
296 rc = bulk_csum_svc(desc, req->rq_bulk_read,
297 req->rq_reqbuf, voff, rs->rs_repbuf, roff);
/*
 * gss_svc_wrap_bulk: server side, preparing the pages of a bulk READ
 * for transfer to the client.  Computes the bulk checksum into the
 * reply's bulk sec desc, then encrypts the pages if the client asked
 * for a privacy algorithm in its request descriptor.
 *
 * NOTE(review): elided extract, and the function continues past the end
 * of this view (no closing brace visible); 'rc'/'voff'/'roff'
 * declarations and returns are likewise not shown.
 */
302 int gss_svc_wrap_bulk(struct ptlrpc_request *req,
303 struct ptlrpc_bulk_desc *desc)
305 struct ptlrpc_reply_state *rs = req->rq_reply_state;
306 struct gss_svc_reqctx *grctx;
307 struct ptlrpc_bulk_sec_desc *bsdv, *bsdr;
312 LASSERT(req->rq_bulk_read);
/* same offset convention as gss_svc_unwrap_bulk: last segment under
 * PRIV, second-last otherwise */
314 if (SEC_FLAVOR_SVC(req->rq_sec_flavor) == SPTLRPC_SVC_PRIV) {
315 voff = req->rq_reqbuf->lm_bufcount - 1;
316 roff = rs->rs_repbuf->lm_bufcount - 1;
318 voff = req->rq_reqbuf->lm_bufcount - 2;
319 roff = rs->rs_repbuf->lm_bufcount - 2;
/* checksum first: the digest must cover the plaintext pages */
322 rc = bulk_csum_svc(desc, req->rq_bulk_read,
323 req->rq_reqbuf, voff, rs->rs_repbuf, roff);
/* client requested privacy in its descriptor: encrypt the pages and
 * record the iv/alg we used in the reply descriptor bsdr */
327 bsdv = lustre_msg_buf(req->rq_reqbuf, voff, sizeof(*bsdv));
328 if (bsdv->bsd_priv_alg != BULK_PRIV_ALG_NULL) {
329 grctx = gss_svc_ctx2reqctx(req->rq_svc_ctx);
330 LASSERT(grctx->src_ctx);
331 LASSERT(grctx->src_ctx->gsc_mechctx);
333 bsdr = lustre_msg_buf(rs->rs_repbuf, roff, sizeof(*bsdr));
335 rc = do_bulk_privacy(grctx->src_ctx->gsc_mechctx, desc, 1,
336 bsdv->bsd_priv_alg, bsdr);
338 CERROR("bulk read: server failed to encrypt data\n");