/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * Copyright (C) 2006 Cluster File Systems, Inc.
 *   Author: Eric Mei <ericm@clusterfs.com>
 *
 * This file is part of Lustre, http://www.lustre.org.
 *
 * Lustre is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * Lustre is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with Lustre; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
24 # define EXPORT_SYMTAB
26 #define DEBUG_SUBSYSTEM S_SEC
29 #include <liblustre.h>
32 #include <obd_support.h>
33 #include <obd_class.h>
34 #include <lustre_net.h>
35 #include <lustre_sec.h>
37 static struct ptlrpc_sec_policy plain_policy;
38 static struct ptlrpc_ctx_ops plain_ctx_ops;
39 static struct ptlrpc_sec plain_sec;
40 static struct ptlrpc_cli_ctx plain_cli_ctx;
41 static struct ptlrpc_svc_ctx plain_svc_ctx;
/****************************************
 * cli_ctx apis                         *
 ****************************************/
/*
 * Plain contexts are eternal and never expire, so a refresh request is a
 * logic error.
 * NOTE(review): body reconstructed from truncated source — upstream bugs
 * out here; verify the exact statements against the original file.
 */
static
int plain_ctx_refresh(struct ptlrpc_cli_ctx *ctx)
{
        /* should never reach here */
        LBUG();
        return 0;
}
56 int plain_ctx_sign(struct ptlrpc_cli_ctx *ctx, struct ptlrpc_request *req)
58 struct lustre_msg_v2 *msg = req->rq_reqbuf;
61 msg->lm_secflvr = req->rq_sec_flavor;
62 req->rq_reqdata_len = lustre_msg_size_v2(msg->lm_bufcount,
68 int plain_ctx_verify(struct ptlrpc_cli_ctx *ctx, struct ptlrpc_request *req)
70 struct lustre_msg *msg = req->rq_repbuf;
73 if (SEC_FLAVOR_HAS_BULK(req->rq_sec_flavor)) {
74 if (msg->lm_bufcount != 2) {
75 CERROR("Protocol error: invalid buf count %d\n",
80 if (bulk_sec_desc_unpack(msg, 1)) {
81 CERROR("Mal-formed bulk checksum reply\n");
86 req->rq_repmsg = lustre_msg_buf(msg, 0, 0);
87 req->rq_replen = msg->lm_buflens[0];
92 int plain_cli_wrap_bulk(struct ptlrpc_cli_ctx *ctx,
93 struct ptlrpc_request *req,
94 struct ptlrpc_bulk_desc *desc)
96 struct sec_flavor_config *conf;
98 LASSERT(req->rq_import);
99 LASSERT(SEC_FLAVOR_HAS_BULK(req->rq_sec_flavor));
100 LASSERT(req->rq_reqbuf->lm_bufcount >= 2);
102 conf = &req->rq_import->imp_obd->u.cli.cl_sec_conf;
103 return bulk_csum_cli_request(desc, req->rq_bulk_read,
106 req->rq_reqbuf->lm_bufcount - 1);
110 int plain_cli_unwrap_bulk(struct ptlrpc_cli_ctx *ctx,
111 struct ptlrpc_request *req,
112 struct ptlrpc_bulk_desc *desc)
114 LASSERT(SEC_FLAVOR_HAS_BULK(req->rq_sec_flavor));
115 LASSERT(req->rq_reqbuf->lm_bufcount >= 2);
116 LASSERT(req->rq_repbuf->lm_bufcount >= 2);
118 return bulk_csum_cli_reply(desc, req->rq_bulk_read,
120 req->rq_reqbuf->lm_bufcount - 1,
122 req->rq_repbuf->lm_bufcount - 1);
/****************************************
 * sec apis                             *
 ****************************************/
130 struct ptlrpc_sec* plain_create_sec(struct obd_import *imp,
131 struct ptlrpc_svc_ctx *ctx,
136 LASSERT(SEC_FLAVOR_POLICY(flavor) == SPTLRPC_POLICY_PLAIN);
141 void plain_destroy_sec(struct ptlrpc_sec *sec)
144 LASSERT(sec == &plain_sec);
149 struct ptlrpc_cli_ctx *plain_lookup_ctx(struct ptlrpc_sec *sec,
150 struct vfs_cred *vcred,
151 int create, int remove_dead)
154 atomic_inc(&plain_cli_ctx.cc_refcount);
155 RETURN(&plain_cli_ctx);
159 int plain_flush_ctx_cache(struct ptlrpc_sec *sec,
161 int grace, int force)
167 int plain_alloc_reqbuf(struct ptlrpc_sec *sec,
168 struct ptlrpc_request *req,
171 struct sec_flavor_config *conf;
172 int bufcnt = 1, buflens[2], alloc_len;
175 buflens[0] = msgsize;
177 if (SEC_FLAVOR_HAS_USER(req->rq_sec_flavor))
178 buflens[bufcnt++] = sptlrpc_current_user_desc_size();
180 if (SEC_FLAVOR_HAS_BULK(req->rq_sec_flavor)) {
181 LASSERT(req->rq_bulk_read || req->rq_bulk_write);
183 conf = &req->rq_import->imp_obd->u.cli.cl_sec_conf;
184 buflens[bufcnt++] = bulk_sec_desc_size(conf->sfc_bulk_csum, 1,
188 alloc_len = lustre_msg_size_v2(bufcnt, buflens);
190 if (!req->rq_reqbuf) {
191 LASSERT(!req->rq_pool);
193 alloc_len = size_roundup_power2(alloc_len);
194 OBD_ALLOC(req->rq_reqbuf, alloc_len);
198 req->rq_reqbuf_len = alloc_len;
200 LASSERT(req->rq_pool);
201 LASSERT(req->rq_reqbuf_len >= alloc_len);
202 memset(req->rq_reqbuf, 0, alloc_len);
205 lustre_init_msg_v2(req->rq_reqbuf, bufcnt, buflens, NULL);
206 req->rq_reqmsg = lustre_msg_buf_v2(req->rq_reqbuf, 0, 0);
208 if (SEC_FLAVOR_HAS_USER(req->rq_sec_flavor))
209 sptlrpc_pack_user_desc(req->rq_reqbuf, 1);
215 void plain_free_reqbuf(struct ptlrpc_sec *sec,
216 struct ptlrpc_request *req)
220 OBD_FREE(req->rq_reqbuf, req->rq_reqbuf_len);
221 req->rq_reqbuf = NULL;
222 req->rq_reqbuf_len = 0;
228 int plain_alloc_repbuf(struct ptlrpc_sec *sec,
229 struct ptlrpc_request *req,
232 struct sec_flavor_config *conf;
233 int bufcnt = 1, buflens[2], alloc_len;
236 buflens[0] = msgsize;
238 if (SEC_FLAVOR_HAS_BULK(req->rq_sec_flavor)) {
239 LASSERT(req->rq_bulk_read || req->rq_bulk_write);
241 conf = &req->rq_import->imp_obd->u.cli.cl_sec_conf;
242 buflens[bufcnt++] = bulk_sec_desc_size(conf->sfc_bulk_csum, 0,
246 alloc_len = lustre_msg_size_v2(bufcnt, buflens);
247 alloc_len = size_roundup_power2(alloc_len);
249 OBD_ALLOC(req->rq_repbuf, alloc_len);
253 req->rq_repbuf_len = alloc_len;
258 void plain_free_repbuf(struct ptlrpc_sec *sec,
259 struct ptlrpc_request *req)
262 OBD_FREE(req->rq_repbuf, req->rq_repbuf_len);
263 req->rq_repbuf = NULL;
264 req->rq_repbuf_len = 0;
269 int plain_enlarge_reqbuf(struct ptlrpc_sec *sec,
270 struct ptlrpc_request *req,
271 int segment, int newsize)
273 struct lustre_msg *newbuf;
275 int newmsg_size, newbuf_size;
278 /* embedded msg always at seg 0 */
279 LASSERT(req->rq_reqbuf);
280 LASSERT(req->rq_reqbuf_len >= req->rq_reqlen);
281 LASSERT(lustre_msg_buf(req->rq_reqbuf, 0, 0) == req->rq_reqmsg);
283 /* compute new embedded msg size. */
284 oldsize = req->rq_reqmsg->lm_buflens[segment];
285 req->rq_reqmsg->lm_buflens[segment] = newsize;
286 newmsg_size = lustre_msg_size_v2(req->rq_reqmsg->lm_bufcount,
287 req->rq_reqmsg->lm_buflens);
288 req->rq_reqmsg->lm_buflens[segment] = oldsize;
290 /* compute new wrapper msg size. */
291 oldsize = req->rq_reqbuf->lm_buflens[0];
292 req->rq_reqbuf->lm_buflens[0] = newmsg_size;
293 newbuf_size = lustre_msg_size_v2(req->rq_reqbuf->lm_bufcount,
294 req->rq_reqbuf->lm_buflens);
295 req->rq_reqbuf->lm_buflens[0] = oldsize;
297 /* request from pool should always have enough buffer */
298 LASSERT(!req->rq_pool || req->rq_reqbuf_len >= newbuf_size);
300 if (req->rq_reqbuf_len < newbuf_size) {
301 newbuf_size = size_roundup_power2(newbuf_size);
303 OBD_ALLOC(newbuf, newbuf_size);
307 memcpy(newbuf, req->rq_reqbuf, req->rq_reqbuf_len);
309 OBD_FREE(req->rq_reqbuf, req->rq_reqbuf_len);
310 req->rq_reqbuf = newbuf;
311 req->rq_reqbuf_len = newbuf_size;
312 req->rq_reqmsg = lustre_msg_buf(req->rq_reqbuf, 0, 0);
315 _sptlrpc_enlarge_msg_inplace(req->rq_reqbuf, 0, newmsg_size);
316 _sptlrpc_enlarge_msg_inplace(req->rq_reqmsg, segment, newsize);
318 req->rq_reqlen = newmsg_size;
/****************************************
 * svc apis                             *
 ****************************************/
326 static struct ptlrpc_svc_ctx plain_svc_ctx = {
327 .sc_refcount = ATOMIC_INIT(1),
328 .sc_policy = &plain_policy,
332 int plain_accept(struct ptlrpc_request *req)
334 struct lustre_msg *msg = req->rq_reqbuf;
338 LASSERT(SEC_FLAVOR_POLICY(req->rq_sec_flavor) == SPTLRPC_POLICY_PLAIN);
340 if (SEC_FLAVOR_RPC(req->rq_sec_flavor) != SPTLRPC_FLVR_PLAIN) {
341 CERROR("Invalid flavor 0x%x\n", req->rq_sec_flavor);
345 if (SEC_FLAVOR_HAS_USER(req->rq_sec_flavor)) {
346 if (msg->lm_bufcount < ++bufcnt) {
347 CERROR("Protocal error: too small buf count %d\n",
352 if (sptlrpc_unpack_user_desc(msg, bufcnt - 1)) {
353 CERROR("Mal-formed user descriptor\n");
357 req->rq_user_desc = lustre_msg_buf(msg, bufcnt - 1, 0);
360 if (SEC_FLAVOR_HAS_BULK(req->rq_sec_flavor)) {
361 if (msg->lm_bufcount != ++bufcnt) {
362 CERROR("Protocal error: invalid buf count %d\n",
367 if (bulk_sec_desc_unpack(msg, bufcnt - 1)) {
368 CERROR("Mal-formed bulk checksum request\n");
373 req->rq_reqmsg = lustre_msg_buf(msg, 0, 0);
374 req->rq_reqlen = msg->lm_buflens[0];
376 req->rq_svc_ctx = &plain_svc_ctx;
377 atomic_inc(&req->rq_svc_ctx->sc_refcount);
383 int plain_alloc_rs(struct ptlrpc_request *req, int msgsize)
385 struct ptlrpc_reply_state *rs;
386 struct ptlrpc_bulk_sec_desc *bsd;
387 int bufcnt = 1, buflens[2];
388 int rs_size = sizeof(*rs);
391 LASSERT(msgsize % 8 == 0);
393 buflens[0] = msgsize;
394 if (SEC_FLAVOR_HAS_BULK(req->rq_sec_flavor) &&
395 (req->rq_bulk_read || req->rq_bulk_write)) {
396 bsd = lustre_msg_buf(req->rq_reqbuf,
397 req->rq_reqbuf->lm_bufcount - 1,
401 buflens[bufcnt++] = bulk_sec_desc_size(bsd->bsd_csum_alg, 0,
404 rs_size += lustre_msg_size_v2(bufcnt, buflens);
406 rs = req->rq_reply_state;
410 LASSERT(rs->rs_size >= rs_size);
412 OBD_ALLOC(rs, rs_size);
416 rs->rs_size = rs_size;
419 rs->rs_svc_ctx = req->rq_svc_ctx;
420 atomic_inc(&req->rq_svc_ctx->sc_refcount);
421 rs->rs_repbuf = (struct lustre_msg *) (rs + 1);
422 rs->rs_repbuf_len = rs_size - sizeof(*rs);
424 lustre_init_msg_v2(rs->rs_repbuf, bufcnt, buflens, NULL);
425 rs->rs_msg = lustre_msg_buf_v2(rs->rs_repbuf, 0, 0);
427 req->rq_reply_state = rs;
432 void plain_free_rs(struct ptlrpc_reply_state *rs)
436 LASSERT(atomic_read(&rs->rs_svc_ctx->sc_refcount) > 1);
437 atomic_dec(&rs->rs_svc_ctx->sc_refcount);
439 if (!rs->rs_prealloc)
440 OBD_FREE(rs, rs->rs_size);
445 int plain_authorize(struct ptlrpc_request *req)
447 struct ptlrpc_reply_state *rs = req->rq_reply_state;
448 struct lustre_msg_v2 *msg = rs->rs_repbuf;
455 if (req->rq_replen != msg->lm_buflens[0])
456 len = lustre_shrink_msg(msg, 0, req->rq_replen, 1);
458 len = lustre_msg_size_v2(msg->lm_bufcount, msg->lm_buflens);
460 msg->lm_secflvr = req->rq_sec_flavor;
461 rs->rs_repdata_len = len;
466 int plain_svc_unwrap_bulk(struct ptlrpc_request *req,
467 struct ptlrpc_bulk_desc *desc)
469 struct ptlrpc_reply_state *rs = req->rq_reply_state;
474 voff = req->rq_reqbuf->lm_bufcount - 1;
475 roff = rs->rs_repbuf->lm_bufcount - 1;
477 return bulk_csum_svc(desc, req->rq_bulk_read,
478 lustre_msg_buf(req->rq_reqbuf, voff, 0),
479 lustre_msg_buflen(req->rq_reqbuf, voff),
480 lustre_msg_buf(rs->rs_repbuf, roff, 0),
481 lustre_msg_buflen(rs->rs_repbuf, roff));
485 int plain_svc_wrap_bulk(struct ptlrpc_request *req,
486 struct ptlrpc_bulk_desc *desc)
488 struct ptlrpc_reply_state *rs = req->rq_reply_state;
493 voff = req->rq_reqbuf->lm_bufcount - 1;
494 roff = rs->rs_repbuf->lm_bufcount - 1;
496 return bulk_csum_svc(desc, req->rq_bulk_read,
497 lustre_msg_buf(req->rq_reqbuf, voff, 0),
498 lustre_msg_buflen(req->rq_reqbuf, voff),
499 lustre_msg_buf(rs->rs_repbuf, roff, 0),
500 lustre_msg_buflen(rs->rs_repbuf, roff));
503 static struct ptlrpc_ctx_ops plain_ctx_ops = {
504 .refresh = plain_ctx_refresh,
505 .sign = plain_ctx_sign,
506 .verify = plain_ctx_verify,
507 .wrap_bulk = plain_cli_wrap_bulk,
508 .unwrap_bulk = plain_cli_unwrap_bulk,
511 static struct ptlrpc_sec_cops plain_sec_cops = {
512 .create_sec = plain_create_sec,
513 .destroy_sec = plain_destroy_sec,
514 .lookup_ctx = plain_lookup_ctx,
515 .flush_ctx_cache = plain_flush_ctx_cache,
516 .alloc_reqbuf = plain_alloc_reqbuf,
517 .alloc_repbuf = plain_alloc_repbuf,
518 .free_reqbuf = plain_free_reqbuf,
519 .free_repbuf = plain_free_repbuf,
520 .enlarge_reqbuf = plain_enlarge_reqbuf,
523 static struct ptlrpc_sec_sops plain_sec_sops = {
524 .accept = plain_accept,
525 .alloc_rs = plain_alloc_rs,
526 .authorize = plain_authorize,
527 .free_rs = plain_free_rs,
528 .unwrap_bulk = plain_svc_unwrap_bulk,
529 .wrap_bulk = plain_svc_wrap_bulk,
532 static struct ptlrpc_sec_policy plain_policy = {
533 .sp_owner = THIS_MODULE,
534 .sp_name = "sec.plain",
535 .sp_policy = SPTLRPC_POLICY_PLAIN,
536 .sp_cops = &plain_sec_cops,
537 .sp_sops = &plain_sec_sops,
541 void plain_init_internal(void)
543 static HLIST_HEAD(__list);
545 plain_sec.ps_policy = &plain_policy;
546 atomic_set(&plain_sec.ps_refcount, 1); /* always busy */
547 plain_sec.ps_import = NULL;
548 plain_sec.ps_flavor = SPTLRPC_FLVR_PLAIN;
549 plain_sec.ps_flags = 0;
550 spin_lock_init(&plain_sec.ps_lock);
551 atomic_set(&plain_sec.ps_busy, 1); /* for "plain_cli_ctx" */
552 CFS_INIT_LIST_HEAD(&plain_sec.ps_gc_list);
553 plain_sec.ps_gc_interval = 0;
554 plain_sec.ps_gc_next = 0;
556 hlist_add_head(&plain_cli_ctx.cc_cache, &__list);
557 atomic_set(&plain_cli_ctx.cc_refcount, 1); /* for hash */
558 plain_cli_ctx.cc_sec = &plain_sec;
559 plain_cli_ctx.cc_ops = &plain_ctx_ops;
560 plain_cli_ctx.cc_expire = 0;
561 plain_cli_ctx.cc_flags = PTLRPC_CTX_CACHED | PTLRPC_CTX_ETERNAL |
563 plain_cli_ctx.cc_vcred.vc_uid = 0;
564 spin_lock_init(&plain_cli_ctx.cc_lock);
565 CFS_INIT_LIST_HEAD(&plain_cli_ctx.cc_req_list);
566 CFS_INIT_LIST_HEAD(&plain_cli_ctx.cc_gc_chain);
569 int sptlrpc_plain_init(void)
573 plain_init_internal();
575 rc = sptlrpc_register_policy(&plain_policy);
577 CERROR("failed to register sec.plain: %d\n", rc);
582 void sptlrpc_plain_fini(void)
586 rc = sptlrpc_unregister_policy(&plain_policy);
588 CERROR("cannot unregister sec.plain: %d\n", rc);