/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * Copyright (C) 2004-2006 Cluster File Systems, Inc.
 *   Author: Eric Mei <ericm@clusterfs.com>
 *
 * This file is part of Lustre, http://www.lustre.org.
 *
 * Lustre is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * Lustre is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with Lustre; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#ifndef EXPORT_SYMTAB
# define EXPORT_SYMTAB
#endif
#define DEBUG_SUBSYSTEM S_SEC

#ifndef __KERNEL__
#include <liblustre.h>
#endif

#include <obd_support.h>
#include <obd_class.h>
#include <lustre_net.h>
#include <lustre_sec.h>
37 static struct ptlrpc_sec_policy null_policy;
38 static struct ptlrpc_sec null_sec;
39 static struct ptlrpc_cli_ctx null_cli_ctx;
40 static struct ptlrpc_svc_ctx null_svc_ctx;
43 * null sec temporarily use the third byte of lm_secflvr to identify
44 * the source sec part.
47 void null_encode_sec_part(struct lustre_msg *msg, enum lustre_sec_part sp)
49 msg->lm_secflvr |= (((__u32) sp) & 0xFF) << 16;
53 enum lustre_sec_part null_decode_sec_part(struct lustre_msg *msg)
55 switch (msg->lm_magic) {
56 case LUSTRE_MSG_MAGIC_V2:
57 return (msg->lm_secflvr >> 16) & 0xFF;
58 case LUSTRE_MSG_MAGIC_V2_SWABBED:
59 return (msg->lm_secflvr >> 8) & 0xFF;
/**
 * The null client context is eternal and always uptodate, so the generic
 * layer must never ask us to refresh it.
 */
int null_ctx_refresh(struct ptlrpc_cli_ctx *ctx)
{
        /* should never reach here */
        LBUG();
        return 0;
}
74 int null_ctx_sign(struct ptlrpc_cli_ctx *ctx, struct ptlrpc_request *req)
76 if (req->rq_reqbuf->lm_magic != LUSTRE_MSG_MAGIC_V1) {
77 req->rq_reqbuf->lm_secflvr = SPTLRPC_FLVR_NULL;
79 if (!req->rq_import->imp_dlm_fake) {
80 struct obd_device *obd = req->rq_import->imp_obd;
81 null_encode_sec_part(req->rq_reqbuf,
82 obd->u.cli.cl_sec_part);
85 req->rq_reqdata_len = req->rq_reqlen;
90 int null_ctx_verify(struct ptlrpc_cli_ctx *ctx, struct ptlrpc_request *req)
92 req->rq_repmsg = req->rq_repbuf;
93 req->rq_replen = req->rq_repdata_len;
97 static struct ptlrpc_ctx_ops null_ctx_ops = {
98 .refresh = null_ctx_refresh,
99 .sign = null_ctx_sign,
100 .verify = null_ctx_verify,
103 static struct ptlrpc_svc_ctx null_svc_ctx = {
104 .sc_refcount = ATOMIC_INIT(1),
105 .sc_policy = &null_policy,
109 struct ptlrpc_sec *null_create_sec(struct obd_import *imp,
110 struct ptlrpc_svc_ctx *svc_ctx,
111 struct sptlrpc_flavor *sf)
113 LASSERT(RPC_FLVR_POLICY(sf->sf_rpc) == SPTLRPC_POLICY_NULL);
115 if (sf->sf_bulk_priv != BULK_PRIV_ALG_NULL ||
116 sf->sf_bulk_csum != BULK_CSUM_ALG_NULL) {
117 CERROR("null sec don't support bulk algorithm: %u/%u\n",
118 sf->sf_bulk_priv, sf->sf_bulk_csum);
122 /* general layer has take a module reference for us, because we never
123 * really destroy the sec, simply release the reference here.
125 sptlrpc_policy_put(&null_policy);
130 void null_destroy_sec(struct ptlrpc_sec *sec)
132 LASSERT(sec == &null_sec);
136 struct ptlrpc_cli_ctx *null_lookup_ctx(struct ptlrpc_sec *sec,
137 struct vfs_cred *vcred,
138 int create, int remove_dead)
140 atomic_inc(&null_cli_ctx.cc_refcount);
141 return &null_cli_ctx;
145 int null_flush_ctx_cache(struct ptlrpc_sec *sec,
147 int grace, int force)
153 int null_alloc_reqbuf(struct ptlrpc_sec *sec,
154 struct ptlrpc_request *req,
157 if (!req->rq_reqbuf) {
158 int alloc_size = size_roundup_power2(msgsize);
160 LASSERT(!req->rq_pool);
161 OBD_ALLOC(req->rq_reqbuf, alloc_size);
165 req->rq_reqbuf_len = alloc_size;
167 LASSERT(req->rq_pool);
168 LASSERT(req->rq_reqbuf_len >= msgsize);
169 memset(req->rq_reqbuf, 0, msgsize);
172 req->rq_reqmsg = req->rq_reqbuf;
177 void null_free_reqbuf(struct ptlrpc_sec *sec,
178 struct ptlrpc_request *req)
181 LASSERTF(req->rq_reqmsg == req->rq_reqbuf,
182 "req %p: reqmsg %p is not reqbuf %p in null sec\n",
183 req, req->rq_reqmsg, req->rq_reqbuf);
184 LASSERTF(req->rq_reqbuf_len >= req->rq_reqlen,
185 "req %p: reqlen %d should smaller than buflen %d\n",
186 req, req->rq_reqlen, req->rq_reqbuf_len);
188 OBD_FREE(req->rq_reqbuf, req->rq_reqbuf_len);
189 req->rq_reqmsg = req->rq_reqbuf = NULL;
190 req->rq_reqbuf_len = 0;
193 req->rq_reqmsg = NULL;
197 int null_alloc_repbuf(struct ptlrpc_sec *sec,
198 struct ptlrpc_request *req,
201 msgsize = size_roundup_power2(msgsize);
203 OBD_ALLOC(req->rq_repbuf, msgsize);
207 req->rq_repbuf_len = msgsize;
212 void null_free_repbuf(struct ptlrpc_sec *sec,
213 struct ptlrpc_request *req)
215 OBD_FREE(req->rq_repbuf, req->rq_repbuf_len);
216 req->rq_repbuf = NULL;
217 req->rq_repbuf_len = 0;
219 req->rq_repmsg = NULL;
223 int null_enlarge_reqbuf(struct ptlrpc_sec *sec,
224 struct ptlrpc_request *req,
225 int segment, int newsize)
227 struct lustre_msg *newbuf;
228 struct lustre_msg *oldbuf = req->rq_reqmsg;
229 int oldsize, newmsg_size, alloc_size;
231 LASSERT(req->rq_reqbuf);
232 LASSERT(req->rq_reqbuf == req->rq_reqmsg);
233 LASSERT(req->rq_reqbuf_len >= req->rq_reqlen);
234 LASSERT(req->rq_reqlen == lustre_packed_msg_size(oldbuf));
236 /* compute new message size */
237 oldsize = req->rq_reqbuf->lm_buflens[segment];
238 req->rq_reqbuf->lm_buflens[segment] = newsize;
239 newmsg_size = lustre_packed_msg_size(oldbuf);
240 req->rq_reqbuf->lm_buflens[segment] = oldsize;
242 /* request from pool should always have enough buffer */
243 LASSERT(!req->rq_pool || req->rq_reqbuf_len >= newmsg_size);
245 if (req->rq_reqbuf_len < newmsg_size) {
246 alloc_size = size_roundup_power2(newmsg_size);
248 OBD_ALLOC(newbuf, alloc_size);
252 memcpy(newbuf, req->rq_reqbuf, req->rq_reqlen);
254 OBD_FREE(req->rq_reqbuf, req->rq_reqbuf_len);
255 req->rq_reqbuf = req->rq_reqmsg = newbuf;
256 req->rq_reqbuf_len = alloc_size;
259 _sptlrpc_enlarge_msg_inplace(req->rq_reqmsg, segment, newsize);
260 req->rq_reqlen = newmsg_size;
266 int null_accept(struct ptlrpc_request *req)
268 LASSERT(RPC_FLVR_POLICY(req->rq_flvr.sf_rpc) == SPTLRPC_POLICY_NULL);
270 if (req->rq_flvr.sf_rpc != SPTLRPC_FLVR_NULL) {
271 CERROR("Invalid rpc flavor 0x%x\n", req->rq_flvr.sf_rpc);
275 req->rq_sp_from = null_decode_sec_part(req->rq_reqbuf);
277 req->rq_reqmsg = req->rq_reqbuf;
278 req->rq_reqlen = req->rq_reqdata_len;
280 req->rq_svc_ctx = &null_svc_ctx;
281 atomic_inc(&req->rq_svc_ctx->sc_refcount);
287 int null_alloc_rs(struct ptlrpc_request *req, int msgsize)
289 struct ptlrpc_reply_state *rs;
290 int rs_size = sizeof(*rs) + msgsize;
292 LASSERT(msgsize % 8 == 0);
294 rs = req->rq_reply_state;
298 LASSERT(rs->rs_size >= rs_size);
300 OBD_ALLOC(rs, rs_size);
304 rs->rs_size = rs_size;
307 rs->rs_svc_ctx = req->rq_svc_ctx;
308 atomic_inc(&req->rq_svc_ctx->sc_refcount);
310 rs->rs_repbuf = (struct lustre_msg *) (rs + 1);
311 rs->rs_repbuf_len = rs_size - sizeof(*rs);
312 rs->rs_msg = rs->rs_repbuf;
314 req->rq_reply_state = rs;
319 void null_free_rs(struct ptlrpc_reply_state *rs)
321 LASSERT(atomic_read(&rs->rs_svc_ctx->sc_refcount) > 1);
322 atomic_dec(&rs->rs_svc_ctx->sc_refcount);
324 if (!rs->rs_prealloc)
325 OBD_FREE(rs, rs->rs_size);
329 int null_authorize(struct ptlrpc_request *req)
331 struct ptlrpc_reply_state *rs = req->rq_reply_state;
334 if (rs->rs_repbuf->lm_magic != LUSTRE_MSG_MAGIC_V1)
335 rs->rs_repbuf->lm_secflvr = SPTLRPC_FLVR_NULL;
336 rs->rs_repdata_len = req->rq_replen;
340 static struct ptlrpc_sec_cops null_sec_cops = {
341 .create_sec = null_create_sec,
342 .destroy_sec = null_destroy_sec,
343 .lookup_ctx = null_lookup_ctx,
344 .flush_ctx_cache = null_flush_ctx_cache,
345 .alloc_reqbuf = null_alloc_reqbuf,
346 .alloc_repbuf = null_alloc_repbuf,
347 .free_reqbuf = null_free_reqbuf,
348 .free_repbuf = null_free_repbuf,
349 .enlarge_reqbuf = null_enlarge_reqbuf,
352 static struct ptlrpc_sec_sops null_sec_sops = {
353 .accept = null_accept,
354 .alloc_rs = null_alloc_rs,
355 .authorize = null_authorize,
356 .free_rs = null_free_rs,
359 static struct ptlrpc_sec_policy null_policy = {
360 .sp_owner = THIS_MODULE,
361 .sp_name = "sec.null",
362 .sp_policy = SPTLRPC_POLICY_NULL,
363 .sp_cops = &null_sec_cops,
364 .sp_sops = &null_sec_sops,
368 void null_init_internal(void)
370 static HLIST_HEAD(__list);
372 null_sec.ps_policy = &null_policy;
373 atomic_set(&null_sec.ps_refcount, 1); /* always busy */
375 null_sec.ps_import = NULL;
376 null_sec.ps_flvr.sf_rpc = SPTLRPC_FLVR_NULL;
377 null_sec.ps_flvr.sf_bulk_priv = BULK_PRIV_ALG_NULL;
378 null_sec.ps_flvr.sf_bulk_csum = BULK_CSUM_ALG_NULL;
379 null_sec.ps_flvr.sf_flags = 0;
380 null_sec.ps_part = LUSTRE_SP_ANY;
381 null_sec.ps_dying = 0;
382 spin_lock_init(&null_sec.ps_lock);
383 atomic_set(&null_sec.ps_nctx, 1); /* for "null_cli_ctx" */
384 INIT_LIST_HEAD(&null_sec.ps_gc_list);
385 null_sec.ps_gc_interval = 0;
386 null_sec.ps_gc_next = 0;
388 hlist_add_head(&null_cli_ctx.cc_cache, &__list);
389 atomic_set(&null_cli_ctx.cc_refcount, 1); /* for hash */
390 null_cli_ctx.cc_sec = &null_sec;
391 null_cli_ctx.cc_ops = &null_ctx_ops;
392 null_cli_ctx.cc_expire = 0;
393 null_cli_ctx.cc_flags = PTLRPC_CTX_CACHED | PTLRPC_CTX_ETERNAL |
395 null_cli_ctx.cc_vcred.vc_uid = 0;
396 spin_lock_init(&null_cli_ctx.cc_lock);
397 INIT_LIST_HEAD(&null_cli_ctx.cc_req_list);
398 INIT_LIST_HEAD(&null_cli_ctx.cc_gc_chain);
401 int sptlrpc_null_init(void)
405 null_init_internal();
407 rc = sptlrpc_register_policy(&null_policy);
409 CERROR("failed to register %s: %d\n", null_policy.sp_name, rc);
414 void sptlrpc_null_fini(void)
418 rc = sptlrpc_unregister_policy(&null_policy);
420 CERROR("failed to unregister %s: %d\n", null_policy.sp_name,rc);