4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.gnu.org/licenses/gpl-2.0.html
23 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Use is subject to license terms.
26 * Copyright (c) 2011, 2016, Intel Corporation.
29 * This file is part of Lustre, http://www.lustre.org/
31 * lustre/ptlrpc/gss/gss_cli_upcall.c
33 * Author: Eric Mei <ericm@clusterfs.com>
36 #define DEBUG_SUBSYSTEM S_SEC
37 #include <linux/init.h>
38 #include <linux/module.h>
39 #include <linux/slab.h>
40 #include <linux/dcache.h>
42 #include <linux/mutex.h>
45 #include <obd_class.h>
46 #include <obd_support.h>
47 #include <lustre_net.h>
48 #include <lustre_import.h>
49 #include <lustre_sec.h>
50 #include <uapi/linux/lustre/lgss.h>
53 #include "gss_internal.h"
56 /**********************************************
57 * gss context init/fini helper *
58 **********************************************/
/*
 * ctx_init_pack_request()
 *
 * Fill a pre-allocated SEC_CTX negotiation request with:
 *   - buffer 0: struct gss_header (PROC_INIT, SVC_NULL, empty handle)
 *   - buffer @offset: optional ptlrpc_user_desc when rq_pack_udesc is set
 *   - a security payload holding: lustre svc type, target uuid,
 *     reverse context handle, then the user-space GSS token
 * and finally shrink the request message down to the bytes actually used.
 *
 * NOTE(review): this view of the file has lines elided (parameter list
 * tail, declarations of 'obj'/'gsec'/'uid'/'gid'/'token'/'token_size',
 * error-return paths, closing braces). Comments below describe only the
 * visible code; confirm elided behavior against the full source.
 */
61 int ctx_init_pack_request(struct obd_import *imp,
62 struct ptlrpc_request *req,
68 struct lustre_msg *msg = req->rq_reqbuf;
70 struct gss_header *ghdr;
71 struct ptlrpc_user_desc *pud;
72 __u32 *p, size, offset = 2;
75 LASSERT(msg->lm_bufcount <= 4);
76 LASSERT(req->rq_cli_ctx);
77 LASSERT(req->rq_cli_ctx->cc_sec);
/* gss header lives in message buffer 0 */
80 ghdr = lustre_msg_buf(msg, 0, sizeof(*ghdr));
81 ghdr->gh_version = PTLRPC_GSS_VERSION;
82 ghdr->gh_sp = (__u8) imp->imp_sec->ps_part;
84 ghdr->gh_proc = PTLRPC_GSS_PROC_INIT;
/* context negotiation itself is sent unprotected */
86 ghdr->gh_svc = SPTLRPC_SVC_NULL;
/* no context handle yet: this RPC is what establishes one */
87 ghdr->gh_handle.len = 0;
89 /* fix the user desc */
90 if (req->rq_pack_udesc) {
91 ghdr->gh_flags |= LUSTRE_GSS_PACK_USER;
93 pud = lustre_msg_buf(msg, offset, sizeof(*pud));
95 pud->pud_uid = pud->pud_fsuid = uid;
96 pud->pud_gid = pud->pud_fsgid = gid;
102 /* new clients are expected to set KCSUM flag */
103 ghdr->gh_flags |= LUSTRE_GSS_PACK_KCSUM;
105 /* security payload */
106 p = lustre_msg_buf(msg, offset, 0);
107 size = msg->lm_buflens[offset];
110 /* 1. lustre svc type */
112 *p++ = cpu_to_le32(lustre_srv);
/* 2. target uuid, serialized including its trailing NUL byte */
116 obj.len = strlen(imp->imp_obd->u.cli.cl_target_uuid.uuid) + 1;
117 obj.data = imp->imp_obd->u.cli.cl_target_uuid.uuid;
118 if (rawobj_serialize(&obj, &p, &size))
121 /* 3. reverse context handle. actually only needed by root user,
122 * but we send it anyway. */
123 gsec = sec2gsec(req->rq_cli_ctx->cc_sec);
124 obj.len = sizeof(gsec->gs_rvs_hdl);
125 obj.data = (__u8 *) &gsec->gs_rvs_hdl;
126 if (rawobj_serialize(&obj, &p, &size))
129 /* 4. now the token */
130 LASSERT(size >= (sizeof(__u32) + token_size));
131 *p++ = cpu_to_le32(((__u32) token_size));
/* token comes from user space (lgssd/lgss_keyring), hence copy_from_user */
132 if (copy_from_user(p, token, token_size)) {
133 CERROR("can't copy token\n");
/* account for the length word plus the 4-byte-rounded token */
136 size -= sizeof(__u32) + round_up(token_size, 4);
/* release the unused tail of the payload buffer */
138 req->rq_reqdata_len = lustre_shrink_msg(req->rq_reqbuf, offset,
139 msg->lm_buflens[offset] - size, 0);
/*
 * ctx_init_parse_reply()
 *
 * Unpack the server's SEC_CTX negotiation reply @msg and copy it to the
 * user-space buffer @outbuf in the layout lgssd expects:
 *   status, gh_major, gh_minor, gh_seqwin     (4 x 4 bytes)
 *   handle length + handle data               (4-byte rounded)
 *   token length (from lm_buflens[2]) + token (4-byte rounded)
 * 'effective' accumulates the number of bytes actually written.
 *
 * NOTE(review): this view has lines elided (error returns after each
 * failed check/copy_to_user, the outbuf pointer advances, and the final
 * return of 'effective'); comments describe only the visible code.
 */
144 int ctx_init_parse_reply(struct lustre_msg *msg, int swabbed,
145 char __user *outbuf, long outlen)
147 struct gss_rep_header *ghdr;
148 __u32 obj_len, round_len;
149 __u32 status, effective = 0;
/* reply must carry exactly: gss header, (elided), token buffer */
151 if (msg->lm_bufcount != 3) {
152 CERROR("unexpected bufcount %u\n", msg->lm_bufcount);
/* byte-swap the header in place if the peer has opposite endianness */
156 ghdr = (struct gss_rep_header *) gss_swab_header(msg, 0, swabbed);
158 CERROR("unable to extract gss reply header\n");
162 if (ghdr->gh_version != PTLRPC_GSS_VERSION) {
163 CERROR("invalid gss version %u\n", ghdr->gh_version);
/* pre-check that everything fits: 6 words + rounded handle + rounded token */
167 if (outlen < (4 + 2) * 4 + round_up(ghdr->gh_handle.len, 4) +
168 round_up(msg->lm_buflens[2], 4)) {
169 CERROR("output buffer size %ld too small\n", outlen);
/* fixed-size prefix: status and the three header words */
176 if (copy_to_user(outbuf, &status, 4))
179 if (copy_to_user(outbuf, &ghdr->gh_major, 4))
182 if (copy_to_user(outbuf, &ghdr->gh_minor, 4))
185 if (copy_to_user(outbuf, &ghdr->gh_seqwin, 4))
/* context handle: length word, then data rounded up to 4 bytes */
191 obj_len = ghdr->gh_handle.len;
192 round_len = (obj_len + 3) & ~3;
193 if (copy_to_user(outbuf, &obj_len, 4))
196 if (copy_to_user(outbuf, (char *) ghdr->gh_handle.data, round_len))
199 effective += 4 + round_len;
/* reply token lives in message buffer 2, same length-prefixed framing */
202 obj_len = msg->lm_buflens[2];
203 round_len = (obj_len + 3) & ~3;
204 if (copy_to_user(outbuf, &obj_len, 4))
207 if (copy_to_user(outbuf, lustre_msg_buf(msg, 2, 0), round_len))
210 effective += 4 + round_len;
/*
 * gss_do_ctx_init_rpc()
 *
 * Ioctl-style entry point driven by user-space lgssd/lgss_keyring: copy in
 * a struct lgssd_ioctl_param, validate it, locate and sanity-check the
 * named client obd device, take its import, send the SEC_CTX negotiation
 * RPC, parse the reply back into the user buffer, and copy the updated
 * param (status/reply_length) back out.
 *
 * FIX(review): repaired mojibake - "&param" had been garbled into the
 * pilcrow sequence "¶m" in the copy_from_user()/copy_to_user() calls.
 *
 * NOTE(review): this view of the file has lines elided (error returns,
 * declarations of 'obdname'/'rc'/'lsize', several closing braces, the
 * with_imp_locked() error branch); comments describe only visible code.
 */
215 int gss_do_ctx_init_rpc(char __user *buffer, unsigned long count)
217 struct obd_import *imp, *imp0;
218 struct ptlrpc_request *req;
219 struct lgssd_ioctl_param param;
220 struct obd_device *obd;
/* user space must hand us exactly one lgssd_ioctl_param */
225 if (count != sizeof(param)) {
226 CERROR("ioctl size %lu, expect %lu, please check lgss_keyring version\n",
227 count, (unsigned long) sizeof(param));
230 if (copy_from_user(&param, buffer, sizeof(param))) {
231 CERROR("failed copy data from lgssd\n");
235 if (param.version != GSSD_INTERFACE_VERSION) {
236 CERROR("gssd interface version %d (expect %d)\n",
237 param.version, GSSD_INTERFACE_VERSION);
/* param.uuid is a user-space pointer to the target obd name */
242 if (strncpy_from_user(obdname, (const char __user *)param.uuid,
243 sizeof(obdname)) <= 0) {
244 CERROR("Invalid obdname pointer\n");
248 obd = class_name2obd(obdname);
250 CERROR("no such obd %s\n", obdname);
254 if (unlikely(!obd->obd_set_up)) {
255 CERROR("obd %s not setup\n", obdname);
/* check obd_stopping and device type under obd_dev_lock */
259 spin_lock(&obd->obd_dev_lock);
260 if (obd->obd_stopping) {
261 CERROR("obd %s has stopped\n", obdname);
262 spin_unlock(&obd->obd_dev_lock);
/* only client-side device types may negotiate a gss context */
266 if (strcmp(obd->obd_type->typ_name, LUSTRE_MDC_NAME) &&
267 strcmp(obd->obd_type->typ_name, LUSTRE_OSC_NAME) &&
268 strcmp(obd->obd_type->typ_name, LUSTRE_MGC_NAME) &&
269 strcmp(obd->obd_type->typ_name, LUSTRE_LWP_NAME) &&
270 strcmp(obd->obd_type->typ_name, LUSTRE_OSP_NAME)) {
271 CERROR("obd %s is not a client device\n", obdname);
272 spin_unlock(&obd->obd_dev_lock);
275 spin_unlock(&obd->obd_dev_lock);
/* take our own reference so the import survives past the locked section */
277 with_imp_locked(obd, imp0, rc)
278 imp = class_import_get(imp0);
280 CERROR("obd %s: import has gone\n", obd->obd_name);
284 if (imp->imp_deactive) {
285 CERROR("import has been deactivated\n");
286 class_import_put(imp);
290 req = ptlrpc_request_alloc_pack(imp, &RQF_SEC_CTX, LUSTRE_OBD_VERSION,
293 param.status = -ENOMEM;
/* the sec instance may have been flushed/replaced since lgssd grabbed secid */
297 if (req->rq_cli_ctx->cc_sec->ps_id != param.secid) {
298 CWARN("original secid %d, now has changed to %d, cancel this negotiation\n",
299 param.secid, req->rq_cli_ctx->cc_sec->ps_id);
300 param.status = -EINVAL;
305 rc = ctx_init_pack_request(imp, req,
307 param.uid, param.gid,
308 param.send_token_size,
309 (char __user *)param.send_token);
315 ptlrpc_request_set_replen(req);
317 rc = ptlrpc_queue_wait(req);
319 /* If any _real_ denial be made, we expect server return
320 * -EACCES reply or return success but indicate gss error
321 * inside reply message. All other errors are treated as
322 * timeout, caller might try the negotiation repeatedly,
323 * leave recovery decisions to general ptlrpc layer.
325 * FIXME maybe some other error code shouldn't be treated
330 param.status = -ETIMEDOUT;
334 LASSERT(req->rq_repdata);
335 lsize = ctx_init_parse_reply(req->rq_repdata,
336 req_capsule_rep_need_swab(&req->rq_pill),
337 (char __user *)param.reply_buf,
338 param.reply_buf_size);
/* negative lsize is an errno from parsing; otherwise bytes written */
340 param.status = (int) lsize;
345 param.reply_length = lsize;
/* hand the updated status/reply_length back to lgssd */
348 if (copy_to_user(buffer, &param, sizeof(param)))
353 class_import_put(imp);
354 ptlrpc_req_finished(req);
/*
 * gss_do_ctx_fini_rpc()
 *
 * Best-effort notification to the peer that a gss context is being
 * destroyed: build a SEC_CTX_FINI request under PROC_DESTROY and fire it
 * one-way via ptl_send_rpc(..., noreply=1). On any failure the context is
 * simply destroyed locally (see the CWARN branches) - the RPC is advisory.
 *
 * NOTE(review): this view of the file has lines elided (the 'out' label,
 * several error returns, req NULL-check braces, the bufs_pack argument
 * tail); comments describe only the visible code.
 */
358 int gss_do_ctx_fini_rpc(struct gss_cli_ctx *gctx)
360 struct ptlrpc_cli_ctx *ctx = &gctx->gc_base;
361 struct obd_import *imp = ctx->cc_sec->ps_import;
362 struct ptlrpc_request *req;
363 struct ptlrpc_user_desc *pud;
367 LASSERT(atomic_read(&ctx->cc_refcount) > 0);
/* a context that never became usable has nothing to tear down remotely */
369 if (cli_ctx_is_error(ctx) || !cli_ctx_is_uptodate(ctx)) {
370 CDEBUG(D_SEC, "ctx %p(%u->%s) not uptodate, "
371 "don't send destroy rpc\n", ctx,
372 ctx->cc_vcred.vc_uid, sec2target_str(ctx->cc_sec));
378 CDEBUG(D_SEC, "%s ctx %p idx %#llx (%u->%s)\n",
379 sec_is_reverse(ctx->cc_sec) ?
380 "server finishing reverse" : "client finishing forward",
381 ctx, gss_handle_to_u64(&gctx->gc_handle),
382 ctx->cc_vcred.vc_uid, sec2target_str(ctx->cc_sec));
/* switch this context to DESTROY so the request is wrapped accordingly */
384 gctx->gc_proc = PTLRPC_GSS_PROC_DESTROY;
386 req = ptlrpc_request_alloc(imp, &RQF_SEC_CTX);
388 CWARN("ctx %p(%u): fail to prepare rpc, destroy locally\n",
389 ctx, ctx->cc_vcred.vc_uid);
390 GOTO(out, rc = -ENOMEM);
393 rc = ptlrpc_request_bufs_pack(req, LUSTRE_OBD_VERSION, SEC_CTX_FINI,
398 /* fix the user desc */
399 if (req->rq_pack_udesc) {
400 /* we rely on the fact that this request is in AUTH mode,
401 * and the user_desc is at offset 2. */
402 pud = lustre_msg_buf(req->rq_reqbuf, 2, sizeof(*pud));
/* credit the destroy to the credentials that owned the context */
404 pud->pud_uid = pud->pud_fsuid = ctx->cc_vcred.vc_uid;
405 pud->pud_gid = pud->pud_fsgid = ctx->cc_vcred.vc_gid;
407 pud->pud_ngroups = 0;
/* fire-and-forget: send one-way without going through queue_wait */
410 req->rq_phase = RQ_PHASE_RPC;
411 rc = ptl_send_rpc(req, 1);
413 CWARN("ctx %p(%u->%s): rpc error %d, destroy locally\n", ctx,
414 ctx->cc_vcred.vc_uid, sec2target_str(ctx->cc_sec), rc);
417 ptlrpc_req_finished(req);
422 int __init gss_init_cli_upcall(void)
427 void gss_exit_cli_upcall(void)