4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.gnu.org/licenses/gpl-2.0.html
23 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Use is subject to license terms.
26 * Copyright (c) 2012, 2017, Intel Corporation.
29 * This file is part of Lustre, http://www.lustre.org/
30 * Lustre is a trademark of Sun Microsystems, Inc.
33 #ifndef _LUSTRE_SEC_H_
34 #define _LUSTRE_SEC_H_
36 /** \defgroup sptlrpc sptlrpc
46 struct ptlrpc_request;
47 struct ptlrpc_reply_state;
48 struct ptlrpc_bulk_desc;
59 struct ptlrpc_sec_policy;
60 struct ptlrpc_sec_cops;
61 struct ptlrpc_sec_sops;
63 struct ptlrpc_svc_ctx;
64 struct ptlrpc_cli_ctx;
65 struct ptlrpc_ctx_ops;
69 * \addtogroup flavor flavor
 * RPC flavor is represented by a 32-bit integer. Currently the high 12 bits
 * are unused and must be set to 0 for future expansion.
74 * ------------------------------------------------------------------------
75 * | 4b (bulk svc) | 4b (bulk type) | 4b (svc) | 4b (mech) | 4b (policy) |
76 * ------------------------------------------------------------------------
86 SPTLRPC_POLICY_NULL = 0,
87 SPTLRPC_POLICY_PLAIN = 1,
88 SPTLRPC_POLICY_GSS = 2,
92 enum sptlrpc_mech_null {
93 SPTLRPC_MECH_NULL = 0,
94 SPTLRPC_MECH_NULL_MAX,
97 enum sptlrpc_mech_plain {
98 SPTLRPC_MECH_PLAIN = 0,
99 SPTLRPC_MECH_PLAIN_MAX,
102 enum sptlrpc_mech_gss {
103 SPTLRPC_MECH_GSS_NULL = 0,
104 SPTLRPC_MECH_GSS_KRB5 = 1,
105 SPTLRPC_MECH_GSS_SK = 2,
106 SPTLRPC_MECH_GSS_MAX,
109 enum sptlrpc_service_type {
110 SPTLRPC_SVC_NULL = 0, /**< no security */
111 SPTLRPC_SVC_AUTH = 1, /**< authentication only */
112 SPTLRPC_SVC_INTG = 2, /**< integrity */
113 SPTLRPC_SVC_PRIV = 3, /**< privacy */
117 enum sptlrpc_bulk_type {
118 SPTLRPC_BULK_DEFAULT = 0, /**< follow rpc flavor */
119 SPTLRPC_BULK_HASH = 1, /**< hash integrity */
123 enum sptlrpc_bulk_service {
124 SPTLRPC_BULK_SVC_NULL = 0, /**< no security */
125 SPTLRPC_BULK_SVC_AUTH = 1, /**< authentication only */
126 SPTLRPC_BULK_SVC_INTG = 2, /**< integrity */
127 SPTLRPC_BULK_SVC_PRIV = 3, /**< privacy */
128 SPTLRPC_BULK_SVC_MAX,
132 * compose/extract macros
/*
 * Bit offsets of the five 4-bit fields packed into the 32-bit wire
 * flavor (see the layout diagram above). These are on-the-wire values;
 * do not renumber.
 */
#define FLVR_POLICY_OFFSET              (0)
#define FLVR_MECH_OFFSET                (4)
#define FLVR_SVC_OFFSET                 (8)
#define FLVR_BULK_TYPE_OFFSET           (12)
#define FLVR_BULK_SVC_OFFSET            (16)
/*
 * Compose a full 32-bit RPC flavor from its five 4-bit components:
 * policy, mechanism, rpc service, bulk type and bulk service.
 * Each argument is masked only by its caller; values are expected to
 * fit in 4 bits (see the enums above).
 */
#define MAKE_FLVR(policy, mech, svc, btype, bsvc)       \
        (((__u32)(policy) << FLVR_POLICY_OFFSET) |      \
         ((__u32)(mech) << FLVR_MECH_OFFSET) |          \
         ((__u32)(svc) << FLVR_SVC_OFFSET) |            \
         ((__u32)(btype) << FLVR_BULK_TYPE_OFFSET) |    \
         ((__u32)(bsvc) << FLVR_BULK_SVC_OFFSET))
/*
 * Extract individual 4-bit fields from a wire flavor.
 * SPTLRPC_FLVR_BASE covers policy + mech + svc (the low 12 bits);
 * SPTLRPC_FLVR_BASE_SUB covers mech + svc only (8 bits above policy).
 */
#define SPTLRPC_FLVR_POLICY(flavor)                                     \
        ((((__u32)(flavor)) >> FLVR_POLICY_OFFSET) & 0xF)
#define SPTLRPC_FLVR_MECH(flavor)                                       \
        ((((__u32)(flavor)) >> FLVR_MECH_OFFSET) & 0xF)
#define SPTLRPC_FLVR_SVC(flavor)                                        \
        ((((__u32)(flavor)) >> FLVR_SVC_OFFSET) & 0xF)
#define SPTLRPC_FLVR_BULK_TYPE(flavor)                                  \
        ((((__u32)(flavor)) >> FLVR_BULK_TYPE_OFFSET) & 0xF)
#define SPTLRPC_FLVR_BULK_SVC(flavor)                                   \
        ((((__u32)(flavor)) >> FLVR_BULK_SVC_OFFSET) & 0xF)

#define SPTLRPC_FLVR_BASE(flavor)                                       \
        ((((__u32)(flavor)) >> FLVR_POLICY_OFFSET) & 0xFFF)
#define SPTLRPC_FLVR_BASE_SUB(flavor)                                   \
        ((((__u32)(flavor)) >> FLVR_MECH_OFFSET) & 0xFF)
169 #define MAKE_BASE_SUBFLVR(mech, svc) \
171 ((__u32)(svc) << (FLVR_SVC_OFFSET - FLVR_MECH_OFFSET)))
/*
 * GSS sub-flavors: (mechanism, service) combinations built with
 * MAKE_BASE_SUBFLVR() — gssnull, krb5{n,a,i,p} and sk{n,a,i,pi}.
 * These identify the mech+svc portion of a flavor, independent of
 * the policy and bulk fields.
 */
#define SPTLRPC_SUBFLVR_GSSNULL                                         \
        MAKE_BASE_SUBFLVR(SPTLRPC_MECH_GSS_NULL, SPTLRPC_SVC_NULL)
#define SPTLRPC_SUBFLVR_KRB5N                                           \
        MAKE_BASE_SUBFLVR(SPTLRPC_MECH_GSS_KRB5, SPTLRPC_SVC_NULL)
#define SPTLRPC_SUBFLVR_KRB5A                                           \
        MAKE_BASE_SUBFLVR(SPTLRPC_MECH_GSS_KRB5, SPTLRPC_SVC_AUTH)
#define SPTLRPC_SUBFLVR_KRB5I                                           \
        MAKE_BASE_SUBFLVR(SPTLRPC_MECH_GSS_KRB5, SPTLRPC_SVC_INTG)
#define SPTLRPC_SUBFLVR_KRB5P                                           \
        MAKE_BASE_SUBFLVR(SPTLRPC_MECH_GSS_KRB5, SPTLRPC_SVC_PRIV)
#define SPTLRPC_SUBFLVR_SKN                                             \
        MAKE_BASE_SUBFLVR(SPTLRPC_MECH_GSS_SK, SPTLRPC_SVC_NULL)
#define SPTLRPC_SUBFLVR_SKA                                             \
        MAKE_BASE_SUBFLVR(SPTLRPC_MECH_GSS_SK, SPTLRPC_SVC_AUTH)
#define SPTLRPC_SUBFLVR_SKI                                             \
        MAKE_BASE_SUBFLVR(SPTLRPC_MECH_GSS_SK, SPTLRPC_SVC_INTG)
#define SPTLRPC_SUBFLVR_SKPI                                            \
        MAKE_BASE_SUBFLVR(SPTLRPC_MECH_GSS_SK, SPTLRPC_SVC_PRIV)
195 #define SPTLRPC_FLVR_NULL \
196 MAKE_FLVR(SPTLRPC_POLICY_NULL, \
199 SPTLRPC_BULK_DEFAULT, \
200 SPTLRPC_BULK_SVC_NULL)
201 #define SPTLRPC_FLVR_PLAIN \
202 MAKE_FLVR(SPTLRPC_POLICY_PLAIN, \
203 SPTLRPC_MECH_PLAIN, \
206 SPTLRPC_BULK_SVC_INTG)
207 #define SPTLRPC_FLVR_GSSNULL \
208 MAKE_FLVR(SPTLRPC_POLICY_GSS, \
209 SPTLRPC_MECH_GSS_NULL, \
211 SPTLRPC_BULK_DEFAULT, \
212 SPTLRPC_BULK_SVC_NULL)
213 #define SPTLRPC_FLVR_KRB5N \
214 MAKE_FLVR(SPTLRPC_POLICY_GSS, \
215 SPTLRPC_MECH_GSS_KRB5, \
217 SPTLRPC_BULK_DEFAULT, \
218 SPTLRPC_BULK_SVC_NULL)
219 #define SPTLRPC_FLVR_KRB5A \
220 MAKE_FLVR(SPTLRPC_POLICY_GSS, \
221 SPTLRPC_MECH_GSS_KRB5, \
223 SPTLRPC_BULK_DEFAULT, \
224 SPTLRPC_BULK_SVC_NULL)
225 #define SPTLRPC_FLVR_KRB5I \
226 MAKE_FLVR(SPTLRPC_POLICY_GSS, \
227 SPTLRPC_MECH_GSS_KRB5, \
229 SPTLRPC_BULK_DEFAULT, \
230 SPTLRPC_BULK_SVC_INTG)
231 #define SPTLRPC_FLVR_KRB5P \
232 MAKE_FLVR(SPTLRPC_POLICY_GSS, \
233 SPTLRPC_MECH_GSS_KRB5, \
235 SPTLRPC_BULK_DEFAULT, \
236 SPTLRPC_BULK_SVC_PRIV)
237 #define SPTLRPC_FLVR_SKN \
238 MAKE_FLVR(SPTLRPC_POLICY_GSS, \
239 SPTLRPC_MECH_GSS_SK, \
241 SPTLRPC_BULK_DEFAULT, \
242 SPTLRPC_BULK_SVC_NULL)
243 #define SPTLRPC_FLVR_SKA \
244 MAKE_FLVR(SPTLRPC_POLICY_GSS, \
245 SPTLRPC_MECH_GSS_SK, \
247 SPTLRPC_BULK_DEFAULT, \
248 SPTLRPC_BULK_SVC_NULL)
249 #define SPTLRPC_FLVR_SKI \
250 MAKE_FLVR(SPTLRPC_POLICY_GSS, \
251 SPTLRPC_MECH_GSS_SK, \
253 SPTLRPC_BULK_DEFAULT, \
254 SPTLRPC_BULK_SVC_INTG)
255 #define SPTLRPC_FLVR_SKPI \
256 MAKE_FLVR(SPTLRPC_POLICY_GSS, \
257 SPTLRPC_MECH_GSS_SK, \
259 SPTLRPC_BULK_DEFAULT, \
260 SPTLRPC_BULK_SVC_PRIV)
#define SPTLRPC_FLVR_DEFAULT            SPTLRPC_FLVR_NULL /* default: no security */

#define SPTLRPC_FLVR_INVALID            ((__u32) 0xFFFFFFFF) /* never a valid wire flavor */
#define SPTLRPC_FLVR_ANY                ((__u32) 0xFFF00000) /* wildcard: only unused high bits set */
268 * extract the useful part from wire flavor
270 #define WIRE_FLVR(wflvr) (((__u32) (wflvr)) & 0x000FFFFF)
274 static inline void flvr_set_svc(__u32 *flvr, __u32 svc)
276 LASSERT(svc < SPTLRPC_SVC_MAX);
277 *flvr = MAKE_FLVR(SPTLRPC_FLVR_POLICY(*flvr),
278 SPTLRPC_FLVR_MECH(*flvr),
280 SPTLRPC_FLVR_BULK_TYPE(*flvr),
281 SPTLRPC_FLVR_BULK_SVC(*flvr));
284 static inline void flvr_set_bulk_svc(__u32 *flvr, __u32 svc)
286 LASSERT(svc < SPTLRPC_BULK_SVC_MAX);
287 *flvr = MAKE_FLVR(SPTLRPC_FLVR_POLICY(*flvr),
288 SPTLRPC_FLVR_MECH(*flvr),
289 SPTLRPC_FLVR_SVC(*flvr),
290 SPTLRPC_FLVR_BULK_TYPE(*flvr),
294 struct bulk_spec_hash {
299 * Full description of flavors being used on a ptlrpc connection, include
300 * both regular RPC and bulk transfer parts.
302 struct sptlrpc_flavor {
304 * wire flavor, should be renamed to sf_wire.
308 * general flags of PTLRPC_SEC_FL_*
312 * rpc flavor specification
315 /* nothing for now */
318 * bulk flavor specification
321 struct bulk_spec_hash hash;
326 * identify the RPC is generated from what part of Lustre. It's encoded into
327 * RPC requests and to be checked by ptlrpc service.
329 enum lustre_sec_part {
338 const char *sptlrpc_part2name(enum lustre_sec_part sp);
339 enum lustre_sec_part sptlrpc_target_sec_part(struct obd_device *obd);
342 * A rule specifies a flavor to be used by a ptlrpc connection between
345 struct sptlrpc_rule {
346 __u32 sr_netid; /* LNET network ID */
347 __u8 sr_from; /* sec_part */
348 __u8 sr_to; /* sec_part */
350 struct sptlrpc_flavor sr_flvr;
354 * A set of rules in memory.
356 * Rules are generated and stored on MGS, and propagated to MDT, OST,
357 * and client when needed.
359 struct sptlrpc_rule_set {
362 struct sptlrpc_rule *srs_rules;
365 int sptlrpc_parse_flavor(const char *str, struct sptlrpc_flavor *flvr);
366 int sptlrpc_flavor_has_bulk(struct sptlrpc_flavor *flvr);
368 static inline void sptlrpc_rule_set_init(struct sptlrpc_rule_set *set)
370 memset(set, 0, sizeof(*set));
373 void sptlrpc_rule_set_free(struct sptlrpc_rule_set *set);
374 int sptlrpc_rule_set_expand(struct sptlrpc_rule_set *set);
375 int sptlrpc_rule_set_merge(struct sptlrpc_rule_set *set,
376 struct sptlrpc_rule *rule);
377 int sptlrpc_rule_set_choose(struct sptlrpc_rule_set *rset,
378 enum lustre_sec_part from,
379 enum lustre_sec_part to,
381 struct sptlrpc_flavor *sf);
382 void sptlrpc_rule_set_dump(struct sptlrpc_rule_set *set);
384 int sptlrpc_process_config(struct lustre_cfg *lcfg);
385 void sptlrpc_conf_log_start(const char *logname);
386 void sptlrpc_conf_log_stop(const char *logname);
387 void sptlrpc_conf_log_update_begin(const char *logname);
388 void sptlrpc_conf_log_update_end(const char *logname);
389 void sptlrpc_conf_client_adapt(struct obd_device *obd);
390 int sptlrpc_conf_target_get_rules(struct obd_device *obd,
391 struct sptlrpc_rule_set *rset);
392 void sptlrpc_target_choose_flavor(struct sptlrpc_rule_set *rset,
393 enum lustre_sec_part from,
395 struct sptlrpc_flavor *flavor);
397 /* The maximum length of security payload. 1024 is enough for Kerberos 5,
398 * and should be enough for other future mechanisms but not sure.
399 * Only used by pre-allocated request/reply pool.
401 #define SPTLRPC_MAX_PAYLOAD (1024)
409 struct ptlrpc_ctx_ops {
411 * To determine whether it's suitable to use the \a ctx for \a vcred.
413 int (*match) (struct ptlrpc_cli_ctx *ctx,
414 struct vfs_cred *vcred);
417 * To bring the \a ctx uptodate.
419 int (*refresh) (struct ptlrpc_cli_ctx *ctx);
422 * Validate the \a ctx.
424 int (*validate) (struct ptlrpc_cli_ctx *ctx);
427 * Force the \a ctx to die.
429 void (*die) (struct ptlrpc_cli_ctx *ctx,
431 int (*display) (struct ptlrpc_cli_ctx *ctx,
432 char *buf, int bufsize);
435 * Sign the request message using \a ctx.
437 * \pre req->rq_reqmsg point to request message.
438 * \pre req->rq_reqlen is the request message length.
439 * \post req->rq_reqbuf point to request message with signature.
440 * \post req->rq_reqdata_len is set to the final request message size.
442 * \see null_ctx_sign(), plain_ctx_sign(), gss_cli_ctx_sign().
444 int (*sign) (struct ptlrpc_cli_ctx *ctx,
445 struct ptlrpc_request *req);
448 * Verify the reply message using \a ctx.
450 * \pre req->rq_repdata point to reply message with signature.
451 * \pre req->rq_repdata_len is the total reply message length.
452 * \post req->rq_repmsg point to reply message without signature.
453 * \post req->rq_replen is the reply message length.
455 * \see null_ctx_verify(), plain_ctx_verify(), gss_cli_ctx_verify().
457 int (*verify) (struct ptlrpc_cli_ctx *ctx,
458 struct ptlrpc_request *req);
461 * Encrypt the request message using \a ctx.
463 * \pre req->rq_reqmsg point to request message in clear text.
464 * \pre req->rq_reqlen is the request message length.
465 * \post req->rq_reqbuf point to request message.
466 * \post req->rq_reqdata_len is set to the final request message size.
468 * \see gss_cli_ctx_seal().
470 int (*seal) (struct ptlrpc_cli_ctx *ctx,
471 struct ptlrpc_request *req);
474 * Decrypt the reply message using \a ctx.
476 * \pre req->rq_repdata point to encrypted reply message.
477 * \pre req->rq_repdata_len is the total cipher text length.
478 * \post req->rq_repmsg point to reply message in clear text.
479 * \post req->rq_replen is the reply message length in clear text.
481 * \see gss_cli_ctx_unseal().
483 int (*unseal) (struct ptlrpc_cli_ctx *ctx,
484 struct ptlrpc_request *req);
487 * Wrap bulk request data. This is called before wrapping RPC
	 * \pre bulk buffer is described by desc->bd_iov and
491 * desc->bd_iov_count. note for read it's just buffer, no data
492 * need to be sent; for write it contains data in clear text.
493 * \post when necessary, ptlrpc_bulk_sec_desc was properly prepared
494 * (usually inside of RPC request message).
	 *   - encryption: cipher text bulk buffer is described by
496 * desc->bd_enc_iov and desc->bd_iov_count (currently assume iov
497 * count remains the same).
498 * - otherwise: bulk buffer is still desc->bd_iov and
499 * desc->bd_iov_count.
501 * \return 0: success.
502 * \return -ev: error code.
504 * \see plain_cli_wrap_bulk(), gss_cli_ctx_wrap_bulk().
506 int (*wrap_bulk) (struct ptlrpc_cli_ctx *ctx,
507 struct ptlrpc_request *req,
508 struct ptlrpc_bulk_desc *desc);
511 * Unwrap bulk reply data. This is called after wrapping RPC
	 * \pre bulk buffer is described by desc->bd_iov/desc->bd_enc_iov and
515 * desc->bd_iov_count, according to wrap_bulk().
516 * \post final bulk data in clear text is placed in buffer described
517 * by desc->bd_iov and desc->bd_iov_count.
518 * \return +ve nob of actual bulk data in clear text.
519 * \return -ve error code.
521 * \see plain_cli_unwrap_bulk(), gss_cli_ctx_unwrap_bulk().
523 int (*unwrap_bulk) (struct ptlrpc_cli_ctx *ctx,
524 struct ptlrpc_request *req,
525 struct ptlrpc_bulk_desc *desc);
/* bit numbers within ptlrpc_cli_ctx::cc_flags */
#define PTLRPC_CTX_NEW_BIT             (0)  /* newly created */
#define PTLRPC_CTX_UPTODATE_BIT        (1)  /* uptodate */
#define PTLRPC_CTX_DEAD_BIT            (2)  /* mark expired gracefully */
#define PTLRPC_CTX_ERROR_BIT           (3)  /* fatal error (refresh, etc.) */
#define PTLRPC_CTX_CACHED_BIT          (8)  /* in ctx cache (hash etc.) */
#define PTLRPC_CTX_ETERNAL_BIT         (9)  /* always valid */

/* mask values derived from the bit numbers above */
#define PTLRPC_CTX_NEW                 (1 << PTLRPC_CTX_NEW_BIT)
#define PTLRPC_CTX_UPTODATE            (1 << PTLRPC_CTX_UPTODATE_BIT)
#define PTLRPC_CTX_DEAD                (1 << PTLRPC_CTX_DEAD_BIT)
#define PTLRPC_CTX_ERROR               (1 << PTLRPC_CTX_ERROR_BIT)
#define PTLRPC_CTX_CACHED              (1 << PTLRPC_CTX_CACHED_BIT)
#define PTLRPC_CTX_ETERNAL             (1 << PTLRPC_CTX_ETERNAL_BIT)
542 #define PTLRPC_CTX_STATUS_MASK (PTLRPC_CTX_NEW_BIT | \
543 PTLRPC_CTX_UPTODATE | \
547 struct ptlrpc_cli_ctx {
548 struct hlist_node cc_cache; /* linked into ctx cache */
549 atomic_t cc_refcount;
550 struct ptlrpc_sec *cc_sec;
551 struct ptlrpc_ctx_ops *cc_ops;
552 time64_t cc_expire; /* in seconds */
553 unsigned int cc_early_expire:1;
554 unsigned long cc_flags;
555 struct vfs_cred cc_vcred;
557 struct list_head cc_req_list; /* waiting reqs linked here */
558 struct list_head cc_gc_chain; /* linked to gc chain */
562 * client side policy operation vector.
564 struct ptlrpc_sec_cops {
566 * Given an \a imp, create and initialize a ptlrpc_sec structure.
567 * \param ctx service context:
568 * - regular import: \a ctx should be NULL;
569 * - reverse import: \a ctx is obtained from incoming request.
570 * \param flavor specify what flavor to use.
572 * When necessary, policy module is responsible for taking reference
575 * \see null_create_sec(), plain_create_sec(), gss_sec_create_kr().
577 struct ptlrpc_sec * (*create_sec) (struct obd_import *imp,
578 struct ptlrpc_svc_ctx *ctx,
579 struct sptlrpc_flavor *flavor);
582 * Destructor of ptlrpc_sec. When called, refcount has been dropped
	 * to 0 and all contexts have been destroyed.
585 * \see null_destroy_sec(), plain_destroy_sec(), gss_sec_destroy_kr().
587 void (*destroy_sec) (struct ptlrpc_sec *sec);
590 * Notify that this ptlrpc_sec is going to die. Optionally, policy
591 * module is supposed to set sec->ps_dying and whatever necessary
594 * \see plain_kill_sec(), gss_sec_kill().
596 void (*kill_sec) (struct ptlrpc_sec *sec);
599 * Given \a vcred, lookup and/or create its context. The policy module
600 * is supposed to maintain its own context cache.
601 * XXX currently \a create and \a remove_dead is always 1, perhaps
602 * should be removed completely.
604 * \see null_lookup_ctx(), plain_lookup_ctx(), gss_sec_lookup_ctx_kr().
606 struct ptlrpc_cli_ctx * (*lookup_ctx) (struct ptlrpc_sec *sec,
607 struct vfs_cred *vcred,
	 * Called when the reference count of \a ctx drops to 0. The policy
	 * module is supposed to destroy this context or whatever else
	 * according to its cache maintenance mechanism.
616 * \param sync if zero, we shouldn't wait for the context being
617 * destroyed completely.
619 * \see plain_release_ctx(), gss_sec_release_ctx_kr().
621 void (*release_ctx) (struct ptlrpc_sec *sec,
622 struct ptlrpc_cli_ctx *ctx,
626 * Flush the context cache.
628 * \param uid context of which user, -1 means all contexts.
629 * \param grace if zero, the PTLRPC_CTX_UPTODATE_BIT of affected
630 * contexts should be cleared immediately.
631 * \param force if zero, only idle contexts will be flushed.
633 * \see plain_flush_ctx_cache(), gss_sec_flush_ctx_cache_kr().
635 int (*flush_ctx_cache)
636 (struct ptlrpc_sec *sec,
642 * Called periodically by garbage collector to remove dead contexts
645 * \see gss_sec_gc_ctx_kr().
647 void (*gc_ctx) (struct ptlrpc_sec *sec);
650 * Given an context \a ctx, install a corresponding reverse service
651 * context on client side.
652 * XXX currently it's only used by GSS module, maybe we should remove
653 * this from general API.
655 int (*install_rctx)(struct obd_import *imp,
656 struct ptlrpc_sec *sec,
657 struct ptlrpc_cli_ctx *ctx);
660 * To allocate request buffer for \a req.
662 * \pre req->rq_reqmsg == NULL.
663 * \pre req->rq_reqbuf == NULL, otherwise it must be pre-allocated,
664 * we are not supposed to free it.
665 * \post if success, req->rq_reqmsg point to a buffer with size
666 * at least \a lustre_msg_size.
668 * \see null_alloc_reqbuf(), plain_alloc_reqbuf(), gss_alloc_reqbuf().
670 int (*alloc_reqbuf)(struct ptlrpc_sec *sec,
671 struct ptlrpc_request *req,
672 int lustre_msg_size);
675 * To free request buffer for \a req.
677 * \pre req->rq_reqbuf != NULL.
679 * \see null_free_reqbuf(), plain_free_reqbuf(), gss_free_reqbuf().
681 void (*free_reqbuf) (struct ptlrpc_sec *sec,
682 struct ptlrpc_request *req);
685 * To allocate reply buffer for \a req.
687 * \pre req->rq_repbuf == NULL.
688 * \post if success, req->rq_repbuf point to a buffer with size
689 * req->rq_repbuf_len, the size should be large enough to receive
690 * reply which be transformed from \a lustre_msg_size of clear text.
692 * \see null_alloc_repbuf(), plain_alloc_repbuf(), gss_alloc_repbuf().
694 int (*alloc_repbuf)(struct ptlrpc_sec *sec,
695 struct ptlrpc_request *req,
696 int lustre_msg_size);
699 * To free reply buffer for \a req.
701 * \pre req->rq_repbuf != NULL.
702 * \post req->rq_repbuf == NULL.
703 * \post req->rq_repbuf_len == 0.
705 * \see null_free_repbuf(), plain_free_repbuf(), gss_free_repbuf().
707 void (*free_repbuf) (struct ptlrpc_sec *sec,
708 struct ptlrpc_request *req);
711 * To expand the request buffer of \a req, thus the \a segment in
712 * the request message pointed by req->rq_reqmsg can accommodate
713 * at least \a newsize of data.
715 * \pre req->rq_reqmsg->lm_buflens[segment] < newsize.
717 * \see null_enlarge_reqbuf(), plain_enlarge_reqbuf(),
718 * gss_enlarge_reqbuf().
720 int (*enlarge_reqbuf)
721 (struct ptlrpc_sec *sec,
722 struct ptlrpc_request *req,
723 int segment, int newsize);
727 int (*display) (struct ptlrpc_sec *sec,
728 struct seq_file *seq);
732 * server side policy operation vector.
734 struct ptlrpc_sec_sops {
736 * verify an incoming request.
738 * \pre request message is pointed by req->rq_reqbuf, size is
739 * req->rq_reqdata_len; and the message has been unpacked to
742 * \retval SECSVC_OK success, req->rq_reqmsg point to request message
743 * in clear text, size is req->rq_reqlen; req->rq_svc_ctx is set;
744 * req->rq_sp_from is decoded from request.
745 * \retval SECSVC_COMPLETE success, the request has been fully
746 * processed, and reply message has been prepared; req->rq_sp_from is
747 * decoded from request.
748 * \retval SECSVC_DROP failed, this request should be dropped.
750 * \see null_accept(), plain_accept(), gss_svc_accept_kr().
752 int (*accept) (struct ptlrpc_request *req);
755 * Perform security transformation upon reply message.
757 * \pre reply message is pointed by req->rq_reply_state->rs_msg, size
759 * \post req->rs_repdata_len is the final message size.
760 * \post req->rq_reply_off is set.
762 * \see null_authorize(), plain_authorize(), gss_svc_authorize().
764 int (*authorize) (struct ptlrpc_request *req);
767 * Invalidate server context \a ctx.
769 * \see gss_svc_invalidate_ctx().
771 void (*invalidate_ctx)
772 (struct ptlrpc_svc_ctx *ctx);
775 * Allocate a ptlrpc_reply_state.
777 * \param msgsize size of the reply message in clear text.
778 * \pre if req->rq_reply_state != NULL, then it's pre-allocated, we
	 * should simply use it; otherwise we'll be responsible for allocating
781 * \post req->rq_reply_state != NULL;
782 * \post req->rq_reply_state->rs_msg != NULL;
784 * \see null_alloc_rs(), plain_alloc_rs(), gss_svc_alloc_rs().
786 int (*alloc_rs) (struct ptlrpc_request *req,
790 * Free a ptlrpc_reply_state.
792 void (*free_rs) (struct ptlrpc_reply_state *rs);
795 * Release the server context \a ctx.
797 * \see gss_svc_free_ctx().
799 void (*free_ctx) (struct ptlrpc_svc_ctx *ctx);
802 * Install a reverse context based on the server context \a ctx.
804 * \see gss_svc_install_rctx_kr().
806 int (*install_rctx)(struct obd_import *imp,
807 struct ptlrpc_svc_ctx *ctx);
810 * Prepare buffer for incoming bulk write.
	 * \pre desc->bd_iov and desc->bd_iov_count describe the buffer
813 * intended to receive the write.
815 * \see gss_svc_prep_bulk().
817 int (*prep_bulk) (struct ptlrpc_request *req,
818 struct ptlrpc_bulk_desc *desc);
821 * Unwrap the bulk write data.
823 * \see plain_svc_unwrap_bulk(), gss_svc_unwrap_bulk().
825 int (*unwrap_bulk) (struct ptlrpc_request *req,
826 struct ptlrpc_bulk_desc *desc);
829 * Wrap the bulk read data.
831 * \see plain_svc_wrap_bulk(), gss_svc_wrap_bulk().
833 int (*wrap_bulk) (struct ptlrpc_request *req,
834 struct ptlrpc_bulk_desc *desc);
837 struct ptlrpc_sec_policy {
838 struct module *sp_owner;
840 __u16 sp_policy; /* policy number */
841 struct ptlrpc_sec_cops *sp_cops; /* client ops */
842 struct ptlrpc_sec_sops *sp_sops; /* server ops */
/* flag bits for sptlrpc_flavor::sf_flags (see sec_is_reverse() etc. below) */
#define PTLRPC_SEC_FL_REVERSE           0x0001 /* reverse sec */
#define PTLRPC_SEC_FL_ROOTONLY          0x0002 /* treat everyone as root */
#define PTLRPC_SEC_FL_UDESC             0x0004 /* ship udesc */
#define PTLRPC_SEC_FL_BULK              0x0008 /* intensive bulk i/o expected */
#define PTLRPC_SEC_FL_PAG               0x0010 /* PAG mode */
852 * The ptlrpc_sec represents the client side ptlrpc security facilities,
853 * each obd_import (both regular and reverse import) must associate with
856 * \see sptlrpc_import_sec_adapt().
859 struct ptlrpc_sec_policy *ps_policy;
860 atomic_t ps_refcount;
861 /** statistic only */
863 /** unique identifier */
865 struct sptlrpc_flavor ps_flvr;
866 enum lustre_sec_part ps_part;
867 /** after set, no more new context will be created */
868 unsigned int ps_dying:1;
870 struct obd_import *ps_import;
876 struct list_head ps_gc_list;
877 time64_t ps_gc_interval; /* in seconds */
878 time64_t ps_gc_next; /* in seconds */
881 static inline int flvr_is_rootonly(__u32 flavor)
883 return (SPTLRPC_FLVR_POLICY(flavor) == SPTLRPC_POLICY_GSS &&
884 (SPTLRPC_FLVR_MECH(flavor) == SPTLRPC_MECH_GSS_NULL ||
885 SPTLRPC_FLVR_MECH(flavor) == SPTLRPC_MECH_GSS_SK));
888 static inline int flvr_allows_user_desc(__u32 flavor)
890 return (SPTLRPC_FLVR_POLICY(flavor) == SPTLRPC_POLICY_GSS &&
891 (SPTLRPC_FLVR_MECH(flavor) == SPTLRPC_MECH_GSS_NULL ||
892 SPTLRPC_FLVR_MECH(flavor) == SPTLRPC_MECH_GSS_SK));
895 static inline int sec_is_reverse(struct ptlrpc_sec *sec)
897 return (sec->ps_flvr.sf_flags & PTLRPC_SEC_FL_REVERSE);
900 static inline int sec_is_rootonly(struct ptlrpc_sec *sec)
902 return (sec->ps_flvr.sf_flags & PTLRPC_SEC_FL_ROOTONLY);
906 struct ptlrpc_svc_ctx {
907 atomic_t sc_refcount;
908 struct ptlrpc_sec_policy *sc_policy;
912 * user identity descriptor
914 #define LUSTRE_MAX_GROUPS (128)
916 struct ptlrpc_user_desc {
929 enum sptlrpc_bulk_hash_alg {
930 BULK_HASH_ALG_NULL = 0,
931 BULK_HASH_ALG_ADLER32,
935 BULK_HASH_ALG_SHA256,
936 BULK_HASH_ALG_SHA384,
937 BULK_HASH_ALG_SHA512,
941 const char * sptlrpc_get_hash_name(__u8 hash_alg);
942 __u8 sptlrpc_get_hash_alg(const char *algname);
948 struct ptlrpc_bulk_sec_desc {
949 __u8 bsd_version; /* 0 */
950 __u8 bsd_type; /* SPTLRPC_BULK_XXX */
951 __u8 bsd_svc; /* SPTLRPC_BULK_SVC_XXXX */
952 __u8 bsd_flags; /* flags */
953 __u32 bsd_nob; /* nob of bulk data */
954 __u8 bsd_data[0]; /* policy-specific token */
961 struct proc_dir_entry;
962 extern struct proc_dir_entry *sptlrpc_proc_root;
965 * round size up to next power of 2, for slab allocation.
966 * @size must be sane (can't overflow after round up)
968 static inline int size_roundup_power2(int size)
981 * internal support libraries
983 void _sptlrpc_enlarge_msg_inplace(struct lustre_msg *msg,
984 int segment, int newsize);
989 int sptlrpc_register_policy(struct ptlrpc_sec_policy *policy);
990 int sptlrpc_unregister_policy(struct ptlrpc_sec_policy *policy);
992 __u32 sptlrpc_name2flavor_base(const char *name);
993 const char *sptlrpc_flavor2name_base(__u32 flvr);
994 char *sptlrpc_flavor2name_bulk(struct sptlrpc_flavor *sf,
995 char *buf, int bufsize);
996 char *sptlrpc_flavor2name(struct sptlrpc_flavor *sf, char *buf, int bufsize);
997 char *sptlrpc_secflags2str(__u32 flags, char *buf, int bufsize);
999 static inline struct ptlrpc_sec_policy *
1000 sptlrpc_policy_get(struct ptlrpc_sec_policy *policy)
1002 __module_get(policy->sp_owner);
1007 sptlrpc_policy_put(struct ptlrpc_sec_policy *policy)
1009 module_put(policy->sp_owner);
1016 unsigned long cli_ctx_status(struct ptlrpc_cli_ctx *ctx)
1018 return (ctx->cc_flags & PTLRPC_CTX_STATUS_MASK);
1022 int cli_ctx_is_ready(struct ptlrpc_cli_ctx *ctx)
1024 return (cli_ctx_status(ctx) == PTLRPC_CTX_UPTODATE);
1028 int cli_ctx_is_refreshed(struct ptlrpc_cli_ctx *ctx)
1030 return (cli_ctx_status(ctx) != 0);
1034 int cli_ctx_is_uptodate(struct ptlrpc_cli_ctx *ctx)
1036 return ((ctx->cc_flags & PTLRPC_CTX_UPTODATE) != 0);
1040 int cli_ctx_is_error(struct ptlrpc_cli_ctx *ctx)
1042 return ((ctx->cc_flags & PTLRPC_CTX_ERROR) != 0);
1046 int cli_ctx_is_dead(struct ptlrpc_cli_ctx *ctx)
1048 return ((ctx->cc_flags & (PTLRPC_CTX_DEAD | PTLRPC_CTX_ERROR)) != 0);
1052 int cli_ctx_is_eternal(struct ptlrpc_cli_ctx *ctx)
1054 return ((ctx->cc_flags & PTLRPC_CTX_ETERNAL) != 0);
1060 struct ptlrpc_sec *sptlrpc_sec_get(struct ptlrpc_sec *sec);
1061 void sptlrpc_sec_put(struct ptlrpc_sec *sec);
 * internal APIs which are only used by policy implementation
1066 int sptlrpc_get_next_secid(void);
1067 void sptlrpc_sec_destroy(struct ptlrpc_sec *sec);
1070 * exported client context api
1072 struct ptlrpc_cli_ctx *sptlrpc_cli_ctx_get(struct ptlrpc_cli_ctx *ctx);
1073 void sptlrpc_cli_ctx_put(struct ptlrpc_cli_ctx *ctx, int sync);
1074 void sptlrpc_cli_ctx_expire(struct ptlrpc_cli_ctx *ctx);
1075 void sptlrpc_cli_ctx_wakeup(struct ptlrpc_cli_ctx *ctx);
1076 int sptlrpc_cli_ctx_display(struct ptlrpc_cli_ctx *ctx, char *buf, int bufsize);
1079 * exported client context wrap/buffers
1081 int sptlrpc_cli_wrap_request(struct ptlrpc_request *req);
1082 int sptlrpc_cli_unwrap_reply(struct ptlrpc_request *req);
1083 int sptlrpc_cli_alloc_reqbuf(struct ptlrpc_request *req, int msgsize);
1084 void sptlrpc_cli_free_reqbuf(struct ptlrpc_request *req);
1085 int sptlrpc_cli_alloc_repbuf(struct ptlrpc_request *req, int msgsize);
1086 void sptlrpc_cli_free_repbuf(struct ptlrpc_request *req);
1087 int sptlrpc_cli_enlarge_reqbuf(struct ptlrpc_request *req,
1088 const struct req_msg_field *field,
1090 int sptlrpc_cli_unwrap_early_reply(struct ptlrpc_request *req,
1091 struct ptlrpc_request **req_ret);
1092 void sptlrpc_cli_finish_early_reply(struct ptlrpc_request *early_req);
1094 void sptlrpc_request_out_callback(struct ptlrpc_request *req);
1097 * exported higher interface of import & request
1099 int sptlrpc_import_sec_adapt(struct obd_import *imp,
1100 struct ptlrpc_svc_ctx *ctx,
1101 struct sptlrpc_flavor *flvr);
1102 struct ptlrpc_sec *sptlrpc_import_sec_ref(struct obd_import *imp);
1103 void sptlrpc_import_sec_put(struct obd_import *imp);
1105 int sptlrpc_import_check_ctx(struct obd_import *imp);
1106 void sptlrpc_import_flush_root_ctx(struct obd_import *imp);
1107 void sptlrpc_import_flush_my_ctx(struct obd_import *imp);
1108 void sptlrpc_import_flush_all_ctx(struct obd_import *imp);
1109 int sptlrpc_req_get_ctx(struct ptlrpc_request *req);
1110 void sptlrpc_req_put_ctx(struct ptlrpc_request *req, int sync);
1111 int sptlrpc_req_refresh_ctx(struct ptlrpc_request *req, long timeout);
1112 int sptlrpc_req_replace_dead_ctx(struct ptlrpc_request *req);
1113 void sptlrpc_req_set_flavor(struct ptlrpc_request *req, int opcode);
1115 int sptlrpc_parse_rule(char *param, struct sptlrpc_rule *rule);
1118 void sptlrpc_gc_add_sec(struct ptlrpc_sec *sec);
1119 void sptlrpc_gc_del_sec(struct ptlrpc_sec *sec);
1120 void sptlrpc_gc_add_ctx(struct ptlrpc_cli_ctx *ctx);
1123 const char * sec2target_str(struct ptlrpc_sec *sec);
1124 int sptlrpc_lprocfs_cliobd_attach(struct obd_device *dev);
1129 enum secsvc_accept_res {
1135 int sptlrpc_svc_unwrap_request(struct ptlrpc_request *req);
1136 int sptlrpc_svc_alloc_rs(struct ptlrpc_request *req, int msglen);
1137 int sptlrpc_svc_wrap_reply(struct ptlrpc_request *req);
1138 void sptlrpc_svc_free_rs(struct ptlrpc_reply_state *rs);
1139 void sptlrpc_svc_ctx_addref(struct ptlrpc_request *req);
1140 void sptlrpc_svc_ctx_decref(struct ptlrpc_request *req);
1141 void sptlrpc_svc_ctx_invalidate(struct ptlrpc_request *req);
1143 int sptlrpc_target_export_check(struct obd_export *exp,
1144 struct ptlrpc_request *req);
1145 void sptlrpc_target_update_exp_flavor(struct obd_device *obd,
1146 struct sptlrpc_rule_set *rset);
1151 int sptlrpc_svc_install_rvs_ctx(struct obd_import *imp,
1152 struct ptlrpc_svc_ctx *ctx);
1153 int sptlrpc_cli_install_rvs_ctx(struct obd_import *imp,
1154 struct ptlrpc_cli_ctx *ctx);
1156 /* bulk security api */
1157 int sptlrpc_enc_pool_add_user(void);
1158 int sptlrpc_enc_pool_del_user(void);
1159 int sptlrpc_enc_pool_get_pages(struct ptlrpc_bulk_desc *desc);
1160 void sptlrpc_enc_pool_put_pages(struct ptlrpc_bulk_desc *desc);
1161 int get_free_pages_in_pool(void);
1162 int pool_is_at_full_capacity(void);
1164 int sptlrpc_cli_wrap_bulk(struct ptlrpc_request *req,
1165 struct ptlrpc_bulk_desc *desc);
1166 int sptlrpc_cli_unwrap_bulk_read(struct ptlrpc_request *req,
1167 struct ptlrpc_bulk_desc *desc,
1169 int sptlrpc_cli_unwrap_bulk_write(struct ptlrpc_request *req,
1170 struct ptlrpc_bulk_desc *desc);
1171 #ifdef HAVE_SERVER_SUPPORT
1172 int sptlrpc_svc_prep_bulk(struct ptlrpc_request *req,
1173 struct ptlrpc_bulk_desc *desc);
1174 int sptlrpc_svc_wrap_bulk(struct ptlrpc_request *req,
1175 struct ptlrpc_bulk_desc *desc);
1176 int sptlrpc_svc_unwrap_bulk(struct ptlrpc_request *req,
1177 struct ptlrpc_bulk_desc *desc);
1180 /* bulk helpers (internal use only by policies) */
1181 int sptlrpc_get_bulk_checksum(struct ptlrpc_bulk_desc *desc, __u8 alg,
1182 void *buf, int buflen);
1184 int bulk_sec_desc_unpack(struct lustre_msg *msg, int offset, int swabbed);
1186 /* user descriptor helpers */
1187 static inline int sptlrpc_user_desc_size(int ngroups)
1189 return sizeof(struct ptlrpc_user_desc) + ngroups * sizeof(__u32);
1192 int sptlrpc_current_user_desc_size(void);
1193 int sptlrpc_pack_user_desc(struct lustre_msg *msg, int offset);
1194 int sptlrpc_unpack_user_desc(struct lustre_msg *req, int offset, int swabbed);
1198 #endif /* _LUSTRE_SEC_H_ */