4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.gnu.org/licenses/gpl-2.0.html
23 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Use is subject to license terms.
26 * Copyright (c) 2012, 2017, Intel Corporation.
29 * This file is part of Lustre, http://www.lustre.org/
32 #ifndef _LUSTRE_SEC_H_
33 #define _LUSTRE_SEC_H_
35 /** \defgroup sptlrpc sptlrpc
/* Forward declarations: RPC request/reply and bulk transfer descriptor. */
struct ptlrpc_request;
struct ptlrpc_reply_state;
struct ptlrpc_bulk_desc;
/* Security policy and its client/server operation vectors. */
struct ptlrpc_sec_policy;
struct ptlrpc_sec_cops;
struct ptlrpc_sec_sops;
/* Server/client security contexts and client context operations. */
struct ptlrpc_svc_ctx;
struct ptlrpc_cli_ctx;
struct ptlrpc_ctx_ops;
68 * \addtogroup flavor flavor
70 * RPC flavor is represented by a 32 bits integer. Currently the high 12 bits
71 * are unused, must be set to 0 for future expansion.
73 * ------------------------------------------------------------------------
74 * | 4b (bulk svc) | 4b (bulk type) | 4b (svc) | 4b (mech) | 4b (policy) |
75 * ------------------------------------------------------------------------
/* Top-level security policies (the low "policy" nibble of a wire flavor). */
SPTLRPC_POLICY_NULL = 0,
SPTLRPC_POLICY_PLAIN = 1,
SPTLRPC_POLICY_GSS = 2,
/* Mechanisms of the null policy: only the null mechanism exists. */
enum sptlrpc_mech_null {
SPTLRPC_MECH_NULL = 0,
SPTLRPC_MECH_NULL_MAX,
/* Mechanisms of the plain policy: only the plain mechanism exists. */
enum sptlrpc_mech_plain {
SPTLRPC_MECH_PLAIN = 0,
SPTLRPC_MECH_PLAIN_MAX,
/* Mechanisms of the GSS policy: gssnull, Kerberos 5, Shared Key (SK). */
enum sptlrpc_mech_gss {
SPTLRPC_MECH_GSS_NULL = 0,
SPTLRPC_MECH_GSS_KRB5 = 1,
SPTLRPC_MECH_GSS_SK = 2,
SPTLRPC_MECH_GSS_MAX,
/* Per-RPC protection level (the "svc" nibble of a wire flavor). */
enum sptlrpc_service_type {
SPTLRPC_SVC_NULL = 0, /**< no security */
SPTLRPC_SVC_AUTH = 1, /**< authentication only */
SPTLRPC_SVC_INTG = 2, /**< integrity */
SPTLRPC_SVC_PRIV = 3, /**< privacy */
/* Bulk transfer flavor type (the "bulk type" nibble of a wire flavor). */
enum sptlrpc_bulk_type {
SPTLRPC_BULK_DEFAULT = 0, /**< follow rpc flavor */
SPTLRPC_BULK_HASH = 1, /**< hash integrity */
/* Bulk transfer protection level (the "bulk svc" nibble of a wire flavor). */
enum sptlrpc_bulk_service {
SPTLRPC_BULK_SVC_NULL = 0, /**< no security */
SPTLRPC_BULK_SVC_AUTH = 1, /**< authentication only */
SPTLRPC_BULK_SVC_INTG = 2, /**< integrity */
SPTLRPC_BULK_SVC_PRIV = 3, /**< privacy */
SPTLRPC_BULK_SVC_MAX,
131 * compose/extract macros
/*
 * Wire-flavor compose/extract helpers.
 *
 * A wire flavor packs five 4-bit fields into the low 20 bits of a __u32
 * (the high 12 bits must remain zero):
 *   bits  0..3   policy
 *   bits  4..7   mechanism
 *   bits  8..11  rpc service type
 *   bits 12..15  bulk type
 *   bits 16..19  bulk service type
 */
#define FLVR_POLICY_OFFSET	(0)
#define FLVR_MECH_OFFSET	(4)
#define FLVR_SVC_OFFSET		(8)
#define FLVR_BULK_TYPE_OFFSET	(12)
#define FLVR_BULK_SVC_OFFSET	(16)

/* Pack the five 4-bit fields into a single 32-bit wire flavor. */
#define MAKE_FLVR(policy, mech, svc, btype, bsvc)		\
	(((__u32)(policy) << FLVR_POLICY_OFFSET) |		\
	 ((__u32)(mech)   << FLVR_MECH_OFFSET) |		\
	 ((__u32)(svc)    << FLVR_SVC_OFFSET) |			\
	 ((__u32)(btype)  << FLVR_BULK_TYPE_OFFSET) |		\
	 ((__u32)(bsvc)   << FLVR_BULK_SVC_OFFSET))

/* Extract one 4-bit field from a wire flavor. */
#define SPTLRPC_FLVR_POLICY(flavor)				\
	((((__u32)(flavor)) >> FLVR_POLICY_OFFSET) & 0xF)
#define SPTLRPC_FLVR_MECH(flavor)				\
	((((__u32)(flavor)) >> FLVR_MECH_OFFSET) & 0xF)
#define SPTLRPC_FLVR_SVC(flavor)				\
	((((__u32)(flavor)) >> FLVR_SVC_OFFSET) & 0xF)
#define SPTLRPC_FLVR_BULK_TYPE(flavor)				\
	((((__u32)(flavor)) >> FLVR_BULK_TYPE_OFFSET) & 0xF)
#define SPTLRPC_FLVR_BULK_SVC(flavor)				\
	((((__u32)(flavor)) >> FLVR_BULK_SVC_OFFSET) & 0xF)

/* Base part: policy + mech + svc (12 bits); sub part: mech + svc (8 bits). */
#define SPTLRPC_FLVR_BASE(flavor)				\
	((((__u32)(flavor)) >> FLVR_POLICY_OFFSET) & 0xFFF)
#define SPTLRPC_FLVR_BASE_SUB(flavor)				\
	((((__u32)(flavor)) >> FLVR_MECH_OFFSET) & 0xFF)
/* Compose a base sub-flavor (mechanism + svc) used for flavor comparison. */
#define MAKE_BASE_SUBFLVR(mech, svc) \
((__u32)(svc) << (FLVR_SVC_OFFSET - FLVR_MECH_OFFSET)))
/* Predefined GSS sub-flavors: gssnull, then krb5 and SK at each svc level. */
#define SPTLRPC_SUBFLVR_GSSNULL \
MAKE_BASE_SUBFLVR(SPTLRPC_MECH_GSS_NULL, SPTLRPC_SVC_NULL)
#define SPTLRPC_SUBFLVR_KRB5N \
MAKE_BASE_SUBFLVR(SPTLRPC_MECH_GSS_KRB5, SPTLRPC_SVC_NULL)
#define SPTLRPC_SUBFLVR_KRB5A \
MAKE_BASE_SUBFLVR(SPTLRPC_MECH_GSS_KRB5, SPTLRPC_SVC_AUTH)
#define SPTLRPC_SUBFLVR_KRB5I \
MAKE_BASE_SUBFLVR(SPTLRPC_MECH_GSS_KRB5, SPTLRPC_SVC_INTG)
#define SPTLRPC_SUBFLVR_KRB5P \
MAKE_BASE_SUBFLVR(SPTLRPC_MECH_GSS_KRB5, SPTLRPC_SVC_PRIV)
/* Shared-key (SK) sub-flavors. */
#define SPTLRPC_SUBFLVR_SKN \
MAKE_BASE_SUBFLVR(SPTLRPC_MECH_GSS_SK, SPTLRPC_SVC_NULL)
#define SPTLRPC_SUBFLVR_SKA \
MAKE_BASE_SUBFLVR(SPTLRPC_MECH_GSS_SK, SPTLRPC_SVC_AUTH)
#define SPTLRPC_SUBFLVR_SKI \
MAKE_BASE_SUBFLVR(SPTLRPC_MECH_GSS_SK, SPTLRPC_SVC_INTG)
#define SPTLRPC_SUBFLVR_SKPI \
MAKE_BASE_SUBFLVR(SPTLRPC_MECH_GSS_SK, SPTLRPC_SVC_PRIV)
/* Predefined full wire flavors, composed with MAKE_FLVR(). */
/* null: no protection at all. */
#define SPTLRPC_FLVR_NULL \
MAKE_FLVR(SPTLRPC_POLICY_NULL, \
SPTLRPC_BULK_DEFAULT, \
SPTLRPC_BULK_SVC_NULL)
/* plain: plain mechanism with integrity-checked bulk. */
#define SPTLRPC_FLVR_PLAIN \
MAKE_FLVR(SPTLRPC_POLICY_PLAIN, \
SPTLRPC_MECH_PLAIN, \
SPTLRPC_BULK_SVC_INTG)
/* gssnull: GSS policy, null mechanism. */
#define SPTLRPC_FLVR_GSSNULL \
MAKE_FLVR(SPTLRPC_POLICY_GSS, \
SPTLRPC_MECH_GSS_NULL, \
SPTLRPC_BULK_DEFAULT, \
SPTLRPC_BULK_SVC_NULL)
/* krb5n: GSS/Kerberos 5, unprotected bulk. */
#define SPTLRPC_FLVR_KRB5N \
MAKE_FLVR(SPTLRPC_POLICY_GSS, \
SPTLRPC_MECH_GSS_KRB5, \
SPTLRPC_BULK_DEFAULT, \
SPTLRPC_BULK_SVC_NULL)
/* krb5a: GSS/Kerberos 5, unprotected bulk. */
#define SPTLRPC_FLVR_KRB5A \
MAKE_FLVR(SPTLRPC_POLICY_GSS, \
SPTLRPC_MECH_GSS_KRB5, \
SPTLRPC_BULK_DEFAULT, \
SPTLRPC_BULK_SVC_NULL)
/* krb5i: GSS/Kerberos 5, integrity-protected bulk. */
#define SPTLRPC_FLVR_KRB5I \
MAKE_FLVR(SPTLRPC_POLICY_GSS, \
SPTLRPC_MECH_GSS_KRB5, \
SPTLRPC_BULK_DEFAULT, \
SPTLRPC_BULK_SVC_INTG)
/* krb5p: GSS/Kerberos 5, privacy-protected bulk. */
#define SPTLRPC_FLVR_KRB5P \
MAKE_FLVR(SPTLRPC_POLICY_GSS, \
SPTLRPC_MECH_GSS_KRB5, \
SPTLRPC_BULK_DEFAULT, \
SPTLRPC_BULK_SVC_PRIV)
/* skn: GSS/Shared Key, unprotected bulk. */
#define SPTLRPC_FLVR_SKN \
MAKE_FLVR(SPTLRPC_POLICY_GSS, \
SPTLRPC_MECH_GSS_SK, \
SPTLRPC_BULK_DEFAULT, \
SPTLRPC_BULK_SVC_NULL)
/* ska: GSS/Shared Key, unprotected bulk. */
#define SPTLRPC_FLVR_SKA \
MAKE_FLVR(SPTLRPC_POLICY_GSS, \
SPTLRPC_MECH_GSS_SK, \
SPTLRPC_BULK_DEFAULT, \
SPTLRPC_BULK_SVC_NULL)
/* ski: GSS/Shared Key, integrity-protected bulk. */
#define SPTLRPC_FLVR_SKI \
MAKE_FLVR(SPTLRPC_POLICY_GSS, \
SPTLRPC_MECH_GSS_SK, \
SPTLRPC_BULK_DEFAULT, \
SPTLRPC_BULK_SVC_INTG)
/* skpi: GSS/Shared Key, privacy-protected bulk. */
#define SPTLRPC_FLVR_SKPI \
MAKE_FLVR(SPTLRPC_POLICY_GSS, \
SPTLRPC_MECH_GSS_SK, \
SPTLRPC_BULK_DEFAULT, \
SPTLRPC_BULK_SVC_PRIV)
/* Default flavor and sentinels (INVALID/ANY live above the 20 used bits). */
#define SPTLRPC_FLVR_DEFAULT SPTLRPC_FLVR_NULL
#define SPTLRPC_FLVR_INVALID ((__u32) 0xFFFFFFFF)
#define SPTLRPC_FLVR_ANY ((__u32) 0xFFF00000)
* extract the useful part from wire flavor
#define WIRE_FLVR(wflvr) (((__u32) (wflvr)) & 0x000FFFFF)
/*
 * Replace the rpc service-type nibble of *flvr with @svc,
 * keeping all other fields unchanged.
 */
static inline void flvr_set_svc(__u32 *flvr, __u32 svc)
LASSERT(svc < SPTLRPC_SVC_MAX);
*flvr = MAKE_FLVR(SPTLRPC_FLVR_POLICY(*flvr),
SPTLRPC_FLVR_MECH(*flvr), svc,
SPTLRPC_FLVR_BULK_TYPE(*flvr),
SPTLRPC_FLVR_BULK_SVC(*flvr));
/*
 * Replace the bulk service-type nibble of *flvr with @svc,
 * keeping all other fields unchanged.
 */
static inline void flvr_set_bulk_svc(__u32 *flvr, __u32 svc)
LASSERT(svc < SPTLRPC_BULK_SVC_MAX);
*flvr = MAKE_FLVR(SPTLRPC_FLVR_POLICY(*flvr),
SPTLRPC_FLVR_MECH(*flvr),
SPTLRPC_FLVR_SVC(*flvr),
SPTLRPC_FLVR_BULK_TYPE(*flvr), svc);
/* Hash-specific bulk flavor parameters (embedded in sptlrpc_flavor below). */
struct bulk_spec_hash {
 * Full description of flavors being used on a ptlrpc connection, include
 * both regular RPC and bulk transfer parts.
struct sptlrpc_flavor {
 * wire flavor, should be renamed to sf_wire.
 * general flags of PTLRPC_SEC_FL_*
 * rpc flavor specification
/* nothing for now */
 * bulk flavor specification
struct bulk_spec_hash hash;
 * identify the RPC is generated from what part of Lustre. It's encoded into
 * RPC requests and to be checked by ptlrpc service.
enum lustre_sec_part {
/* Map a sec part to its name; derive the sec part of an obd device. */
const char *sptlrpc_part2name(enum lustre_sec_part sp);
enum lustre_sec_part sptlrpc_target_sec_part(struct obd_device *obd);
 * A rule specifies a flavor to be used by a ptlrpc connection between
/* One flavor rule: (network, from-part, to-part) -> flavor. */
struct sptlrpc_rule {
__u32 sr_netid; /* LNET network ID */
__u8 sr_from; /* sec_part */
__u8 sr_to; /* sec_part */
struct sptlrpc_flavor sr_flvr;
351 * A set of rules in memory.
353 * Rules are generated and stored on MGS, and propagated to MDT, OST,
354 * and client when needed.
/* An in-memory array of flavor rules (generated on MGS, propagated out). */
struct sptlrpc_rule_set {
struct sptlrpc_rule *srs_rules;
/* Parse a flavor string; test whether a flavor has a bulk part. */
int sptlrpc_parse_flavor(const char *str, struct sptlrpc_flavor *flvr);
int sptlrpc_flavor_has_bulk(struct sptlrpc_flavor *flvr);
/* Reset a rule set to empty. */
static inline void sptlrpc_rule_set_init(struct sptlrpc_rule_set *set)
memset(set, 0, sizeof(*set));
/* Rule-set manipulation: free, grow, merge a rule, choose a flavor, dump. */
void sptlrpc_rule_set_free(struct sptlrpc_rule_set *set);
int sptlrpc_rule_set_expand(struct sptlrpc_rule_set *set);
int sptlrpc_rule_set_merge(struct sptlrpc_rule_set *set,
struct sptlrpc_rule *rule);
int sptlrpc_rule_set_choose(struct sptlrpc_rule_set *rset,
enum lustre_sec_part from,
enum lustre_sec_part to,
struct lnet_nid *nid,
struct sptlrpc_flavor *sf);
void sptlrpc_rule_set_dump(struct sptlrpc_rule_set *set);
/* Configuration-log driven flavor updates. */
int sptlrpc_process_config(struct lustre_cfg *lcfg);
void sptlrpc_conf_log_start(const char *logname);
void sptlrpc_conf_log_stop(const char *logname);
void sptlrpc_conf_log_update_begin(const char *logname);
void sptlrpc_conf_log_update_end(const char *logname);
void sptlrpc_conf_client_adapt(struct obd_device *obd);
int sptlrpc_conf_target_get_rules(struct obd_device *obd,
struct sptlrpc_rule_set *rset);
void sptlrpc_target_choose_flavor(struct sptlrpc_rule_set *rset,
enum lustre_sec_part from,
struct lnet_nid *nid,
struct sptlrpc_flavor *flavor);
/* The maximum length of security payload. 1024 is enough for Kerberos 5,
 * and should be enough for other future mechanisms but not sure.
 * Only used by pre-allocated request/reply pool.
#define SPTLRPC_MAX_PAYLOAD (1024)
/*
 * Per-context operations implemented by each security policy for the
 * client side: context lifetime, request/reply sign/verify/seal/unseal,
 * and bulk data wrap/unwrap.
 */
struct ptlrpc_ctx_ops {
 * To determine whether it's suitable to use the \a ctx for \a vcred.
int (*match)(struct ptlrpc_cli_ctx *ctx, struct vfs_cred *vcred);
 * To bring the \a ctx uptodate.
int (*refresh)(struct ptlrpc_cli_ctx *ctx);
 * Validate the \a ctx.
int (*validate)(struct ptlrpc_cli_ctx *ctx);
 * Force the \a ctx to die.
void (*die)(struct ptlrpc_cli_ctx *ctx, int grace);
int (*display)(struct ptlrpc_cli_ctx *ctx, char *buf, int bufsize);
 * Sign the request message using \a ctx.
 * \pre req->rq_reqmsg point to request message.
 * \pre req->rq_reqlen is the request message length.
 * \post req->rq_reqbuf point to request message with signature.
 * \post req->rq_reqdata_len is set to the final request message size.
 * \see null_ctx_sign(), plain_ctx_sign(), gss_cli_ctx_sign().
int (*sign)(struct ptlrpc_cli_ctx *ctx, struct ptlrpc_request *req);
 * Verify the reply message using \a ctx.
 * \pre req->rq_repdata point to reply message with signature.
 * \pre req->rq_repdata_len is the total reply message length.
 * \post req->rq_repmsg point to reply message without signature.
 * \post req->rq_replen is the reply message length.
 * \see null_ctx_verify(), plain_ctx_verify(), gss_cli_ctx_verify().
int (*verify)(struct ptlrpc_cli_ctx *ctx, struct ptlrpc_request *req);
 * Encrypt the request message using \a ctx.
 * \pre req->rq_reqmsg point to request message in clear text.
 * \pre req->rq_reqlen is the request message length.
 * \post req->rq_reqbuf point to request message.
 * \post req->rq_reqdata_len is set to the final request message size.
 * \see gss_cli_ctx_seal().
int (*seal)(struct ptlrpc_cli_ctx *ctx, struct ptlrpc_request *req);
 * Decrypt the reply message using \a ctx.
 * \pre req->rq_repdata point to encrypted reply message.
 * \pre req->rq_repdata_len is the total cipher text length.
 * \post req->rq_repmsg point to reply message in clear text.
 * \post req->rq_replen is the reply message length in clear text.
 * \see gss_cli_ctx_unseal().
int (*unseal)(struct ptlrpc_cli_ctx *ctx, struct ptlrpc_request *req);
 * Wrap bulk request data. This is called before wrapping RPC
 * \pre bulk buffer is described by desc->bd_iov and
 * desc->bd_iov_count. note for read it's just buffer, no data
 * need to be sent; for write it contains data in clear text.
 * \post when necessary, ptlrpc_bulk_sec_desc was properly prepared
 * (usually inside of RPC request message).
 * - encryption: cipher text bulk buffer is described by
 * desc->bd_enc_iov and desc->bd_iov_count (currently assume iov
 * count remains the same).
 * - otherwise: bulk buffer is still desc->bd_iov and
 * desc->bd_iov_count.
 * \return 0: success.
 * \return -ev: error code.
 * \see plain_cli_wrap_bulk(), gss_cli_ctx_wrap_bulk().
int (*wrap_bulk)(struct ptlrpc_cli_ctx *ctx,
struct ptlrpc_request *req,
struct ptlrpc_bulk_desc *desc);
 * Unwrap bulk reply data. This is called after wrapping RPC
 * \pre bulk buffer is described by desc->bd_iov/desc->bd_enc_iov and
 * desc->bd_iov_count, according to wrap_bulk().
 * \post final bulk data in clear text is placed in buffer described
 * by desc->bd_iov and desc->bd_iov_count.
 * \return +ve nob of actual bulk data in clear text.
 * \return -ve error code.
 * \see plain_cli_unwrap_bulk(), gss_cli_ctx_unwrap_bulk().
int (*unwrap_bulk)(struct ptlrpc_cli_ctx *ctx,
struct ptlrpc_request *req,
struct ptlrpc_bulk_desc *desc);
/* Bit numbers of client-context state flags (stored in cc_flags). */
#define PTLRPC_CTX_NEW_BIT (0) /* newly created */
#define PTLRPC_CTX_UPTODATE_BIT (1) /* uptodate */
#define PTLRPC_CTX_DEAD_BIT (2) /* mark expired gracefully */
#define PTLRPC_CTX_ERROR_BIT (3) /* fatal error (refresh, etc.) */
#define PTLRPC_CTX_CACHED_BIT (8) /* in ctx cache (hash etc.) */
#define PTLRPC_CTX_ETERNAL_BIT (9) /* always valid */
/* Flag masks corresponding to the bit numbers above. */
#define PTLRPC_CTX_NEW BIT(PTLRPC_CTX_NEW_BIT)
#define PTLRPC_CTX_UPTODATE BIT(PTLRPC_CTX_UPTODATE_BIT)
#define PTLRPC_CTX_DEAD BIT(PTLRPC_CTX_DEAD_BIT)
#define PTLRPC_CTX_ERROR BIT(PTLRPC_CTX_ERROR_BIT)
#define PTLRPC_CTX_CACHED BIT(PTLRPC_CTX_CACHED_BIT)
#define PTLRPC_CTX_ETERNAL BIT(PTLRPC_CTX_ETERNAL_BIT)
/* Mask of the flags that together describe a context's status. */
#define PTLRPC_CTX_STATUS_MASK (PTLRPC_CTX_UPTODATE | \
/*
 * A client-side security context, refcounted and cached; manipulated
 * through cc_ops (struct ptlrpc_ctx_ops above).
 */
struct ptlrpc_cli_ctx {
struct hlist_node cc_cache; /* linked into ctx cache */
atomic_t cc_refcount;
struct ptlrpc_sec *cc_sec;
struct ptlrpc_ctx_ops *cc_ops;
time64_t cc_expire; /* in seconds */
unsigned int cc_early_expire:1;
unsigned long cc_flags; /* PTLRPC_CTX_* flags above */
struct vfs_cred cc_vcred;
struct list_head cc_req_list; /* waiting reqs linked here */
struct list_head cc_gc_chain; /* linked to gc chain */
551 * client side policy operation vector.
/*
 * Client-side policy operation vector: sec instance lifetime, context
 * cache management, and request/reply buffer management.
 */
struct ptlrpc_sec_cops {
 * Given an \a imp, create and initialize a ptlrpc_sec structure.
 * \param ctx service context:
 * - regular import: \a ctx should be NULL;
 * - reverse import: \a ctx is obtained from incoming request.
 * \param flavor specify what flavor to use.
 * When necessary, policy module is responsible for taking reference
 * \see null_create_sec(), plain_create_sec(), gss_sec_create_kr().
struct ptlrpc_sec *(*create_sec)(struct obd_import *imp,
struct ptlrpc_svc_ctx *ctx,
struct sptlrpc_flavor *flavor);
 * Destructor of ptlrpc_sec. When called, refcount has been dropped
 * to 0 and all contexts has been destroyed.
 * \see null_destroy_sec(), plain_destroy_sec(), gss_sec_destroy_kr().
void (*destroy_sec)(struct ptlrpc_sec *sec);
 * Notify that this ptlrpc_sec is going to die. Optionally, policy
 * module is supposed to set sec->ps_dying and whatever necessary
 * \see plain_kill_sec(), gss_sec_kill().
void (*kill_sec)(struct ptlrpc_sec *sec);
 * Given \a vcred, lookup and/or create its context. The policy module
 * is supposed to maintain its own context cache.
 * XXX currently \a create and \a remove_dead is always 1, perhaps
 * should be removed completely.
 * \see null_lookup_ctx(), plain_lookup_ctx(), gss_sec_lookup_ctx_kr().
struct ptlrpc_cli_ctx *(*lookup_ctx)(struct ptlrpc_sec *sec,
struct vfs_cred *vcred,
int create, int remove_dead);
 * Called then the reference of \a ctx dropped to 0. The policy module
 * is supposed to destroy this context or whatever else according to
 * its cache maintenance mechanism.
 * \param sync if zero, we shouldn't wait for the context being
 * destroyed completely.
 * \see plain_release_ctx(), gss_sec_release_ctx_kr().
void (*release_ctx)(struct ptlrpc_sec *sec,
struct ptlrpc_cli_ctx *ctx, int sync);
 * Flush the context cache.
 * \param uid context of which user, -1 means all contexts.
 * \param grace if zero, the PTLRPC_CTX_UPTODATE_BIT of affected
 * contexts should be cleared immediately.
 * \param force if zero, only idle contexts will be flushed.
 * \see plain_flush_ctx_cache(), gss_sec_flush_ctx_cache_kr().
int (*flush_ctx_cache)(struct ptlrpc_sec *sec, uid_t uid, int grace,
 * Called periodically by garbage collector to remove dead contexts
 * \see gss_sec_gc_ctx_kr().
void (*gc_ctx)(struct ptlrpc_sec *sec);
 * Given an context \a ctx, install a corresponding reverse service
 * context on client side.
 * XXX currently it's only used by GSS module, maybe we should remove
 * this from general API.
int (*install_rctx)(struct obd_import *imp, struct ptlrpc_sec *sec,
struct ptlrpc_cli_ctx *ctx);
 * To allocate request buffer for \a req.
 * \pre req->rq_reqmsg == NULL.
 * \pre req->rq_reqbuf == NULL, otherwise it must be pre-allocated,
 * we are not supposed to free it.
 * \post if success, req->rq_reqmsg point to a buffer with size
 * at least \a lustre_msg_size.
 * \see null_alloc_reqbuf(), plain_alloc_reqbuf(), gss_alloc_reqbuf().
int (*alloc_reqbuf)(struct ptlrpc_sec *sec,
struct ptlrpc_request *req, int lustre_msg_size);
 * To free request buffer for \a req.
 * \pre req->rq_reqbuf != NULL.
 * \see null_free_reqbuf(), plain_free_reqbuf(), gss_free_reqbuf().
void (*free_reqbuf)(struct ptlrpc_sec *sec,
struct ptlrpc_request *req);
 * To allocate reply buffer for \a req.
 * \pre req->rq_repbuf == NULL.
 * \post if success, req->rq_repbuf point to a buffer with size
 * req->rq_repbuf_len, the size should be large enough to receive
 * reply which be transformed from \a lustre_msg_size of clear text.
 * \see null_alloc_repbuf(), plain_alloc_repbuf(), gss_alloc_repbuf().
int (*alloc_repbuf)(struct ptlrpc_sec *sec, struct ptlrpc_request *req,
int lustre_msg_size);
 * To free reply buffer for \a req.
 * \pre req->rq_repbuf != NULL.
 * \post req->rq_repbuf == NULL.
 * \post req->rq_repbuf_len == 0.
 * \see null_free_repbuf(), plain_free_repbuf(), gss_free_repbuf().
void (*free_repbuf)(struct ptlrpc_sec *sec,
struct ptlrpc_request *req);
 * To expand the request buffer of \a req, thus the \a segment in
 * the request message pointed by req->rq_reqmsg can accommodate
 * at least \a newsize of data.
 * \pre req->rq_reqmsg->lm_buflens[segment] < newsize.
 * \see null_enlarge_reqbuf(), plain_enlarge_reqbuf(),
 * gss_enlarge_reqbuf().
int (*enlarge_reqbuf)(struct ptlrpc_sec *sec,
struct ptlrpc_request *req, int segment,
int (*display)(struct ptlrpc_sec *sec, struct seq_file *seq);
711 * server side policy operation vector.
/*
 * Server-side policy operation vector: accept/authorize RPCs, manage
 * server contexts and reply states, and handle bulk data.
 */
struct ptlrpc_sec_sops {
 * verify an incoming request.
 * \pre request message is pointed by req->rq_reqbuf, size is
 * req->rq_reqdata_len; and the message has been unpacked to
 * \retval SECSVC_OK success, req->rq_reqmsg point to request message
 * in clear text, size is req->rq_reqlen; req->rq_svc_ctx is set;
 * req->rq_sp_from is decoded from request.
 * \retval SECSVC_COMPLETE success, the request has been fully
 * processed, and reply message has been prepared; req->rq_sp_from is
 * decoded from request.
 * \retval SECSVC_DROP failed, this request should be dropped.
 * \see null_accept(), plain_accept(), gss_svc_accept_kr().
int (*accept)(struct ptlrpc_request *req);
 * Perform security transformation upon reply message.
 * \pre reply message is pointed by req->rq_reply_state->rs_msg, size
 * \post req->rs_repdata_len is the final message size.
 * \post req->rq_reply_off is set.
 * \see null_authorize(), plain_authorize(), gss_svc_authorize().
int (*authorize)(struct ptlrpc_request *req);
 * Invalidate server context \a ctx.
 * \see gss_svc_invalidate_ctx().
void (*invalidate_ctx)(struct ptlrpc_svc_ctx *ctx);
 * Allocate a ptlrpc_reply_state.
 * \param msgsize size of the reply message in clear text.
 * \pre if req->rq_reply_state != NULL, then it's pre-allocated, we
 * should simply use it; otherwise we'll responsible for allocating
 * \post req->rq_reply_state != NULL;
 * \post req->rq_reply_state->rs_msg != NULL;
 * \see null_alloc_rs(), plain_alloc_rs(), gss_svc_alloc_rs().
int (*alloc_rs)(struct ptlrpc_request *req, int msgsize);
 * Free a ptlrpc_reply_state.
void (*free_rs)(struct ptlrpc_reply_state *rs);
 * Release the server context \a ctx.
 * \see gss_svc_free_ctx().
void (*free_ctx)(struct ptlrpc_svc_ctx *ctx);
 * Install a reverse context based on the server context \a ctx.
 * \see gss_svc_install_rctx_kr().
int (*install_rctx)(struct obd_import *imp, struct ptlrpc_svc_ctx *ctx);
 * Prepare buffer for incoming bulk write.
 * \pre desc->bd_iov and desc->bd_iov_count describes the buffer
 * intended to receive the write.
 * \see gss_svc_prep_bulk().
int (*prep_bulk)(struct ptlrpc_request *req,
struct ptlrpc_bulk_desc *desc);
 * Unwrap the bulk write data.
 * \see plain_svc_unwrap_bulk(), gss_svc_unwrap_bulk().
int (*unwrap_bulk)(struct ptlrpc_request *req,
struct ptlrpc_bulk_desc *desc);
 * Wrap the bulk read data.
 * \see plain_svc_wrap_bulk(), gss_svc_wrap_bulk().
int (*wrap_bulk)(struct ptlrpc_request *req,
struct ptlrpc_bulk_desc *desc);
/* A registered security policy: owning module plus its op vectors. */
struct ptlrpc_sec_policy {
struct module *sp_owner;
__u16 sp_policy; /* policy number */
struct ptlrpc_sec_cops *sp_cops; /* client ops */
struct ptlrpc_sec_sops *sp_sops; /* server ops */
/*
 * General flags of a ptlrpc_sec instance, stored in
 * sptlrpc_flavor::sf_flags.
 */
#define PTLRPC_SEC_FL_REVERSE	0x0001	/* this sec serves a reverse import */
#define PTLRPC_SEC_FL_ROOTONLY	0x0002	/* map every user to root */
#define PTLRPC_SEC_FL_UDESC	0x0004	/* ship a user descriptor */
#define PTLRPC_SEC_FL_BULK	0x0008	/* heavy bulk i/o expected */
#define PTLRPC_SEC_FL_PAG	0x0010	/* PAG mode */
/* RCU-protected snapshot of the SELinux policy information string. */
struct sptlrpc_sepol {
struct rcu_head ssp_rcu;
/** mtime of SELinux policy file */
 * SELinux policy info
 * sepol string format is:
 * <mode>:<policy name>:<policy version>:<policy hash>
__u32 ssp_sepol_size;
842 * The ptlrpc_sec represents the client side ptlrpc security facilities,
843 * each obd_import (both regular and reverse import) must associate with
846 * \see sptlrpc_import_sec_adapt().
/* Fields of the client-side security instance (presumably struct
 * ptlrpc_sec, per the ps_ prefix — opening line not visible here). */
struct ptlrpc_sec_policy *ps_policy;
atomic_t ps_refcount;
/** statistic only */
/** unique identifier */
struct sptlrpc_flavor ps_flvr;
enum lustre_sec_part ps_part;
/** after set, no more new context will be created */
unsigned int ps_dying:1;
/* owning import; see create_sec() in ptlrpc_sec_cops */
struct obd_import *ps_import;
/** next check time of SELinux policy file */
ktime_t ps_sepol_checknext;
/** SELinux policy file information */
struct sptlrpc_sepol *ps_sepol;
/* garbage collection bookkeeping */
struct list_head ps_gc_list;
time64_t ps_gc_interval; /* in seconds */
time64_t ps_gc_next; /* in seconds */
/*
 * True for flavors usable by root only: GSS policy with the gssnull
 * or shared-key mechanism.
 */
static inline int flvr_is_rootonly(__u32 flavor)
return (SPTLRPC_FLVR_POLICY(flavor) == SPTLRPC_POLICY_GSS &&
(SPTLRPC_FLVR_MECH(flavor) == SPTLRPC_MECH_GSS_NULL ||
SPTLRPC_FLVR_MECH(flavor) == SPTLRPC_MECH_GSS_SK));
/*
 * True for flavors that allow shipping a user descriptor; currently
 * the same mechanism set as flvr_is_rootonly().
 */
static inline int flvr_allows_user_desc(__u32 flavor)
return (SPTLRPC_FLVR_POLICY(flavor) == SPTLRPC_POLICY_GSS &&
(SPTLRPC_FLVR_MECH(flavor) == SPTLRPC_MECH_GSS_NULL ||
SPTLRPC_FLVR_MECH(flavor) == SPTLRPC_MECH_GSS_SK));
/* Does this sec instance serve a reverse import? */
static inline int sec_is_reverse(struct ptlrpc_sec *sec)
return (sec->ps_flvr.sf_flags & PTLRPC_SEC_FL_REVERSE);
/* Is this sec instance restricted to root? */
static inline int sec_is_rootonly(struct ptlrpc_sec *sec)
return (sec->ps_flvr.sf_flags & PTLRPC_SEC_FL_ROOTONLY);
/* A server-side security context, refcounted, owned by a policy. */
struct ptlrpc_svc_ctx {
atomic_t sc_refcount;
struct ptlrpc_sec_policy *sc_policy;
 * user identity descriptor
#define LUSTRE_MAX_GROUPS (128)
/* User identity shipped with a request (uid/gid/groups etc.). */
struct ptlrpc_user_desc {
/* Hash algorithms available for bulk integrity checking. */
enum sptlrpc_bulk_hash_alg {
BULK_HASH_ALG_NULL = 0,
BULK_HASH_ALG_ADLER32,
BULK_HASH_ALG_SHA256,
BULK_HASH_ALG_SHA384,
BULK_HASH_ALG_SHA512,
/* Map a hash algorithm to its name and back. */
const char *sptlrpc_get_hash_name(__u8 hash_alg);
__u8 sptlrpc_get_hash_alg(const char *algname);
/* Bulk security descriptor carried with a request/reply message. */
struct ptlrpc_bulk_sec_desc {
__u8 bsd_version; /* 0 */
__u8 bsd_type; /* SPTLRPC_BULK_XXX */
__u8 bsd_svc; /* SPTLRPC_BULK_SVC_XXXX */
__u8 bsd_flags; /* flags */
__u32 bsd_nob; /* nob of bulk data */
/* NOTE(review): [0] is the pre-C99 flexible-array idiom; this looks
 * like an on-the-wire layout (versioned, byte-counted) — confirm
 * before changing to a C99 flexible array member. */
__u8 bsd_data[0]; /* policy-specific token */
/* debugfs/procfs roots for sptlrpc statistics. */
extern struct dentry *sptlrpc_debugfs_dir;
extern struct proc_dir_entry *sptlrpc_lprocfs_dir;
 * round size up to next power of 2, for slab allocation.
 * @size must be sane (can't overflow after round up)
static inline int size_roundup_power2(int size)
971 * internal support libraries
/* Internal support: message manipulation and policy registration. */
void _sptlrpc_enlarge_msg_inplace(struct lustre_msg *msg, int segment,
int sptlrpc_register_policy(struct ptlrpc_sec_policy *policy);
int sptlrpc_unregister_policy(struct ptlrpc_sec_policy *policy);
/* Flavor name <-> value conversions. */
__u32 sptlrpc_name2flavor_base(const char *name);
const char *sptlrpc_flavor2name_base(__u32 flvr);
char *sptlrpc_flavor2name_bulk(struct sptlrpc_flavor *sf, char *buf,
char *sptlrpc_flavor2name(struct sptlrpc_flavor *sf, char *buf, int bufsize);
char *sptlrpc_secflags2str(__u32 flags, char *buf, int bufsize);
/* Pin/unpin the policy's owning module. */
static inline struct ptlrpc_sec_policy *
sptlrpc_policy_get(struct ptlrpc_sec_policy *policy)
__module_get(policy->sp_owner);
sptlrpc_policy_put(struct ptlrpc_sec_policy *policy)
module_put(policy->sp_owner);
/* Status bits of a client context, masked from cc_flags. */
unsigned long cli_ctx_status(struct ptlrpc_cli_ctx *ctx)
return (ctx->cc_flags & PTLRPC_CTX_STATUS_MASK);
/* Ready means uptodate and neither dead nor in error. */
int cli_ctx_is_ready(struct ptlrpc_cli_ctx *ctx)
return (cli_ctx_status(ctx) == PTLRPC_CTX_UPTODATE);
/* Refreshed means any status bit is set at all. */
int cli_ctx_is_refreshed(struct ptlrpc_cli_ctx *ctx)
return (cli_ctx_status(ctx) != 0);
int cli_ctx_is_uptodate(struct ptlrpc_cli_ctx *ctx)
return ((ctx->cc_flags & PTLRPC_CTX_UPTODATE) != 0);
int cli_ctx_is_error(struct ptlrpc_cli_ctx *ctx)
return ((ctx->cc_flags & PTLRPC_CTX_ERROR) != 0);
/* Dead covers both graceful expiry and fatal error. */
int cli_ctx_is_dead(struct ptlrpc_cli_ctx *ctx)
return ((ctx->cc_flags & (PTLRPC_CTX_DEAD | PTLRPC_CTX_ERROR)) != 0);
int cli_ctx_is_eternal(struct ptlrpc_cli_ctx *ctx)
return ((ctx->cc_flags & PTLRPC_CTX_ETERNAL) != 0);
/* Refcounting of sec instances. */
struct ptlrpc_sec *sptlrpc_sec_get(struct ptlrpc_sec *sec);
void sptlrpc_sec_put(struct ptlrpc_sec *sec);
 * internal apis which are only used by policy implementation
int sptlrpc_get_next_secid(void);
void sptlrpc_sec_destroy(struct ptlrpc_sec *sec);
 * exported client context api
struct ptlrpc_cli_ctx *sptlrpc_cli_ctx_get(struct ptlrpc_cli_ctx *ctx);
void sptlrpc_cli_ctx_put(struct ptlrpc_cli_ctx *ctx, int sync);
void sptlrpc_cli_ctx_expire(struct ptlrpc_cli_ctx *ctx);
void sptlrpc_cli_ctx_wakeup(struct ptlrpc_cli_ctx *ctx);
int sptlrpc_cli_ctx_display(struct ptlrpc_cli_ctx *ctx, char *buf, int bufsize);
 * exported client context wrap/buffers
int sptlrpc_cli_wrap_request(struct ptlrpc_request *req);
int sptlrpc_cli_unwrap_reply(struct ptlrpc_request *req);
int sptlrpc_cli_alloc_reqbuf(struct ptlrpc_request *req, int msgsize);
void sptlrpc_cli_free_reqbuf(struct ptlrpc_request *req);
int sptlrpc_cli_alloc_repbuf(struct ptlrpc_request *req, int msgsize);
void sptlrpc_cli_free_repbuf(struct ptlrpc_request *req);
int sptlrpc_cli_enlarge_reqbuf(struct ptlrpc_request *req,
const struct req_msg_field *field,
int sptlrpc_cli_unwrap_early_reply(struct ptlrpc_request *req,
struct ptlrpc_request **req_ret);
void sptlrpc_cli_finish_early_reply(struct ptlrpc_request *early_req);
void sptlrpc_request_out_callback(struct ptlrpc_request *req);
/* Size of the sepol string, 0 for a NULL snapshot. */
static inline size_t sptlrpc_sepol_size(struct sptlrpc_sepol *sepol)
return sepol ? sepol->ssp_sepol_size : 0;
void sptlrpc_sepol_put(struct sptlrpc_sepol *pol);
struct sptlrpc_sepol *sptlrpc_sepol_get_cached(struct ptlrpc_sec *imp_sec);
struct sptlrpc_sepol *sptlrpc_sepol_get(struct ptlrpc_request *req);
 * exported higher interface of import & request
int sptlrpc_import_sec_adapt(struct obd_import *imp,
struct ptlrpc_svc_ctx *ctx,
struct sptlrpc_flavor *flvr);
struct ptlrpc_sec *sptlrpc_import_sec_ref(struct obd_import *imp);
void sptlrpc_import_sec_put(struct obd_import *imp);
int lprocfs_srpc_serverctx_seq_show(struct seq_file *m, void *data);
int sptlrpc_import_check_ctx(struct obd_import *imp);
void sptlrpc_import_flush_root_ctx(struct obd_import *imp);
void sptlrpc_import_flush_my_ctx(struct obd_import *imp);
void sptlrpc_import_flush_all_ctx(struct obd_import *imp);
int sptlrpc_req_get_ctx(struct ptlrpc_request *req);
void sptlrpc_req_put_ctx(struct ptlrpc_request *req, int sync);
int sptlrpc_req_refresh_ctx(struct ptlrpc_request *req, long timeout);
int sptlrpc_export_update_ctx(struct obd_export *exp);
int sptlrpc_req_replace_dead_ctx(struct ptlrpc_request *req);
void sptlrpc_req_set_flavor(struct ptlrpc_request *req, int opcode);
int sptlrpc_parse_rule(char *param, struct sptlrpc_rule *rule);
/* Garbage collector registration. */
void sptlrpc_gc_add_sec(struct ptlrpc_sec *sec);
void sptlrpc_gc_del_sec(struct ptlrpc_sec *sec);
void sptlrpc_gc_add_ctx(struct ptlrpc_cli_ctx *ctx);
const char *sec2target_str(struct ptlrpc_sec *sec);
int sptlrpc_lprocfs_cliobd_attach(struct obd_device *obd);
/* Result of the server-side accept() op (SECSVC_OK etc.). */
enum secsvc_accept_res {
/* Server-side request/reply processing entry points. */
int sptlrpc_svc_unwrap_request(struct ptlrpc_request *req);
int sptlrpc_svc_alloc_rs(struct ptlrpc_request *req, int msglen);
int sptlrpc_svc_wrap_reply(struct ptlrpc_request *req);
void sptlrpc_svc_free_rs(struct ptlrpc_reply_state *rs);
void sptlrpc_svc_ctx_addref(struct ptlrpc_request *req);
void sptlrpc_svc_ctx_decref(struct ptlrpc_request *req);
void sptlrpc_svc_ctx_invalidate(struct ptlrpc_request *req);
int sptlrpc_target_export_check(struct obd_export *exp,
struct ptlrpc_request *req);
void sptlrpc_target_update_exp_flavor(struct obd_device *obd,
struct sptlrpc_rule_set *rset);
1149 * context and reverse context
/*
 * GSS sequence-number window sizes (stored as bitmaps, see
 * gss_svc_seq_data) and the fill level — 3/4 of the main window —
 * at which the main window is repacked.
 */
#define GSS_SEQ_WIN			(2048)
#define GSS_SEQ_WIN_MAIN		GSS_SEQ_WIN
#define GSS_SEQ_WIN_BACK		(128)
#define GSS_SEQ_REPACK_THRESHOLD	(GSS_SEQ_WIN_MAIN / 2 + \
					 GSS_SEQ_WIN_MAIN / 4)
/* Replay-detection window state for a GSS server context. */
struct gss_svc_seq_data {
spinlock_t ssd_lock;
 * highest sequence number seen so far, for main and back window
 * main and back window
 * for i such that ssd_max - GSS_SEQ_WIN < i <= ssd_max, the i-th bit
 * of ssd_win is nonzero iff sequence number i has been seen already.
unsigned long ssd_win_main[GSS_SEQ_WIN_MAIN/BITS_PER_LONG];
unsigned long ssd_win_back[GSS_SEQ_WIN_BACK/BITS_PER_LONG];
/* GSS server context: mechanism context, replay window, reverse handle. */
struct gss_svc_ctx {
struct gss_ctx *gsc_mechctx;
struct gss_svc_seq_data gsc_seqdata;
rawobj_t gsc_rvs_hdl;
uid_t gsc_mapped_uid;
unsigned int gsc_usr_root:1,
/* Install reverse contexts (server and client sides). */
int sptlrpc_svc_install_rvs_ctx(struct obd_import *imp,
struct ptlrpc_svc_ctx *ctx);
int sptlrpc_cli_install_rvs_ctx(struct obd_import *imp,
struct ptlrpc_cli_ctx *ctx);
/* bulk security api */
#define PAGES_POOL 0
/* Encryption page/buffer pool management. */
int sptlrpc_enc_pool_add_user(void);
int sptlrpc_enc_pool_get_pages(struct ptlrpc_bulk_desc *desc);
int sptlrpc_enc_pool_get_pages_array(struct page **pa, unsigned int count);
int sptlrpc_enc_pool_get_buf(void **buf, unsigned int size_bits);
void sptlrpc_enc_pool_put_pages(struct ptlrpc_bulk_desc *desc);
void sptlrpc_enc_pool_put_pages_array(struct page **pa, unsigned int count);
void sptlrpc_enc_pool_put_buf(void *buf, unsigned int size_bits);
int sptlrpc_enc_pool_get_free_pages(unsigned int pool);
int pool_is_at_full_capacity(void);
/* Client-side bulk wrap/unwrap. */
int sptlrpc_cli_wrap_bulk(struct ptlrpc_request *req,
struct ptlrpc_bulk_desc *desc);
int sptlrpc_cli_unwrap_bulk_read(struct ptlrpc_request *req,
struct ptlrpc_bulk_desc *desc, int nob);
int sptlrpc_cli_unwrap_bulk_write(struct ptlrpc_request *req,
struct ptlrpc_bulk_desc *desc);
#ifdef HAVE_SERVER_SUPPORT
/* Server-side bulk prep/wrap/unwrap. */
int sptlrpc_svc_prep_bulk(struct ptlrpc_request *req,
struct ptlrpc_bulk_desc *desc);
int sptlrpc_svc_wrap_bulk(struct ptlrpc_request *req,
struct ptlrpc_bulk_desc *desc);
int sptlrpc_svc_unwrap_bulk(struct ptlrpc_request *req,
struct ptlrpc_bulk_desc *desc);
/* bulk helpers (internal use only by policies) */
int sptlrpc_get_bulk_checksum(struct ptlrpc_bulk_desc *desc, __u8 alg,
void *buf, int buflen);
int bulk_sec_desc_unpack(struct lustre_msg *msg, int offset, int swabbed);
/* user descriptor helpers */
/* Bytes needed for a user descriptor carrying @ngroups groups. */
static inline int sptlrpc_user_desc_size(int ngroups)
return sizeof(struct ptlrpc_user_desc) + ngroups * sizeof(__u32);
int sptlrpc_current_user_desc_size(void);
int sptlrpc_pack_user_desc(struct lustre_msg *msg, int offset);
int sptlrpc_unpack_user_desc(struct lustre_msg *req, int offset, int swabbed);
1238 #endif /* _LUSTRE_SEC_H_ */