4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.gnu.org/licenses/gpl-2.0.html
23 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Use is subject to license terms.
26 * Copyright (c) 2012, 2017, Intel Corporation.
29 * This file is part of Lustre, http://www.lustre.org/
32 #ifndef _LUSTRE_SEC_H_
33 #define _LUSTRE_SEC_H_
35 /** \defgroup sptlrpc sptlrpc
/* Opaque forward declarations; full definitions live in other ptlrpc headers. */
struct ptlrpc_request;
struct ptlrpc_reply_state;
struct ptlrpc_bulk_desc;
/* security policy descriptor and its client/server operation tables */
struct ptlrpc_sec_policy;
struct ptlrpc_sec_cops;
struct ptlrpc_sec_sops;
/* per-connection server/client security contexts and context ops */
struct ptlrpc_svc_ctx;
struct ptlrpc_cli_ctx;
struct ptlrpc_ctx_ops;
68 * \addtogroup flavor flavor
70 * RPC flavor is represented by a 32 bits integer. Currently the high 12 bits
71 * are unused, must be set to 0 for future expansion.
73 * ------------------------------------------------------------------------
74 * | 4b (bulk svc) | 4b (bulk type) | 4b (svc) | 4b (mech) | 4b (policy) |
75 * ------------------------------------------------------------------------
/** protection policies */
enum sptlrpc_policies {
	SPTLRPC_POLICY_NULL             = 0,
	SPTLRPC_POLICY_PLAIN            = 1,
	SPTLRPC_POLICY_GSS              = 2,
	SPTLRPC_POLICY_MAX,
};

/** mechanisms of the null policy */
enum sptlrpc_mech_null {
	SPTLRPC_MECH_NULL               = 0,
	SPTLRPC_MECH_NULL_MAX,
};

/** mechanisms of the plain policy */
enum sptlrpc_mech_plain {
	SPTLRPC_MECH_PLAIN              = 0,
	SPTLRPC_MECH_PLAIN_MAX,
};

/** mechanisms of the gss policy */
enum sptlrpc_mech_gss {
	SPTLRPC_MECH_GSS_NULL           = 0,
	SPTLRPC_MECH_GSS_KRB5           = 1,
	SPTLRPC_MECH_GSS_SK             = 2,
	SPTLRPC_MECH_GSS_MAX,
};

/** RPC service level */
enum sptlrpc_service_type {
	SPTLRPC_SVC_NULL                = 0,    /**< no security */
	SPTLRPC_SVC_AUTH                = 1,    /**< authentication only */
	SPTLRPC_SVC_INTG                = 2,    /**< integrity */
	SPTLRPC_SVC_PRIV                = 3,    /**< privacy */
	SPTLRPC_SVC_MAX,
};

/** bulk transfer types */
enum sptlrpc_bulk_type {
	SPTLRPC_BULK_DEFAULT            = 0,    /**< follow rpc flavor */
	SPTLRPC_BULK_HASH               = 1,    /**< hash integrity */
	SPTLRPC_BULK_MAX,
};

/** bulk service level */
enum sptlrpc_bulk_service {
	SPTLRPC_BULK_SVC_NULL           = 0,    /**< no security */
	SPTLRPC_BULK_SVC_AUTH           = 1,    /**< authentication only */
	SPTLRPC_BULK_SVC_INTG           = 2,    /**< integrity */
	SPTLRPC_BULK_SVC_PRIV           = 3,    /**< privacy */
	SPTLRPC_BULK_SVC_MAX,
};

/*
 * compose/extract macros: a wire flavor is a 32-bit integer, the high 12
 * bits are unused and must be 0:
 *
 * ------------------------------------------------------------------------
 * | 4b (bulk svc) | 4b (bulk type) | 4b (svc) | 4b (mech) | 4b (policy) |
 * ------------------------------------------------------------------------
 */
#define FLVR_POLICY_OFFSET              (0)
#define FLVR_MECH_OFFSET                (4)
#define FLVR_SVC_OFFSET                 (8)
#define FLVR_BULK_TYPE_OFFSET           (12)
#define FLVR_BULK_SVC_OFFSET            (16)

/** compose a wire flavor from its five 4-bit fields */
#define MAKE_FLVR(policy, mech, svc, btype, bsvc)                       \
	(((__u32)(policy) << FLVR_POLICY_OFFSET) |                      \
	 ((__u32)(mech) << FLVR_MECH_OFFSET) |                          \
	 ((__u32)(svc) << FLVR_SVC_OFFSET) |                            \
	 ((__u32)(btype) << FLVR_BULK_TYPE_OFFSET) |                    \
	 ((__u32)(bsvc) << FLVR_BULK_SVC_OFFSET))

/*
 * extraction of each field from a wire flavor
 */
#define SPTLRPC_FLVR_POLICY(flavor)                                     \
	((((__u32)(flavor)) >> FLVR_POLICY_OFFSET) & 0xF)
#define SPTLRPC_FLVR_MECH(flavor)                                       \
	((((__u32)(flavor)) >> FLVR_MECH_OFFSET) & 0xF)
#define SPTLRPC_FLVR_SVC(flavor)                                        \
	((((__u32)(flavor)) >> FLVR_SVC_OFFSET) & 0xF)
#define SPTLRPC_FLVR_BULK_TYPE(flavor)                                  \
	((((__u32)(flavor)) >> FLVR_BULK_TYPE_OFFSET) & 0xF)
#define SPTLRPC_FLVR_BULK_SVC(flavor)                                   \
	((((__u32)(flavor)) >> FLVR_BULK_SVC_OFFSET) & 0xF)

/** RPC-only part (policy + mech + svc) of a flavor */
#define SPTLRPC_FLVR_BASE(flavor)                                       \
	((((__u32)(flavor)) >> FLVR_POLICY_OFFSET) & 0xFFF)
/** mech + svc sub-flavor, without the policy nibble */
#define SPTLRPC_FLVR_BASE_SUB(flavor)                                   \
	((((__u32)(flavor)) >> FLVR_MECH_OFFSET) & 0xFF)

/*
 * gss sub-flavors (mech + svc pairs)
 */
#define MAKE_BASE_SUBFLVR(mech, svc)                                    \
	((__u32)(mech) |                                                \
	 ((__u32)(svc) << (FLVR_SVC_OFFSET - FLVR_MECH_OFFSET)))

#define SPTLRPC_SUBFLVR_GSSNULL                                         \
	MAKE_BASE_SUBFLVR(SPTLRPC_MECH_GSS_NULL, SPTLRPC_SVC_NULL)
#define SPTLRPC_SUBFLVR_KRB5N                                           \
	MAKE_BASE_SUBFLVR(SPTLRPC_MECH_GSS_KRB5, SPTLRPC_SVC_NULL)
#define SPTLRPC_SUBFLVR_KRB5A                                           \
	MAKE_BASE_SUBFLVR(SPTLRPC_MECH_GSS_KRB5, SPTLRPC_SVC_AUTH)
#define SPTLRPC_SUBFLVR_KRB5I                                           \
	MAKE_BASE_SUBFLVR(SPTLRPC_MECH_GSS_KRB5, SPTLRPC_SVC_INTG)
#define SPTLRPC_SUBFLVR_KRB5P                                           \
	MAKE_BASE_SUBFLVR(SPTLRPC_MECH_GSS_KRB5, SPTLRPC_SVC_PRIV)
#define SPTLRPC_SUBFLVR_SKN                                             \
	MAKE_BASE_SUBFLVR(SPTLRPC_MECH_GSS_SK, SPTLRPC_SVC_NULL)
#define SPTLRPC_SUBFLVR_SKA                                             \
	MAKE_BASE_SUBFLVR(SPTLRPC_MECH_GSS_SK, SPTLRPC_SVC_AUTH)
#define SPTLRPC_SUBFLVR_SKI                                             \
	MAKE_BASE_SUBFLVR(SPTLRPC_MECH_GSS_SK, SPTLRPC_SVC_INTG)
#define SPTLRPC_SUBFLVR_SKPI                                            \
	MAKE_BASE_SUBFLVR(SPTLRPC_MECH_GSS_SK, SPTLRPC_SVC_PRIV)

/*
 * "end user" flavors
 */
#define SPTLRPC_FLVR_NULL                               \
	MAKE_FLVR(SPTLRPC_POLICY_NULL,                  \
		  SPTLRPC_MECH_NULL,                    \
		  SPTLRPC_SVC_NULL,                     \
		  SPTLRPC_BULK_DEFAULT,                 \
		  SPTLRPC_BULK_SVC_NULL)
#define SPTLRPC_FLVR_PLAIN                              \
	MAKE_FLVR(SPTLRPC_POLICY_PLAIN,                 \
		  SPTLRPC_MECH_PLAIN,                   \
		  SPTLRPC_SVC_NULL,                     \
		  SPTLRPC_BULK_HASH,                    \
		  SPTLRPC_BULK_SVC_INTG)
#define SPTLRPC_FLVR_GSSNULL                            \
	MAKE_FLVR(SPTLRPC_POLICY_GSS,                   \
		  SPTLRPC_MECH_GSS_NULL,                \
		  SPTLRPC_SVC_NULL,                     \
		  SPTLRPC_BULK_DEFAULT,                 \
		  SPTLRPC_BULK_SVC_NULL)
#define SPTLRPC_FLVR_KRB5N                              \
	MAKE_FLVR(SPTLRPC_POLICY_GSS,                   \
		  SPTLRPC_MECH_GSS_KRB5,                \
		  SPTLRPC_SVC_NULL,                     \
		  SPTLRPC_BULK_DEFAULT,                 \
		  SPTLRPC_BULK_SVC_NULL)
#define SPTLRPC_FLVR_KRB5A                              \
	MAKE_FLVR(SPTLRPC_POLICY_GSS,                   \
		  SPTLRPC_MECH_GSS_KRB5,                \
		  SPTLRPC_SVC_AUTH,                     \
		  SPTLRPC_BULK_DEFAULT,                 \
		  SPTLRPC_BULK_SVC_NULL)
#define SPTLRPC_FLVR_KRB5I                              \
	MAKE_FLVR(SPTLRPC_POLICY_GSS,                   \
		  SPTLRPC_MECH_GSS_KRB5,                \
		  SPTLRPC_SVC_INTG,                     \
		  SPTLRPC_BULK_DEFAULT,                 \
		  SPTLRPC_BULK_SVC_INTG)
#define SPTLRPC_FLVR_KRB5P                              \
	MAKE_FLVR(SPTLRPC_POLICY_GSS,                   \
		  SPTLRPC_MECH_GSS_KRB5,                \
		  SPTLRPC_SVC_PRIV,                     \
		  SPTLRPC_BULK_DEFAULT,                 \
		  SPTLRPC_BULK_SVC_PRIV)
#define SPTLRPC_FLVR_SKN                                \
	MAKE_FLVR(SPTLRPC_POLICY_GSS,                   \
		  SPTLRPC_MECH_GSS_SK,                  \
		  SPTLRPC_SVC_NULL,                     \
		  SPTLRPC_BULK_DEFAULT,                 \
		  SPTLRPC_BULK_SVC_NULL)
#define SPTLRPC_FLVR_SKA                                \
	MAKE_FLVR(SPTLRPC_POLICY_GSS,                   \
		  SPTLRPC_MECH_GSS_SK,                  \
		  SPTLRPC_SVC_AUTH,                     \
		  SPTLRPC_BULK_DEFAULT,                 \
		  SPTLRPC_BULK_SVC_NULL)
#define SPTLRPC_FLVR_SKI                                \
	MAKE_FLVR(SPTLRPC_POLICY_GSS,                   \
		  SPTLRPC_MECH_GSS_SK,                  \
		  SPTLRPC_SVC_INTG,                     \
		  SPTLRPC_BULK_DEFAULT,                 \
		  SPTLRPC_BULK_SVC_INTG)
#define SPTLRPC_FLVR_SKPI                               \
	MAKE_FLVR(SPTLRPC_POLICY_GSS,                   \
		  SPTLRPC_MECH_GSS_SK,                  \
		  SPTLRPC_SVC_PRIV,                     \
		  SPTLRPC_BULK_DEFAULT,                 \
		  SPTLRPC_BULK_SVC_PRIV)

#define SPTLRPC_FLVR_DEFAULT            SPTLRPC_FLVR_NULL

#define SPTLRPC_FLVR_INVALID            ((__u32)0xFFFFFFFF)
#define SPTLRPC_FLVR_ANY                ((__u32)0xFFF00000)

/**
 * extract the useful part (low 20 bits) from a wire flavor
 */
#define WIRE_FLVR(wflvr)                (((__u32)(wflvr)) & 0x000FFFFF)
273 static inline void flvr_set_svc(__u32 *flvr, __u32 svc)
275 LASSERT(svc < SPTLRPC_SVC_MAX);
276 *flvr = MAKE_FLVR(SPTLRPC_FLVR_POLICY(*flvr),
277 SPTLRPC_FLVR_MECH(*flvr), svc,
278 SPTLRPC_FLVR_BULK_TYPE(*flvr),
279 SPTLRPC_FLVR_BULK_SVC(*flvr));
282 static inline void flvr_set_bulk_svc(__u32 *flvr, __u32 svc)
284 LASSERT(svc < SPTLRPC_BULK_SVC_MAX);
285 *flvr = MAKE_FLVR(SPTLRPC_FLVR_POLICY(*flvr),
286 SPTLRPC_FLVR_MECH(*flvr),
287 SPTLRPC_FLVR_SVC(*flvr),
288 SPTLRPC_FLVR_BULK_TYPE(*flvr), svc);
291 struct bulk_spec_hash {
296 * Full description of flavors being used on a ptlrpc connection, include
297 * both regular RPC and bulk transfer parts.
299 struct sptlrpc_flavor {
301 * wire flavor, should be renamed to sf_wire.
305 * general flags of PTLRPC_SEC_FL_*
309 * rpc flavor specification
312 /* nothing for now */
315 * bulk flavor specification
318 struct bulk_spec_hash hash;
/**
 * Identifies which part of Lustre an RPC was generated from.  It's encoded
 * into RPC requests and checked by the ptlrpc service.
 *
 * NOTE(review): member list reconstructed (it was truncated in this copy);
 * verify values against peers on the wire before relying on them.
 */
enum lustre_sec_part {
	LUSTRE_SP_CLI           = 0,
	LUSTRE_SP_MDT,
	LUSTRE_SP_OST,
	LUSTRE_SP_MGC,
	LUSTRE_SP_MGS,
	LUSTRE_SP_ANY           = 0x20,
};

const char *sptlrpc_part2name(enum lustre_sec_part sp);
enum lustre_sec_part sptlrpc_target_sec_part(struct obd_device *obd);
339 * A rule specifies a flavor to be used by a ptlrpc connection between
342 struct sptlrpc_rule {
343 __u32 sr_netid; /* LNET network ID */
344 __u8 sr_from; /* sec_part */
345 __u8 sr_to; /* sec_part */
347 struct sptlrpc_flavor sr_flvr;
351 * A set of rules in memory.
353 * Rules are generated and stored on MGS, and propagated to MDT, OST,
354 * and client when needed.
356 struct sptlrpc_rule_set {
359 struct sptlrpc_rule *srs_rules;
362 int sptlrpc_parse_flavor(const char *str, struct sptlrpc_flavor *flvr);
363 int sptlrpc_flavor_has_bulk(struct sptlrpc_flavor *flvr);
365 static inline void sptlrpc_rule_set_init(struct sptlrpc_rule_set *set)
367 memset(set, 0, sizeof(*set));
370 void sptlrpc_rule_set_free(struct sptlrpc_rule_set *set);
371 int sptlrpc_rule_set_expand(struct sptlrpc_rule_set *set);
372 int sptlrpc_rule_set_merge(struct sptlrpc_rule_set *set,
373 struct sptlrpc_rule *rule);
374 int sptlrpc_rule_set_choose(struct sptlrpc_rule_set *rset,
375 enum lustre_sec_part from,
376 enum lustre_sec_part to,
377 struct lnet_nid *nid,
378 struct sptlrpc_flavor *sf);
379 void sptlrpc_rule_set_dump(struct sptlrpc_rule_set *set);
381 int sptlrpc_process_config(struct lustre_cfg *lcfg);
382 void sptlrpc_conf_log_start(const char *logname);
383 void sptlrpc_conf_log_stop(const char *logname);
384 void sptlrpc_conf_log_update_begin(const char *logname);
385 void sptlrpc_conf_log_update_end(const char *logname);
386 void sptlrpc_conf_client_adapt(struct obd_device *obd);
387 int sptlrpc_conf_target_get_rules(struct obd_device *obd,
388 struct sptlrpc_rule_set *rset);
389 void sptlrpc_target_choose_flavor(struct sptlrpc_rule_set *rset,
390 enum lustre_sec_part from,
391 struct lnet_nid *nid,
392 struct sptlrpc_flavor *flavor);
394 /* The maximum length of security payload. 1024 is enough for Kerberos 5,
395 * and should be enough for other future mechanisms but not sure.
396 * Only used by pre-allocated request/reply pool.
398 #define SPTLRPC_MAX_PAYLOAD (1024)
/**
 * Client-side per-context operation vector, implemented by each policy
 * module (null, plain, gss).
 */
struct ptlrpc_ctx_ops {
	/**
	 * To determine whether it's suitable to use the \a ctx for \a vcred.
	 */
	int (*match)(struct ptlrpc_cli_ctx *ctx, struct vfs_cred *vcred);

	/**
	 * To bring the \a ctx uptodate.
	 */
	int (*refresh)(struct ptlrpc_cli_ctx *ctx);

	/**
	 * Validate the \a ctx.
	 */
	int (*validate)(struct ptlrpc_cli_ctx *ctx);

	/**
	 * Force the \a ctx to die.
	 */
	void (*die)(struct ptlrpc_cli_ctx *ctx, int grace);
	int (*display)(struct ptlrpc_cli_ctx *ctx, char *buf, int bufsize);

	/**
	 * Sign the request message using \a ctx.
	 *
	 * \pre req->rq_reqmsg point to request message.
	 * \pre req->rq_reqlen is the request message length.
	 * \post req->rq_reqbuf point to request message with signature.
	 * \post req->rq_reqdata_len is set to the final request message size.
	 *
	 * \see null_ctx_sign(), plain_ctx_sign(), gss_cli_ctx_sign().
	 */
	int (*sign)(struct ptlrpc_cli_ctx *ctx, struct ptlrpc_request *req);

	/**
	 * Verify the reply message using \a ctx.
	 *
	 * \pre req->rq_repdata point to reply message with signature.
	 * \pre req->rq_repdata_len is the total reply message length.
	 * \post req->rq_repmsg point to reply message without signature.
	 * \post req->rq_replen is the reply message length.
	 *
	 * \see null_ctx_verify(), plain_ctx_verify(), gss_cli_ctx_verify().
	 */
	int (*verify)(struct ptlrpc_cli_ctx *ctx, struct ptlrpc_request *req);

	/**
	 * Encrypt the request message using \a ctx.
	 *
	 * \pre req->rq_reqmsg point to request message in clear text.
	 * \pre req->rq_reqlen is the request message length.
	 * \post req->rq_reqbuf point to request message.
	 * \post req->rq_reqdata_len is set to the final request message size.
	 *
	 * \see gss_cli_ctx_seal().
	 */
	int (*seal)(struct ptlrpc_cli_ctx *ctx, struct ptlrpc_request *req);

	/**
	 * Decrypt the reply message using \a ctx.
	 *
	 * \pre req->rq_repdata point to encrypted reply message.
	 * \pre req->rq_repdata_len is the total cipher text length.
	 * \post req->rq_repmsg point to reply message in clear text.
	 * \post req->rq_replen is the reply message length in clear text.
	 *
	 * \see gss_cli_ctx_unseal().
	 */
	int (*unseal)(struct ptlrpc_cli_ctx *ctx, struct ptlrpc_request *req);

	/**
	 * Wrap bulk request data. This is called before wrapping the RPC
	 * request message.
	 *
	 * \pre bulk buffer is described by desc->bd_iov and
	 * desc->bd_iov_count. note for read it's just buffer, no data
	 * need to be sent; for write it contains data in clear text.
	 * \post when necessary, ptlrpc_bulk_sec_desc was properly prepared
	 * (usually inside of RPC request message).
	 * - encryption: cipher text bulk buffer is described by
	 *   desc->bd_enc_iov and desc->bd_iov_count (currently assume iov
	 *   count remains the same).
	 * - otherwise: bulk buffer is still desc->bd_iov and
	 *   desc->bd_iov_count.
	 *
	 * \return 0: success.
	 * \return -ev: error code.
	 *
	 * \see plain_cli_wrap_bulk(), gss_cli_ctx_wrap_bulk().
	 */
	int (*wrap_bulk)(struct ptlrpc_cli_ctx *ctx,
			 struct ptlrpc_request *req,
			 struct ptlrpc_bulk_desc *desc);

	/**
	 * Unwrap bulk reply data. This is called after unwrapping the RPC
	 * reply message.
	 *
	 * \pre bulk buffer is described by desc->bd_iov/desc->bd_enc_iov and
	 * desc->bd_iov_count, according to wrap_bulk().
	 * \post final bulk data in clear text is placed in buffer described
	 * by desc->bd_iov and desc->bd_iov_count.
	 * \return +ve nob of actual bulk data in clear text.
	 * \return -ve error code.
	 *
	 * \see plain_cli_unwrap_bulk(), gss_cli_ctx_unwrap_bulk().
	 */
	int (*unwrap_bulk)(struct ptlrpc_cli_ctx *ctx,
			   struct ptlrpc_request *req,
			   struct ptlrpc_bulk_desc *desc);
};
/* client context state bits */
#define PTLRPC_CTX_NEW_BIT             (0)  /* newly created */
#define PTLRPC_CTX_UPTODATE_BIT        (1)  /* uptodate */
#define PTLRPC_CTX_DEAD_BIT            (2)  /* mark expired gracefully */
#define PTLRPC_CTX_ERROR_BIT           (3)  /* fatal error (refresh, etc.) */
#define PTLRPC_CTX_CACHED_BIT          (8)  /* in ctx cache (hash etc.) */
#define PTLRPC_CTX_ETERNAL_BIT         (9)  /* always valid */

#define PTLRPC_CTX_NEW                 BIT(PTLRPC_CTX_NEW_BIT)
#define PTLRPC_CTX_UPTODATE            BIT(PTLRPC_CTX_UPTODATE_BIT)
#define PTLRPC_CTX_DEAD                BIT(PTLRPC_CTX_DEAD_BIT)
#define PTLRPC_CTX_ERROR               BIT(PTLRPC_CTX_ERROR_BIT)
#define PTLRPC_CTX_CACHED              BIT(PTLRPC_CTX_CACHED_BIT)
#define PTLRPC_CTX_ETERNAL             BIT(PTLRPC_CTX_ETERNAL_BIT)

/* bits that determine a context's lifecycle status, see cli_ctx_status() */
#define PTLRPC_CTX_STATUS_MASK         (PTLRPC_CTX_UPTODATE |   \
					PTLRPC_CTX_DEAD |       \
					PTLRPC_CTX_ERROR)
536 struct ptlrpc_cli_ctx {
537 struct hlist_node cc_cache; /* linked into ctx cache */
538 atomic_t cc_refcount;
539 struct ptlrpc_sec *cc_sec;
540 struct ptlrpc_ctx_ops *cc_ops;
541 time64_t cc_expire; /* in seconds */
542 unsigned int cc_early_expire:1;
543 unsigned long cc_flags;
544 struct vfs_cred cc_vcred;
546 int cc_impgen; /* import gen at ctx create */
547 __u32 cc_impconncnt; /* import conn cnt at create */
548 struct list_head cc_req_list; /* waiting reqs linked here */
549 struct list_head cc_gc_chain; /* linked to gc chain */
553 * client side policy operation vector.
555 struct ptlrpc_sec_cops {
557 * Given an \a imp, create and initialize a ptlrpc_sec structure.
558 * \param ctx service context:
559 * - regular import: \a ctx should be NULL;
560 * - reverse import: \a ctx is obtained from incoming request.
561 * \param flavor specify what flavor to use.
563 * When necessary, policy module is responsible for taking reference
566 * \see null_create_sec(), plain_create_sec(), gss_sec_create_kr().
568 struct ptlrpc_sec *(*create_sec)(struct obd_import *imp,
569 struct ptlrpc_svc_ctx *ctx,
570 struct sptlrpc_flavor *flavor);
573 * Destructor of ptlrpc_sec. When called, refcount has been dropped
574 * to 0 and all contexts has been destroyed.
576 * \see null_destroy_sec(), plain_destroy_sec(), gss_sec_destroy_kr().
578 void (*destroy_sec)(struct ptlrpc_sec *sec);
581 * Notify that this ptlrpc_sec is going to die. Optionally, policy
582 * module is supposed to set sec->ps_dying and whatever necessary
585 * \see plain_kill_sec(), gss_sec_kill().
587 void (*kill_sec)(struct ptlrpc_sec *sec);
590 * Given \a vcred, lookup and/or create its context. The policy module
591 * is supposed to maintain its own context cache.
592 * XXX currently \a create and \a remove_dead is always 1, perhaps
593 * should be removed completely.
595 * \see null_lookup_ctx(), plain_lookup_ctx(), gss_sec_lookup_ctx_kr().
597 struct ptlrpc_cli_ctx *(*lookup_ctx)(struct ptlrpc_sec *sec,
598 struct vfs_cred *vcred,
599 int create, int remove_dead);
602 * Called then the reference of \a ctx dropped to 0. The policy module
603 * is supposed to destroy this context or whatever else according to
604 * its cache maintainance mechamism.
606 * \param sync if zero, we shouldn't wait for the context being
607 * destroyed completely.
609 * \see plain_release_ctx(), gss_sec_release_ctx_kr().
611 void (*release_ctx)(struct ptlrpc_sec *sec,
612 struct ptlrpc_cli_ctx *ctx, int sync);
615 * Flush the context cache.
617 * \param uid context of which user, -1 means all contexts.
618 * \param grace if zero, the PTLRPC_CTX_UPTODATE_BIT of affected
619 * contexts should be cleared immediately.
620 * \param force if zero, only idle contexts will be flushed.
622 * \see plain_flush_ctx_cache(), gss_sec_flush_ctx_cache_kr().
624 int (*flush_ctx_cache)(struct ptlrpc_sec *sec, uid_t uid, int grace,
628 * Called periodically by garbage collector to remove dead contexts
631 * \see gss_sec_gc_ctx_kr().
633 void (*gc_ctx)(struct ptlrpc_sec *sec);
636 * Given an context \a ctx, install a corresponding reverse service
637 * context on client side.
638 * XXX currently it's only used by GSS module, maybe we should remove
639 * this from general API.
641 int (*install_rctx)(struct obd_import *imp, struct ptlrpc_sec *sec,
642 struct ptlrpc_cli_ctx *ctx);
645 * To allocate request buffer for \a req.
647 * \pre req->rq_reqmsg == NULL.
648 * \pre req->rq_reqbuf == NULL, otherwise it must be pre-allocated,
649 * we are not supposed to free it.
650 * \post if success, req->rq_reqmsg point to a buffer with size
651 * at least \a lustre_msg_size.
653 * \see null_alloc_reqbuf(), plain_alloc_reqbuf(), gss_alloc_reqbuf().
655 int (*alloc_reqbuf)(struct ptlrpc_sec *sec,
656 struct ptlrpc_request *req, int lustre_msg_size);
659 * To free request buffer for \a req.
661 * \pre req->rq_reqbuf != NULL.
663 * \see null_free_reqbuf(), plain_free_reqbuf(), gss_free_reqbuf().
665 void (*free_reqbuf)(struct ptlrpc_sec *sec,
666 struct ptlrpc_request *req);
669 * To allocate reply buffer for \a req.
671 * \pre req->rq_repbuf == NULL.
672 * \post if success, req->rq_repbuf point to a buffer with size
673 * req->rq_repbuf_len, the size should be large enough to receive
674 * reply which be transformed from \a lustre_msg_size of clear text.
676 * \see null_alloc_repbuf(), plain_alloc_repbuf(), gss_alloc_repbuf().
678 int (*alloc_repbuf)(struct ptlrpc_sec *sec, struct ptlrpc_request *req,
679 int lustre_msg_size);
682 * To free reply buffer for \a req.
684 * \pre req->rq_repbuf != NULL.
685 * \post req->rq_repbuf == NULL.
686 * \post req->rq_repbuf_len == 0.
688 * \see null_free_repbuf(), plain_free_repbuf(), gss_free_repbuf().
690 void (*free_repbuf)(struct ptlrpc_sec *sec,
691 struct ptlrpc_request *req);
694 * To expand the request buffer of \a req, thus the \a segment in
695 * the request message pointed by req->rq_reqmsg can accommodate
696 * at least \a newsize of data.
698 * \pre req->rq_reqmsg->lm_buflens[segment] < newsize.
700 * \see null_enlarge_reqbuf(), plain_enlarge_reqbuf(),
701 * gss_enlarge_reqbuf().
703 int (*enlarge_reqbuf)(struct ptlrpc_sec *sec,
704 struct ptlrpc_request *req, int segment,
709 int (*display)(struct ptlrpc_sec *sec, struct seq_file *seq);
/**
 * server side policy operation vector.
 */
struct ptlrpc_sec_sops {
	/**
	 * verify an incoming request.
	 *
	 * \pre request message is pointed by req->rq_reqbuf, size is
	 * req->rq_reqdata_len; and the message has been unpacked to
	 * host byte order.
	 *
	 * \retval SECSVC_OK success, req->rq_reqmsg point to request message
	 * in clear text, size is req->rq_reqlen; req->rq_svc_ctx is set;
	 * req->rq_sp_from is decoded from request.
	 * \retval SECSVC_COMPLETE success, the request has been fully
	 * processed, and reply message has been prepared; req->rq_sp_from is
	 * decoded from request.
	 * \retval SECSVC_DROP failed, this request should be dropped.
	 *
	 * \see null_accept(), plain_accept(), gss_svc_accept_kr().
	 */
	int (*accept)(struct ptlrpc_request *req);

	/**
	 * Perform security transformation upon reply message.
	 *
	 * \pre reply message is pointed by req->rq_reply_state->rs_msg, size
	 * is req->rq_reply_state->rs_msg_len.
	 * \post req->rs_repdata_len is the final message size.
	 * \post req->rq_reply_off is set.
	 *
	 * \see null_authorize(), plain_authorize(), gss_svc_authorize().
	 */
	int (*authorize)(struct ptlrpc_request *req);

	/**
	 * Invalidate server context \a ctx.
	 *
	 * \see gss_svc_invalidate_ctx().
	 */
	void (*invalidate_ctx)(struct ptlrpc_svc_ctx *ctx);

	/**
	 * Allocate a ptlrpc_reply_state.
	 *
	 * \param msgsize size of the reply message in clear text.
	 * \pre if req->rq_reply_state != NULL, then it's pre-allocated, we
	 * should simply use it; otherwise we'll be responsible for allocating
	 * a new one.
	 * \post req->rq_reply_state != NULL;
	 * \post req->rq_reply_state->rs_msg != NULL;
	 *
	 * \see null_alloc_rs(), plain_alloc_rs(), gss_svc_alloc_rs().
	 */
	int (*alloc_rs)(struct ptlrpc_request *req, int msgsize);

	/**
	 * Free a ptlrpc_reply_state.
	 */
	void (*free_rs)(struct ptlrpc_reply_state *rs);

	/**
	 * Release the server context \a ctx.
	 *
	 * \see gss_svc_free_ctx().
	 */
	void (*free_ctx)(struct ptlrpc_svc_ctx *ctx);

	/**
	 * Install a reverse context based on the server context \a ctx.
	 *
	 * \see gss_svc_install_rctx_kr().
	 */
	int (*install_rctx)(struct obd_import *imp, struct ptlrpc_svc_ctx *ctx);

	/**
	 * Prepare buffer for incoming bulk write.
	 *
	 * \pre desc->bd_iov and desc->bd_iov_count describes the buffer
	 * intended to receive the write.
	 *
	 * \see gss_svc_prep_bulk().
	 */
	int (*prep_bulk)(struct ptlrpc_request *req,
			 struct ptlrpc_bulk_desc *desc);

	/**
	 * Unwrap the bulk write data.
	 *
	 * \see plain_svc_unwrap_bulk(), gss_svc_unwrap_bulk().
	 */
	int (*unwrap_bulk)(struct ptlrpc_request *req,
			   struct ptlrpc_bulk_desc *desc);

	/**
	 * Wrap the bulk read data.
	 *
	 * \see plain_svc_wrap_bulk(), gss_svc_wrap_bulk().
	 */
	int (*wrap_bulk)(struct ptlrpc_request *req,
			 struct ptlrpc_bulk_desc *desc);
};
815 struct ptlrpc_sec_policy {
816 struct module *sp_owner;
818 __u16 sp_policy; /* policy number */
819 struct ptlrpc_sec_cops *sp_cops; /* client ops */
820 struct ptlrpc_sec_sops *sp_sops; /* server ops */
823 #define PTLRPC_SEC_FL_REVERSE 0x0001 /* reverse sec */
824 #define PTLRPC_SEC_FL_ROOTONLY 0x0002 /* treat everyone as root */
825 #define PTLRPC_SEC_FL_UDESC 0x0004 /* ship udesc */
826 #define PTLRPC_SEC_FL_BULK 0x0008 /* intensive bulk i/o expected */
827 #define PTLRPC_SEC_FL_PAG 0x0010 /* PAG mode */
829 struct sptlrpc_sepol {
830 struct rcu_head ssp_rcu;
832 /** mtime of SELinux policy file */
835 * SELinux policy info
836 * sepol string format is:
837 * <mode>:<policy name>:<policy version>:<policy hash>
839 __u32 ssp_sepol_size;
844 * The ptlrpc_sec represents the client side ptlrpc security facilities,
845 * each obd_import (both regular and reverse import) must associate with
848 * \see sptlrpc_import_sec_adapt().
851 struct ptlrpc_sec_policy *ps_policy;
852 atomic_t ps_refcount;
853 /** statistic only */
855 /** unique identifier */
857 struct sptlrpc_flavor ps_flvr;
858 enum lustre_sec_part ps_part;
859 /** after set, no more new context will be created */
860 unsigned int ps_dying:1;
862 struct obd_import *ps_import;
864 /** next check time of SELinux policy file */
865 ktime_t ps_sepol_checknext;
866 /** SELinux policy file information */
867 struct sptlrpc_sepol *ps_sepol;
872 struct list_head ps_gc_list;
873 time64_t ps_gc_interval; /* in seconds */
874 time64_t ps_gc_next; /* in seconds */
877 static inline int flvr_is_rootonly(__u32 flavor)
879 return (SPTLRPC_FLVR_POLICY(flavor) == SPTLRPC_POLICY_GSS &&
880 (SPTLRPC_FLVR_MECH(flavor) == SPTLRPC_MECH_GSS_NULL ||
881 SPTLRPC_FLVR_MECH(flavor) == SPTLRPC_MECH_GSS_SK));
884 static inline int flvr_allows_user_desc(__u32 flavor)
886 return (SPTLRPC_FLVR_POLICY(flavor) == SPTLRPC_POLICY_GSS &&
887 (SPTLRPC_FLVR_MECH(flavor) == SPTLRPC_MECH_GSS_NULL ||
888 SPTLRPC_FLVR_MECH(flavor) == SPTLRPC_MECH_GSS_SK));
891 static inline int sec_is_reverse(struct ptlrpc_sec *sec)
893 return (sec->ps_flvr.sf_flags & PTLRPC_SEC_FL_REVERSE);
896 static inline int sec_is_rootonly(struct ptlrpc_sec *sec)
898 return (sec->ps_flvr.sf_flags & PTLRPC_SEC_FL_ROOTONLY);
902 struct ptlrpc_svc_ctx {
903 atomic_t sc_refcount;
904 struct ptlrpc_sec_policy *sc_policy;
908 * user identity descriptor
910 #define LUSTRE_MAX_GROUPS (128)
912 struct ptlrpc_user_desc {
/** bulk integrity hash algorithms; values travel on the wire */
enum sptlrpc_bulk_hash_alg {
	BULK_HASH_ALG_NULL      = 0,
	BULK_HASH_ALG_ADLER32,
	BULK_HASH_ALG_CRC32,
	BULK_HASH_ALG_MD5,
	BULK_HASH_ALG_SHA1,
	BULK_HASH_ALG_SHA256,
	BULK_HASH_ALG_SHA384,
	BULK_HASH_ALG_SHA512,
	BULK_HASH_ALG_CRC32C,
	BULK_HASH_ALG_MAX
};
937 const char *sptlrpc_get_hash_name(__u8 hash_alg);
938 __u8 sptlrpc_get_hash_alg(const char *algname);
944 struct ptlrpc_bulk_sec_desc {
945 __u8 bsd_version; /* 0 */
946 __u8 bsd_type; /* SPTLRPC_BULK_XXX */
947 __u8 bsd_svc; /* SPTLRPC_BULK_SVC_XXXX */
948 __u8 bsd_flags; /* flags */
949 __u32 bsd_nob; /* nob of bulk data */
950 __u8 bsd_data[0]; /* policy-specific token */
953 extern struct dentry *sptlrpc_debugfs_dir;
954 extern struct proc_dir_entry *sptlrpc_lprocfs_dir;
957 * round size up to next power of 2, for slab allocation.
958 * @size must be sane (can't overflow after round up)
960 static inline int size_roundup_power2(int size)
973 * internal support libraries
975 void _sptlrpc_enlarge_msg_inplace(struct lustre_msg *msg, int segment,
981 int sptlrpc_register_policy(struct ptlrpc_sec_policy *policy);
982 int sptlrpc_unregister_policy(struct ptlrpc_sec_policy *policy);
984 __u32 sptlrpc_name2flavor_base(const char *name);
985 const char *sptlrpc_flavor2name_base(__u32 flvr);
986 char *sptlrpc_flavor2name_bulk(struct sptlrpc_flavor *sf, char *buf,
988 char *sptlrpc_flavor2name(struct sptlrpc_flavor *sf, char *buf, int bufsize);
989 char *sptlrpc_secflags2str(__u32 flags, char *buf, int bufsize);
991 static inline struct ptlrpc_sec_policy *
992 sptlrpc_policy_get(struct ptlrpc_sec_policy *policy)
994 __module_get(policy->sp_owner);
999 sptlrpc_policy_put(struct ptlrpc_sec_policy *policy)
1001 module_put(policy->sp_owner);
1008 unsigned long cli_ctx_status(struct ptlrpc_cli_ctx *ctx)
1010 return (ctx->cc_flags & PTLRPC_CTX_STATUS_MASK);
1014 int cli_ctx_is_ready(struct ptlrpc_cli_ctx *ctx)
1016 return (cli_ctx_status(ctx) == PTLRPC_CTX_UPTODATE);
1020 int cli_ctx_is_refreshed(struct ptlrpc_cli_ctx *ctx)
1022 return (cli_ctx_status(ctx) != 0);
1026 int cli_ctx_is_uptodate(struct ptlrpc_cli_ctx *ctx)
1028 return ((ctx->cc_flags & PTLRPC_CTX_UPTODATE) != 0);
1032 int cli_ctx_is_error(struct ptlrpc_cli_ctx *ctx)
1034 return ((ctx->cc_flags & PTLRPC_CTX_ERROR) != 0);
1038 int cli_ctx_is_dead(struct ptlrpc_cli_ctx *ctx)
1040 return ((ctx->cc_flags & (PTLRPC_CTX_DEAD | PTLRPC_CTX_ERROR)) != 0);
1044 int cli_ctx_is_eternal(struct ptlrpc_cli_ctx *ctx)
1046 return ((ctx->cc_flags & PTLRPC_CTX_ETERNAL) != 0);
1052 struct ptlrpc_sec *sptlrpc_sec_get(struct ptlrpc_sec *sec);
1053 void sptlrpc_sec_put(struct ptlrpc_sec *sec);
* internal APIs which are only used by policy implementations
1058 int sptlrpc_get_next_secid(void);
1059 void sptlrpc_sec_destroy(struct ptlrpc_sec *sec);
1062 * exported client context api
1064 struct ptlrpc_cli_ctx *sptlrpc_cli_ctx_get(struct ptlrpc_cli_ctx *ctx);
1065 void sptlrpc_cli_ctx_put(struct ptlrpc_cli_ctx *ctx, int sync);
1066 void sptlrpc_cli_ctx_expire(struct ptlrpc_cli_ctx *ctx);
1067 void sptlrpc_cli_ctx_wakeup(struct ptlrpc_cli_ctx *ctx);
1068 int sptlrpc_cli_ctx_display(struct ptlrpc_cli_ctx *ctx, char *buf, int bufsize);
1071 * exported client context wrap/buffers
1073 int sptlrpc_cli_wrap_request(struct ptlrpc_request *req);
1074 int sptlrpc_cli_unwrap_reply(struct ptlrpc_request *req);
1075 int sptlrpc_cli_alloc_reqbuf(struct ptlrpc_request *req, int msgsize);
1076 void sptlrpc_cli_free_reqbuf(struct ptlrpc_request *req);
1077 int sptlrpc_cli_alloc_repbuf(struct ptlrpc_request *req, int msgsize);
1078 void sptlrpc_cli_free_repbuf(struct ptlrpc_request *req);
1079 int sptlrpc_cli_enlarge_reqbuf(struct ptlrpc_request *req,
1080 const struct req_msg_field *field,
1082 int sptlrpc_cli_unwrap_early_reply(struct ptlrpc_request *req,
1083 struct ptlrpc_request **req_ret);
1084 void sptlrpc_cli_finish_early_reply(struct ptlrpc_request *early_req);
1085 void sptlrpc_request_out_callback(struct ptlrpc_request *req);
1087 static inline size_t sptlrpc_sepol_size(struct sptlrpc_sepol *sepol)
1089 return sepol ? sepol->ssp_sepol_size : 0;
1092 void sptlrpc_sepol_put(struct sptlrpc_sepol *pol);
1093 struct sptlrpc_sepol *sptlrpc_sepol_get_cached(struct ptlrpc_sec *imp_sec);
1094 struct sptlrpc_sepol *sptlrpc_sepol_get(struct ptlrpc_request *req);
1097 * exported higher interface of import & request
1099 int sptlrpc_import_sec_adapt(struct obd_import *imp,
1100 struct ptlrpc_svc_ctx *ctx,
1101 struct sptlrpc_flavor *flvr);
1102 struct ptlrpc_sec *sptlrpc_import_sec_ref(struct obd_import *imp);
1103 void sptlrpc_import_sec_put(struct obd_import *imp);
1104 int lprocfs_srpc_serverctx_seq_show(struct seq_file *m, void *data);
1106 int sptlrpc_import_check_ctx(struct obd_import *imp);
1107 void sptlrpc_import_flush_root_ctx(struct obd_import *imp);
1108 void sptlrpc_import_flush_my_ctx(struct obd_import *imp);
1109 void sptlrpc_import_flush_all_ctx(struct obd_import *imp);
1110 int sptlrpc_req_get_ctx(struct ptlrpc_request *req);
1111 void sptlrpc_req_put_ctx(struct ptlrpc_request *req, int sync);
1112 int sptlrpc_req_refresh_ctx(struct ptlrpc_request *req, long timeout);
1113 int sptlrpc_export_update_ctx(struct obd_export *exp);
1114 int sptlrpc_req_replace_dead_ctx(struct ptlrpc_request *req,
1115 struct ptlrpc_sec *sec);
1116 void sptlrpc_req_set_flavor(struct ptlrpc_request *req, int opcode);
1118 int sptlrpc_parse_rule(char *param, struct sptlrpc_rule *rule);
1121 void sptlrpc_gc_add_sec(struct ptlrpc_sec *sec);
1122 void sptlrpc_gc_del_sec(struct ptlrpc_sec *sec);
1123 void sptlrpc_gc_add_ctx(struct ptlrpc_cli_ctx *ctx);
1126 const char *sec2target_str(struct ptlrpc_sec *sec);
1127 int sptlrpc_lprocfs_cliobd_attach(struct obd_device *obd);
1132 enum secsvc_accept_res {
1138 int sptlrpc_svc_unwrap_request(struct ptlrpc_request *req);
1139 int sptlrpc_svc_alloc_rs(struct ptlrpc_request *req, int msglen);
1140 int sptlrpc_svc_wrap_reply(struct ptlrpc_request *req);
1141 void sptlrpc_svc_free_rs(struct ptlrpc_reply_state *rs);
1142 void sptlrpc_svc_ctx_addref(struct ptlrpc_request *req);
1143 void sptlrpc_svc_ctx_decref(struct ptlrpc_request *req);
1144 void sptlrpc_svc_ctx_invalidate(struct ptlrpc_request *req);
1146 int sptlrpc_target_export_check(struct obd_export *exp,
1147 struct ptlrpc_request *req);
1148 void sptlrpc_target_update_exp_flavor(struct obd_device *obd,
1149 struct sptlrpc_rule_set *rset);
1152 * context and reverse context
1154 #define GSS_SEQ_WIN (2048)
1155 #define GSS_SEQ_WIN_MAIN GSS_SEQ_WIN
1156 #define GSS_SEQ_WIN_BACK (128)
1157 #define GSS_SEQ_REPACK_THRESHOLD (GSS_SEQ_WIN_MAIN / 2 + \
1158 GSS_SEQ_WIN_MAIN / 4)
1160 struct gss_svc_seq_data {
1161 spinlock_t ssd_lock;
1163 * highest sequence number seen so far, for main and back window
1168 * main and back window
1169 * for i such that ssd_max - GSS_SEQ_WIN < i <= ssd_max, the i-th bit
1170 * of ssd_win is nonzero iff sequence number i has been seen already.
1172 unsigned long ssd_win_main[GSS_SEQ_WIN_MAIN/BITS_PER_LONG];
1173 unsigned long ssd_win_back[GSS_SEQ_WIN_BACK/BITS_PER_LONG];
1176 struct gss_svc_ctx {
1177 struct gss_ctx *gsc_mechctx;
1178 struct gss_svc_seq_data gsc_seqdata;
1179 rawobj_t gsc_rvs_hdl;
1183 uid_t gsc_mapped_uid;
1184 unsigned int gsc_usr_root:1,
1191 int sptlrpc_svc_install_rvs_ctx(struct obd_import *imp,
1192 struct ptlrpc_svc_ctx *ctx);
1193 int sptlrpc_cli_install_rvs_ctx(struct obd_import *imp,
1194 struct ptlrpc_cli_ctx *ctx);
1196 /* bulk security api */
1197 int sptlrpc_cli_wrap_bulk(struct ptlrpc_request *req,
1198 struct ptlrpc_bulk_desc *desc);
1199 int sptlrpc_cli_unwrap_bulk_read(struct ptlrpc_request *req,
1200 struct ptlrpc_bulk_desc *desc, int nob);
1201 int sptlrpc_cli_unwrap_bulk_write(struct ptlrpc_request *req,
1202 struct ptlrpc_bulk_desc *desc);
1203 #ifdef HAVE_SERVER_SUPPORT
1204 int sptlrpc_svc_prep_bulk(struct ptlrpc_request *req,
1205 struct ptlrpc_bulk_desc *desc);
1206 int sptlrpc_svc_wrap_bulk(struct ptlrpc_request *req,
1207 struct ptlrpc_bulk_desc *desc);
1208 int sptlrpc_svc_unwrap_bulk(struct ptlrpc_request *req,
1209 struct ptlrpc_bulk_desc *desc);
1212 /* bulk helpers (internal use only by policies) */
1213 int sptlrpc_get_bulk_checksum(struct ptlrpc_bulk_desc *desc, __u8 alg,
1214 void *buf, int buflen);
1216 int bulk_sec_desc_unpack(struct lustre_msg *msg, int offset, int swabbed);
1218 /* user descriptor helpers */
1219 static inline int sptlrpc_user_desc_size(int ngroups)
1221 return sizeof(struct ptlrpc_user_desc) + ngroups * sizeof(__u32);
1224 int sptlrpc_current_user_desc_size(void);
1225 int sptlrpc_pack_user_desc(struct lustre_msg *msg, int offset);
1226 int sptlrpc_unpack_user_desc(struct lustre_msg *req, int offset, int swabbed);
1230 #endif /* _LUSTRE_SEC_H_ */