4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.gnu.org/licenses/gpl-2.0.html
23 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Use is subject to license terms.
26 * Copyright (c) 2012, Intel Corporation.
29 * This file is part of Lustre, http://www.lustre.org/
30 * Lustre is a trademark of Sun Microsystems, Inc.
33 #ifndef _LUSTRE_SEC_H_
34 #define _LUSTRE_SEC_H_
36 /** \defgroup sptlrpc sptlrpc
46 struct ptlrpc_request;
47 struct ptlrpc_reply_state;
48 struct ptlrpc_bulk_desc;
58 struct ptlrpc_sec_policy;
59 struct ptlrpc_sec_cops;
60 struct ptlrpc_sec_sops;
62 struct ptlrpc_svc_ctx;
63 struct ptlrpc_cli_ctx;
64 struct ptlrpc_ctx_ops;
67 * \addtogroup flavor flavor
69 * RPC flavor is represented by a 32 bits integer. Currently the high 12 bits
70 * are unused, must be set to 0 for future expansion.
72 * ------------------------------------------------------------------------
73 * | 4b (bulk svc) | 4b (bulk type) | 4b (svc) | 4b (mech) | 4b (policy) |
74 * ------------------------------------------------------------------------
84 SPTLRPC_POLICY_NULL = 0,
85 SPTLRPC_POLICY_PLAIN = 1,
86 SPTLRPC_POLICY_GSS = 2,
90 enum sptlrpc_mech_null {
91 SPTLRPC_MECH_NULL = 0,
92 SPTLRPC_MECH_NULL_MAX,
95 enum sptlrpc_mech_plain {
96 SPTLRPC_MECH_PLAIN = 0,
97 SPTLRPC_MECH_PLAIN_MAX,
100 enum sptlrpc_mech_gss {
101 SPTLRPC_MECH_GSS_NULL = 0,
102 SPTLRPC_MECH_GSS_KRB5 = 1,
103 SPTLRPC_MECH_GSS_SK = 2,
104 SPTLRPC_MECH_GSS_MAX,
107 enum sptlrpc_service_type {
108 SPTLRPC_SVC_NULL = 0, /**< no security */
109 SPTLRPC_SVC_AUTH = 1, /**< authentication only */
110 SPTLRPC_SVC_INTG = 2, /**< integrity */
111 SPTLRPC_SVC_PRIV = 3, /**< privacy */
115 enum sptlrpc_bulk_type {
116 SPTLRPC_BULK_DEFAULT = 0, /**< follow rpc flavor */
117 SPTLRPC_BULK_HASH = 1, /**< hash integrity */
121 enum sptlrpc_bulk_service {
122 SPTLRPC_BULK_SVC_NULL = 0, /**< no security */
123 SPTLRPC_BULK_SVC_AUTH = 1, /**< authentication only */
124 SPTLRPC_BULK_SVC_INTG = 2, /**< integrity */
125 SPTLRPC_BULK_SVC_PRIV = 3, /**< privacy */
126 SPTLRPC_BULK_SVC_MAX,
130 * compose/extract macros
/*
 * Bit offsets of the five 4-bit fields inside the 32-bit wire flavor.
 * Layout (matches the diagram above):
 *   bits 16..19 bulk svc | 12..15 bulk type | 8..11 svc | 4..7 mech | 0..3 policy
 */
132 #define FLVR_POLICY_OFFSET (0)
133 #define FLVR_MECH_OFFSET (4)
134 #define FLVR_SVC_OFFSET (8)
135 #define FLVR_BULK_TYPE_OFFSET (12)
136 #define FLVR_BULK_SVC_OFFSET (16)
/*
 * Compose a full 32-bit flavor from its five components.
 * NOTE(review): values are shifted but not masked, so each argument is
 * assumed to fit in 4 bits -- callers pass the SPTLRPC_* enum constants.
 */
138 #define MAKE_FLVR(policy, mech, svc, btype, bsvc) \
139 (((__u32)(policy) << FLVR_POLICY_OFFSET) | \
140 ((__u32)(mech) << FLVR_MECH_OFFSET) | \
141 ((__u32)(svc) << FLVR_SVC_OFFSET) | \
142 ((__u32)(btype) << FLVR_BULK_TYPE_OFFSET) | \
143 ((__u32)(bsvc) << FLVR_BULK_SVC_OFFSET))
/* Extract one 4-bit field from a wire flavor (inverse of MAKE_FLVR). */
148 #define SPTLRPC_FLVR_POLICY(flavor) \
149 ((((__u32)(flavor)) >> FLVR_POLICY_OFFSET) & 0xF)
150 #define SPTLRPC_FLVR_MECH(flavor) \
151 ((((__u32)(flavor)) >> FLVR_MECH_OFFSET) & 0xF)
152 #define SPTLRPC_FLVR_SVC(flavor) \
153 ((((__u32)(flavor)) >> FLVR_SVC_OFFSET) & 0xF)
154 #define SPTLRPC_FLVR_BULK_TYPE(flavor) \
155 ((((__u32)(flavor)) >> FLVR_BULK_TYPE_OFFSET) & 0xF)
156 #define SPTLRPC_FLVR_BULK_SVC(flavor) \
157 ((((__u32)(flavor)) >> FLVR_BULK_SVC_OFFSET) & 0xF)
/* Non-bulk part of the flavor: policy + mech + svc (low 12 bits). */
159 #define SPTLRPC_FLVR_BASE(flavor) \
160 ((((__u32)(flavor)) >> FLVR_POLICY_OFFSET) & 0xFFF)
/* Sub-flavor: mech + svc with the policy nibble stripped (8 bits). */
161 #define SPTLRPC_FLVR_BASE_SUB(flavor) \
162 ((((__u32)(flavor)) >> FLVR_MECH_OFFSET) & 0xFF)
167 #define MAKE_BASE_SUBFLVR(mech, svc) \
169 ((__u32)(svc) << (FLVR_SVC_OFFSET - FLVR_MECH_OFFSET)))
/*
 * Pre-composed GSS sub-flavors (mech + service), built via
 * MAKE_BASE_SUBFLVR().  Suffix convention: N = null service,
 * A = authentication, I = integrity, P/PI = privacy; KRB5* use the
 * kerberos 5 mech, SK* the shared-key mech, GSSNULL the gss-null mech.
 */
171 #define SPTLRPC_SUBFLVR_GSSNULL \
172 MAKE_BASE_SUBFLVR(SPTLRPC_MECH_GSS_NULL, SPTLRPC_SVC_NULL)
173 #define SPTLRPC_SUBFLVR_KRB5N \
174 MAKE_BASE_SUBFLVR(SPTLRPC_MECH_GSS_KRB5, SPTLRPC_SVC_NULL)
175 #define SPTLRPC_SUBFLVR_KRB5A \
176 MAKE_BASE_SUBFLVR(SPTLRPC_MECH_GSS_KRB5, SPTLRPC_SVC_AUTH)
177 #define SPTLRPC_SUBFLVR_KRB5I \
178 MAKE_BASE_SUBFLVR(SPTLRPC_MECH_GSS_KRB5, SPTLRPC_SVC_INTG)
179 #define SPTLRPC_SUBFLVR_KRB5P \
180 MAKE_BASE_SUBFLVR(SPTLRPC_MECH_GSS_KRB5, SPTLRPC_SVC_PRIV)
181 #define SPTLRPC_SUBFLVR_SKN \
182 MAKE_BASE_SUBFLVR(SPTLRPC_MECH_GSS_SK, SPTLRPC_SVC_NULL)
183 #define SPTLRPC_SUBFLVR_SKA \
184 MAKE_BASE_SUBFLVR(SPTLRPC_MECH_GSS_SK, SPTLRPC_SVC_AUTH)
185 #define SPTLRPC_SUBFLVR_SKI \
186 MAKE_BASE_SUBFLVR(SPTLRPC_MECH_GSS_SK, SPTLRPC_SVC_INTG)
187 #define SPTLRPC_SUBFLVR_SKPI \
188 MAKE_BASE_SUBFLVR(SPTLRPC_MECH_GSS_SK, SPTLRPC_SVC_PRIV)
193 #define SPTLRPC_FLVR_NULL \
194 MAKE_FLVR(SPTLRPC_POLICY_NULL, \
197 SPTLRPC_BULK_DEFAULT, \
198 SPTLRPC_BULK_SVC_NULL)
199 #define SPTLRPC_FLVR_PLAIN \
200 MAKE_FLVR(SPTLRPC_POLICY_PLAIN, \
201 SPTLRPC_MECH_PLAIN, \
204 SPTLRPC_BULK_SVC_INTG)
205 #define SPTLRPC_FLVR_GSSNULL \
206 MAKE_FLVR(SPTLRPC_POLICY_GSS, \
207 SPTLRPC_MECH_GSS_NULL, \
209 SPTLRPC_BULK_DEFAULT, \
210 SPTLRPC_BULK_SVC_NULL)
211 #define SPTLRPC_FLVR_KRB5N \
212 MAKE_FLVR(SPTLRPC_POLICY_GSS, \
213 SPTLRPC_MECH_GSS_KRB5, \
215 SPTLRPC_BULK_DEFAULT, \
216 SPTLRPC_BULK_SVC_NULL)
217 #define SPTLRPC_FLVR_KRB5A \
218 MAKE_FLVR(SPTLRPC_POLICY_GSS, \
219 SPTLRPC_MECH_GSS_KRB5, \
221 SPTLRPC_BULK_DEFAULT, \
222 SPTLRPC_BULK_SVC_NULL)
223 #define SPTLRPC_FLVR_KRB5I \
224 MAKE_FLVR(SPTLRPC_POLICY_GSS, \
225 SPTLRPC_MECH_GSS_KRB5, \
227 SPTLRPC_BULK_DEFAULT, \
228 SPTLRPC_BULK_SVC_INTG)
229 #define SPTLRPC_FLVR_KRB5P \
230 MAKE_FLVR(SPTLRPC_POLICY_GSS, \
231 SPTLRPC_MECH_GSS_KRB5, \
233 SPTLRPC_BULK_DEFAULT, \
234 SPTLRPC_BULK_SVC_PRIV)
235 #define SPTLRPC_FLVR_SKN \
236 MAKE_FLVR(SPTLRPC_POLICY_GSS, \
237 SPTLRPC_MECH_GSS_SK, \
239 SPTLRPC_BULK_DEFAULT, \
240 SPTLRPC_BULK_SVC_NULL)
241 #define SPTLRPC_FLVR_SKA \
242 MAKE_FLVR(SPTLRPC_POLICY_GSS, \
243 SPTLRPC_MECH_GSS_SK, \
245 SPTLRPC_BULK_DEFAULT, \
246 SPTLRPC_BULK_SVC_NULL)
247 #define SPTLRPC_FLVR_SKI \
248 MAKE_FLVR(SPTLRPC_POLICY_GSS, \
249 SPTLRPC_MECH_GSS_SK, \
251 SPTLRPC_BULK_DEFAULT, \
252 SPTLRPC_BULK_SVC_INTG)
253 #define SPTLRPC_FLVR_SKPI \
254 MAKE_FLVR(SPTLRPC_POLICY_GSS, \
255 SPTLRPC_MECH_GSS_SK, \
257 SPTLRPC_BULK_DEFAULT, \
258 SPTLRPC_BULK_SVC_PRIV)
/* Flavor used when nothing is configured: no security. */
260 #define SPTLRPC_FLVR_DEFAULT SPTLRPC_FLVR_NULL
/* Sentinel flavors, never valid on the wire (high bits set). */
262 #define SPTLRPC_FLVR_INVALID ((__u32) 0xFFFFFFFF)
263 #define SPTLRPC_FLVR_ANY ((__u32) 0xFFF00000)
266 * extract the useful part from wire flavor
/*
 * Keep only the defined low 20 bits (5 x 4-bit fields); the high 12 bits
 * are reserved and must read as zero per the layout comment above.
 */
268 #define WIRE_FLVR(wflvr) (((__u32) (wflvr)) & 0x000FFFFF)
272 static inline void flvr_set_svc(__u32 *flvr, __u32 svc)
274 LASSERT(svc < SPTLRPC_SVC_MAX);
275 *flvr = MAKE_FLVR(SPTLRPC_FLVR_POLICY(*flvr),
276 SPTLRPC_FLVR_MECH(*flvr),
278 SPTLRPC_FLVR_BULK_TYPE(*flvr),
279 SPTLRPC_FLVR_BULK_SVC(*flvr));
282 static inline void flvr_set_bulk_svc(__u32 *flvr, __u32 svc)
284 LASSERT(svc < SPTLRPC_BULK_SVC_MAX);
285 *flvr = MAKE_FLVR(SPTLRPC_FLVR_POLICY(*flvr),
286 SPTLRPC_FLVR_MECH(*flvr),
287 SPTLRPC_FLVR_SVC(*flvr),
288 SPTLRPC_FLVR_BULK_TYPE(*flvr),
292 struct bulk_spec_hash {
297 * Full description of flavors being used on a ptlrpc connection, include
298 * both regular RPC and bulk transfer parts.
300 struct sptlrpc_flavor {
302 * wire flavor, should be renamed to sf_wire.
306 * general flags of PTLRPC_SEC_FL_*
310 * rpc flavor specification
313 /* nothing for now */
316 * bulk flavor specification
319 struct bulk_spec_hash hash;
324 * identify the RPC is generated from what part of Lustre. It's encoded into
325 * RPC requests and to be checked by ptlrpc service.
327 enum lustre_sec_part {
336 const char *sptlrpc_part2name(enum lustre_sec_part sp);
337 enum lustre_sec_part sptlrpc_target_sec_part(struct obd_device *obd);
340 * A rule specifies a flavor to be used by a ptlrpc connection between
343 struct sptlrpc_rule {
344 __u32 sr_netid; /* LNET network ID */
345 __u8 sr_from; /* sec_part */
346 __u8 sr_to; /* sec_part */
348 struct sptlrpc_flavor sr_flvr;
352 * A set of rules in memory.
354 * Rules are generated and stored on MGS, and propagated to MDT, OST,
355 * and client when needed.
357 struct sptlrpc_rule_set {
360 struct sptlrpc_rule *srs_rules;
363 int sptlrpc_parse_flavor(const char *str, struct sptlrpc_flavor *flvr);
364 int sptlrpc_flavor_has_bulk(struct sptlrpc_flavor *flvr);
366 static inline void sptlrpc_rule_set_init(struct sptlrpc_rule_set *set)
368 memset(set, 0, sizeof(*set));
371 void sptlrpc_rule_set_free(struct sptlrpc_rule_set *set);
372 int sptlrpc_rule_set_expand(struct sptlrpc_rule_set *set);
373 int sptlrpc_rule_set_merge(struct sptlrpc_rule_set *set,
374 struct sptlrpc_rule *rule);
375 int sptlrpc_rule_set_choose(struct sptlrpc_rule_set *rset,
376 enum lustre_sec_part from,
377 enum lustre_sec_part to,
379 struct sptlrpc_flavor *sf);
380 void sptlrpc_rule_set_dump(struct sptlrpc_rule_set *set);
382 int sptlrpc_process_config(struct lustre_cfg *lcfg);
383 void sptlrpc_conf_log_start(const char *logname);
384 void sptlrpc_conf_log_stop(const char *logname);
385 void sptlrpc_conf_log_update_begin(const char *logname);
386 void sptlrpc_conf_log_update_end(const char *logname);
387 void sptlrpc_conf_client_adapt(struct obd_device *obd);
388 int sptlrpc_conf_target_get_rules(struct obd_device *obd,
389 struct sptlrpc_rule_set *rset,
391 void sptlrpc_target_choose_flavor(struct sptlrpc_rule_set *rset,
392 enum lustre_sec_part from,
394 struct sptlrpc_flavor *flavor);
396 /* The maximum length of security payload. 1024 is enough for Kerberos 5,
397 * and should be enough for other future mechanisms but not sure.
398 * Only used by pre-allocated request/reply pool.
400 #define SPTLRPC_MAX_PAYLOAD (1024)
408 struct ptlrpc_ctx_ops {
410 * To determine whether it's suitable to use the \a ctx for \a vcred.
412 int (*match) (struct ptlrpc_cli_ctx *ctx,
413 struct vfs_cred *vcred);
416 * To bring the \a ctx uptodate.
418 int (*refresh) (struct ptlrpc_cli_ctx *ctx);
421 * Validate the \a ctx.
423 int (*validate) (struct ptlrpc_cli_ctx *ctx);
426 * Force the \a ctx to die.
428 void (*die) (struct ptlrpc_cli_ctx *ctx,
430 int (*display) (struct ptlrpc_cli_ctx *ctx,
431 char *buf, int bufsize);
434 * Sign the request message using \a ctx.
436 * \pre req->rq_reqmsg point to request message.
437 * \pre req->rq_reqlen is the request message length.
438 * \post req->rq_reqbuf point to request message with signature.
439 * \post req->rq_reqdata_len is set to the final request message size.
441 * \see null_ctx_sign(), plain_ctx_sign(), gss_cli_ctx_sign().
443 int (*sign) (struct ptlrpc_cli_ctx *ctx,
444 struct ptlrpc_request *req);
447 * Verify the reply message using \a ctx.
449 * \pre req->rq_repdata point to reply message with signature.
450 * \pre req->rq_repdata_len is the total reply message length.
451 * \post req->rq_repmsg point to reply message without signature.
452 * \post req->rq_replen is the reply message length.
454 * \see null_ctx_verify(), plain_ctx_verify(), gss_cli_ctx_verify().
456 int (*verify) (struct ptlrpc_cli_ctx *ctx,
457 struct ptlrpc_request *req);
460 * Encrypt the request message using \a ctx.
462 * \pre req->rq_reqmsg point to request message in clear text.
463 * \pre req->rq_reqlen is the request message length.
464 * \post req->rq_reqbuf point to request message.
465 * \post req->rq_reqdata_len is set to the final request message size.
467 * \see gss_cli_ctx_seal().
469 int (*seal) (struct ptlrpc_cli_ctx *ctx,
470 struct ptlrpc_request *req);
473 * Decrypt the reply message using \a ctx.
475 * \pre req->rq_repdata point to encrypted reply message.
476 * \pre req->rq_repdata_len is the total cipher text length.
477 * \post req->rq_repmsg point to reply message in clear text.
478 * \post req->rq_replen is the reply message length in clear text.
480 * \see gss_cli_ctx_unseal().
482 int (*unseal) (struct ptlrpc_cli_ctx *ctx,
483 struct ptlrpc_request *req);
486 * Wrap bulk request data. This is called before wrapping RPC
489 * \pre bulk buffer is descripted by desc->bd_iov and
490 * desc->bd_iov_count. note for read it's just buffer, no data
491 * need to be sent; for write it contains data in clear text.
492 * \post when necessary, ptlrpc_bulk_sec_desc was properly prepared
493 * (usually inside of RPC request message).
494 * - encryption: cipher text bulk buffer is descripted by
495 * desc->bd_enc_iov and desc->bd_iov_count (currently assume iov
496 * count remains the same).
497 * - otherwise: bulk buffer is still desc->bd_iov and
498 * desc->bd_iov_count.
500 * \return 0: success.
501 * \return -ev: error code.
503 * \see plain_cli_wrap_bulk(), gss_cli_ctx_wrap_bulk().
505 int (*wrap_bulk) (struct ptlrpc_cli_ctx *ctx,
506 struct ptlrpc_request *req,
507 struct ptlrpc_bulk_desc *desc);
510 * Unwrap bulk reply data. This is called after wrapping RPC
513 * \pre bulk buffer is descripted by desc->bd_iov/desc->bd_enc_iov and
514 * desc->bd_iov_count, according to wrap_bulk().
515 * \post final bulk data in clear text is placed in buffer described
516 * by desc->bd_iov and desc->bd_iov_count.
517 * \return +ve nob of actual bulk data in clear text.
518 * \return -ve error code.
520 * \see plain_cli_unwrap_bulk(), gss_cli_ctx_unwrap_bulk().
522 int (*unwrap_bulk) (struct ptlrpc_cli_ctx *ctx,
523 struct ptlrpc_request *req,
524 struct ptlrpc_bulk_desc *desc);
/*
 * Bit numbers for ptlrpc_cli_ctx::cc_flags.  Bits 0-3 describe the
 * refresh/validity state of the context; bits 8+ are bookkeeping flags.
 */
527 #define PTLRPC_CTX_NEW_BIT (0) /* newly created */
528 #define PTLRPC_CTX_UPTODATE_BIT (1) /* uptodate */
529 #define PTLRPC_CTX_DEAD_BIT (2) /* mark expired gracefully */
530 #define PTLRPC_CTX_ERROR_BIT (3) /* fatal error (refresh, etc.) */
531 #define PTLRPC_CTX_CACHED_BIT (8) /* in ctx cache (hash etc.) */
532 #define PTLRPC_CTX_ETERNAL_BIT (9) /* always valid */
/* Mask form of each flag, derived from the bit numbers above. */
534 #define PTLRPC_CTX_NEW (1 << PTLRPC_CTX_NEW_BIT)
535 #define PTLRPC_CTX_UPTODATE (1 << PTLRPC_CTX_UPTODATE_BIT)
536 #define PTLRPC_CTX_DEAD (1 << PTLRPC_CTX_DEAD_BIT)
537 #define PTLRPC_CTX_ERROR (1 << PTLRPC_CTX_ERROR_BIT)
538 #define PTLRPC_CTX_CACHED (1 << PTLRPC_CTX_CACHED_BIT)
539 #define PTLRPC_CTX_ETERNAL (1 << PTLRPC_CTX_ETERNAL_BIT)
541 #define PTLRPC_CTX_STATUS_MASK (PTLRPC_CTX_NEW_BIT | \
542 PTLRPC_CTX_UPTODATE | \
546 struct ptlrpc_cli_ctx {
547 struct hlist_node cc_cache; /* linked into ctx cache */
548 atomic_t cc_refcount;
549 struct ptlrpc_sec *cc_sec;
550 struct ptlrpc_ctx_ops *cc_ops;
551 cfs_time_t cc_expire; /* in seconds */
552 unsigned int cc_early_expire:1;
553 unsigned long cc_flags;
554 struct vfs_cred cc_vcred;
556 struct list_head cc_req_list; /* waiting reqs linked here */
557 struct list_head cc_gc_chain; /* linked to gc chain */
561 * client side policy operation vector.
563 struct ptlrpc_sec_cops {
565 * Given an \a imp, create and initialize a ptlrpc_sec structure.
566 * \param ctx service context:
567 * - regular import: \a ctx should be NULL;
568 * - reverse import: \a ctx is obtained from incoming request.
569 * \param flavor specify what flavor to use.
571 * When necessary, policy module is responsible for taking reference
574 * \see null_create_sec(), plain_create_sec(), gss_sec_create_kr().
576 struct ptlrpc_sec * (*create_sec) (struct obd_import *imp,
577 struct ptlrpc_svc_ctx *ctx,
578 struct sptlrpc_flavor *flavor);
581 * Destructor of ptlrpc_sec. When called, refcount has been dropped
582 * to 0 and all contexts has been destroyed.
584 * \see null_destroy_sec(), plain_destroy_sec(), gss_sec_destroy_kr().
586 void (*destroy_sec) (struct ptlrpc_sec *sec);
589 * Notify that this ptlrpc_sec is going to die. Optionally, policy
590 * module is supposed to set sec->ps_dying and whatever necessary
593 * \see plain_kill_sec(), gss_sec_kill().
595 void (*kill_sec) (struct ptlrpc_sec *sec);
598 * Given \a vcred, lookup and/or create its context. The policy module
599 * is supposed to maintain its own context cache.
600 * XXX currently \a create and \a remove_dead is always 1, perhaps
601 * should be removed completely.
603 * \see null_lookup_ctx(), plain_lookup_ctx(), gss_sec_lookup_ctx_kr().
605 struct ptlrpc_cli_ctx * (*lookup_ctx) (struct ptlrpc_sec *sec,
606 struct vfs_cred *vcred,
611 * Called then the reference of \a ctx dropped to 0. The policy module
612 * is supposed to destroy this context or whatever else according to
613 * its cache maintainance mechamism.
615 * \param sync if zero, we shouldn't wait for the context being
616 * destroyed completely.
618 * \see plain_release_ctx(), gss_sec_release_ctx_kr().
620 void (*release_ctx) (struct ptlrpc_sec *sec,
621 struct ptlrpc_cli_ctx *ctx,
625 * Flush the context cache.
627 * \param uid context of which user, -1 means all contexts.
628 * \param grace if zero, the PTLRPC_CTX_UPTODATE_BIT of affected
629 * contexts should be cleared immediately.
630 * \param force if zero, only idle contexts will be flushed.
632 * \see plain_flush_ctx_cache(), gss_sec_flush_ctx_cache_kr().
634 int (*flush_ctx_cache)
635 (struct ptlrpc_sec *sec,
641 * Called periodically by garbage collector to remove dead contexts
644 * \see gss_sec_gc_ctx_kr().
646 void (*gc_ctx) (struct ptlrpc_sec *sec);
649 * Given an context \a ctx, install a corresponding reverse service
650 * context on client side.
651 * XXX currently it's only used by GSS module, maybe we should remove
652 * this from general API.
654 int (*install_rctx)(struct obd_import *imp,
655 struct ptlrpc_sec *sec,
656 struct ptlrpc_cli_ctx *ctx);
659 * To allocate request buffer for \a req.
661 * \pre req->rq_reqmsg == NULL.
662 * \pre req->rq_reqbuf == NULL, otherwise it must be pre-allocated,
663 * we are not supposed to free it.
664 * \post if success, req->rq_reqmsg point to a buffer with size
665 * at least \a lustre_msg_size.
667 * \see null_alloc_reqbuf(), plain_alloc_reqbuf(), gss_alloc_reqbuf().
669 int (*alloc_reqbuf)(struct ptlrpc_sec *sec,
670 struct ptlrpc_request *req,
671 int lustre_msg_size);
674 * To free request buffer for \a req.
676 * \pre req->rq_reqbuf != NULL.
678 * \see null_free_reqbuf(), plain_free_reqbuf(), gss_free_reqbuf().
680 void (*free_reqbuf) (struct ptlrpc_sec *sec,
681 struct ptlrpc_request *req);
684 * To allocate reply buffer for \a req.
686 * \pre req->rq_repbuf == NULL.
687 * \post if success, req->rq_repbuf point to a buffer with size
688 * req->rq_repbuf_len, the size should be large enough to receive
689 * reply which be transformed from \a lustre_msg_size of clear text.
691 * \see null_alloc_repbuf(), plain_alloc_repbuf(), gss_alloc_repbuf().
693 int (*alloc_repbuf)(struct ptlrpc_sec *sec,
694 struct ptlrpc_request *req,
695 int lustre_msg_size);
698 * To free reply buffer for \a req.
700 * \pre req->rq_repbuf != NULL.
701 * \post req->rq_repbuf == NULL.
702 * \post req->rq_repbuf_len == 0.
704 * \see null_free_repbuf(), plain_free_repbuf(), gss_free_repbuf().
706 void (*free_repbuf) (struct ptlrpc_sec *sec,
707 struct ptlrpc_request *req);
710 * To expand the request buffer of \a req, thus the \a segment in
711 * the request message pointed by req->rq_reqmsg can accommodate
712 * at least \a newsize of data.
714 * \pre req->rq_reqmsg->lm_buflens[segment] < newsize.
716 * \see null_enlarge_reqbuf(), plain_enlarge_reqbuf(),
717 * gss_enlarge_reqbuf().
719 int (*enlarge_reqbuf)
720 (struct ptlrpc_sec *sec,
721 struct ptlrpc_request *req,
722 int segment, int newsize);
726 int (*display) (struct ptlrpc_sec *sec,
727 struct seq_file *seq);
731 * server side policy operation vector.
733 struct ptlrpc_sec_sops {
735 * verify an incoming request.
737 * \pre request message is pointed by req->rq_reqbuf, size is
738 * req->rq_reqdata_len; and the message has been unpacked to
741 * \retval SECSVC_OK success, req->rq_reqmsg point to request message
742 * in clear text, size is req->rq_reqlen; req->rq_svc_ctx is set;
743 * req->rq_sp_from is decoded from request.
744 * \retval SECSVC_COMPLETE success, the request has been fully
745 * processed, and reply message has been prepared; req->rq_sp_from is
746 * decoded from request.
747 * \retval SECSVC_DROP failed, this request should be dropped.
749 * \see null_accept(), plain_accept(), gss_svc_accept_kr().
751 int (*accept) (struct ptlrpc_request *req);
754 * Perform security transformation upon reply message.
756 * \pre reply message is pointed by req->rq_reply_state->rs_msg, size
758 * \post req->rs_repdata_len is the final message size.
759 * \post req->rq_reply_off is set.
761 * \see null_authorize(), plain_authorize(), gss_svc_authorize().
763 int (*authorize) (struct ptlrpc_request *req);
766 * Invalidate server context \a ctx.
768 * \see gss_svc_invalidate_ctx().
770 void (*invalidate_ctx)
771 (struct ptlrpc_svc_ctx *ctx);
774 * Allocate a ptlrpc_reply_state.
776 * \param msgsize size of the reply message in clear text.
777 * \pre if req->rq_reply_state != NULL, then it's pre-allocated, we
778 * should simply use it; otherwise we'll responsible for allocating
780 * \post req->rq_reply_state != NULL;
781 * \post req->rq_reply_state->rs_msg != NULL;
783 * \see null_alloc_rs(), plain_alloc_rs(), gss_svc_alloc_rs().
785 int (*alloc_rs) (struct ptlrpc_request *req,
789 * Free a ptlrpc_reply_state.
791 void (*free_rs) (struct ptlrpc_reply_state *rs);
794 * Release the server context \a ctx.
796 * \see gss_svc_free_ctx().
798 void (*free_ctx) (struct ptlrpc_svc_ctx *ctx);
801 * Install a reverse context based on the server context \a ctx.
803 * \see gss_svc_install_rctx_kr().
805 int (*install_rctx)(struct obd_import *imp,
806 struct ptlrpc_svc_ctx *ctx);
809 * Prepare buffer for incoming bulk write.
811 * \pre desc->bd_iov and desc->bd_iov_count describes the buffer
812 * intended to receive the write.
814 * \see gss_svc_prep_bulk().
816 int (*prep_bulk) (struct ptlrpc_request *req,
817 struct ptlrpc_bulk_desc *desc);
820 * Unwrap the bulk write data.
822 * \see plain_svc_unwrap_bulk(), gss_svc_unwrap_bulk().
824 int (*unwrap_bulk) (struct ptlrpc_request *req,
825 struct ptlrpc_bulk_desc *desc);
828 * Wrap the bulk read data.
830 * \see plain_svc_wrap_bulk(), gss_svc_wrap_bulk().
832 int (*wrap_bulk) (struct ptlrpc_request *req,
833 struct ptlrpc_bulk_desc *desc);
836 struct ptlrpc_sec_policy {
837 struct module *sp_owner;
839 __u16 sp_policy; /* policy number */
840 struct ptlrpc_sec_cops *sp_cops; /* client ops */
841 struct ptlrpc_sec_sops *sp_sops; /* server ops */
/*
 * General security flags (sptlrpc_flavor::sf_flags, PTLRPC_SEC_FL_*);
 * tested by sec_is_reverse()/sec_is_rootonly() below.
 */
844 #define PTLRPC_SEC_FL_REVERSE 0x0001 /* reverse sec */
845 #define PTLRPC_SEC_FL_ROOTONLY 0x0002 /* treat everyone as root */
846 #define PTLRPC_SEC_FL_UDESC 0x0004 /* ship udesc */
847 #define PTLRPC_SEC_FL_BULK 0x0008 /* intensive bulk i/o expected */
848 #define PTLRPC_SEC_FL_PAG 0x0010 /* PAG mode */
851 * The ptlrpc_sec represents the client side ptlrpc security facilities,
852 * each obd_import (both regular and reverse import) must associate with
855 * \see sptlrpc_import_sec_adapt().
858 struct ptlrpc_sec_policy *ps_policy;
859 atomic_t ps_refcount;
860 /** statistic only */
862 /** unique identifier */
864 struct sptlrpc_flavor ps_flvr;
865 enum lustre_sec_part ps_part;
866 /** after set, no more new context will be created */
867 unsigned int ps_dying:1;
869 struct obd_import *ps_import;
875 struct list_head ps_gc_list;
876 cfs_time_t ps_gc_interval; /* in seconds */
877 cfs_time_t ps_gc_next; /* in seconds */
880 static inline int flvr_is_rootonly(__u32 flavor)
882 return (SPTLRPC_FLVR_POLICY(flavor) == SPTLRPC_POLICY_GSS &&
883 (SPTLRPC_FLVR_MECH(flavor) == SPTLRPC_MECH_GSS_NULL ||
884 SPTLRPC_FLVR_MECH(flavor) == SPTLRPC_MECH_GSS_SK));
887 static inline int flvr_allows_user_desc(__u32 flavor)
889 return (SPTLRPC_FLVR_POLICY(flavor) == SPTLRPC_POLICY_GSS &&
890 (SPTLRPC_FLVR_MECH(flavor) == SPTLRPC_MECH_GSS_NULL ||
891 SPTLRPC_FLVR_MECH(flavor) == SPTLRPC_MECH_GSS_SK));
894 static inline int sec_is_reverse(struct ptlrpc_sec *sec)
896 return (sec->ps_flvr.sf_flags & PTLRPC_SEC_FL_REVERSE);
899 static inline int sec_is_rootonly(struct ptlrpc_sec *sec)
901 return (sec->ps_flvr.sf_flags & PTLRPC_SEC_FL_ROOTONLY);
905 struct ptlrpc_svc_ctx {
906 atomic_t sc_refcount;
907 struct ptlrpc_sec_policy *sc_policy;
911 * user identity descriptor
913 #define LUSTRE_MAX_GROUPS (128)
915 struct ptlrpc_user_desc {
928 enum sptlrpc_bulk_hash_alg {
929 BULK_HASH_ALG_NULL = 0,
930 BULK_HASH_ALG_ADLER32,
934 BULK_HASH_ALG_SHA256,
935 BULK_HASH_ALG_SHA384,
936 BULK_HASH_ALG_SHA512,
940 const char * sptlrpc_get_hash_name(__u8 hash_alg);
941 __u8 sptlrpc_get_hash_alg(const char *algname);
947 struct ptlrpc_bulk_sec_desc {
948 __u8 bsd_version; /* 0 */
949 __u8 bsd_type; /* SPTLRPC_BULK_XXX */
950 __u8 bsd_svc; /* SPTLRPC_BULK_SVC_XXXX */
951 __u8 bsd_flags; /* flags */
952 __u32 bsd_nob; /* nob of bulk data */
953 __u8 bsd_data[0]; /* policy-specific token */
960 struct proc_dir_entry;
961 extern struct proc_dir_entry *sptlrpc_proc_root;
964 * round size up to next power of 2, for slab allocation.
965 * @size must be sane (can't overflow after round up)
967 static inline int size_roundup_power2(int size)
980 * internal support libraries
982 void _sptlrpc_enlarge_msg_inplace(struct lustre_msg *msg,
983 int segment, int newsize);
988 int sptlrpc_register_policy(struct ptlrpc_sec_policy *policy);
989 int sptlrpc_unregister_policy(struct ptlrpc_sec_policy *policy);
991 __u32 sptlrpc_name2flavor_base(const char *name);
992 const char *sptlrpc_flavor2name_base(__u32 flvr);
993 char *sptlrpc_flavor2name_bulk(struct sptlrpc_flavor *sf,
994 char *buf, int bufsize);
995 char *sptlrpc_flavor2name(struct sptlrpc_flavor *sf, char *buf, int bufsize);
996 char *sptlrpc_secflags2str(__u32 flags, char *buf, int bufsize);
998 static inline struct ptlrpc_sec_policy *
999 sptlrpc_policy_get(struct ptlrpc_sec_policy *policy)
1001 __module_get(policy->sp_owner);
1006 sptlrpc_policy_put(struct ptlrpc_sec_policy *policy)
1008 module_put(policy->sp_owner);
1015 unsigned long cli_ctx_status(struct ptlrpc_cli_ctx *ctx)
1017 return (ctx->cc_flags & PTLRPC_CTX_STATUS_MASK);
1021 int cli_ctx_is_ready(struct ptlrpc_cli_ctx *ctx)
1023 return (cli_ctx_status(ctx) == PTLRPC_CTX_UPTODATE);
1027 int cli_ctx_is_refreshed(struct ptlrpc_cli_ctx *ctx)
1029 return (cli_ctx_status(ctx) != 0);
1033 int cli_ctx_is_uptodate(struct ptlrpc_cli_ctx *ctx)
1035 return ((ctx->cc_flags & PTLRPC_CTX_UPTODATE) != 0);
1039 int cli_ctx_is_error(struct ptlrpc_cli_ctx *ctx)
1041 return ((ctx->cc_flags & PTLRPC_CTX_ERROR) != 0);
1045 int cli_ctx_is_dead(struct ptlrpc_cli_ctx *ctx)
1047 return ((ctx->cc_flags & (PTLRPC_CTX_DEAD | PTLRPC_CTX_ERROR)) != 0);
1051 int cli_ctx_is_eternal(struct ptlrpc_cli_ctx *ctx)
1053 return ((ctx->cc_flags & PTLRPC_CTX_ETERNAL) != 0);
1059 struct ptlrpc_sec *sptlrpc_sec_get(struct ptlrpc_sec *sec);
1060 void sptlrpc_sec_put(struct ptlrpc_sec *sec);
1063 * internal apis which only used by policy impelentation
1065 int sptlrpc_get_next_secid(void);
1066 void sptlrpc_sec_destroy(struct ptlrpc_sec *sec);
1069 * exported client context api
1071 struct ptlrpc_cli_ctx *sptlrpc_cli_ctx_get(struct ptlrpc_cli_ctx *ctx);
1072 void sptlrpc_cli_ctx_put(struct ptlrpc_cli_ctx *ctx, int sync);
1073 void sptlrpc_cli_ctx_expire(struct ptlrpc_cli_ctx *ctx);
1074 void sptlrpc_cli_ctx_wakeup(struct ptlrpc_cli_ctx *ctx);
1075 int sptlrpc_cli_ctx_display(struct ptlrpc_cli_ctx *ctx, char *buf, int bufsize);
1078 * exported client context wrap/buffers
1080 int sptlrpc_cli_wrap_request(struct ptlrpc_request *req);
1081 int sptlrpc_cli_unwrap_reply(struct ptlrpc_request *req);
1082 int sptlrpc_cli_alloc_reqbuf(struct ptlrpc_request *req, int msgsize);
1083 void sptlrpc_cli_free_reqbuf(struct ptlrpc_request *req);
1084 int sptlrpc_cli_alloc_repbuf(struct ptlrpc_request *req, int msgsize);
1085 void sptlrpc_cli_free_repbuf(struct ptlrpc_request *req);
1086 int sptlrpc_cli_enlarge_reqbuf(struct ptlrpc_request *req,
1087 int segment, int newsize);
1088 int sptlrpc_cli_unwrap_early_reply(struct ptlrpc_request *req,
1089 struct ptlrpc_request **req_ret);
1090 void sptlrpc_cli_finish_early_reply(struct ptlrpc_request *early_req);
1092 void sptlrpc_request_out_callback(struct ptlrpc_request *req);
1095 * exported higher interface of import & request
1097 int sptlrpc_import_sec_adapt(struct obd_import *imp,
1098 struct ptlrpc_svc_ctx *ctx,
1099 struct sptlrpc_flavor *flvr);
1100 struct ptlrpc_sec *sptlrpc_import_sec_ref(struct obd_import *imp);
1101 void sptlrpc_import_sec_put(struct obd_import *imp);
1103 int sptlrpc_import_check_ctx(struct obd_import *imp);
1104 void sptlrpc_import_flush_root_ctx(struct obd_import *imp);
1105 void sptlrpc_import_flush_my_ctx(struct obd_import *imp);
1106 void sptlrpc_import_flush_all_ctx(struct obd_import *imp);
1107 int sptlrpc_req_get_ctx(struct ptlrpc_request *req);
1108 void sptlrpc_req_put_ctx(struct ptlrpc_request *req, int sync);
1109 int sptlrpc_req_refresh_ctx(struct ptlrpc_request *req, long timeout);
1110 int sptlrpc_req_replace_dead_ctx(struct ptlrpc_request *req);
1111 void sptlrpc_req_set_flavor(struct ptlrpc_request *req, int opcode);
1113 int sptlrpc_parse_rule(char *param, struct sptlrpc_rule *rule);
1116 void sptlrpc_gc_add_sec(struct ptlrpc_sec *sec);
1117 void sptlrpc_gc_del_sec(struct ptlrpc_sec *sec);
1118 void sptlrpc_gc_add_ctx(struct ptlrpc_cli_ctx *ctx);
1121 const char * sec2target_str(struct ptlrpc_sec *sec);
1122 int sptlrpc_lprocfs_cliobd_attach(struct obd_device *dev);
1127 enum secsvc_accept_res {
1133 int sptlrpc_svc_unwrap_request(struct ptlrpc_request *req);
1134 int sptlrpc_svc_alloc_rs(struct ptlrpc_request *req, int msglen);
1135 int sptlrpc_svc_wrap_reply(struct ptlrpc_request *req);
1136 void sptlrpc_svc_free_rs(struct ptlrpc_reply_state *rs);
1137 void sptlrpc_svc_ctx_addref(struct ptlrpc_request *req);
1138 void sptlrpc_svc_ctx_decref(struct ptlrpc_request *req);
1139 void sptlrpc_svc_ctx_invalidate(struct ptlrpc_request *req);
1141 int sptlrpc_target_export_check(struct obd_export *exp,
1142 struct ptlrpc_request *req);
1143 void sptlrpc_target_update_exp_flavor(struct obd_device *obd,
1144 struct sptlrpc_rule_set *rset);
1149 int sptlrpc_svc_install_rvs_ctx(struct obd_import *imp,
1150 struct ptlrpc_svc_ctx *ctx);
1151 int sptlrpc_cli_install_rvs_ctx(struct obd_import *imp,
1152 struct ptlrpc_cli_ctx *ctx);
1154 /* bulk security api */
1155 int sptlrpc_enc_pool_add_user(void);
1156 int sptlrpc_enc_pool_del_user(void);
1157 int sptlrpc_enc_pool_get_pages(struct ptlrpc_bulk_desc *desc);
1158 void sptlrpc_enc_pool_put_pages(struct ptlrpc_bulk_desc *desc);
1159 int get_free_pages_in_pool(void);
1160 int pool_is_at_full_capacity(void);
1162 int sptlrpc_cli_wrap_bulk(struct ptlrpc_request *req,
1163 struct ptlrpc_bulk_desc *desc);
1164 int sptlrpc_cli_unwrap_bulk_read(struct ptlrpc_request *req,
1165 struct ptlrpc_bulk_desc *desc,
1167 int sptlrpc_cli_unwrap_bulk_write(struct ptlrpc_request *req,
1168 struct ptlrpc_bulk_desc *desc);
1169 #ifdef HAVE_SERVER_SUPPORT
1170 int sptlrpc_svc_prep_bulk(struct ptlrpc_request *req,
1171 struct ptlrpc_bulk_desc *desc);
1172 int sptlrpc_svc_wrap_bulk(struct ptlrpc_request *req,
1173 struct ptlrpc_bulk_desc *desc);
1174 int sptlrpc_svc_unwrap_bulk(struct ptlrpc_request *req,
1175 struct ptlrpc_bulk_desc *desc);
1178 /* bulk helpers (internal use only by policies) */
1179 int sptlrpc_get_bulk_checksum(struct ptlrpc_bulk_desc *desc, __u8 alg,
1180 void *buf, int buflen);
1182 int bulk_sec_desc_unpack(struct lustre_msg *msg, int offset, int swabbed);
1184 /* user descriptor helpers */
1185 static inline int sptlrpc_user_desc_size(int ngroups)
1187 return sizeof(struct ptlrpc_user_desc) + ngroups * sizeof(__u32);
1190 int sptlrpc_current_user_desc_size(void);
1191 int sptlrpc_pack_user_desc(struct lustre_msg *msg, int offset);
1192 int sptlrpc_unpack_user_desc(struct lustre_msg *req, int offset, int swabbed);
/*
 * Bitmask forms of the CFS_CAP_* capability numbers (declared elsewhere);
 * presumably used to test/strip capabilities in ptlrpc_user_desc -- the
 * capability field itself is not visible in this header chunk.
 */
1195 #define CFS_CAP_CHOWN_MASK (1 << CFS_CAP_CHOWN)
1196 #define CFS_CAP_SYS_RESOURCE_MASK (1 << CFS_CAP_SYS_RESOURCE)
1200 #endif /* _LUSTRE_SEC_H_ */