4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
20 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
21 * CA 95054 USA or visit www.sun.com if you need additional information or
27 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
28 * Use is subject to license terms.
30 * Copyright (c) 2012, Intel Corporation.
33 * This file is part of Lustre, http://www.lustre.org/
34 * Lustre is a trademark of Sun Microsystems, Inc.
37 #ifndef _LUSTRE_SEC_H_
38 #define _LUSTRE_SEC_H_
40 /** \defgroup sptlrpc sptlrpc
50 struct ptlrpc_request;
51 struct ptlrpc_reply_state;
52 struct ptlrpc_bulk_desc;
62 struct ptlrpc_sec_policy;
63 struct ptlrpc_sec_cops;
64 struct ptlrpc_sec_sops;
66 struct ptlrpc_svc_ctx;
67 struct ptlrpc_cli_ctx;
68 struct ptlrpc_ctx_ops;
71 * \addtogroup flavor flavor
73 * RPC flavor is represented by a 32 bits integer. Currently the high 12 bits
74 * are unused, must be set to 0 for future expansion.
76 * ------------------------------------------------------------------------
77 * | 4b (bulk svc) | 4b (bulk type) | 4b (svc) | 4b (mech) | 4b (policy) |
78 * ------------------------------------------------------------------------
88 SPTLRPC_POLICY_NULL = 0,
89 SPTLRPC_POLICY_PLAIN = 1,
90 SPTLRPC_POLICY_GSS = 2,
94 enum sptlrpc_mech_null {
95 SPTLRPC_MECH_NULL = 0,
96 SPTLRPC_MECH_NULL_MAX,
99 enum sptlrpc_mech_plain {
100 SPTLRPC_MECH_PLAIN = 0,
101 SPTLRPC_MECH_PLAIN_MAX,
104 enum sptlrpc_mech_gss {
105 SPTLRPC_MECH_GSS_NULL = 0,
106 SPTLRPC_MECH_GSS_KRB5 = 1,
107 SPTLRPC_MECH_GSS_SK = 2,
108 SPTLRPC_MECH_GSS_MAX,
111 enum sptlrpc_service_type {
112 SPTLRPC_SVC_NULL = 0, /**< no security */
113 SPTLRPC_SVC_AUTH = 1, /**< authentication only */
114 SPTLRPC_SVC_INTG = 2, /**< integrity */
115 SPTLRPC_SVC_PRIV = 3, /**< privacy */
119 enum sptlrpc_bulk_type {
120 SPTLRPC_BULK_DEFAULT = 0, /**< follow rpc flavor */
121 SPTLRPC_BULK_HASH = 1, /**< hash integrity */
125 enum sptlrpc_bulk_service {
126 SPTLRPC_BULK_SVC_NULL = 0, /**< no security */
127 SPTLRPC_BULK_SVC_AUTH = 1, /**< authentication only */
128 SPTLRPC_BULK_SVC_INTG = 2, /**< integrity */
129 SPTLRPC_BULK_SVC_PRIV = 3, /**< privacy */
130 SPTLRPC_BULK_SVC_MAX,
134 * compose/extract macros
/*
 * Bit offsets of the five 4-bit subfields packed into a 32-bit RPC
 * flavor (see the layout diagram above):
 *   | 4b bulk svc | 4b bulk type | 4b svc | 4b mech | 4b policy |
 * The remaining high 12 bits are reserved and must be zero.
 */
136 #define FLVR_POLICY_OFFSET (0)
137 #define FLVR_MECH_OFFSET (4)
138 #define FLVR_SVC_OFFSET (8)
139 #define FLVR_BULK_TYPE_OFFSET (12)
140 #define FLVR_BULK_SVC_OFFSET (16)
/*
 * Compose a 32-bit wire flavor from its five 4-bit components.
 * NOTE: arguments are shifted but not masked, so each value must
 * already fit in 4 bits or it will corrupt neighbouring fields.
 */
142 #define MAKE_FLVR(policy, mech, svc, btype, bsvc) \
143 (((__u32)(policy) << FLVR_POLICY_OFFSET) | \
144 ((__u32)(mech) << FLVR_MECH_OFFSET) | \
145 ((__u32)(svc) << FLVR_SVC_OFFSET) | \
146 ((__u32)(btype) << FLVR_BULK_TYPE_OFFSET) | \
147 ((__u32)(bsvc) << FLVR_BULK_SVC_OFFSET))
/*
 * Extract one 4-bit subfield (policy / mech / svc / bulk type /
 * bulk svc) from a wire flavor.
 */
152 #define SPTLRPC_FLVR_POLICY(flavor) \
153 ((((__u32)(flavor)) >> FLVR_POLICY_OFFSET) & 0xF)
154 #define SPTLRPC_FLVR_MECH(flavor) \
155 ((((__u32)(flavor)) >> FLVR_MECH_OFFSET) & 0xF)
156 #define SPTLRPC_FLVR_SVC(flavor) \
157 ((((__u32)(flavor)) >> FLVR_SVC_OFFSET) & 0xF)
158 #define SPTLRPC_FLVR_BULK_TYPE(flavor) \
159 ((((__u32)(flavor)) >> FLVR_BULK_TYPE_OFFSET) & 0xF)
160 #define SPTLRPC_FLVR_BULK_SVC(flavor) \
161 ((((__u32)(flavor)) >> FLVR_BULK_SVC_OFFSET) & 0xF)
/*
 * BASE keeps the bulk-independent low 12 bits (policy + mech + svc);
 * BASE_SUB additionally drops the policy, leaving mech + svc only.
 */
163 #define SPTLRPC_FLVR_BASE(flavor) \
164 ((((__u32)(flavor)) >> FLVR_POLICY_OFFSET) & 0xFFF)
165 #define SPTLRPC_FLVR_BASE_SUB(flavor) \
166 ((((__u32)(flavor)) >> FLVR_MECH_OFFSET) & 0xFF)
171 #define MAKE_BASE_SUBFLVR(mech, svc) \
173 ((__u32)(svc) << (FLVR_SVC_OFFSET - FLVR_MECH_OFFSET)))
175 #define SPTLRPC_SUBFLVR_GSSNULL \
176 MAKE_BASE_SUBFLVR(SPTLRPC_MECH_GSS_NULL, SPTLRPC_SVC_NULL)
177 #define SPTLRPC_SUBFLVR_KRB5N \
178 MAKE_BASE_SUBFLVR(SPTLRPC_MECH_GSS_KRB5, SPTLRPC_SVC_NULL)
179 #define SPTLRPC_SUBFLVR_KRB5A \
180 MAKE_BASE_SUBFLVR(SPTLRPC_MECH_GSS_KRB5, SPTLRPC_SVC_AUTH)
181 #define SPTLRPC_SUBFLVR_KRB5I \
182 MAKE_BASE_SUBFLVR(SPTLRPC_MECH_GSS_KRB5, SPTLRPC_SVC_INTG)
183 #define SPTLRPC_SUBFLVR_KRB5P \
184 MAKE_BASE_SUBFLVR(SPTLRPC_MECH_GSS_KRB5, SPTLRPC_SVC_PRIV)
185 #define SPTLRPC_SUBFLVR_SKN \
186 MAKE_BASE_SUBFLVR(SPTLRPC_MECH_GSS_SK, SPTLRPC_SVC_NULL)
187 #define SPTLRPC_SUBFLVR_SKA \
188 MAKE_BASE_SUBFLVR(SPTLRPC_MECH_GSS_SK, SPTLRPC_SVC_AUTH)
189 #define SPTLRPC_SUBFLVR_SKI \
190 MAKE_BASE_SUBFLVR(SPTLRPC_MECH_GSS_SK, SPTLRPC_SVC_INTG)
191 #define SPTLRPC_SUBFLVR_SKPI \
192 MAKE_BASE_SUBFLVR(SPTLRPC_MECH_GSS_SK, SPTLRPC_SVC_PRIV)
197 #define SPTLRPC_FLVR_NULL \
198 MAKE_FLVR(SPTLRPC_POLICY_NULL, \
201 SPTLRPC_BULK_DEFAULT, \
202 SPTLRPC_BULK_SVC_NULL)
203 #define SPTLRPC_FLVR_PLAIN \
204 MAKE_FLVR(SPTLRPC_POLICY_PLAIN, \
205 SPTLRPC_MECH_PLAIN, \
208 SPTLRPC_BULK_SVC_INTG)
209 #define SPTLRPC_FLVR_GSSNULL \
210 MAKE_FLVR(SPTLRPC_POLICY_GSS, \
211 SPTLRPC_MECH_GSS_NULL, \
213 SPTLRPC_BULK_DEFAULT, \
214 SPTLRPC_BULK_SVC_NULL)
215 #define SPTLRPC_FLVR_KRB5N \
216 MAKE_FLVR(SPTLRPC_POLICY_GSS, \
217 SPTLRPC_MECH_GSS_KRB5, \
219 SPTLRPC_BULK_DEFAULT, \
220 SPTLRPC_BULK_SVC_NULL)
221 #define SPTLRPC_FLVR_KRB5A \
222 MAKE_FLVR(SPTLRPC_POLICY_GSS, \
223 SPTLRPC_MECH_GSS_KRB5, \
225 SPTLRPC_BULK_DEFAULT, \
226 SPTLRPC_BULK_SVC_NULL)
227 #define SPTLRPC_FLVR_KRB5I \
228 MAKE_FLVR(SPTLRPC_POLICY_GSS, \
229 SPTLRPC_MECH_GSS_KRB5, \
231 SPTLRPC_BULK_DEFAULT, \
232 SPTLRPC_BULK_SVC_INTG)
233 #define SPTLRPC_FLVR_KRB5P \
234 MAKE_FLVR(SPTLRPC_POLICY_GSS, \
235 SPTLRPC_MECH_GSS_KRB5, \
237 SPTLRPC_BULK_DEFAULT, \
238 SPTLRPC_BULK_SVC_PRIV)
239 #define SPTLRPC_FLVR_SKN \
240 MAKE_FLVR(SPTLRPC_POLICY_GSS, \
241 SPTLRPC_MECH_GSS_SK, \
243 SPTLRPC_BULK_DEFAULT, \
244 SPTLRPC_BULK_SVC_NULL)
245 #define SPTLRPC_FLVR_SKA \
246 MAKE_FLVR(SPTLRPC_POLICY_GSS, \
247 SPTLRPC_MECH_GSS_SK, \
249 SPTLRPC_BULK_DEFAULT, \
250 SPTLRPC_BULK_SVC_NULL)
251 #define SPTLRPC_FLVR_SKI \
252 MAKE_FLVR(SPTLRPC_POLICY_GSS, \
253 SPTLRPC_MECH_GSS_SK, \
255 SPTLRPC_BULK_DEFAULT, \
256 SPTLRPC_BULK_SVC_INTG)
257 #define SPTLRPC_FLVR_SKPI \
258 MAKE_FLVR(SPTLRPC_POLICY_GSS, \
259 SPTLRPC_MECH_GSS_SK, \
261 SPTLRPC_BULK_DEFAULT, \
262 SPTLRPC_BULK_SVC_PRIV)
/* Flavor used when no explicit configuration applies. */
264 #define SPTLRPC_FLVR_DEFAULT SPTLRPC_FLVR_NULL
/* Sentinel values; neither is ever a valid on-wire flavor. */
266 #define SPTLRPC_FLVR_INVALID ((__u32) 0xFFFFFFFF)
267 #define SPTLRPC_FLVR_ANY ((__u32) 0xFFF00000)
/*
 * Extract the useful part from a wire flavor: keep the low 20 bits
 * (all five subfields) and mask off the reserved high 12 bits.
 */
272 #define WIRE_FLVR(wflvr) (((__u32) (wflvr)) & 0x000FFFFF)
276 static inline void flvr_set_svc(__u32 *flvr, __u32 svc)
278 LASSERT(svc < SPTLRPC_SVC_MAX);
279 *flvr = MAKE_FLVR(SPTLRPC_FLVR_POLICY(*flvr),
280 SPTLRPC_FLVR_MECH(*flvr),
282 SPTLRPC_FLVR_BULK_TYPE(*flvr),
283 SPTLRPC_FLVR_BULK_SVC(*flvr));
286 static inline void flvr_set_bulk_svc(__u32 *flvr, __u32 svc)
288 LASSERT(svc < SPTLRPC_BULK_SVC_MAX);
289 *flvr = MAKE_FLVR(SPTLRPC_FLVR_POLICY(*flvr),
290 SPTLRPC_FLVR_MECH(*flvr),
291 SPTLRPC_FLVR_SVC(*flvr),
292 SPTLRPC_FLVR_BULK_TYPE(*flvr),
296 struct bulk_spec_hash {
301 * Full description of flavors being used on a ptlrpc connection, include
302 * both regular RPC and bulk transfer parts.
304 struct sptlrpc_flavor {
306 * wire flavor, should be renamed to sf_wire.
310 * general flags of PTLRPC_SEC_FL_*
314 * rpc flavor specification
317 /* nothing for now */
320 * bulk flavor specification
323 struct bulk_spec_hash hash;
328 * identify the RPC is generated from what part of Lustre. It's encoded into
329 * RPC requests and to be checked by ptlrpc service.
331 enum lustre_sec_part {
340 const char *sptlrpc_part2name(enum lustre_sec_part sp);
341 enum lustre_sec_part sptlrpc_target_sec_part(struct obd_device *obd);
344 * A rule specifies a flavor to be used by a ptlrpc connection between
347 struct sptlrpc_rule {
348 __u32 sr_netid; /* LNET network ID */
349 __u8 sr_from; /* sec_part */
350 __u8 sr_to; /* sec_part */
352 struct sptlrpc_flavor sr_flvr;
356 * A set of rules in memory.
358 * Rules are generated and stored on MGS, and propagated to MDT, OST,
359 * and client when needed.
361 struct sptlrpc_rule_set {
364 struct sptlrpc_rule *srs_rules;
/* Parse a textual flavor specification string into \a flvr. */
367 int sptlrpc_parse_flavor(const char *str, struct sptlrpc_flavor *flvr);
/* Query whether \a flvr specifies any bulk security transform. */
368 int sptlrpc_flavor_has_bulk(struct sptlrpc_flavor *flvr);
370 static inline void sptlrpc_rule_set_init(struct sptlrpc_rule_set *set)
372 memset(set, 0, sizeof(*set));
375 void sptlrpc_rule_set_free(struct sptlrpc_rule_set *set);
376 int sptlrpc_rule_set_expand(struct sptlrpc_rule_set *set);
377 int sptlrpc_rule_set_merge(struct sptlrpc_rule_set *set,
378 struct sptlrpc_rule *rule);
379 int sptlrpc_rule_set_choose(struct sptlrpc_rule_set *rset,
380 enum lustre_sec_part from,
381 enum lustre_sec_part to,
383 struct sptlrpc_flavor *sf);
384 void sptlrpc_rule_set_dump(struct sptlrpc_rule_set *set);
386 int sptlrpc_process_config(struct lustre_cfg *lcfg);
387 void sptlrpc_conf_log_start(const char *logname);
388 void sptlrpc_conf_log_stop(const char *logname);
389 void sptlrpc_conf_log_update_begin(const char *logname);
390 void sptlrpc_conf_log_update_end(const char *logname);
391 void sptlrpc_conf_client_adapt(struct obd_device *obd);
392 int sptlrpc_conf_target_get_rules(struct obd_device *obd,
393 struct sptlrpc_rule_set *rset,
395 void sptlrpc_target_choose_flavor(struct sptlrpc_rule_set *rset,
396 enum lustre_sec_part from,
398 struct sptlrpc_flavor *flavor);
400 /* The maximum length of security payload. 1024 is enough for Kerberos 5,
401 * and should be enough for other future mechanisms but not sure.
402 * Only used by pre-allocated request/reply pool.
404 #define SPTLRPC_MAX_PAYLOAD (1024)
412 struct ptlrpc_ctx_ops {
414 * To determine whether it's suitable to use the \a ctx for \a vcred.
416 int (*match) (struct ptlrpc_cli_ctx *ctx,
417 struct vfs_cred *vcred);
420 * To bring the \a ctx uptodate.
422 int (*refresh) (struct ptlrpc_cli_ctx *ctx);
425 * Validate the \a ctx.
427 int (*validate) (struct ptlrpc_cli_ctx *ctx);
430 * Force the \a ctx to die.
432 void (*die) (struct ptlrpc_cli_ctx *ctx,
434 int (*display) (struct ptlrpc_cli_ctx *ctx,
435 char *buf, int bufsize);
438 * Sign the request message using \a ctx.
440 * \pre req->rq_reqmsg point to request message.
441 * \pre req->rq_reqlen is the request message length.
442 * \post req->rq_reqbuf point to request message with signature.
443 * \post req->rq_reqdata_len is set to the final request message size.
445 * \see null_ctx_sign(), plain_ctx_sign(), gss_cli_ctx_sign().
447 int (*sign) (struct ptlrpc_cli_ctx *ctx,
448 struct ptlrpc_request *req);
451 * Verify the reply message using \a ctx.
453 * \pre req->rq_repdata point to reply message with signature.
454 * \pre req->rq_repdata_len is the total reply message length.
455 * \post req->rq_repmsg point to reply message without signature.
456 * \post req->rq_replen is the reply message length.
458 * \see null_ctx_verify(), plain_ctx_verify(), gss_cli_ctx_verify().
460 int (*verify) (struct ptlrpc_cli_ctx *ctx,
461 struct ptlrpc_request *req);
464 * Encrypt the request message using \a ctx.
466 * \pre req->rq_reqmsg point to request message in clear text.
467 * \pre req->rq_reqlen is the request message length.
468 * \post req->rq_reqbuf point to request message.
469 * \post req->rq_reqdata_len is set to the final request message size.
471 * \see gss_cli_ctx_seal().
473 int (*seal) (struct ptlrpc_cli_ctx *ctx,
474 struct ptlrpc_request *req);
477 * Decrypt the reply message using \a ctx.
479 * \pre req->rq_repdata point to encrypted reply message.
480 * \pre req->rq_repdata_len is the total cipher text length.
481 * \post req->rq_repmsg point to reply message in clear text.
482 * \post req->rq_replen is the reply message length in clear text.
484 * \see gss_cli_ctx_unseal().
486 int (*unseal) (struct ptlrpc_cli_ctx *ctx,
487 struct ptlrpc_request *req);
490 * Wrap bulk request data. This is called before wrapping RPC
493 * \pre bulk buffer is described by desc->bd_iov and
494 * desc->bd_iov_count. note for read it's just buffer, no data
495 * need to be sent; for write it contains data in clear text.
496 * \post when necessary, ptlrpc_bulk_sec_desc was properly prepared
497 * (usually inside of RPC request message).
498 * - encryption: cipher text bulk buffer is described by
499 * desc->bd_enc_iov and desc->bd_iov_count (currently assume iov
500 * count remains the same).
501 * - otherwise: bulk buffer is still desc->bd_iov and
502 * desc->bd_iov_count.
504 * \return 0: success.
505 * \return -ev: error code.
507 * \see plain_cli_wrap_bulk(), gss_cli_ctx_wrap_bulk().
509 int (*wrap_bulk) (struct ptlrpc_cli_ctx *ctx,
510 struct ptlrpc_request *req,
511 struct ptlrpc_bulk_desc *desc);
514 * Unwrap bulk reply data. This is called after wrapping RPC
517 * \pre bulk buffer is described by desc->bd_iov/desc->bd_enc_iov and
518 * desc->bd_iov_count, according to wrap_bulk().
519 * \post final bulk data in clear text is placed in buffer described
520 * by desc->bd_iov and desc->bd_iov_count.
521 * \return +ve nob of actual bulk data in clear text.
522 * \return -ve error code.
524 * \see plain_cli_unwrap_bulk(), gss_cli_ctx_unwrap_bulk().
526 int (*unwrap_bulk) (struct ptlrpc_cli_ctx *ctx,
527 struct ptlrpc_request *req,
528 struct ptlrpc_bulk_desc *desc);
/*
 * Client context (ptlrpc_cli_ctx) state bits, kept in cc_flags.
 * Bits 0-3 track the context lifecycle; bits 8-9 are auxiliary
 * properties.
 */
531 #define PTLRPC_CTX_NEW_BIT (0) /* newly created */
532 #define PTLRPC_CTX_UPTODATE_BIT (1) /* uptodate */
533 #define PTLRPC_CTX_DEAD_BIT (2) /* mark expired gracefully */
534 #define PTLRPC_CTX_ERROR_BIT (3) /* fatal error (refresh, etc.) */
535 #define PTLRPC_CTX_CACHED_BIT (8) /* in ctx cache (hash etc.) */
536 #define PTLRPC_CTX_ETERNAL_BIT (9) /* always valid */
/* Mask forms of the bit numbers above, for testing cc_flags. */
538 #define PTLRPC_CTX_NEW (1 << PTLRPC_CTX_NEW_BIT)
539 #define PTLRPC_CTX_UPTODATE (1 << PTLRPC_CTX_UPTODATE_BIT)
540 #define PTLRPC_CTX_DEAD (1 << PTLRPC_CTX_DEAD_BIT)
541 #define PTLRPC_CTX_ERROR (1 << PTLRPC_CTX_ERROR_BIT)
542 #define PTLRPC_CTX_CACHED (1 << PTLRPC_CTX_CACHED_BIT)
543 #define PTLRPC_CTX_ETERNAL (1 << PTLRPC_CTX_ETERNAL_BIT)
545 #define PTLRPC_CTX_STATUS_MASK (PTLRPC_CTX_NEW_BIT | \
546 PTLRPC_CTX_UPTODATE | \
550 struct ptlrpc_cli_ctx {
551 struct hlist_node cc_cache; /* linked into ctx cache */
552 atomic_t cc_refcount;
553 struct ptlrpc_sec *cc_sec;
554 struct ptlrpc_ctx_ops *cc_ops;
555 cfs_time_t cc_expire; /* in seconds */
556 unsigned int cc_early_expire:1;
557 unsigned long cc_flags;
558 struct vfs_cred cc_vcred;
560 struct list_head cc_req_list; /* waiting reqs linked here */
561 struct list_head cc_gc_chain; /* linked to gc chain */
565 * client side policy operation vector.
567 struct ptlrpc_sec_cops {
569 * Given an \a imp, create and initialize a ptlrpc_sec structure.
570 * \param ctx service context:
571 * - regular import: \a ctx should be NULL;
572 * - reverse import: \a ctx is obtained from incoming request.
573 * \param flavor specify what flavor to use.
575 * When necessary, policy module is responsible for taking reference
578 * \see null_create_sec(), plain_create_sec(), gss_sec_create_kr().
580 struct ptlrpc_sec * (*create_sec) (struct obd_import *imp,
581 struct ptlrpc_svc_ctx *ctx,
582 struct sptlrpc_flavor *flavor);
585 * Destructor of ptlrpc_sec. When called, refcount has been dropped
586 * to 0 and all contexts has been destroyed.
588 * \see null_destroy_sec(), plain_destroy_sec(), gss_sec_destroy_kr().
590 void (*destroy_sec) (struct ptlrpc_sec *sec);
593 * Notify that this ptlrpc_sec is going to die. Optionally, policy
594 * module is supposed to set sec->ps_dying and whatever necessary
597 * \see plain_kill_sec(), gss_sec_kill().
599 void (*kill_sec) (struct ptlrpc_sec *sec);
602 * Given \a vcred, lookup and/or create its context. The policy module
603 * is supposed to maintain its own context cache.
604 * XXX currently \a create and \a remove_dead is always 1, perhaps
605 * should be removed completely.
607 * \see null_lookup_ctx(), plain_lookup_ctx(), gss_sec_lookup_ctx_kr().
609 struct ptlrpc_cli_ctx * (*lookup_ctx) (struct ptlrpc_sec *sec,
610 struct vfs_cred *vcred,
615 * Called then the reference of \a ctx dropped to 0. The policy module
616 * is supposed to destroy this context or whatever else according to
617 * its cache maintenance mechanism.
619 * \param sync if zero, we shouldn't wait for the context being
620 * destroyed completely.
622 * \see plain_release_ctx(), gss_sec_release_ctx_kr().
624 void (*release_ctx) (struct ptlrpc_sec *sec,
625 struct ptlrpc_cli_ctx *ctx,
629 * Flush the context cache.
631 * \param uid context of which user, -1 means all contexts.
632 * \param grace if zero, the PTLRPC_CTX_UPTODATE_BIT of affected
633 * contexts should be cleared immediately.
634 * \param force if zero, only idle contexts will be flushed.
636 * \see plain_flush_ctx_cache(), gss_sec_flush_ctx_cache_kr().
638 int (*flush_ctx_cache)
639 (struct ptlrpc_sec *sec,
645 * Called periodically by garbage collector to remove dead contexts
648 * \see gss_sec_gc_ctx_kr().
650 void (*gc_ctx) (struct ptlrpc_sec *sec);
653 * Given an context \a ctx, install a corresponding reverse service
654 * context on client side.
655 * XXX currently it's only used by GSS module, maybe we should remove
656 * this from general API.
658 int (*install_rctx)(struct obd_import *imp,
659 struct ptlrpc_sec *sec,
660 struct ptlrpc_cli_ctx *ctx);
663 * To allocate request buffer for \a req.
665 * \pre req->rq_reqmsg == NULL.
666 * \pre req->rq_reqbuf == NULL, otherwise it must be pre-allocated,
667 * we are not supposed to free it.
668 * \post if success, req->rq_reqmsg point to a buffer with size
669 * at least \a lustre_msg_size.
671 * \see null_alloc_reqbuf(), plain_alloc_reqbuf(), gss_alloc_reqbuf().
673 int (*alloc_reqbuf)(struct ptlrpc_sec *sec,
674 struct ptlrpc_request *req,
675 int lustre_msg_size);
678 * To free request buffer for \a req.
680 * \pre req->rq_reqbuf != NULL.
682 * \see null_free_reqbuf(), plain_free_reqbuf(), gss_free_reqbuf().
684 void (*free_reqbuf) (struct ptlrpc_sec *sec,
685 struct ptlrpc_request *req);
688 * To allocate reply buffer for \a req.
690 * \pre req->rq_repbuf == NULL.
691 * \post if success, req->rq_repbuf point to a buffer with size
692 * req->rq_repbuf_len, the size should be large enough to receive
693 * reply which be transformed from \a lustre_msg_size of clear text.
695 * \see null_alloc_repbuf(), plain_alloc_repbuf(), gss_alloc_repbuf().
697 int (*alloc_repbuf)(struct ptlrpc_sec *sec,
698 struct ptlrpc_request *req,
699 int lustre_msg_size);
702 * To free reply buffer for \a req.
704 * \pre req->rq_repbuf != NULL.
705 * \post req->rq_repbuf == NULL.
706 * \post req->rq_repbuf_len == 0.
708 * \see null_free_repbuf(), plain_free_repbuf(), gss_free_repbuf().
710 void (*free_repbuf) (struct ptlrpc_sec *sec,
711 struct ptlrpc_request *req);
714 * To expand the request buffer of \a req, thus the \a segment in
715 * the request message pointed by req->rq_reqmsg can accommodate
716 * at least \a newsize of data.
718 * \pre req->rq_reqmsg->lm_buflens[segment] < newsize.
720 * \see null_enlarge_reqbuf(), plain_enlarge_reqbuf(),
721 * gss_enlarge_reqbuf().
723 int (*enlarge_reqbuf)
724 (struct ptlrpc_sec *sec,
725 struct ptlrpc_request *req,
726 int segment, int newsize);
730 int (*display) (struct ptlrpc_sec *sec,
731 struct seq_file *seq);
735 * server side policy operation vector.
737 struct ptlrpc_sec_sops {
739 * verify an incoming request.
741 * \pre request message is pointed by req->rq_reqbuf, size is
742 * req->rq_reqdata_len; and the message has been unpacked to
745 * \retval SECSVC_OK success, req->rq_reqmsg point to request message
746 * in clear text, size is req->rq_reqlen; req->rq_svc_ctx is set;
747 * req->rq_sp_from is decoded from request.
748 * \retval SECSVC_COMPLETE success, the request has been fully
749 * processed, and reply message has been prepared; req->rq_sp_from is
750 * decoded from request.
751 * \retval SECSVC_DROP failed, this request should be dropped.
753 * \see null_accept(), plain_accept(), gss_svc_accept_kr().
755 int (*accept) (struct ptlrpc_request *req);
758 * Perform security transformation upon reply message.
760 * \pre reply message is pointed by req->rq_reply_state->rs_msg, size
762 * \post req->rs_repdata_len is the final message size.
763 * \post req->rq_reply_off is set.
765 * \see null_authorize(), plain_authorize(), gss_svc_authorize().
767 int (*authorize) (struct ptlrpc_request *req);
770 * Invalidate server context \a ctx.
772 * \see gss_svc_invalidate_ctx().
774 void (*invalidate_ctx)
775 (struct ptlrpc_svc_ctx *ctx);
778 * Allocate a ptlrpc_reply_state.
780 * \param msgsize size of the reply message in clear text.
781 * \pre if req->rq_reply_state != NULL, then it's pre-allocated, we
782 * should simply use it; otherwise we'll be responsible for allocating
784 * \post req->rq_reply_state != NULL;
785 * \post req->rq_reply_state->rs_msg != NULL;
787 * \see null_alloc_rs(), plain_alloc_rs(), gss_svc_alloc_rs().
789 int (*alloc_rs) (struct ptlrpc_request *req,
793 * Free a ptlrpc_reply_state.
795 void (*free_rs) (struct ptlrpc_reply_state *rs);
798 * Release the server context \a ctx.
800 * \see gss_svc_free_ctx().
802 void (*free_ctx) (struct ptlrpc_svc_ctx *ctx);
805 * Install a reverse context based on the server context \a ctx.
807 * \see gss_svc_install_rctx_kr().
809 int (*install_rctx)(struct obd_import *imp,
810 struct ptlrpc_svc_ctx *ctx);
813 * Prepare buffer for incoming bulk write.
815 * \pre desc->bd_iov and desc->bd_iov_count describe the buffer
816 * intended to receive the write.
818 * \see gss_svc_prep_bulk().
820 int (*prep_bulk) (struct ptlrpc_request *req,
821 struct ptlrpc_bulk_desc *desc);
824 * Unwrap the bulk write data.
826 * \see plain_svc_unwrap_bulk(), gss_svc_unwrap_bulk().
828 int (*unwrap_bulk) (struct ptlrpc_request *req,
829 struct ptlrpc_bulk_desc *desc);
832 * Wrap the bulk read data.
834 * \see plain_svc_wrap_bulk(), gss_svc_wrap_bulk().
836 int (*wrap_bulk) (struct ptlrpc_request *req,
837 struct ptlrpc_bulk_desc *desc);
840 struct ptlrpc_sec_policy {
841 struct module *sp_owner;
843 __u16 sp_policy; /* policy number */
844 struct ptlrpc_sec_cops *sp_cops; /* client ops */
845 struct ptlrpc_sec_sops *sp_sops; /* server ops */
/*
 * General security flags (PTLRPC_SEC_FL_*), used e.g. in
 * sptlrpc_flavor.sf_flags — see sec_is_reverse()/sec_is_rootonly()
 * below.
 */
848 #define PTLRPC_SEC_FL_REVERSE 0x0001 /* reverse sec */
849 #define PTLRPC_SEC_FL_ROOTONLY 0x0002 /* treat everyone as root */
850 #define PTLRPC_SEC_FL_UDESC 0x0004 /* ship udesc */
851 #define PTLRPC_SEC_FL_BULK 0x0008 /* intensive bulk i/o expected */
852 #define PTLRPC_SEC_FL_PAG 0x0010 /* PAG mode */
855 * The ptlrpc_sec represents the client side ptlrpc security facilities,
856 * each obd_import (both regular and reverse import) must associate with
859 * \see sptlrpc_import_sec_adapt().
862 struct ptlrpc_sec_policy *ps_policy;
863 atomic_t ps_refcount;
864 /** statistic only */
866 /** unique identifier */
868 struct sptlrpc_flavor ps_flvr;
869 enum lustre_sec_part ps_part;
870 /** after set, no more new context will be created */
871 unsigned int ps_dying:1;
873 struct obd_import *ps_import;
879 struct list_head ps_gc_list;
880 cfs_time_t ps_gc_interval; /* in seconds */
881 cfs_time_t ps_gc_next; /* in seconds */
884 static inline int flvr_is_rootonly(__u32 flavor)
886 return (SPTLRPC_FLVR_POLICY(flavor) == SPTLRPC_POLICY_GSS &&
887 (SPTLRPC_FLVR_MECH(flavor) == SPTLRPC_MECH_GSS_NULL ||
888 SPTLRPC_FLVR_MECH(flavor) == SPTLRPC_MECH_GSS_SK));
891 static inline int flvr_allows_user_desc(__u32 flavor)
893 return (SPTLRPC_FLVR_POLICY(flavor) == SPTLRPC_POLICY_GSS &&
894 (SPTLRPC_FLVR_MECH(flavor) == SPTLRPC_MECH_GSS_NULL ||
895 SPTLRPC_FLVR_MECH(flavor) == SPTLRPC_MECH_GSS_SK));
898 static inline int sec_is_reverse(struct ptlrpc_sec *sec)
900 return (sec->ps_flvr.sf_flags & PTLRPC_SEC_FL_REVERSE);
903 static inline int sec_is_rootonly(struct ptlrpc_sec *sec)
905 return (sec->ps_flvr.sf_flags & PTLRPC_SEC_FL_ROOTONLY);
909 struct ptlrpc_svc_ctx {
910 atomic_t sc_refcount;
911 struct ptlrpc_sec_policy *sc_policy;
915 * user identity descriptor
917 #define LUSTRE_MAX_GROUPS (128)
919 struct ptlrpc_user_desc {
932 enum sptlrpc_bulk_hash_alg {
933 BULK_HASH_ALG_NULL = 0,
934 BULK_HASH_ALG_ADLER32,
938 BULK_HASH_ALG_SHA256,
939 BULK_HASH_ALG_SHA384,
940 BULK_HASH_ALG_SHA512,
944 const char * sptlrpc_get_hash_name(__u8 hash_alg);
945 __u8 sptlrpc_get_hash_alg(const char *algname);
951 struct ptlrpc_bulk_sec_desc {
952 __u8 bsd_version; /* 0 */
953 __u8 bsd_type; /* SPTLRPC_BULK_XXX */
954 __u8 bsd_svc; /* SPTLRPC_BULK_SVC_XXXX */
955 __u8 bsd_flags; /* flags */
956 __u32 bsd_nob; /* nob of bulk data */
957 __u8 bsd_data[0]; /* policy-specific token */
964 struct proc_dir_entry;
965 extern struct proc_dir_entry *sptlrpc_proc_root;
968 * round size up to next power of 2, for slab allocation.
969 * @size must be sane (can't overflow after round up)
971 static inline int size_roundup_power2(int size)
984 * internal support libraries
986 void _sptlrpc_enlarge_msg_inplace(struct lustre_msg *msg,
987 int segment, int newsize);
/* Register/unregister a security policy module with the sptlrpc core. */
992 int sptlrpc_register_policy(struct ptlrpc_sec_policy *policy);
993 int sptlrpc_unregister_policy(struct ptlrpc_sec_policy *policy);
/*
 * Conversions between human-readable flavor names and wire flavors.
 * The *2name* variants that take a buffer format into caller-supplied
 * storage and return it.
 */
995 __u32 sptlrpc_name2flavor_base(const char *name);
996 const char *sptlrpc_flavor2name_base(__u32 flvr);
997 char *sptlrpc_flavor2name_bulk(struct sptlrpc_flavor *sf,
998 char *buf, int bufsize);
999 char *sptlrpc_flavor2name(struct sptlrpc_flavor *sf, char *buf, int bufsize);
1000 char *sptlrpc_secflags2str(__u32 flags, char *buf, int bufsize);
1002 static inline struct ptlrpc_sec_policy *
1003 sptlrpc_policy_get(struct ptlrpc_sec_policy *policy)
1005 __module_get(policy->sp_owner);
1010 sptlrpc_policy_put(struct ptlrpc_sec_policy *policy)
1012 module_put(policy->sp_owner);
1019 unsigned long cli_ctx_status(struct ptlrpc_cli_ctx *ctx)
1021 return (ctx->cc_flags & PTLRPC_CTX_STATUS_MASK);
1025 int cli_ctx_is_ready(struct ptlrpc_cli_ctx *ctx)
1027 return (cli_ctx_status(ctx) == PTLRPC_CTX_UPTODATE);
1031 int cli_ctx_is_refreshed(struct ptlrpc_cli_ctx *ctx)
1033 return (cli_ctx_status(ctx) != 0);
1037 int cli_ctx_is_uptodate(struct ptlrpc_cli_ctx *ctx)
1039 return ((ctx->cc_flags & PTLRPC_CTX_UPTODATE) != 0);
1043 int cli_ctx_is_error(struct ptlrpc_cli_ctx *ctx)
1045 return ((ctx->cc_flags & PTLRPC_CTX_ERROR) != 0);
1049 int cli_ctx_is_dead(struct ptlrpc_cli_ctx *ctx)
1051 return ((ctx->cc_flags & (PTLRPC_CTX_DEAD | PTLRPC_CTX_ERROR)) != 0);
1055 int cli_ctx_is_eternal(struct ptlrpc_cli_ctx *ctx)
1057 return ((ctx->cc_flags & PTLRPC_CTX_ETERNAL) != 0);
1063 struct ptlrpc_sec *sptlrpc_sec_get(struct ptlrpc_sec *sec);
1064 void sptlrpc_sec_put(struct ptlrpc_sec *sec);
1067 * internal APIs which are only used by policy implementations
1069 int sptlrpc_get_next_secid(void);
1070 void sptlrpc_sec_destroy(struct ptlrpc_sec *sec);
1073 * exported client context api
1075 struct ptlrpc_cli_ctx *sptlrpc_cli_ctx_get(struct ptlrpc_cli_ctx *ctx);
1076 void sptlrpc_cli_ctx_put(struct ptlrpc_cli_ctx *ctx, int sync);
1077 void sptlrpc_cli_ctx_expire(struct ptlrpc_cli_ctx *ctx);
1078 void sptlrpc_cli_ctx_wakeup(struct ptlrpc_cli_ctx *ctx);
1079 int sptlrpc_cli_ctx_display(struct ptlrpc_cli_ctx *ctx, char *buf, int bufsize);
1082 * exported client context wrap/buffers
1084 int sptlrpc_cli_wrap_request(struct ptlrpc_request *req);
1085 int sptlrpc_cli_unwrap_reply(struct ptlrpc_request *req);
1086 int sptlrpc_cli_alloc_reqbuf(struct ptlrpc_request *req, int msgsize);
1087 void sptlrpc_cli_free_reqbuf(struct ptlrpc_request *req);
1088 int sptlrpc_cli_alloc_repbuf(struct ptlrpc_request *req, int msgsize);
1089 void sptlrpc_cli_free_repbuf(struct ptlrpc_request *req);
1090 int sptlrpc_cli_enlarge_reqbuf(struct ptlrpc_request *req,
1091 int segment, int newsize);
1092 int sptlrpc_cli_unwrap_early_reply(struct ptlrpc_request *req,
1093 struct ptlrpc_request **req_ret);
1094 void sptlrpc_cli_finish_early_reply(struct ptlrpc_request *early_req);
1096 void sptlrpc_request_out_callback(struct ptlrpc_request *req);
1099 * exported higher interface of import & request
1101 int sptlrpc_import_sec_adapt(struct obd_import *imp,
1102 struct ptlrpc_svc_ctx *ctx,
1103 struct sptlrpc_flavor *flvr);
1104 struct ptlrpc_sec *sptlrpc_import_sec_ref(struct obd_import *imp);
1105 void sptlrpc_import_sec_put(struct obd_import *imp);
1107 int sptlrpc_import_check_ctx(struct obd_import *imp);
1108 void sptlrpc_import_flush_root_ctx(struct obd_import *imp);
1109 void sptlrpc_import_flush_my_ctx(struct obd_import *imp);
1110 void sptlrpc_import_flush_all_ctx(struct obd_import *imp);
1111 int sptlrpc_req_get_ctx(struct ptlrpc_request *req);
1112 void sptlrpc_req_put_ctx(struct ptlrpc_request *req, int sync);
1113 int sptlrpc_req_refresh_ctx(struct ptlrpc_request *req, long timeout);
1114 int sptlrpc_req_replace_dead_ctx(struct ptlrpc_request *req);
1115 void sptlrpc_req_set_flavor(struct ptlrpc_request *req, int opcode);
1117 int sptlrpc_parse_rule(char *param, struct sptlrpc_rule *rule);
1120 void sptlrpc_gc_add_sec(struct ptlrpc_sec *sec);
1121 void sptlrpc_gc_del_sec(struct ptlrpc_sec *sec);
1122 void sptlrpc_gc_add_ctx(struct ptlrpc_cli_ctx *ctx);
1125 const char * sec2target_str(struct ptlrpc_sec *sec);
1126 int sptlrpc_lprocfs_cliobd_attach(struct obd_device *dev);
1131 enum secsvc_accept_res {
1137 int sptlrpc_svc_unwrap_request(struct ptlrpc_request *req);
1138 int sptlrpc_svc_alloc_rs(struct ptlrpc_request *req, int msglen);
1139 int sptlrpc_svc_wrap_reply(struct ptlrpc_request *req);
1140 void sptlrpc_svc_free_rs(struct ptlrpc_reply_state *rs);
1141 void sptlrpc_svc_ctx_addref(struct ptlrpc_request *req);
1142 void sptlrpc_svc_ctx_decref(struct ptlrpc_request *req);
1143 void sptlrpc_svc_ctx_invalidate(struct ptlrpc_request *req);
1145 int sptlrpc_target_export_check(struct obd_export *exp,
1146 struct ptlrpc_request *req);
1147 void sptlrpc_target_update_exp_flavor(struct obd_device *obd,
1148 struct sptlrpc_rule_set *rset);
1153 int sptlrpc_svc_install_rvs_ctx(struct obd_import *imp,
1154 struct ptlrpc_svc_ctx *ctx);
1155 int sptlrpc_cli_install_rvs_ctx(struct obd_import *imp,
1156 struct ptlrpc_cli_ctx *ctx);
1158 /* bulk security api */
/*
 * Encryption page pool used for bulk I/O; presumably add/del_user
 * let the pool size itself to its consumers — TODO confirm against
 * the pool implementation.
 */
1159 int sptlrpc_enc_pool_add_user(void);
1160 int sptlrpc_enc_pool_del_user(void);
/* Acquire/release encryption pages for a bulk descriptor. */
1161 int sptlrpc_enc_pool_get_pages(struct ptlrpc_bulk_desc *desc);
1162 void sptlrpc_enc_pool_put_pages(struct ptlrpc_bulk_desc *desc);
/* Pool occupancy queries. */
1163 int get_free_pages_in_pool(void);
1164 int pool_is_at_full_capacity(void);
1166 int sptlrpc_cli_wrap_bulk(struct ptlrpc_request *req,
1167 struct ptlrpc_bulk_desc *desc);
1168 int sptlrpc_cli_unwrap_bulk_read(struct ptlrpc_request *req,
1169 struct ptlrpc_bulk_desc *desc,
1171 int sptlrpc_cli_unwrap_bulk_write(struct ptlrpc_request *req,
1172 struct ptlrpc_bulk_desc *desc);
1173 #ifdef HAVE_SERVER_SUPPORT
1174 int sptlrpc_svc_prep_bulk(struct ptlrpc_request *req,
1175 struct ptlrpc_bulk_desc *desc);
1176 int sptlrpc_svc_wrap_bulk(struct ptlrpc_request *req,
1177 struct ptlrpc_bulk_desc *desc);
1178 int sptlrpc_svc_unwrap_bulk(struct ptlrpc_request *req,
1179 struct ptlrpc_bulk_desc *desc);
1182 /* bulk helpers (internal use only by policies) */
1183 int sptlrpc_get_bulk_checksum(struct ptlrpc_bulk_desc *desc, __u8 alg,
1184 void *buf, int buflen);
1186 int bulk_sec_desc_unpack(struct lustre_msg *msg, int offset, int swabbed);
1188 /* user descriptor helpers */
1189 static inline int sptlrpc_user_desc_size(int ngroups)
1191 return sizeof(struct ptlrpc_user_desc) + ngroups * sizeof(__u32);
1194 int sptlrpc_current_user_desc_size(void);
1195 int sptlrpc_pack_user_desc(struct lustre_msg *msg, int offset);
1196 int sptlrpc_unpack_user_desc(struct lustre_msg *req, int offset, int swabbed);
1199 #define CFS_CAP_CHOWN_MASK (1 << CFS_CAP_CHOWN)
1200 #define CFS_CAP_SYS_RESOURCE_MASK (1 << CFS_CAP_SYS_RESOURCE)
1204 #endif /* _LUSTRE_SEC_H_ */