1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
4 * Modified from NFSv4 project for Lustre
5 * Copyright 2004 - 2006, Cluster File Systems, Inc.
7 * Author: Eric Mei <ericm@clusterfs.com>
10 #ifndef __PTLRPC_GSS_GSS_INTERNAL_H_
11 #define __PTLRPC_GSS_GSS_INTERNAL_H_
13 #include <lustre_sec.h>
/*
 * Wire/local blob descriptors.  NOTE(review): the struct members are
 * elided in this chunk; member comments below are inferred from the
 * EMPTY initializers and should be confirmed against the full header.
 */
18 typedef struct netobj_s {
/* zero-length netobj */
23 #define NETOBJ_EMPTY ((netobj_t) { 0 })
/* local counted buffer -- presumably { len, data } per RAWOBJ_EMPTY below */
25 typedef struct rawobj_s {
/* empty rawobj: zero length, NULL data pointer */
30 #define RAWOBJ_EMPTY ((rawobj_t) { 0, NULL })
/* rawobj carved out of a caller-supplied buffer (members elided here) */
32 typedef struct rawobj_buf_s {
/*
 * rawobj_t helpers: allocation, comparison, duplication and
 * (de)serialization against __u32-based wire buffers.  The _local
 * variants presumably skip byte-swapping -- confirm in the
 * implementation file.
 */
39 int rawobj_alloc(rawobj_t *obj, char *buf, int len);
40 void rawobj_free(rawobj_t *obj);
41 int rawobj_equal(rawobj_t *a, rawobj_t *b);
42 int rawobj_dup(rawobj_t *dest, rawobj_t *src);
43 int rawobj_serialize(rawobj_t *obj, __u32 **buf, __u32 *buflen);
44 int rawobj_extract(rawobj_t *obj, __u32 **buf, __u32 *buflen);
45 int rawobj_extract_alloc(rawobj_t *obj, __u32 **buf, __u32 *buflen);
46 int rawobj_extract_local(rawobj_t *obj, __u32 **buf, __u32 *buflen);
47 int rawobj_extract_local_alloc(rawobj_t *obj, __u32 **buf, __u32 *buflen);
48 int rawobj_from_netobj(rawobj_t *rawobj, netobj_t *netobj);
49 int rawobj_from_netobj_alloc(rawobj_t *obj, netobj_t *netobj);
/* presumably copies reslen bytes from *buf into res and advances the
 * cursor (*buf, *buflen) -- confirm in the implementation */
51 int buffer_extract_bytes(const void **buf, __u32 *buflen,
52 void *res, __u32 reslen);
55 * several timeout values. for the client refresh upcall timeout we use
56 * the default from the pipefs implementation.
/* slack, in seconds, applied around obd_timeout */
58 #define __TIMEOUT_DELTA (10)
/* secinit RPC timeout: obd_timeout minus the delta, clamped so it
 * never drops below __TIMEOUT_DELTA itself */
60 #define GSS_SECINIT_RPC_TIMEOUT \
61 (obd_timeout < __TIMEOUT_DELTA ? \
62 __TIMEOUT_DELTA : obd_timeout - __TIMEOUT_DELTA)
64 #define GSS_SECFINI_RPC_TIMEOUT (__TIMEOUT_DELTA)
65 #define GSS_SECSVC_UPCALL_TIMEOUT (GSS_SECINIT_RPC_TIMEOUT)
/* context garbage-collection interval, in seconds */
70 #define GSS_GC_INTERVAL (60 * 60) /* 60 minutes */
/*
 * Round a context expiry down so the client starts refreshing
 * __TIMEOUT_DELTA seconds before the real expiry.
 * NOTE(review): parts of this body (the reverse-context branch result
 * and the final fallback return) are elided in this chunk.
 */
73 unsigned long gss_round_ctx_expiry(unsigned long expiry,
74 unsigned long sec_flags)
/* reverse contexts take a different path (continuation elided here) */
76 if (sec_flags & PTLRPC_SEC_FL_REVERSE)
/* only subtract the delta when the expiry is far enough in the future */
79 if (get_seconds() + __TIMEOUT_DELTA <= expiry)
80 return expiry - __TIMEOUT_DELTA;
85 /* we try to force the import to reconnect 20m earlier than the real expiry.
86 * kerberos 5 usually allows 5m of time skew, but that is adjustable,
87 * so if we set krb5 to allow > 20m time skew, there is a chance that the
88 * server's reverse ctx has expired while the client still hasn't started to
89 * refresh it -- which is BAD. So here we effectively put a limit on the
90 * environment of krb5 (or other authentication mechanism)
92 #define GSS_MAX_TIME_SKEW (20 * 60)
/*
 * Pick a reconnect time comfortably (skew + delta seconds) before the
 * real expiry.  NOTE(review): the while-loop body that shrinks 'nice'
 * is elided in this chunk -- presumably it halves nice until the
 * margin fits; confirm against the full source.
 */
95 unsigned long gss_round_imp_reconnect(unsigned long expiry)
97 unsigned long now = get_seconds();
98 unsigned long nice = GSS_MAX_TIME_SKEW + __TIMEOUT_DELTA;
/* shrink the margin until now + nice lands before the expiry */
100 while (nice && (now + nice >= expiry))
103 return (expiry - nice);
107 * Max encryption element in block cipher algorithms.
109 #define GSS_MAX_CIPHER_BLOCK (16)
112 * XXX must be kept in sync between the kernel and lgssd/lsvcgssd
114 #define GSSD_INTERFACE_VERSION (1)
/* on-wire protocol version carried in the gss header */
116 #define PTLRPC_GSS_VERSION (1)
/* gss procedure discriminator -- presumably carried in
 * gss_header.gh_proc; values mirror RPCSEC_GSS (RFC 2203) procs.
 * NOTE(review): the enums' closing braces are elided in this chunk. */
119 enum ptlrpc_gss_proc {
120 PTLRPC_GSS_PROC_DATA = 0,
121 PTLRPC_GSS_PROC_INIT = 1,
122 PTLRPC_GSS_PROC_CONTINUE_INIT = 2,
123 PTLRPC_GSS_PROC_DESTROY = 3,
124 PTLRPC_GSS_PROC_ERR = 4,
/* protection level applied to a message (none/integrity/privacy) */
127 enum ptlrpc_gss_svc {
128 PTLRPC_GSS_SVC_NONE = 1,
129 PTLRPC_GSS_SVC_INTEGRITY = 2,
130 PTLRPC_GSS_SVC_PRIVACY = 3,
/* target server type, as derived by import_to_gss_svc() below */
133 enum ptlrpc_gss_tgt {
134 LUSTRE_GSS_TGT_MDS = 0,
135 LUSTRE_GSS_TGT_OSS = 1,
/*
 * Map an import's obd type name to the GSS target enum:
 * MDC -> MDS, OSC -> OSS.  NOTE(review): the fallback for any other
 * obd type is elided in this chunk.
 */
139 __u32 import_to_gss_svc(struct obd_import *imp)
141 const char *name = imp->imp_obd->obd_type->typ_name;
143 if (!strcmp(name, LUSTRE_MDC_NAME))
144 return LUSTRE_GSS_TGT_MDS;
145 if (!strcmp(name, LUSTRE_OSC_NAME))
146 return LUSTRE_GSS_TGT_OSS;
152 * the following 3 headers must have the same size and field offsets
155 __u32 gh_version; /* gss version */
156 __u32 gh_flags; /* wrap flags */
157 __u32 gh_proc; /* proc */
158 __u32 gh_seq; /* sequence */
159 __u32 gh_svc; /* service */
163 netobj_t gh_handle; /* context handle */
/* reply header -- members elided in this chunk */
166 struct gss_rep_header {
/* error header -- members elided in this chunk */
178 struct gss_err_header {
191 * part of the wire context information sent from the client, saved and
192 * used later by the server.
194 struct gss_wire_ctx {
201 #define PTLRPC_GSS_MAX_HANDLE_SIZE (8)
/* worst-case on-wire header size: fixed part plus max handle */
202 #define PTLRPC_GSS_HEADER_SIZE (sizeof(struct gss_header) + \
203 PTLRPC_GSS_MAX_HANDLE_SIZE)
/* replay-detection window sizes in bits; MAIN and BACK presumably
 * cover the forward and reverse channels respectively -- confirm */
206 #define GSS_SEQ_WIN (2048)
207 #define GSS_SEQ_WIN_MAIN GSS_SEQ_WIN
208 #define GSS_SEQ_WIN_BACK (128)
209 #define GSS_SEQ_REPACK_THRESHOLD (GSS_SEQ_WIN_MAIN / 2 + \
210 GSS_SEQ_WIN_MAIN / 4)
/* per-context replay-window state (some members elided in this chunk) */
212 struct gss_svc_seq_data {
215 * highest sequence number seen so far, for main and back window
220 * main and back window
221 * for i such that ssd_max - GSS_SEQ_WIN < i <= ssd_max, the i-th bit
222 * of ssd_win is nonzero iff sequence number i has been seen already.
224 unsigned long ssd_win_main[GSS_SEQ_WIN_MAIN/BITS_PER_LONG];
225 unsigned long ssd_win_back[GSS_SEQ_WIN_BACK/BITS_PER_LONG];
/* server-side gss context state.  NOTE(review): the enclosing struct's
 * opening line and several members/flags are elided in this chunk. */
229 unsigned int gsc_usr_root:1,
234 uid_t gsc_mapped_uid;
/* presumably the reverse-context handle, from the name -- confirm */
235 rawobj_t gsc_rvs_hdl;
236 struct gss_svc_seq_data gsc_seqdata;
/* mechanism-specific context state */
237 struct gss_ctx *gsc_mechctx;
/* per-request server-side context, wrapping ptlrpc_svc_ctx */
240 struct gss_svc_reqctx {
241 struct ptlrpc_svc_ctx src_base;
242 struct gss_wire_ctx src_wirectx;
243 struct gss_svc_ctx *src_ctx;
244 unsigned int src_init:1,
/* client-side gss context layered on ptlrpc_cli_ctx.  NOTE(review):
 * the enclosing struct's opening line and members are elided here. */
251 struct ptlrpc_cli_ctx gc_base;
257 struct gss_ctx *gc_mechctx;
/* keyring flavor of the client context */
260 struct gss_cli_ctx_keyring {
261 struct gss_cli_ctx gck_base;
263 struct timer_list *gck_timer;
/* gss security instance layered on ptlrpc_sec */
267 struct ptlrpc_sec gs_base;
268 struct gss_api_mech *gs_mech;
/* pipefs flavor: appends a context hash table */
273 struct gss_sec_pipefs {
274 struct gss_sec gsp_base;
275 int gsp_chash_size; /* must be 2^n */
/* NOTE(review): [0] is the old GNU zero-length-array idiom; a C99
 * flexible array member ([]) is the preferred spelling */
276 struct hlist_head gsp_chash[0];
280 * FIXME cleanup the keyring upcall mutexes
282 #define HAVE_KEYRING_UPCALL_SERIALIZED 1
/* keyring flavor: tracks all contexts plus upcall serialization */
284 struct gss_sec_keyring {
285 struct gss_sec gsk_base;
291 * all contexts listed here. access is protected by sec spinlock.
293 struct hlist_head gsk_clist;
295 * specially point to root ctx (only one at a time). access is
296 * protected by sec spinlock.
298 struct ptlrpc_cli_ctx *gsk_root_ctx;
300 * specially serialize upcalls for root context.
302 struct mutex gsk_root_uc_lock;
304 #ifdef HAVE_KEYRING_UPCALL_SERIALIZED
305 struct mutex gsk_uc_lock; /* serialize upcalls */
/* downcast helpers: recover the wrapping gss type from its embedded
 * base member via container_of.  NOTE(review): the brace lines of each
 * body are elided in this chunk. */
309 static inline struct gss_cli_ctx *ctx2gctx(struct ptlrpc_cli_ctx *ctx)
311 return container_of(ctx, struct gss_cli_ctx, gc_base);
315 struct gss_cli_ctx_keyring *ctx2gctx_keyring(struct ptlrpc_cli_ctx *ctx)
317 return container_of(ctx2gctx(ctx),
318 struct gss_cli_ctx_keyring, gck_base);
321 static inline struct gss_sec *sec2gsec(struct ptlrpc_sec *sec)
323 return container_of(sec, struct gss_sec, gs_base);
326 static inline struct gss_sec_pipefs *sec2gsec_pipefs(struct ptlrpc_sec *sec)
328 return container_of(sec2gsec(sec), struct gss_sec_pipefs, gsp_base);
331 static inline struct gss_sec_keyring *sec2gsec_keyring(struct ptlrpc_sec *sec)
333 return container_of(sec2gsec(sec), struct gss_sec_keyring, gsk_base);
/* presumably the max buffer length for a context-init token -- confirm */
337 #define GSS_CTX_INIT_MAX_LEN (1024)
340 * This is only guaranteed to be enough for the current krb5
341 * des-cbc-crc; we may need to adjust it when new enc types or mechs are added.
343 #define GSS_PRIVBUF_PREFIX_LEN (32)
344 #define GSS_PRIVBUF_SUFFIX_LEN (32)
/* recover the gss request context wrapping a ptlrpc_svc_ctx */
347 struct gss_svc_reqctx *gss_svc_ctx2reqctx(struct ptlrpc_svc_ctx *ctx)
350 return container_of(ctx, struct gss_svc_reqctx, src_base);
/* client-side context ops: credential match, display, and per-message
 * sign/verify (integrity) and seal/unseal (privacy) transforms */
354 int gss_cli_ctx_match(struct ptlrpc_cli_ctx *ctx, struct vfs_cred *vcred);
355 int gss_cli_ctx_display(struct ptlrpc_cli_ctx *ctx, char *buf, int bufsize);
356 int gss_cli_ctx_sign(struct ptlrpc_cli_ctx *ctx, struct ptlrpc_request *req);
357 int gss_cli_ctx_verify(struct ptlrpc_cli_ctx *ctx, struct ptlrpc_request *req);
358 int gss_cli_ctx_seal(struct ptlrpc_cli_ctx *ctx, struct ptlrpc_request *req);
359 int gss_cli_ctx_unseal(struct ptlrpc_cli_ctx *ctx, struct ptlrpc_request *req);
361 int gss_sec_install_rctx(struct obd_import *imp, struct ptlrpc_sec *sec,
362 struct ptlrpc_cli_ctx *ctx);
/* request/reply buffer management sized for gss framing.  NOTE(review):
 * some continuation lines of these prototypes are elided in this chunk */
363 int gss_alloc_reqbuf(struct ptlrpc_sec *sec, struct ptlrpc_request *req,
365 void gss_free_reqbuf(struct ptlrpc_sec *sec, struct ptlrpc_request *req);
366 int gss_alloc_repbuf(struct ptlrpc_sec *sec, struct ptlrpc_request *req,
368 void gss_free_repbuf(struct ptlrpc_sec *sec, struct ptlrpc_request *req);
369 int gss_enlarge_reqbuf(struct ptlrpc_sec *sec, struct ptlrpc_request *req,
370 int segment, int newsize);
/* server-side request pipeline: accept incoming request, authorize the
 * reply, reply-state alloc/free, and context invalidate/free */
372 int gss_svc_accept(struct ptlrpc_sec_policy *policy,
373 struct ptlrpc_request *req);
374 void gss_svc_invalidate_ctx(struct ptlrpc_svc_ctx *svc_ctx);
375 int gss_svc_alloc_rs(struct ptlrpc_request *req, int msglen);
376 int gss_svc_authorize(struct ptlrpc_request *req);
377 void gss_svc_free_rs(struct ptlrpc_reply_state *rs);
378 void gss_svc_free_ctx(struct ptlrpc_svc_ctx *ctx);
/* client context lifetime helpers */
380 int cli_ctx_expire(struct ptlrpc_cli_ctx *ctx);
381 int cli_ctx_check_death(struct ptlrpc_cli_ctx *ctx);
/* seed a client context from a server-side (reverse) context */
383 int gss_copy_rvc_cli_ctx(struct ptlrpc_cli_ctx *cli_ctx,
384 struct ptlrpc_svc_ctx *svc_ctx);
/* byte-order (swab) accessors for gss structures embedded in a message */
386 struct gss_header *gss_swab_header(struct lustre_msg *msg, int segment);
387 netobj_t *gss_swab_netobj(struct lustre_msg *msg, int segment);
389 void gss_cli_ctx_uptodate(struct gss_cli_ctx *gctx);
390 int gss_pack_err_notify(struct ptlrpc_request *req, __u32 major, __u32 minor);
/* replay-window check for seq_num; 'set' presumably records it as seen */
391 int gss_check_seq_num(struct gss_svc_seq_data *sd, __u32 seq_num, int set);
/* shared sec/ctx construction and teardown used by both the keyring
 * and pipefs flavors */
393 int gss_sec_create_common(struct gss_sec *gsec,
394 struct ptlrpc_sec_policy *policy,
395 struct obd_import *imp,
396 struct ptlrpc_svc_ctx *ctx,
398 unsigned long flags);
399 void gss_sec_destroy_common(struct gss_sec *gsec);
401 int gss_cli_ctx_init_common(struct ptlrpc_sec *sec,
402 struct ptlrpc_cli_ctx *ctx,
403 struct ptlrpc_ctx_ops *ctxops,
404 struct vfs_cred *vcred);
405 int gss_cli_ctx_fini_common(struct ptlrpc_sec *sec,
406 struct ptlrpc_cli_ctx *ctx);
/* keyring upcall policy entry points */
409 extern struct ptlrpc_sec_policy gss_policy_keyring;
410 int __init gss_init_keyring(void);
411 void __exit gss_exit_keyring(void);
/* pipefs upcall policy entry points */
414 int __init gss_init_pipefs(void);
415 void __exit gss_exit_pipefs(void);
/* bulk i/o protection: client-side wrap/unwrap of bulk descriptors and
 * the matching server-side transforms */
418 int gss_cli_ctx_wrap_bulk(struct ptlrpc_cli_ctx *ctx,
419 struct ptlrpc_request *req,
420 struct ptlrpc_bulk_desc *desc);
421 int gss_cli_ctx_unwrap_bulk(struct ptlrpc_cli_ctx *ctx,
422 struct ptlrpc_request *req,
423 struct ptlrpc_bulk_desc *desc);
424 int gss_svc_unwrap_bulk(struct ptlrpc_request *req,
425 struct ptlrpc_bulk_desc *desc);
426 int gss_svc_wrap_bulk(struct ptlrpc_request *req,
427 struct ptlrpc_bulk_desc *desc);
429 /* gss_mech_switch.c */
/* NOTE(review): these two prototypes are also declared, with
 * __init/__exit annotations, under the gss_krb5_mech.c section below.
 * The duplicates should be unified and the annotations made to match
 * the definitions. */
430 int init_kerberos_module(void);
431 void cleanup_kerberos_module(void);
433 /* gss_generic_token.c */
/* total token size for a mech OID plus a body_size payload */
434 int g_token_size(rawobj_t *mech, unsigned int body_size);
435 void g_make_token_header(rawobj_t *mech, int body_size, unsigned char **buf);
/* returns a __u32 status -- presumably a GSS major status; advances
 * *buf_in past the header on success (confirm in implementation) */
436 __u32 g_verify_token_header(rawobj_t *mech, int *body_size,
437 unsigned char **buf_in, int toksize);
440 /* gss_cli_upcall.c */
/* context init/fini RPCs driven from the userspace upcall path */
441 int gss_do_ctx_init_rpc(char *buffer, unsigned long count);
442 int gss_do_ctx_fini_rpc(struct gss_cli_ctx *gctx);
444 int __init gss_init_cli_upcall(void);
445 void __exit gss_exit_cli_upcall(void);
447 /* gss_svc_upcall.c */
448 __u64 gss_get_next_ctx_index(void);
/* install a reverse (server->client) context into a client sec */
449 int gss_svc_upcall_install_rvs_ctx(struct obd_import *imp,
450 struct gss_sec *gsec,
451 struct gss_cli_ctx *gctx);
/* handle a context-init request via the svcgssd upcall.  NOTE(review):
 * the tail of this prototype's parameter list is elided in this chunk */
452 int gss_svc_upcall_handle_init(struct ptlrpc_request *req,
453 struct gss_svc_reqctx *grctx,
454 struct gss_wire_ctx *gw,
455 struct obd_device *target,
/* get/put look like a refcounted pair -- confirm in implementation */
459 struct gss_svc_ctx *gss_svc_upcall_get_ctx(struct ptlrpc_request *req,
460 struct gss_wire_ctx *gw);
461 void gss_svc_upcall_put_ctx(struct gss_svc_ctx *ctx);
462 void gss_svc_upcall_destroy_ctx(struct gss_svc_ctx *ctx);
464 int __init gss_init_svc_upcall(void);
465 void __exit gss_exit_svc_upcall(void);
/* out-of-sequence statistics, presumably exported via lproc -- confirm */
468 void gss_stat_oos_record_cli(int behind);
469 void gss_stat_oos_record_svc(int phase, int replay);
471 int __init gss_init_lproc(void);
472 void __exit gss_exit_lproc(void);
474 /* gss_krb5_mech.c */
/* NOTE(review): duplicates of the prototypes under the
 * gss_mech_switch.c section above, here with __init/__exit annotations
 * -- unify them and match the definitions' annotations. */
475 int __init init_kerberos_module(void);
476 void __exit cleanup_kerberos_module(void);
/*
 * Debug helper: print 'size' bytes at 'ptr' as hex, tagged with 'name'.
 * NOTE(review): the allocation-failure early return and the brace lines
 * are elided in this chunk.
 */
481 void __dbg_memdump(char *name, void *ptr, int size)
483 char *buf, *p = (char *) ptr;
/* two hex characters per byte plus the trailing NUL */
484 int bufsize = size * 2 + 1, i;
486 OBD_ALLOC(buf, bufsize);
488 printk("DUMP ERROR: can't alloc %d bytes\n", bufsize);
/* render each byte as two lowercase hex digits */
492 for (i = 0; i < size; i++)
493 sprintf(&buf[i+i], "%02x", (__u8) p[i]);
494 buf[size + size] = '\0';
495 printk("DUMP %s@%p(%d): %s\n", name, ptr, size, buf);
496 OBD_FREE(buf, bufsize);
499 #endif /* __PTLRPC_GSS_GSS_INTERNAL_H_ */