2 * Modified from NFSv4 project for Lustre
4 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
6 * Copyright (c) 2012, 2016, Intel Corporation.
8 * Author: Eric Mei <ericm@clusterfs.com>
11 #ifndef __PTLRPC_GSS_GSS_INTERNAL_H_
12 #define __PTLRPC_GSS_GSS_INTERNAL_H_
14 #include <crypto/hash.h>
15 #include <libcfs/libcfs_crypto.h>
16 #include <lustre_sec.h>
/* Zero-value initializers for the counted-buffer wire types. */
21 #define NETOBJ_EMPTY ((netobj_t) { 0 })
22 #define RAWOBJ_EMPTY ((rawobj_t) { 0, NULL })
/* NOTE(review): rawobj_buf_s body elided in this listing (orig lines 25-30). */
24 typedef struct rawobj_buf_s {
/*
 * rawobj helpers: lifetime management, comparison, and wire
 * (de)serialization of counted byte buffers used throughout GSS.
 * NOTE(review): the *_alloc variants presumably allocate obj->data,
 * to be released with rawobj_free() -- confirm in the implementation.
 */
31 int rawobj_empty(rawobj_t *obj);
32 int rawobj_alloc(rawobj_t *obj, char *buf, int len);
33 void rawobj_free(rawobj_t *obj);
34 int rawobj_equal(rawobj_t *a, rawobj_t *b);
35 int rawobj_dup(rawobj_t *dest, rawobj_t *src);
36 int rawobj_serialize(rawobj_t *obj, __u32 **buf, __u32 *buflen);
37 int rawobj_extract(rawobj_t *obj, __u32 **buf, __u32 *buflen);
38 int rawobj_extract_alloc(rawobj_t *obj, __u32 **buf, __u32 *buflen);
39 int rawobj_extract_local(rawobj_t *obj, __u32 **buf, __u32 *buflen);
40 int rawobj_extract_local_alloc(rawobj_t *obj, __u32 **buf, __u32 *buflen);
41 int rawobj_from_netobj(rawobj_t *rawobj, netobj_t *netobj);
42 int rawobj_from_netobj_alloc(rawobj_t *obj, netobj_t *netobj);
/* Pull reslen raw bytes out of *buf into res, advancing *buf / *buflen. */
44 int buffer_extract_bytes(const void **buf, __u32 *buflen,
45 void *res, __u32 reslen);
48 * several timeout values. For the client refresh upcall timeout we use
49 * the default from the pipefs implementation.
/* Safety margin (seconds) subtracted from RPC/context deadlines. */
51 #define __TIMEOUT_DELTA (10)
/*
 * SECINIT RPC timeout: obd_timeout minus the delta, clamped so it never
 * goes below __TIMEOUT_DELTA itself when obd_timeout is very small.
 */
53 #define GSS_SECINIT_RPC_TIMEOUT \
54 (obd_timeout < __TIMEOUT_DELTA ? \
55 __TIMEOUT_DELTA : obd_timeout - __TIMEOUT_DELTA)
57 #define GSS_SECFINI_RPC_TIMEOUT (__TIMEOUT_DELTA)
58 #define GSS_SECSVC_UPCALL_TIMEOUT (GSS_SECINIT_RPC_TIMEOUT)
/* Context garbage-collection interval, in seconds. */
63 #define GSS_GC_INTERVAL (60 * 60) /* 60 minutes */
/*
 * Round a GSS context expiry down by __TIMEOUT_DELTA so the context is
 * refreshed/expired slightly before its real deadline.
 * NOTE(review): the reverse-context return value (orig lines 69-70) and
 * the final fallback return (orig lines 73-76) are elided in this listing.
 */
65 static inline time64_t gss_round_ctx_expiry(time64_t expiry,
66 unsigned long sec_flags)
/* Reverse (server-side) contexts are handled specially -- body elided. */
68 if (sec_flags & PTLRPC_SEC_FL_REVERSE)
/* Only subtract the margin if it does not move the expiry into the past. */
71 if (ktime_get_real_seconds() + __TIMEOUT_DELTA <= expiry)
72 return expiry - __TIMEOUT_DELTA;
78 * Max encryption element in block cipher algorithms.
80 #define GSS_MAX_CIPHER_BLOCK (16)
83 * XXX make it visible to both the kernel and lgssd/lsvcgssd
/* Kernel <-> userspace gssd interface version; current default is V2. */
86 GSSD_INTERFACE_VERSION_V1 = 1,
87 GSSD_INTERFACE_VERSION_V2 = 2,
88 GSSD_INTERFACE_VERSION = GSSD_INTERFACE_VERSION_V2,
/* On-the-wire ptlrpc GSS protocol version. */
91 #define PTLRPC_GSS_VERSION (1)
/* GSS procedure carried in each request (RPCSEC_GSS-style). */
94 enum ptlrpc_gss_proc {
95 PTLRPC_GSS_PROC_DATA = 0,
96 PTLRPC_GSS_PROC_INIT = 1,
97 PTLRPC_GSS_PROC_CONTINUE_INIT = 2,
98 PTLRPC_GSS_PROC_DESTROY = 3,
99 PTLRPC_GSS_PROC_ERR = 4,
/* Target service type a GSS context is established against. */
102 enum ptlrpc_gss_tgt {
103 LUSTRE_GSS_TGT_MGS = 0,
104 LUSTRE_GSS_TGT_MDS = 1,
105 LUSTRE_GSS_TGT_OSS = 2,
/* Bit flags carried in the GSS header. */
108 enum ptlrpc_gss_header_flags {
109 LUSTRE_GSS_PACK_BULK = 1,
110 LUSTRE_GSS_PACK_USER = 2,
111 LUSTRE_GSS_PACK_KCSUM = 4,
/*
 * Map an obd_import's configured target service type (cl_sp_to) to the
 * corresponding LUSTRE_GSS_TGT_* value.
 * NOTE(review): the guard conditions selecting MDS/OSS/MGS (orig lines
 * 118-128) are elided in this listing -- confirm against the full file.
 */
115 __u32 import_to_gss_svc(struct obd_import *imp)
117 int cl_sp_to = LUSTRE_SP_ANY;
120 cl_sp_to = imp->imp_obd->u.cli.cl_sp_to;
124 return LUSTRE_GSS_TGT_MDS;
126 return LUSTRE_GSS_TGT_OSS;
129 return LUSTRE_GSS_TGT_MGS;
/* Fixed on-the-wire context handle size, in bytes. */
137 #define PTLRPC_GSS_MAX_HANDLE_SIZE (8)
138 #define PTLRPC_GSS_HEADER_SIZE (sizeof(struct gss_header) + \
139 PTLRPC_GSS_MAX_HANDLE_SIZE)
/*
 * Reinterpret an 8-byte context handle as a __u64 (for hashing/printing).
 * NOTE(review): the non-8-byte fallback return (orig line 145) is elided;
 * the raw cast also assumes handle->data is suitably aligned -- confirm.
 */
142 static inline __u64 gss_handle_to_u64(rawobj_t *handle)
144 if (handle->len != PTLRPC_GSS_MAX_HANDLE_SIZE)
146 return *((__u64 *) handle->data);
/* Replay-detection sliding-window sizes (bits); must divide BITS_PER_LONG. */
149 #define GSS_SEQ_WIN (2048)
150 #define GSS_SEQ_WIN_MAIN GSS_SEQ_WIN
151 #define GSS_SEQ_WIN_BACK (128)
152 #define GSS_SEQ_REPACK_THRESHOLD (GSS_SEQ_WIN_MAIN / 2 + \
153 GSS_SEQ_WIN_MAIN / 4)
/* Per-server-context sequence-number replay-detection state. */
155 struct gss_svc_seq_data {
158 * highest sequence number seen so far, for main and back window
163 * main and back window
164 * for i such that ssd_max - GSS_SEQ_WIN < i <= ssd_max, the i-th bit
165 * of ssd_win is nonzero iff sequence number i has been seen already.
167 unsigned long ssd_win_main[GSS_SEQ_WIN_MAIN/BITS_PER_LONG];
168 unsigned long ssd_win_back[GSS_SEQ_WIN_BACK/BITS_PER_LONG];
/* Fields of the server-side GSS context (struct opener elided above). */
/* Mechanism-specific context (krb5/null/sk) owned by this svc ctx. */
172 struct gss_ctx *gsc_mechctx;
/* Replay window state for this context. */
173 struct gss_svc_seq_data gsc_seqdata;
/* Reverse-context handle. */
174 rawobj_t gsc_rvs_hdl;
178 uid_t gsc_mapped_uid;
/* Bitfield flags; remaining bits (orig lines 180+) elided here. */
179 unsigned int gsc_usr_root:1,
/* Per-request server-side GSS context, embedding the generic svc ctx. */
186 struct gss_svc_reqctx {
187 struct ptlrpc_svc_ctx src_base;
/* Wire-level GSS data parsed from the incoming request. */
191 struct gss_wire_ctx src_wirectx;
192 struct gss_svc_ctx *src_ctx;
194 * record place of bulk_sec_desc in request/reply buffer
196 struct ptlrpc_bulk_sec_desc *src_reqbsd;
198 struct ptlrpc_bulk_sec_desc *src_repbsd;
/* Bitfield flags; remaining bits (orig lines 204+) elided here. */
203 unsigned int src_init:1,
/* Fields of the client-side GSS context (struct opener elided above). */
210 struct ptlrpc_cli_ctx gc_base;
/* Mechanism-specific context for this client ctx. */
216 struct gss_ctx *gc_mechctx;
217 /* handle for the buddy svc ctx */
218 rawobj_t gc_svc_handle;
/* Keyring-flavor client context: base ctx plus an expiry/refresh timer. */
221 struct gss_cli_ctx_keyring {
222 struct gss_cli_ctx gck_base;
224 struct timer_list gck_timer;
/* Fields of struct gss_sec (opener elided): generic sec + chosen mech. */
228 struct ptlrpc_sec gs_base;
229 struct gss_api_mech *gs_mech;
/* pipefs-flavor sec: context hash table appended after the struct. */
234 struct gss_sec_pipefs {
235 struct gss_sec gsp_base;
236 int gsp_chash_size; /* must be 2^n */
/* NOTE(review): [0] is the pre-C99 trailing-array idiom; a C99 flexible
 * array member `gsp_chash[]` would be preferred if the toolchain allows. */
237 struct hlist_head gsp_chash[0];
241 * FIXME cleanup the keyring upcall mutexes
243 #define HAVE_KEYRING_UPCALL_SERIALIZED 1
/* Keyring-flavor sec: context list, cached root ctx, upcall serialization. */
245 struct gss_sec_keyring {
246 struct gss_sec gsk_base;
248 * all contexts listed here. access is protected by sec spinlock.
250 struct hlist_head gsk_clist;
252 * specially point to root ctx (only one at a time). access is
253 * protected by sec spinlock.
255 struct ptlrpc_cli_ctx *gsk_root_ctx;
257 * specially serialize upcalls for root context.
259 struct mutex gsk_root_uc_lock;
261 #ifdef HAVE_KEYRING_UPCALL_SERIALIZED
262 struct mutex gsk_uc_lock; /* serialize upcalls */
/*
 * container_of() downcasts from the generic ptlrpc ctx/sec embedded in
 * the GSS-specific structures back to the enclosing GSS type.
 * (Function braces are elided in this listing.)
 */
266 static inline struct gss_cli_ctx *ctx2gctx(struct ptlrpc_cli_ctx *ctx)
268 return container_of(ctx, struct gss_cli_ctx, gc_base);
272 struct gss_cli_ctx_keyring *ctx2gctx_keyring(struct ptlrpc_cli_ctx *ctx)
274 return container_of(ctx2gctx(ctx),
275 struct gss_cli_ctx_keyring, gck_base);
278 static inline struct gss_sec *sec2gsec(struct ptlrpc_sec *sec)
280 return container_of(sec, struct gss_sec, gs_base);
283 static inline struct gss_sec_pipefs *sec2gsec_pipefs(struct ptlrpc_sec *sec)
285 return container_of(sec2gsec(sec), struct gss_sec_pipefs, gsp_base);
288 static inline struct gss_sec_keyring *sec2gsec_keyring(struct ptlrpc_sec *sec)
290 return container_of(sec2gsec(sec), struct gss_sec_keyring, gsk_base);
/*
 * Compat wrappers: newer kernels guard the sunrpc cache_detail hash with a
 * spinlock (and provide the _rcu lookup); older ones use a rwlock.
 * (The closing #endif, orig line 300, is elided in this listing.)
 */
293 #ifdef HAVE_CACHE_HASH_SPINLOCK
294 # define sunrpc_cache_lookup(c, i, h) sunrpc_cache_lookup_rcu((c), (i), (h))
295 # define cache_read_lock(cdetail) spin_lock(&((cdetail)->hash_lock))
296 # define cache_read_unlock(cdetail) spin_unlock(&((cdetail)->hash_lock))
297 #else /* ! HAVE_CACHE_HASH_SPINLOCK */
298 # define cache_read_lock(cdetail) read_lock(&((cdetail)->hash_lock))
299 # define cache_read_unlock(cdetail) read_unlock(&((cdetail)->hash_lock))
/* Max length of a context-init token buffer. */
302 #define GSS_CTX_INIT_MAX_LEN (1024)
305 * This is only guaranteed to be enough for the current krb5 des-cbc-crc.
306 * We might need to adjust it when a new enc type or mech is added.
/* Head/tail slack reserved in privacy-mode buffers for cipher expansion. */
308 #define GSS_PRIVBUF_PREFIX_LEN (32)
309 #define GSS_PRIVBUF_SUFFIX_LEN (32)
/* Downcast a generic svc ctx to the enclosing GSS request ctx. */
312 struct gss_svc_reqctx *gss_svc_ctx2reqctx(struct ptlrpc_svc_ctx *ctx)
315 return container_of(ctx, struct gss_svc_reqctx, src_base);
/* Fetch the GSS server context attached to a generic svc ctx. */
319 struct gss_svc_ctx *gss_svc_ctx2gssctx(struct ptlrpc_svc_ctx *ctx)
322 return gss_svc_ctx2reqctx(ctx)->src_ctx;
/* Client-side context operations (sign/verify = integrity, seal/unseal = privacy). */
326 int gss_cli_ctx_match(struct ptlrpc_cli_ctx *ctx, struct vfs_cred *vcred);
327 int gss_cli_ctx_display(struct ptlrpc_cli_ctx *ctx, char *buf, int bufsize);
328 int gss_cli_ctx_sign(struct ptlrpc_cli_ctx *ctx, struct ptlrpc_request *req);
329 int gss_cli_ctx_verify(struct ptlrpc_cli_ctx *ctx, struct ptlrpc_request *req);
330 int gss_cli_ctx_seal(struct ptlrpc_cli_ctx *ctx, struct ptlrpc_request *req);
331 int gss_cli_ctx_unseal(struct ptlrpc_cli_ctx *ctx, struct ptlrpc_request *req);
333 int gss_sec_install_rctx(struct obd_import *imp, struct ptlrpc_sec *sec,
334 struct ptlrpc_cli_ctx *ctx);
/* Request/reply buffer management for GSS-wrapped messages. */
335 int gss_alloc_reqbuf(struct ptlrpc_sec *sec, struct ptlrpc_request *req,
337 void gss_free_reqbuf(struct ptlrpc_sec *sec, struct ptlrpc_request *req);
338 int gss_alloc_repbuf(struct ptlrpc_sec *sec, struct ptlrpc_request *req,
340 void gss_free_repbuf(struct ptlrpc_sec *sec, struct ptlrpc_request *req);
341 int gss_enlarge_reqbuf(struct ptlrpc_sec *sec, struct ptlrpc_request *req,
342 int segment, int newsize);
/* Server-side request acceptance / reply authorization. */
344 int gss_svc_accept(struct ptlrpc_sec_policy *policy,
345 struct ptlrpc_request *req);
346 void gss_svc_invalidate_ctx(struct ptlrpc_svc_ctx *svc_ctx);
347 int gss_svc_alloc_rs(struct ptlrpc_request *req, int msglen);
348 int gss_svc_authorize(struct ptlrpc_request *req);
349 void gss_svc_free_rs(struct ptlrpc_reply_state *rs);
350 void gss_svc_free_ctx(struct ptlrpc_svc_ctx *ctx);
/* Client context expiry / liveness checks. */
352 int cli_ctx_expire(struct ptlrpc_cli_ctx *ctx);
353 int cli_ctx_check_death(struct ptlrpc_cli_ctx *ctx);
/* Build a reverse client ctx from an established server ctx. */
355 int gss_copy_rvc_cli_ctx(struct ptlrpc_cli_ctx *cli_ctx,
356 struct ptlrpc_svc_ctx *svc_ctx);
/* Byte-swab helpers for on-the-wire GSS header / netobj segments. */
358 struct gss_header *gss_swab_header(struct lustre_msg *msg, int segment,
360 netobj_t *gss_swab_netobj(struct lustre_msg *msg, int segment);
362 void gss_cli_ctx_uptodate(struct gss_cli_ctx *gctx);
363 int gss_pack_err_notify(struct ptlrpc_request *req, __u32 major, __u32 minor);
/* Replay-window check/update for an incoming sequence number. */
364 int gss_check_seq_num(struct gss_svc_seq_data *sd, __u32 seq_num, int set);
/* Flavor-independent gss_sec / gss_cli_ctx construction and teardown. */
366 int gss_sec_create_common(struct gss_sec *gsec,
367 struct ptlrpc_sec_policy *policy,
368 struct obd_import *imp,
369 struct ptlrpc_svc_ctx *ctx,
370 struct sptlrpc_flavor *sf);
371 void gss_sec_destroy_common(struct gss_sec *gsec);
372 void gss_sec_kill(struct ptlrpc_sec *sec);
374 int gss_cli_ctx_init_common(struct ptlrpc_sec *sec,
375 struct ptlrpc_cli_ctx *ctx,
376 struct ptlrpc_ctx_ops *ctxops,
377 struct vfs_cred *vcred);
378 int gss_cli_ctx_fini_common(struct ptlrpc_sec *sec,
379 struct ptlrpc_cli_ctx *ctx);
/* Render context flag bits as a human-readable string into buf. */
381 void gss_cli_ctx_flags2str(unsigned long flags, char *buf, int bufsize);
/*
 * Keyring / pipefs flavor module init+exit: no-op stubs when the flavor is
 * compiled out.  NOTE(review): the #else/#endif lines between the stub and
 * real prototypes (orig lines 387, 390, 397, 400) are elided in this listing.
 */
384 #ifndef HAVE_GSS_KEYRING
385 static inline int __init gss_init_keyring(void) { return 0; }
386 static inline void __exit gss_exit_keyring(void) { return; }
388 int __init gss_init_keyring(void);
389 void __exit gss_exit_keyring(void);
/* Tunable: whether to validate the namespace of keyring upcalls. */
391 extern unsigned int gss_check_upcall_ns;
394 #ifndef HAVE_GSS_PIPEFS
395 static inline int __init gss_init_pipefs(void) { return 0; }
396 static inline void __exit gss_exit_pipefs(void) { return; }
398 int __init gss_init_pipefs(void);
399 void __exit gss_exit_pipefs(void);
/* Bulk (RDMA) data protection: client prepares/wraps/unwraps, server mirrors. */
403 int gss_cli_prep_bulk(struct ptlrpc_request *req,
404 struct ptlrpc_bulk_desc *desc);
405 int gss_cli_ctx_wrap_bulk(struct ptlrpc_cli_ctx *ctx,
406 struct ptlrpc_request *req,
407 struct ptlrpc_bulk_desc *desc);
408 int gss_cli_ctx_unwrap_bulk(struct ptlrpc_cli_ctx *ctx,
409 struct ptlrpc_request *req,
410 struct ptlrpc_bulk_desc *desc);
411 int gss_svc_prep_bulk(struct ptlrpc_request *req,
412 struct ptlrpc_bulk_desc *desc);
413 int gss_svc_unwrap_bulk(struct ptlrpc_request *req,
414 struct ptlrpc_bulk_desc *desc);
415 int gss_svc_wrap_bulk(struct ptlrpc_request *req,
416 struct ptlrpc_bulk_desc *desc);
418 /* gss_generic_token.c */
/* Size/construct/verify the generic GSS token header framing a mech token. */
419 int g_token_size(rawobj_t *mech, unsigned int body_size);
420 void g_make_token_header(rawobj_t *mech, int body_size, unsigned char **buf);
421 __u32 g_verify_token_header(rawobj_t *mech, int *body_size,
422 unsigned char **buf_in, int toksize);
425 /* gss_cli_upcall.c */
/* Drive context init RPC from a userspace-supplied buffer; fini tears down. */
426 int gss_do_ctx_init_rpc(char __user *buffer, unsigned long count);
427 int gss_do_ctx_fini_rpc(struct gss_cli_ctx *gctx);
429 int __init gss_init_cli_upcall(void);
430 void gss_exit_cli_upcall(void);
432 /* gss_svc_upcall.c */
/* Server-side context cache: install/expire/lookup via svcgssd upcall. */
433 __u64 gss_get_next_ctx_index(void);
434 int gss_svc_upcall_install_rvs_ctx(struct obd_import *imp,
435 struct gss_sec *gsec,
436 struct gss_cli_ctx *gctx);
437 int gss_svc_upcall_expire_rvs_ctx(rawobj_t *handle);
438 int gss_svc_upcall_dup_handle(rawobj_t *handle, struct gss_svc_ctx *ctx);
439 int gss_svc_upcall_update_sequence(rawobj_t *handle, __u32 seq);
440 int gss_svc_upcall_handle_init(struct ptlrpc_request *req,
441 struct gss_svc_reqctx *grctx,
442 struct gss_wire_ctx *gw,
443 struct obd_device *target,
447 struct gss_svc_ctx *gss_svc_upcall_get_ctx(struct ptlrpc_request *req,
448 struct gss_wire_ctx *gw);
449 void gss_svc_upcall_put_ctx(struct gss_svc_ctx *ctx);
450 void gss_svc_upcall_destroy_ctx(struct gss_svc_ctx *ctx);
452 int __init gss_init_svc_upcall(void);
453 void gss_exit_svc_upcall(void);
/* Tunable: accept checksums from pre-kcsum krb5 clients. */
454 extern unsigned int krb5_allow_old_client_csum;
/* Out-of-sequence statistics recording (client and server side). */
457 void gss_stat_oos_record_cli(int behind);
458 void gss_stat_oos_record_svc(int phase, int replay);
460 int __init gss_init_tunables(void);
461 void gss_exit_tunables(void);
463 /* gss_null_mech.c */
464 int __init init_null_module(void);
465 void cleanup_null_module(void);
467 /* gss_krb5_mech.c */
468 int __init init_kerberos_module(void);
469 void cleanup_kerberos_module(void);
/* SSK mech is optional; stubs below are the #else branch (orig line 475 elided). */
472 #ifdef HAVE_OPENSSL_SSK
473 int __init init_sk_module(void);
474 void cleanup_sk_module(void);
476 static inline int init_sk_module(void) { return 0; }
477 static inline void cleanup_sk_module(void) { return; }
478 #endif /* HAVE_OPENSSL_SSK */
/*
 * Debug helper: hex-dump `size` bytes at `ptr` to the console, labelled
 * with `name`.  Allocates a 2*size+1 scratch buffer for the hex string.
 * NOTE(review): the allocation-failure test around the CDEBUG (orig lines
 * 488-492) is elided in this listing -- the error path returns early there.
 */
482 void __dbg_memdump(char *name, void *ptr, int size)
484 char *buf, *p = (char *) ptr;
485 int bufsize = size * 2 + 1, i;
487 OBD_ALLOC(buf, bufsize);
489 CDEBUG(D_ERROR, "DUMP ERROR: can't alloc %d bytes\n", bufsize);
/* Two hex digits per input byte, then NUL-terminate. */
493 for (i = 0; i < size; i++)
494 sprintf(&buf[i+i], "%02x", (__u8) p[i]);
495 buf[size + size] = '\0';
496 LCONSOLE_INFO("DUMP %s@%p(%d): %s\n", name, ptr, size, buf);
497 OBD_FREE(buf, bufsize);
/*
 * Read a key's usage count; kernels >= 4.11 use refcount_t for
 * key->usage, older ones atomic_t.  (The #else/#endif lines, orig
 * 504/506, are elided in this listing.)
 */
500 static inline unsigned int ll_read_key_usage(struct key *key)
502 #ifdef HAVE_KEY_USAGE_REFCOUNT
503 return refcount_read(&key->usage);
505 return atomic_read(&key->usage);
509 #endif /* __PTLRPC_GSS_GSS_INTERNAL_H_ */