 *
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 *
 * Copyright (c) 2012, 2016, Intel Corporation.
 *
 * Author: Eric Mei <ericm@clusterfs.com>
 */
#ifndef __PTLRPC_GSS_GSS_INTERNAL_H_
#define __PTLRPC_GSS_GSS_INTERNAL_H_

#include <linux/crypto.h>
#include <lustre_sec.h>
/*
 * XXX make it visible to both the kernel and lgssd/lsvcgssd
 */
enum {
	GSSD_INTERFACE_VERSION_V1	= 1,
	GSSD_INTERFACE_VERSION_V2	= 2,
	/* current version spoken between kernel and userspace gssd */
	GSSD_INTERFACE_VERSION		= GSSD_INTERFACE_VERSION_V2,
};

#define PTLRPC_GSS_VERSION	(1)
static inline
__u32 import_to_gss_svc(struct obd_import *imp)
{
- const char *name = imp->imp_obd->obd_type->typ_name;
-
- if (!strcmp(name, LUSTRE_MGC_NAME))
- return LUSTRE_GSS_TGT_MGS;
- if (!strcmp(name, LUSTRE_MDC_NAME))
- return LUSTRE_GSS_TGT_MDS;
- if (!strcmp(name, LUSTRE_OSC_NAME))
- return LUSTRE_GSS_TGT_OSS;
- LBUG();
- return 0;
+ const char *name = imp->imp_obd->obd_type->typ_name;
+
+ if (!strcmp(name, LUSTRE_MGC_NAME))
+ return LUSTRE_GSS_TGT_MGS;
+ if (!strcmp(name, LUSTRE_MDC_NAME) ||
+ !strcmp(name, LUSTRE_LWP_NAME))
+ return LUSTRE_GSS_TGT_MDS;
+ if (!strcmp(name, LUSTRE_OSC_NAME) ||
+ !strcmp(name, LUSTRE_OSP_NAME))
+ return LUSTRE_GSS_TGT_OSS;
+
+ return 0;
}
/*
GSS_SEQ_WIN_MAIN / 4)
struct gss_svc_seq_data {
- cfs_spinlock_t ssd_lock;
+ spinlock_t ssd_lock;
/*
* highest sequence number seen so far, for main and back window
*/
};
struct gss_cli_ctx {
- struct ptlrpc_cli_ctx gc_base;
- __u32 gc_flavor;
- __u32 gc_proc;
- __u32 gc_win;
- cfs_atomic_t gc_seq;
- rawobj_t gc_handle;
- struct gss_ctx *gc_mechctx;
- /* handle for the buddy svc ctx */
- rawobj_t gc_svc_handle;
+ struct ptlrpc_cli_ctx gc_base;
+ __u32 gc_flavor;
+ __u32 gc_proc;
+ __u32 gc_win;
+ atomic_t gc_seq;
+ rawobj_t gc_handle;
+ struct gss_ctx *gc_mechctx;
+ /* handle for the buddy svc ctx */
+ rawobj_t gc_svc_handle;
};
struct gss_cli_ctx_keyring {
};
struct gss_sec {
- struct ptlrpc_sec gs_base;
- struct gss_api_mech *gs_mech;
- cfs_spinlock_t gs_lock;
- __u64 gs_rvs_hdl;
+ struct ptlrpc_sec gs_base;
+ struct gss_api_mech *gs_mech;
+ spinlock_t gs_lock;
+ __u64 gs_rvs_hdl;
};
struct gss_sec_pipefs {
- struct gss_sec gsp_base;
- int gsp_chash_size; /* must be 2^n */
- cfs_hlist_head_t gsp_chash[0];
+ struct gss_sec gsp_base;
+ int gsp_chash_size; /* must be 2^n */
+ struct hlist_head gsp_chash[0];
};
/*
/*
* all contexts listed here. access is protected by sec spinlock.
*/
- cfs_hlist_head_t gsk_clist;
+ struct hlist_head gsk_clist;
/*
* specially point to root ctx (only one at a time). access is
* protected by sec spinlock.
/*
* specially serialize upcalls for root context.
*/
- cfs_mutex_t gsk_root_uc_lock;
+ struct mutex gsk_root_uc_lock;
#ifdef HAVE_KEYRING_UPCALL_SERIALIZED
- cfs_mutex_t gsk_uc_lock; /* serialize upcalls */
+ struct mutex gsk_uc_lock; /* serialize upcalls */
#endif
};
void gss_cli_ctx_flags2str(unsigned long flags, char *buf, int bufsize);
/* gss_keyring.c */
+#ifndef HAVE_GSS_KEYRING
+static inline int __init gss_init_keyring(void) { return 0; }
+static inline void __exit gss_exit_keyring(void) { return; }
+#else
int __init gss_init_keyring(void);
void __exit gss_exit_keyring(void);
+#endif
/* gss_pipefs.c */
+#ifndef HAVE_GSS_PIPEFS
+static inline int __init gss_init_pipefs(void) { return 0; }
+static inline void __exit gss_exit_pipefs(void) { return; }
+#else
int __init gss_init_pipefs(void);
void __exit gss_exit_pipefs(void);
+#endif
/* gss_bulk.c */
int gss_cli_prep_bulk(struct ptlrpc_request *req,
int gss_svc_wrap_bulk(struct ptlrpc_request *req,
struct ptlrpc_bulk_desc *desc);
/* gss_generic_token.c */
int g_token_size(rawobj_t *mech, unsigned int body_size);
void g_make_token_header(rawobj_t *mech, int body_size, unsigned char **buf);
/* gss_cli_upcall.c */
-int gss_do_ctx_init_rpc(char *buffer, unsigned long count);
+int gss_do_ctx_init_rpc(char __user *buffer, unsigned long count);
int gss_do_ctx_fini_rpc(struct gss_cli_ctx *gctx);
int __init gss_init_cli_upcall(void);
-void __exit gss_exit_cli_upcall(void);
+void gss_exit_cli_upcall(void);
/* gss_svc_upcall.c */
__u64 gss_get_next_ctx_index(void);
void gss_svc_upcall_destroy_ctx(struct gss_svc_ctx *ctx);

int  __init gss_init_svc_upcall(void);
void gss_exit_svc_upcall(void);
/* lproc_gss.c */
void gss_stat_oos_record_cli(int behind);
void gss_stat_oos_record_svc(int phase, int replay);
int __init gss_init_lproc(void);
-void __exit gss_exit_lproc(void);
+void gss_exit_lproc(void);
+
+/* gss_null_mech.c */
+int __init init_null_module(void);
+void cleanup_null_module(void);
/* gss_krb5_mech.c */
int  __init init_kerberos_module(void);
void cleanup_kerberos_module(void);
/* gss_sk_mech.c */
#ifdef HAVE_OPENSSL_SSK
int  __init init_sk_module(void);
void cleanup_sk_module(void);
#else
/* shared-key mech compiled out: init succeeds trivially, cleanup is a no-op */
static inline int  init_sk_module(void) { return 0; }
static inline void cleanup_sk_module(void) { return; }
#endif /* HAVE_OPENSSL_SSK */
/* debug */
static inline