dnl FIXME
AC_DEFUN([LC_CONFIG_RMTCLIENT],
[LB_LINUX_CONFIG_IM([CRYPTO_AES],[],[
- AC_MSG_ERROR([Lustre remote client require that CONFIG_CRYPTO_AES is enabled in your kernel.])
+ AC_MSG_WARN([Lustre remote client requires that CONFIG_CRYPTO_AES be enabled in your kernel.])
])
])
AC_DEFUN([LC_CONFIG_GSS_KEYRING],
[AC_MSG_CHECKING([whether to enable gss keyring backend])
AC_ARG_ENABLE([gss_keyring],
- [AC_HELP_STRING([--disable-gss-keyring],
+ [AC_HELP_STRING([--disable-gss-keyring],
[disable gss keyring backend])],
- [],[enable_gss_keyring='yes'])
+ [],[enable_gss_keyring='yes'])
AC_MSG_RESULT([$enable_gss_keyring])
if test x$enable_gss_keyring != xno; then
- LB_LINUX_CONFIG_IM([KEYS],[],
+ LB_LINUX_CONFIG_IM([KEYS],[],
[AC_MSG_ERROR([GSS keyring backend requires that CONFIG_KEYS be enabled in your kernel.])])
- AC_CHECK_LIB([keyutils], [keyctl_search], [],
+ AC_CHECK_LIB([keyutils], [keyctl_search], [],
[AC_MSG_ERROR([libkeyutils not found; it is required by the gss keyring backend])],)
- AC_DEFINE([HAVE_GSS_KEYRING], [1],
+ AC_DEFINE([HAVE_GSS_KEYRING], [1],
[Define this if you enable gss keyring backend])
fi
])
AC_MSG_RESULT([$enable_gss])
if test x$enable_gss == xyes; then
- LC_CONFIG_GSS_KEYRING
+ LC_CONFIG_GSS_KEYRING
LC_CONFIG_SUNRPC
+ AC_DEFINE([HAVE_GSS], [1], [Define this if you enable gss])
+
LB_LINUX_CONFIG_IM([CRYPTO_MD5],[],
[AC_MSG_WARN([kernel MD5 support is recommended when using GSS.])])
- LB_LINUX_CONFIG_IM([CRYPTO_SHA1],[],
+ LB_LINUX_CONFIG_IM([CRYPTO_SHA1],[],
[AC_MSG_WARN([kernel SHA1 support is recommended when using GSS.])])
- LB_LINUX_CONFIG_IM([CRYPTO_SHA256],[],
+ LB_LINUX_CONFIG_IM([CRYPTO_SHA256],[],
[AC_MSG_WARN([kernel SHA256 support is recommended when using GSS.])])
- LB_LINUX_CONFIG_IM([CRYPTO_SHA512],[],
+ LB_LINUX_CONFIG_IM([CRYPTO_SHA512],[],
[AC_MSG_WARN([kernel SHA512 support is recommended when using GSS.])])
- LB_LINUX_CONFIG_IM([CRYPTO_WP512],[],
- [AC_MSG_WARN([kernel WP512 support is recommended by using GSS.])])
- LB_LINUX_CONFIG_IM([CRYPTO_ARC4],[],
- [AC_MSG_WARN([kernel ARC4 support is recommended by using GSS.])])
- LB_LINUX_CONFIG_IM([CRYPTO_DES],[],
- [AC_MSG_WARN([kernel DES support is recommended by using GSS.])])
- LB_LINUX_CONFIG_IM([CRYPTO_TWOFISH],[],
- [AC_MSG_WARN([kernel TWOFISH support is recommended by using GSS.])])
- LB_LINUX_CONFIG_IM([CRYPTO_CAST6],[],
- [AC_MSG_WARN([kernel CAST6 support is recommended by using GSS.])])
-
- AC_CHECK_LIB([gssapi], [gss_init_sec_context],
+
+ AC_CHECK_LIB([gssapi], [gss_init_sec_context],
[GSSAPI_LIBS="$GSSAPI_LDFLAGS -lgssapi"],
[AC_CHECK_LIB([gssglue], [gss_init_sec_context],
[GSSAPI_LIBS="$GSSAPI_LDFLAGS -lgssglue"],
[AC_MSG_ERROR([libgssapi or libgssglue not found; one of them is required by GSS.])])],)
- AC_SUBST(GSSAPI_LIBS)
+ AC_SUBST(GSSAPI_LIBS)
- AC_KERBEROS_V5
+ AC_KERBEROS_V5
fi
])
lnet_handle_md_t bd_md_h; /* associated MD */
lnet_nid_t bd_sender; /* stash event::sender */
- cfs_page_t **bd_enc_pages;
#if defined(__KERNEL__)
+ /*
+ * encrypted iov; holds either 0 or bd_iov_count entries.
+ */
+ lnet_kiov_t *bd_enc_iov;
+
lnet_kiov_t bd_iov[0];
#else
lnet_md_iovec_t bd_iov[0];
SPTLRPC_SVC_MAX,
};
+enum sptlrpc_bulk_type {
+ SPTLRPC_BULK_DEFAULT = 0, /* follow rpc flavor */
+ SPTLRPC_BULK_HASH = 1, /* hash integrity */
+ SPTLRPC_BULK_MAX,
+};
+
+enum sptlrpc_bulk_service {
+ SPTLRPC_BULK_SVC_NULL = 0,
+ SPTLRPC_BULK_SVC_AUTH = 1,
+ SPTLRPC_BULK_SVC_INTG = 2,
+ SPTLRPC_BULK_SVC_PRIV = 3,
+ SPTLRPC_BULK_SVC_MAX,
+};
+
/*
- * rpc flavor compose/extract, represented as 16 bits
+ * rpc flavor compose/extract, represented as 32 bits. currently the
+ * high 12 bits are unused and must be set to 0.
*
- * 4b (reserved) | 4b (svc) | 4b (mech) | 4b (policy)
+ * 4b (bulk svc) | 4b (bulk type) | 4b (svc) | 4b (mech) | 4b (policy)
*/
-#define RPC_FLVR_POLICY_OFFSET (0)
-#define RPC_FLVR_MECH_OFFSET (4)
-#define RPC_FLVR_SVC_OFFSET (8)
-
-#define MAKE_RPC_FLVR(policy, mech, svc) \
- (((__u16)(policy) << RPC_FLVR_POLICY_OFFSET) | \
- ((__u16)(mech) << RPC_FLVR_MECH_OFFSET) | \
- ((__u16)(svc) << RPC_FLVR_SVC_OFFSET))
+#define FLVR_POLICY_OFFSET (0)
+#define FLVR_MECH_OFFSET (4)
+#define FLVR_SVC_OFFSET (8)
+#define FLVR_BULK_TYPE_OFFSET (12)
+#define FLVR_BULK_SVC_OFFSET (16)
+
+#define MAKE_FLVR(policy, mech, svc, btype, bsvc) \
+ (((__u32)(policy) << FLVR_POLICY_OFFSET) | \
+ ((__u32)(mech) << FLVR_MECH_OFFSET) | \
+ ((__u32)(svc) << FLVR_SVC_OFFSET) | \
+ ((__u32)(btype) << FLVR_BULK_TYPE_OFFSET) | \
+ ((__u32)(bsvc) << FLVR_BULK_SVC_OFFSET))
-#define MAKE_RPC_SUBFLVR(mech, svc) \
- ((__u16)(mech) | \
- ((__u16)(svc) << (RPC_FLVR_SVC_OFFSET - RPC_FLVR_MECH_OFFSET)))
-
-#define RPC_FLVR_SUB(flavor) \
- ((((__u16)(flavor)) >> RPC_FLVR_MECH_OFFSET) & 0xFF)
-
-#define RPC_FLVR_POLICY(flavor) \
- ((((__u16)(flavor)) >> RPC_FLVR_POLICY_OFFSET) & 0xF)
-#define RPC_FLVR_MECH(flavor) \
- ((((__u16)(flavor)) >> RPC_FLVR_MECH_OFFSET) & 0xF)
-#define RPC_FLVR_SVC(flavor) \
- ((((__u16)(flavor)) >> RPC_FLVR_SVC_OFFSET) & 0xF)
+/*
+ * extraction
+ */
+#define SPTLRPC_FLVR_POLICY(flavor) \
+ ((((__u32)(flavor)) >> FLVR_POLICY_OFFSET) & 0xF)
+#define SPTLRPC_FLVR_MECH(flavor) \
+ ((((__u32)(flavor)) >> FLVR_MECH_OFFSET) & 0xF)
+#define SPTLRPC_FLVR_SVC(flavor) \
+ ((((__u32)(flavor)) >> FLVR_SVC_OFFSET) & 0xF)
+#define SPTLRPC_FLVR_BULK_TYPE(flavor) \
+ ((((__u32)(flavor)) >> FLVR_BULK_TYPE_OFFSET) & 0xF)
+#define SPTLRPC_FLVR_BULK_SVC(flavor) \
+ ((((__u32)(flavor)) >> FLVR_BULK_SVC_OFFSET) & 0xF)
+
+#define SPTLRPC_FLVR_BASE(flavor) \
+ ((((__u32)(flavor)) >> FLVR_POLICY_OFFSET) & 0xFFF)
+#define SPTLRPC_FLVR_BASE_SUB(flavor) \
+ ((((__u32)(flavor)) >> FLVR_MECH_OFFSET) & 0xFF)
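
As an illustration of the layout above (a sketch only; the policy/mech/svc names are used as elsewhere in this patch, their numeric values are not shown here), a flavor round-trips through compose and extract as follows:

    __u32 flvr = MAKE_FLVR(SPTLRPC_POLICY_GSS,      /* bits  0- 3 */
                           SPTLRPC_MECH_GSS_KRB5,   /* bits  4- 7 */
                           SPTLRPC_SVC_PRIV,        /* bits  8-11 */
                           SPTLRPC_BULK_DEFAULT,    /* bits 12-15 */
                           SPTLRPC_BULK_SVC_PRIV);  /* bits 16-19 */

    LASSERT(SPTLRPC_FLVR_POLICY(flvr)    == SPTLRPC_POLICY_GSS);
    LASSERT(SPTLRPC_FLVR_MECH(flvr)      == SPTLRPC_MECH_GSS_KRB5);
    LASSERT(SPTLRPC_FLVR_SVC(flvr)       == SPTLRPC_SVC_PRIV);
    LASSERT(SPTLRPC_FLVR_BULK_TYPE(flvr) == SPTLRPC_BULK_DEFAULT);
    LASSERT(SPTLRPC_FLVR_BULK_SVC(flvr)  == SPTLRPC_BULK_SVC_PRIV);

This is precisely the composition used by SPTLRPC_FLVR_KRB5P below.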
/*
* gss subflavors
*/
+#define MAKE_BASE_SUBFLVR(mech, svc) \
+ ((__u32)(mech) | \
+ ((__u32)(svc) << (FLVR_SVC_OFFSET - FLVR_MECH_OFFSET)))
+
#define SPTLRPC_SUBFLVR_KRB5N \
- MAKE_RPC_SUBFLVR(SPTLRPC_MECH_GSS_KRB5, SPTLRPC_SVC_NULL)
+ MAKE_BASE_SUBFLVR(SPTLRPC_MECH_GSS_KRB5, SPTLRPC_SVC_NULL)
#define SPTLRPC_SUBFLVR_KRB5A \
- MAKE_RPC_SUBFLVR(SPTLRPC_MECH_GSS_KRB5, SPTLRPC_SVC_AUTH)
+ MAKE_BASE_SUBFLVR(SPTLRPC_MECH_GSS_KRB5, SPTLRPC_SVC_AUTH)
#define SPTLRPC_SUBFLVR_KRB5I \
- MAKE_RPC_SUBFLVR(SPTLRPC_MECH_GSS_KRB5, SPTLRPC_SVC_INTG)
+ MAKE_BASE_SUBFLVR(SPTLRPC_MECH_GSS_KRB5, SPTLRPC_SVC_INTG)
#define SPTLRPC_SUBFLVR_KRB5P \
- MAKE_RPC_SUBFLVR(SPTLRPC_MECH_GSS_KRB5, SPTLRPC_SVC_PRIV)
+ MAKE_BASE_SUBFLVR(SPTLRPC_MECH_GSS_KRB5, SPTLRPC_SVC_PRIV)
/*
* "end user" flavors
*/
#define SPTLRPC_FLVR_NULL \
- MAKE_RPC_FLVR(SPTLRPC_POLICY_NULL, \
- SPTLRPC_MECH_NULL, \
- SPTLRPC_SVC_NULL)
+ MAKE_FLVR(SPTLRPC_POLICY_NULL, \
+ SPTLRPC_MECH_NULL, \
+ SPTLRPC_SVC_NULL, \
+ SPTLRPC_BULK_DEFAULT, \
+ SPTLRPC_BULK_SVC_NULL)
#define SPTLRPC_FLVR_PLAIN \
- MAKE_RPC_FLVR(SPTLRPC_POLICY_PLAIN, \
- SPTLRPC_MECH_PLAIN, \
- SPTLRPC_SVC_NULL)
+ MAKE_FLVR(SPTLRPC_POLICY_PLAIN, \
+ SPTLRPC_MECH_PLAIN, \
+ SPTLRPC_SVC_NULL, \
+ SPTLRPC_BULK_HASH, \
+ SPTLRPC_BULK_SVC_INTG)
#define SPTLRPC_FLVR_KRB5N \
- MAKE_RPC_FLVR(SPTLRPC_POLICY_GSS, \
- SPTLRPC_MECH_GSS_KRB5, \
- SPTLRPC_SVC_NULL)
+ MAKE_FLVR(SPTLRPC_POLICY_GSS, \
+ SPTLRPC_MECH_GSS_KRB5, \
+ SPTLRPC_SVC_NULL, \
+ SPTLRPC_BULK_DEFAULT, \
+ SPTLRPC_BULK_SVC_NULL)
#define SPTLRPC_FLVR_KRB5A \
- MAKE_RPC_FLVR(SPTLRPC_POLICY_GSS, \
- SPTLRPC_MECH_GSS_KRB5, \
- SPTLRPC_SVC_AUTH)
+ MAKE_FLVR(SPTLRPC_POLICY_GSS, \
+ SPTLRPC_MECH_GSS_KRB5, \
+ SPTLRPC_SVC_AUTH, \
+ SPTLRPC_BULK_DEFAULT, \
+ SPTLRPC_BULK_SVC_NULL)
#define SPTLRPC_FLVR_KRB5I \
- MAKE_RPC_FLVR(SPTLRPC_POLICY_GSS, \
- SPTLRPC_MECH_GSS_KRB5, \
- SPTLRPC_SVC_INTG)
+ MAKE_FLVR(SPTLRPC_POLICY_GSS, \
+ SPTLRPC_MECH_GSS_KRB5, \
+ SPTLRPC_SVC_INTG, \
+ SPTLRPC_BULK_DEFAULT, \
+ SPTLRPC_BULK_SVC_INTG)
#define SPTLRPC_FLVR_KRB5P \
- MAKE_RPC_FLVR(SPTLRPC_POLICY_GSS, \
- SPTLRPC_MECH_GSS_KRB5, \
- SPTLRPC_SVC_PRIV)
-
-#define SPTLRPC_FLVR_ANY ((__u16) 0xf000)
-#define SPTLRPC_FLVR_INVALID ((__u16) 0xffff)
+ MAKE_FLVR(SPTLRPC_POLICY_GSS, \
+ SPTLRPC_MECH_GSS_KRB5, \
+ SPTLRPC_SVC_PRIV, \
+ SPTLRPC_BULK_DEFAULT, \
+ SPTLRPC_BULK_SVC_PRIV)
#define SPTLRPC_FLVR_DEFAULT SPTLRPC_FLVR_NULL
+#define SPTLRPC_FLVR_INVALID ((__u32) 0xFFFFFFFF)
+#define SPTLRPC_FLVR_ANY ((__u32) 0xFFF00000)
+
/*
- * 32 bits wire flavor (msg->lm_secflvr), lower 12 bits is the rpc flavor,
- * higher 20 bits is not defined right now.
+ * extract the useful part from wire flavor
*/
-#define WIRE_FLVR_RPC(wflvr) (((__u16) (wflvr)) & 0x0FFF)
+#define WIRE_FLVR(wflvr) (((__u32) (wflvr)) & 0x000FFFFF)
-static inline void rpc_flvr_set_svc(__u16 *flvr, __u16 svc)
+static inline void flvr_set_svc(__u32 *flvr, __u32 svc)
{
LASSERT(svc < SPTLRPC_SVC_MAX);
- *flvr = MAKE_RPC_FLVR(RPC_FLVR_POLICY(*flvr),
- RPC_FLVR_MECH(*flvr),
- svc);
+ *flvr = MAKE_FLVR(SPTLRPC_FLVR_POLICY(*flvr),
+ SPTLRPC_FLVR_MECH(*flvr),
+ svc,
+ SPTLRPC_FLVR_BULK_TYPE(*flvr),
+ SPTLRPC_FLVR_BULK_SVC(*flvr));
}
+static inline void flvr_set_bulk_svc(__u32 *flvr, __u32 svc)
+{
+ LASSERT(svc < SPTLRPC_BULK_SVC_MAX);
+ *flvr = MAKE_FLVR(SPTLRPC_FLVR_POLICY(*flvr),
+ SPTLRPC_FLVR_MECH(*flvr),
+ SPTLRPC_FLVR_SVC(*flvr),
+ SPTLRPC_FLVR_BULK_TYPE(*flvr),
+ svc);
+}
+
+struct bulk_spec_hash {
+ __u8 hash_alg;
+};
struct sptlrpc_flavor {
- __u16 sf_rpc; /* rpc flavor */
- __u8 sf_bulk_ciph; /* bulk cipher alg */
- __u8 sf_bulk_hash; /* bulk hash alg */
+ __u32 sf_rpc; /* wire flavor - should be renamed to sf_wire */
__u32 sf_flags; /* general flags */
+ /*
+ * rpc flavor specification
+ */
+ union {
+ /* nothing for now */
+ } u_rpc;
+ /*
+ * bulk flavor specification
+ */
+ union {
+ struct bulk_spec_hash hash;
+ } u_bulk;
};
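
For illustration (a sketch, assuming the BULK_HASH_ALG_* constants defined further down in this patch), a client could describe plain RPC with hashed bulk integrity, then drop bulk protection again with the setter defined above:

    struct sptlrpc_flavor sf;

    memset(&sf, 0, sizeof(sf));
    sf.sf_rpc = SPTLRPC_FLVR_PLAIN;  /* carries SPTLRPC_BULK_HASH + BULK_SVC_INTG */
    sf.u_bulk.hash.hash_alg = BULK_HASH_ALG_SHA256;

    /* keep the rpc part intact, switch off bulk protection */
    flvr_set_bulk_svc(&sf.sf_rpc, SPTLRPC_BULK_SVC_NULL);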
enum lustre_sec_part {
};
int sptlrpc_parse_flavor(const char *str, struct sptlrpc_flavor *flvr);
+int sptlrpc_flavor_has_bulk(struct sptlrpc_flavor *flvr);
static inline void sptlrpc_rule_set_init(struct sptlrpc_rule_set *set)
{
}
void sptlrpc_rule_set_free(struct sptlrpc_rule_set *set);
-int sptlrpc_rule_set_expand(struct sptlrpc_rule_set *set, int expand);
+int sptlrpc_rule_set_expand(struct sptlrpc_rule_set *set);
int sptlrpc_rule_set_merge(struct sptlrpc_rule_set *set,
- struct sptlrpc_rule *rule,
- int expand);
+ struct sptlrpc_rule *rule);
int sptlrpc_rule_set_choose(struct sptlrpc_rule_set *rset,
enum lustre_sec_part from,
enum lustre_sec_part to,
int msgsize);
void (*free_rs) (struct ptlrpc_reply_state *rs);
void (*free_ctx) (struct ptlrpc_svc_ctx *ctx);
- /* reverse credential */
+ /* reverse context */
int (*install_rctx)(struct obd_import *imp,
struct ptlrpc_svc_ctx *ctx);
/* bulk transform */
+ int (*prep_bulk) (struct ptlrpc_request *req,
+ struct ptlrpc_bulk_desc *desc);
int (*unwrap_bulk) (struct ptlrpc_request *req,
struct ptlrpc_bulk_desc *desc);
int (*wrap_bulk) (struct ptlrpc_request *req,
BULK_HASH_ALG_SHA256,
BULK_HASH_ALG_SHA384,
BULK_HASH_ALG_SHA512,
- BULK_HASH_ALG_WP256,
- BULK_HASH_ALG_WP384,
- BULK_HASH_ALG_WP512,
BULK_HASH_ALG_MAX
};
-enum sptlrpc_bulk_cipher_alg {
- BULK_CIPH_ALG_NULL = 0,
- BULK_CIPH_ALG_ARC4,
- BULK_CIPH_ALG_AES128,
- BULK_CIPH_ALG_AES192,
- BULK_CIPH_ALG_AES256,
- BULK_CIPH_ALG_CAST128,
- BULK_CIPH_ALG_CAST256,
- BULK_CIPH_ALG_TWOFISH128,
- BULK_CIPH_ALG_TWOFISH256,
- BULK_CIPH_ALG_MAX
-};
-
struct sptlrpc_hash_type {
char *sht_name;
char *sht_tfm_name;
unsigned int sht_size;
};
-struct sptlrpc_ciph_type {
- char *sct_name;
- char *sct_tfm_name;
- __u32 sct_tfm_flags;
- unsigned int sct_ivsize;
- unsigned int sct_keysize;
-};
-
const struct sptlrpc_hash_type *sptlrpc_get_hash_type(__u8 hash_alg);
const char * sptlrpc_get_hash_name(__u8 hash_alg);
-const struct sptlrpc_ciph_type *sptlrpc_get_ciph_type(__u8 ciph_alg);
-const char *sptlrpc_get_ciph_name(__u8 ciph_alg);
+__u8 sptlrpc_get_hash_alg(const char *algname);
-#define CIPHER_MAX_BLKSIZE (16)
-#define CIPHER_MAX_KEYSIZE (64)
+enum {
+ BSD_FL_ERR = 1, /* peer reported an error on the bulk transfer */
+};
struct ptlrpc_bulk_sec_desc {
- __u8 bsd_version;
- __u8 bsd_flags;
- __u8 bsd_pad[4];
- __u8 bsd_hash_alg; /* hash algorithm */
- __u8 bsd_ciph_alg; /* cipher algorithm */
- __u8 bsd_key[CIPHER_MAX_KEYSIZE]; /* encrypt key seed */
- __u8 bsd_csum[0];
+ __u8 bsd_version; /* 0 */
+ __u8 bsd_type; /* SPTLRPC_BULK_XXX */
+ __u8 bsd_svc; /* SPTLRPC_BULK_SVC_XXX */
+ __u8 bsd_flags; /* flags */
+ __u32 bsd_nob; /* nob of bulk data */
+ __u8 bsd_data[0]; /* policy-specific token */
};
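
The descriptor travels inside a lustre_msg buffer: the fixed header is followed directly by a policy-specific token that fills the rest of the buffer. A sketch of the addressing used by the wrap/unwrap paths later in this patch (msg and offset stand in for the caller's values):

    struct ptlrpc_bulk_sec_desc *bsd;
    rawobj_t token;

    bsd        = lustre_msg_buf(msg, offset, sizeof(*bsd));
    token.data = bsd->bsd_data;
    token.len  = lustre_msg_buflen(msg, offset) - sizeof(*bsd);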
int sptlrpc_register_policy(struct ptlrpc_sec_policy *policy);
int sptlrpc_unregister_policy(struct ptlrpc_sec_policy *policy);
-__u16 sptlrpc_name2rpcflavor(const char *name);
-const char *sptlrpc_rpcflavor2name(__u16 flavor);
-int sptlrpc_flavor2name(struct sptlrpc_flavor *sf, char *buf, int bufsize);
+__u32 sptlrpc_name2flavor_base(const char *name);
+const char *sptlrpc_flavor2name_base(__u32 flvr);
+char *sptlrpc_flavor2name_bulk(struct sptlrpc_flavor *sf,
+ char *buf, int bufsize);
+char *sptlrpc_flavor2name(struct sptlrpc_flavor *sf, char *buf, int bufsize);
+char *sptlrpc_secflags2str(__u32 flags, char *buf, int bufsize);
static inline
struct ptlrpc_sec_policy *sptlrpc_policy_get(struct ptlrpc_sec_policy *policy)
*/
int sptlrpc_import_sec_adapt(struct obd_import *imp,
struct ptlrpc_svc_ctx *ctx,
- __u16 rpc_flavor);
+ struct sptlrpc_flavor *flvr);
struct ptlrpc_sec *sptlrpc_import_sec_ref(struct obd_import *imp);
void sptlrpc_import_sec_put(struct obd_import *imp);
int sptlrpc_cli_wrap_bulk(struct ptlrpc_request *req,
struct ptlrpc_bulk_desc *desc);
int sptlrpc_cli_unwrap_bulk_read(struct ptlrpc_request *req,
- int nob, obd_count pg_count,
- struct brw_page **pga);
+ struct ptlrpc_bulk_desc *desc,
+ int nob);
int sptlrpc_cli_unwrap_bulk_write(struct ptlrpc_request *req,
struct ptlrpc_bulk_desc *desc);
+int sptlrpc_svc_prep_bulk(struct ptlrpc_request *req,
+ struct ptlrpc_bulk_desc *desc);
int sptlrpc_svc_wrap_bulk(struct ptlrpc_request *req,
struct ptlrpc_bulk_desc *desc);
int sptlrpc_svc_unwrap_bulk(struct ptlrpc_request *req,
struct ptlrpc_bulk_desc *desc);
+/* bulk helpers (internal use only by policies) */
+int sptlrpc_get_bulk_checksum(struct ptlrpc_bulk_desc *desc, __u8 alg,
+ void *buf, int buflen);
+
+int bulk_sec_desc_unpack(struct lustre_msg *msg, int offset);
+
/* user descriptor helpers */
static inline int sptlrpc_user_desc_size(int ngroups)
{
int sptlrpc_pack_user_desc(struct lustre_msg *msg, int offset);
int sptlrpc_unpack_user_desc(struct lustre_msg *msg, int offset);
-/* bulk helpers (internal use only by policies) */
-int bulk_sec_desc_size(__u8 hash_alg, int request, int read);
-int bulk_sec_desc_unpack(struct lustre_msg *msg, int offset);
-
-int bulk_csum_cli_request(struct ptlrpc_bulk_desc *desc, int read,
- __u32 alg, struct lustre_msg *rmsg, int roff);
-int bulk_csum_cli_reply(struct ptlrpc_bulk_desc *desc, int read,
- struct lustre_msg *rmsg, int roff,
- struct lustre_msg *vmsg, int voff);
-int bulk_csum_svc(struct ptlrpc_bulk_desc *desc, int read,
- struct ptlrpc_bulk_sec_desc *bsdv, int vsize,
- struct ptlrpc_bulk_sec_desc *bsdr, int rsize);
#define CFS_CAP_CHOWN_MASK (1 << CFS_CAP_CHOWN)
#define CFS_CAP_SYS_RESOURCE_MASK (1 << CFS_CAP_SYS_RESOURCE)
to_be_freed = obd->obd_namespace;
}
+ /*
+ * there is no need to hold the sem while disconnecting an import;
+ * doing so may actually cause a deadlock in gss.
+ */
+ up_write(&cli->cl_sem);
rc = ptlrpc_disconnect_import(imp, 0);
+ down_write(&cli->cl_sem);
ptlrpc_invalidate_import(imp);
/* set obd_namespace to NULL only after invalidate, because we can have
else
revimp->imp_msghdr_flags &= ~MSGHDR_AT_SUPPORT;
- rc = sptlrpc_import_sec_adapt(revimp, req->rq_svc_ctx,
- req->rq_flvr.sf_rpc);
+ rc = sptlrpc_import_sec_adapt(revimp, req->rq_svc_ctx, &req->rq_flvr);
if (rc) {
CERROR("Failed to get sec for reverse import: %d\n", rc);
export->exp_imp_reverse = NULL;
ptlrpc_request_set_replen(req);
rc = ptlrpc_queue_wait(req);
- GOTO(out, rc);
+ if (rc)
+ GOTO(out, rc);
+
+ rc = sptlrpc_cli_unwrap_bulk_write(req, req->rq_bulk);
out:
ptlrpc_req_finished(req);
return rc;
RETURN(rc);
}
+ rc = sptlrpc_cli_unwrap_bulk_read(req, req->rq_bulk,
+ req->rq_bulk->bd_nob_transferred);
+ if (rc < 0) {
+ ptlrpc_req_finished(req);
+ RETURN(rc);
+ }
+
if (req->rq_bulk->bd_nob_transferred != CFS_PAGE_SIZE) {
CERROR("Unexpected # bytes transferred: %d (%ld expected)\n",
req->rq_bulk->bd_nob_transferred, CFS_PAGE_SIZE);
}
LASSERT(desc->bd_nob == rdpg->rp_count);
+ rc = sptlrpc_svc_wrap_bulk(req, desc);
+ if (rc)
+ GOTO(free_desc, rc);
+
rc = ptlrpc_start_bulk_transfer(desc);
if (rc)
GOTO(free_desc, rc);
ptlrpc_prep_bulk_page(desc, page, (int)reqbody->size,
(int)reqbody->nlink);
+ rc = sptlrpc_svc_prep_bulk(req, desc);
+ if (rc != 0)
+ GOTO(cleanup_page, rc);
/*
* Check if client was evicted while we were doing i/o before touching
* network.
if (likely(rc == 0)) {
rc = mdt_recovery(info);
if (likely(rc == +1)) {
+ switch (lustre_msg_get_opc(msg)) {
+ case MDS_READPAGE:
+ req->rq_bulk_read = 1;
+ break;
+ case MDS_WRITEPAGE:
+ req->rq_bulk_write = 1;
+ break;
+ }
+
h = mdt_handler_find(lustre_msg_get_opc(msg),
supported);
if (likely(h != NULL)) {
rset = &fsdb->fsdb_srpc_gen;
}
- rc = sptlrpc_rule_set_merge(rset, &rule, 1);
+ rc = sptlrpc_rule_set_merge(rset, &rule);
RETURN(rc);
}
int rc, copy_size;
ENTRY;
+#ifndef HAVE_GSS
+ RETURN(-EINVAL);
+#endif
/* keep a copy of the original param, which could be destroyed
* during parsing */
copy_size = strlen(param) + 1;
/* size[REQ_REC_OFF] still sizeof (*body) */
if (opc == OST_WRITE) {
if (unlikely(cli->cl_checksum) &&
- req->rq_flvr.sf_bulk_hash == BULK_HASH_ALG_NULL) {
+ !sptlrpc_flavor_has_bulk(&req->rq_flvr)) {
/* store cl_cksum_type in a local variable since
* it can be changed via lprocfs */
cksum_type_t cksum_type = cli->cl_cksum_type;
sizeof(__u32) * niocount);
} else {
if (unlikely(cli->cl_checksum) &&
- req->rq_flvr.sf_bulk_hash == BULK_HASH_ALG_NULL) {
+ !sptlrpc_flavor_has_bulk(&req->rq_flvr)) {
if ((body->oa.o_valid & OBD_MD_FLFLAGS) == 0)
body->oa.o_flags = 0;
body->oa.o_flags |= cksum_type_pack(cli->cl_cksum_type);
}
LASSERT(req->rq_bulk->bd_nob == aa->aa_requested_nob);
+ if (sptlrpc_cli_unwrap_bulk_write(req, req->rq_bulk))
+ RETURN(-EAGAIN);
+
if ((aa->aa_oa->o_valid & OBD_MD_FLCKSUM) && client_cksum &&
check_write_checksum(&body->oa, peer, client_cksum,
body->oa.o_cksum, aa->aa_requested_nob,
cksum_type_unpack(aa->aa_oa->o_flags)))
RETURN(-EAGAIN);
- if (sptlrpc_cli_unwrap_bulk_write(req, req->rq_bulk))
- RETURN(-EAGAIN);
-
rc = check_write_rcs(req, aa->aa_requested_nob,aa->aa_nio_count,
aa->aa_page_count, aa->aa_ppga);
GOTO(out, rc);
}
/* The rest of this function executes only for OST_READs */
+
+ rc = sptlrpc_cli_unwrap_bulk_read(req, req->rq_bulk, rc);
+ if (rc < 0)
+ GOTO(out, rc);
+
if (rc > aa->aa_requested_nob) {
CERROR("Unexpected rc %d (%d requested)\n", rc,
aa->aa_requested_nob);
if (rc < aa->aa_requested_nob)
handle_short_read(rc, aa->aa_page_count, aa->aa_ppga);
- if (sptlrpc_cli_unwrap_bulk_read(req, rc, aa->aa_page_count,
- aa->aa_ppga))
- GOTO(out, rc = -EAGAIN);
-
if (body->oa.o_valid & OBD_MD_FLCKSUM) {
static int cksum_counter;
__u32 server_cksum = body->oa.o_cksum;
if (exp->exp_failed)
rc = -ENOTCONN;
else {
- sptlrpc_svc_wrap_bulk(req, desc);
-
- rc = ptlrpc_start_bulk_transfer(desc);
+ rc = sptlrpc_svc_wrap_bulk(req, desc);
+ if (rc == 0)
+ rc = ptlrpc_start_bulk_transfer(desc);
}
if (rc == 0) {
local_nb[i].offset & ~CFS_PAGE_MASK,
local_nb[i].len);
+ rc = sptlrpc_svc_prep_bulk(req, desc);
+ if (rc != 0)
+ GOTO(out_lock, rc);
+
/* Check if client was evicted while we were doing i/o before touching
network */
if (desc->bd_export->exp_failed)
DEBUG_REQ(D_ERROR, req, "Eviction on bulk GET");
rc = -ENOTCONN;
ptlrpc_abort_bulk(desc);
- } else if (!desc->bd_success ||
- desc->bd_nob_transferred != desc->bd_nob) {
- DEBUG_REQ(D_ERROR, req, "%s bulk GET %d(%d)",
- desc->bd_success ?
- "truncated" : "network error on",
- desc->bd_nob_transferred, desc->bd_nob);
+ } else if (!desc->bd_success) {
+ DEBUG_REQ(D_ERROR, req, "network error on bulk GET");
/* XXX should this be a different errno? */
rc = -ETIMEDOUT;
+ } else {
+ rc = sptlrpc_svc_unwrap_bulk(req, desc);
}
} else {
DEBUG_REQ(D_ERROR, req, "ptlrpc_bulk_get failed: rc %d", rc);
}
no_reply = rc != 0;
- if (rc == 0)
- sptlrpc_svc_unwrap_bulk(req, desc);
-
repbody = lustre_msg_buf(req->rq_repmsg, REPLY_REC_OFF,
sizeof(*repbody));
memcpy(&repbody->oa, &body->oa, sizeof(repbody->oa));
desc->bd_sender = ev->sender;
}
- sptlrpc_enc_pool_put_pages(desc);
+ /* release the encrypted pages for write */
+ if (desc->bd_req->rq_bulk_write)
+ sptlrpc_enc_pool_put_pages(desc);
/* NB don't unlock till after wakeup; desc can disappear under us
* otherwise */
struct gss_ctx *ctx,
int msgcnt,
rawobj_t *msgs,
+ int iovcnt,
+ lnet_kiov_t *iovs,
rawobj_t *mic_token);
__u32 lgss_verify_mic(
struct gss_ctx *ctx,
int msgcnt,
rawobj_t *msgs,
+ int iovcnt,
+ lnet_kiov_t *iovs,
rawobj_t *mic_token);
__u32 lgss_wrap(
struct gss_ctx *ctx,
rawobj_t *gsshdr,
rawobj_t *token,
rawobj_t *out_msg);
-__u32 lgss_plain_encrypt(
- struct gss_ctx *ctx,
- int decrypt,
- int length,
- void *in_buf,
- void *out_buf);
+__u32 lgss_prep_bulk(
+ struct gss_ctx *gctx,
+ struct ptlrpc_bulk_desc *desc);
+__u32 lgss_wrap_bulk(
+ struct gss_ctx *gctx,
+ struct ptlrpc_bulk_desc *desc,
+ rawobj_t *token,
+ int adj_nob);
+__u32 lgss_unwrap_bulk(
+ struct gss_ctx *gctx,
+ struct ptlrpc_bulk_desc *desc,
+ rawobj_t *token);
__u32 lgss_delete_sec_context(
struct gss_ctx **ctx);
int lgss_display(
struct gss_ctx *ctx,
int msgcnt,
rawobj_t *msgs,
+ int iovcnt,
+ lnet_kiov_t *iovs,
rawobj_t *mic_token);
__u32 (*gss_verify_mic)(
struct gss_ctx *ctx,
int msgcnt,
rawobj_t *msgs,
+ int iovcnt,
+ lnet_kiov_t *iovs,
rawobj_t *mic_token);
__u32 (*gss_wrap)(
struct gss_ctx *ctx,
rawobj_t *gsshdr,
rawobj_t *token,
rawobj_t *out_msg);
- __u32 (*gss_plain_encrypt)(
- struct gss_ctx *ctx,
- int decrypt,
- int length,
- void *in_buf,
- void *out_buf);
+ __u32 (*gss_prep_bulk)(
+ struct gss_ctx *gctx,
+ struct ptlrpc_bulk_desc *desc);
+ __u32 (*gss_wrap_bulk)(
+ struct gss_ctx *gctx,
+ struct ptlrpc_bulk_desc *desc,
+ rawobj_t *token,
+ int adj_nob);
+ __u32 (*gss_unwrap_bulk)(
+ struct gss_ctx *gctx,
+ struct ptlrpc_bulk_desc *desc,
+ rawobj_t *token);
void (*gss_delete_sec_context)(
void *ctx);
int (*gss_display)(
#include "gss_internal.h"
#include "gss_api.h"
-static __u8 zero_iv[CIPHER_MAX_BLKSIZE] = { 0, };
-
-static void buf_to_sl(struct scatterlist *sl,
- void *buf, unsigned int len)
-{
- sl->page = virt_to_page(buf);
- sl->offset = offset_in_page(buf);
- sl->length = len;
-}
-
-/*
- * CTS CBC encryption:
- * 1. X(n-1) = P(n-1)
- * 2. E(n-1) = Encrypt(K, X(n-1))
- * 3. C(n) = HEAD(E(n-1))
- * 4. P = P(n) | 0
- * 5. D(n) = E(n-1) XOR P
- * 6. C(n-1) = Encrypt(K, D(n))
- *
- * CTS encryption using standard CBC interface:
- * 1. pad the last partial block with 0.
- * 2. do CBC encryption.
- * 3. swap the last two ciphertext blocks.
- * 4. truncate to original plaintext size.
- */
-static int cbc_cts_encrypt(struct ll_crypto_cipher *tfm,
- struct scatterlist *sld,
- struct scatterlist *sls)
-{
- struct scatterlist slst, sldt;
- struct blkcipher_desc desc;
- void *data;
- __u8 sbuf[CIPHER_MAX_BLKSIZE];
- __u8 dbuf[CIPHER_MAX_BLKSIZE];
- unsigned int blksize, blks, tail;
- int rc;
-
- blksize = ll_crypto_blkcipher_blocksize(tfm);
- blks = sls->length / blksize;
- tail = sls->length % blksize;
- LASSERT(blks > 0 && tail > 0);
-
- /* pad tail block with 0, copy to sbuf */
- data = cfs_kmap(sls->page);
- memcpy(sbuf, data + sls->offset + blks * blksize, tail);
- memset(sbuf + tail, 0, blksize - tail);
- cfs_kunmap(sls->page);
-
- buf_to_sl(&slst, sbuf, blksize);
- buf_to_sl(&sldt, dbuf, blksize);
- desc.tfm = tfm;
- desc.flags = 0;
-
- /* encrypt head */
- rc = ll_crypto_blkcipher_encrypt(&desc, sld, sls, sls->length - tail);
- if (unlikely(rc)) {
- CERROR("encrypt head (%u) data: %d\n", sls->length - tail, rc);
- return rc;
- }
- /* encrypt tail */
- rc = ll_crypto_blkcipher_encrypt(&desc, &sldt, &slst, blksize);
- if (unlikely(rc)) {
- CERROR("encrypt tail (%u) data: %d\n", slst.length, rc);
- return rc;
- }
-
- /* swab C(n) and C(n-1), if n == 1, then C(n-1) is the IV */
- data = cfs_kmap(sld->page);
-
- memcpy(data + sld->offset + blks * blksize,
- data + sld->offset + (blks - 1) * blksize, tail);
- memcpy(data + sld->offset + (blks - 1) * blksize, dbuf, blksize);
- cfs_kunmap(sld->page);
-
- return 0;
-}
-
-/*
- * CTS CBC decryption:
- * 1. D(n) = Decrypt(K, C(n-1))
- * 2. C = C(n) | 0
- * 3. X(n) = D(n) XOR C
- * 4. P(n) = HEAD(X(n))
- * 5. E(n-1) = C(n) | TAIL(X(n))
- * 6. X(n-1) = Decrypt(K, E(n-1))
- * 7. P(n-1) = X(n-1) XOR C(n-2)
- *
- * CTS decryption using standard CBC interface:
- * 1. D(n) = Decrypt(K, C(n-1))
- * 2. C(n) = C(n) | TAIL(D(n))
- * 3. swap the last two ciphertext blocks.
- * 4. do CBC decryption.
- * 5. truncate to original ciphertext size.
- */
-static int cbc_cts_decrypt(struct ll_crypto_cipher *tfm,
- struct scatterlist *sld,
- struct scatterlist *sls)
-{
- struct blkcipher_desc desc;
- struct scatterlist slst, sldt;
- void *data;
- __u8 sbuf[CIPHER_MAX_BLKSIZE];
- __u8 dbuf[CIPHER_MAX_BLKSIZE];
- unsigned int blksize, blks, tail;
- int rc;
-
- blksize = ll_crypto_blkcipher_blocksize(tfm);
- blks = sls->length / blksize;
- tail = sls->length % blksize;
- LASSERT(blks > 0 && tail > 0);
-
- /* save current IV, and set IV to zero */
- ll_crypto_blkcipher_get_iv(tfm, sbuf, blksize);
- ll_crypto_blkcipher_set_iv(tfm, zero_iv, blksize);
-
- /* D(n) = Decrypt(K, C(n-1)) */
- slst = *sls;
- slst.offset += (blks - 1) * blksize;
- slst.length = blksize;
-
- buf_to_sl(&sldt, dbuf, blksize);
- desc.tfm = tfm;
- desc.flags = 0;
-
- rc = ll_crypto_blkcipher_decrypt(&desc, &sldt, &slst, blksize);
- if (unlikely(rc)) {
- CERROR("decrypt C(n-1) (%u): %d\n", slst.length, rc);
- return rc;
- }
-
- /* restore IV */
- ll_crypto_blkcipher_set_iv(tfm, sbuf, blksize);
-
- data = cfs_kmap(sls->page);
- /* C(n) = C(n) | TAIL(D(n)) */
- memcpy(dbuf, data + sls->offset + blks * blksize, tail);
- /* swab C(n) and C(n-1) */
- memcpy(sbuf, data + sls->offset + (blks - 1) * blksize, blksize);
- memcpy(data + sls->offset + (blks - 1) * blksize, dbuf, blksize);
- cfs_kunmap(sls->page);
-
- /* do cbc decrypt */
- buf_to_sl(&slst, sbuf, blksize);
- buf_to_sl(&sldt, dbuf, blksize);
-
- /* decrypt head */
- rc = ll_crypto_blkcipher_decrypt(&desc, sld, sls, sls->length - tail);
- if (unlikely(rc)) {
- CERROR("decrypt head (%u) data: %d\n", sls->length - tail, rc);
- return rc;
- }
- /* decrypt tail */
- rc = ll_crypto_blkcipher_decrypt(&desc, &sldt, &slst, blksize);
- if (unlikely(rc)) {
- CERROR("decrypt tail (%u) data: %d\n", slst.length, rc);
- return rc;
- }
-
- /* truncate to original ciphertext size */
- data = cfs_kmap(sld->page);
- memcpy(data + sld->offset + blks * blksize, dbuf, tail);
- cfs_kunmap(sld->page);
-
- return 0;
-}
-
-static inline int do_cts_tfm(struct ll_crypto_cipher *tfm,
- int encrypt,
- struct scatterlist *sld,
- struct scatterlist *sls)
-{
-#ifndef HAVE_ASYNC_BLOCK_CIPHER
- LASSERT(tfm->crt_cipher.cit_mode == CRYPTO_TFM_MODE_CBC);
-#endif
-
- if (encrypt)
- return cbc_cts_encrypt(tfm, sld, sls);
- else
- return cbc_cts_decrypt(tfm, sld, sls);
-}
-
-/*
- * normal encrypt/decrypt of data of even blocksize
- */
-static inline int do_cipher_tfm(struct ll_crypto_cipher *tfm,
- int encrypt,
- struct scatterlist *sld,
- struct scatterlist *sls)
-{
- struct blkcipher_desc desc;
- desc.tfm = tfm;
- desc.flags = 0;
- if (encrypt)
- return ll_crypto_blkcipher_encrypt(&desc, sld, sls, sls->length);
- else
- return ll_crypto_blkcipher_decrypt(&desc, sld, sls, sls->length);
-}
-
-static struct ll_crypto_cipher *get_stream_cipher(__u8 *key, unsigned int keylen)
-{
- const struct sptlrpc_ciph_type *ct;
- struct ll_crypto_cipher *tfm;
- int rc;
-
- /* using ARC4, the only stream cipher in linux for now */
- ct = sptlrpc_get_ciph_type(BULK_CIPH_ALG_ARC4);
- LASSERT(ct);
-
- tfm = ll_crypto_alloc_blkcipher(ct->sct_tfm_name, 0, 0);
- if (tfm == NULL) {
- CERROR("Failed to allocate stream TFM %s\n", ct->sct_name);
- return NULL;
- }
- LASSERT(ll_crypto_blkcipher_blocksize(tfm));
-
- if (keylen > ct->sct_keysize)
- keylen = ct->sct_keysize;
-
- LASSERT(keylen >= crypto_tfm_alg_min_keysize(tfm));
- LASSERT(keylen <= crypto_tfm_alg_max_keysize(tfm));
-
- rc = ll_crypto_blkcipher_setkey(tfm, key, keylen);
- if (rc) {
- CERROR("Failed to set key for TFM %s: %d\n", ct->sct_name, rc);
- ll_crypto_free_blkcipher(tfm);
- return NULL;
- }
-
- return tfm;
-}
-
-static int do_bulk_privacy(struct gss_ctx *gctx,
- struct ptlrpc_bulk_desc *desc,
- int encrypt, __u32 alg,
- struct ptlrpc_bulk_sec_desc *bsd)
-{
- const struct sptlrpc_ciph_type *ct = sptlrpc_get_ciph_type(alg);
- struct ll_crypto_cipher *tfm;
- struct ll_crypto_cipher *stfm = NULL; /* backup stream cipher */
- struct scatterlist sls, sld, *sldp;
- unsigned int blksize, keygen_size;
- int i, rc;
- __u8 key[CIPHER_MAX_KEYSIZE];
-
- LASSERT(ct);
-
- if (encrypt)
- bsd->bsd_ciph_alg = BULK_CIPH_ALG_NULL;
-
- if (alg == BULK_CIPH_ALG_NULL)
- return 0;
-
- if (desc->bd_iov_count <= 0) {
- if (encrypt)
- bsd->bsd_ciph_alg = alg;
- return 0;
- }
-
- tfm = ll_crypto_alloc_blkcipher(ct->sct_tfm_name, 0, 0 );
- if (tfm == NULL) {
- CERROR("Failed to allocate TFM %s\n", ct->sct_name);
- return -ENOMEM;
- }
- blksize = ll_crypto_blkcipher_blocksize(tfm);
-
- LASSERT(crypto_tfm_alg_max_keysize(tfm) >= ct->sct_keysize);
- LASSERT(crypto_tfm_alg_min_keysize(tfm) <= ct->sct_keysize);
- LASSERT(ct->sct_ivsize == 0 ||
- ll_crypto_blkcipher_ivsize(tfm) == ct->sct_ivsize);
- LASSERT(ct->sct_keysize <= CIPHER_MAX_KEYSIZE);
- LASSERT(blksize <= CIPHER_MAX_BLKSIZE);
-
- /* generate ramdom key seed and compute the secret key based on it.
- * note determined by algorithm which lgss_plain_encrypt use, it
- * might require the key size be its (blocksize * n). so here for
- * simplicity, we force it's be n * MAX_BLKSIZE by padding 0 */
- keygen_size = (ct->sct_keysize + CIPHER_MAX_BLKSIZE - 1) &
- ~(CIPHER_MAX_BLKSIZE - 1);
- if (encrypt) {
- get_random_bytes(bsd->bsd_key, ct->sct_keysize);
- if (ct->sct_keysize < keygen_size)
- memset(bsd->bsd_key + ct->sct_keysize, 0,
- keygen_size - ct->sct_keysize);
- }
-
- rc = lgss_plain_encrypt(gctx, 0, keygen_size, bsd->bsd_key, key);
- if (rc) {
- CERROR("failed to compute secret key: %d\n", rc);
- goto out;
- }
-
- rc = ll_crypto_blkcipher_setkey(tfm, key, ct->sct_keysize);
- if (rc) {
- CERROR("Failed to set key for TFM %s: %d\n", ct->sct_name, rc);
- goto out;
- }
-
- /* stream cipher doesn't need iv */
- if (blksize > 1)
- ll_crypto_blkcipher_set_iv(tfm, zero_iv, blksize);
-
- for (i = 0; i < desc->bd_iov_count; i++) {
- sls.page = desc->bd_iov[i].kiov_page;
- sls.offset = desc->bd_iov[i].kiov_offset;
- sls.length = desc->bd_iov[i].kiov_len;
-
- if (unlikely(sls.length == 0)) {
- CWARN("page %d with 0 length data?\n", i);
- continue;
- }
-
- if (unlikely(sls.offset % blksize)) {
- CERROR("page %d with odd offset %u, TFM %s\n",
- i, sls.offset, ct->sct_name);
- rc = -EINVAL;
- goto out;
- }
-
- if (desc->bd_enc_pages) {
- sld.page = desc->bd_enc_pages[i];
- sld.offset = desc->bd_iov[i].kiov_offset;
- sld.length = desc->bd_iov[i].kiov_len;
-
- sldp = &sld;
- } else {
- sldp = &sls;
- }
-
- if (likely(sls.length % blksize == 0)) {
- /* data length is n * blocksize, do the normal tfm */
- rc = do_cipher_tfm(tfm, encrypt, sldp, &sls);
- } else if (sls.length < blksize) {
- /* odd data length, and smaller than 1 block, CTS
- * doesn't work in this case because it requires
- * transfer a modified IV to peer. here we use a
- * "backup" stream cipher to do the tfm */
- if (stfm == NULL) {
- stfm = get_stream_cipher(key, ct->sct_keysize);
- if (tfm == NULL) {
- rc = -ENOMEM;
- goto out;
- }
- }
- rc = do_cipher_tfm(stfm, encrypt, sldp, &sls);
- } else {
- /* odd data length but > 1 block, do CTS tfm */
- rc = do_cts_tfm(tfm, encrypt, sldp, &sls);
- }
-
- if (unlikely(rc)) {
- CERROR("error %s page %d/%d: %d\n",
- encrypt ? "encrypt" : "decrypt",
- i + 1, desc->bd_iov_count, rc);
- goto out;
- }
-
- if (desc->bd_enc_pages)
- desc->bd_iov[i].kiov_page = desc->bd_enc_pages[i];
- }
-
- if (encrypt)
- bsd->bsd_ciph_alg = alg;
-
-out:
- if (stfm)
- ll_crypto_free_blkcipher(stfm);
-
- ll_crypto_free_blkcipher(tfm);
- return rc;
-}
-
int gss_cli_ctx_wrap_bulk(struct ptlrpc_cli_ctx *ctx,
struct ptlrpc_request *req,
struct ptlrpc_bulk_desc *desc)
{
struct gss_cli_ctx *gctx;
struct lustre_msg *msg;
- struct ptlrpc_bulk_sec_desc *bsdr;
- int offset, rc;
+ struct ptlrpc_bulk_sec_desc *bsd;
+ rawobj_t token;
+ __u32 maj;
+ int offset;
+ int rc;
ENTRY;
LASSERT(req->rq_pack_bulk);
LASSERT(req->rq_bulk_read || req->rq_bulk_write);
- switch (RPC_FLVR_SVC(req->rq_flvr.sf_rpc)) {
+ gctx = container_of(ctx, struct gss_cli_ctx, gc_base);
+ LASSERT(gctx->gc_mechctx);
+
+ switch (SPTLRPC_FLVR_SVC(req->rq_flvr.sf_rpc)) {
case SPTLRPC_SVC_NULL:
LASSERT(req->rq_reqbuf->lm_bufcount >= 3);
msg = req->rq_reqbuf;
LBUG();
}
- /* make checksum */
- rc = bulk_csum_cli_request(desc, req->rq_bulk_read,
- req->rq_flvr.sf_bulk_hash, msg, offset);
- if (rc) {
- CERROR("client bulk %s: failed to generate checksum: %d\n",
- req->rq_bulk_read ? "read" : "write", rc);
- RETURN(rc);
- }
+ bsd = lustre_msg_buf(msg, offset, sizeof(*bsd));
+ bsd->bsd_version = 0;
+ bsd->bsd_flags = 0;
+ bsd->bsd_type = SPTLRPC_BULK_DEFAULT;
+ bsd->bsd_svc = SPTLRPC_FLVR_BULK_SVC(req->rq_flvr.sf_rpc);
- if (req->rq_flvr.sf_bulk_ciph == BULK_CIPH_ALG_NULL)
+ if (bsd->bsd_svc == SPTLRPC_BULK_SVC_NULL)
RETURN(0);
- /* previous bulk_csum_cli_request() has verified bsdr is good */
- bsdr = lustre_msg_buf(msg, offset, 0);
+ LASSERT(bsd->bsd_svc == SPTLRPC_BULK_SVC_INTG ||
+ bsd->bsd_svc == SPTLRPC_BULK_SVC_PRIV);
if (req->rq_bulk_read) {
- bsdr->bsd_ciph_alg = req->rq_flvr.sf_bulk_ciph;
- RETURN(0);
- }
-
- /* it turn out to be bulk write */
- rc = sptlrpc_enc_pool_get_pages(desc);
- if (rc) {
- CERROR("bulk write: failed to allocate encryption pages\n");
- RETURN(rc);
- }
+ /*
+ * bulk read: prepare receiving pages only for privacy mode.
+ */
+ if (bsd->bsd_svc == SPTLRPC_BULK_SVC_PRIV)
+ return gss_cli_prep_bulk(req, desc);
+ } else {
+ /*
+ * bulk write: sign or encrypt bulk pages.
+ */
+ bsd->bsd_nob = desc->bd_nob;
+
+ if (bsd->bsd_svc == SPTLRPC_BULK_SVC_INTG) {
+ /* integrity mode */
+ token.data = bsd->bsd_data;
+ token.len = lustre_msg_buflen(msg, offset) -
+ sizeof(*bsd);
+
+ maj = lgss_get_mic(gctx->gc_mechctx, 0, NULL,
+ desc->bd_iov_count, desc->bd_iov,
+ &token);
+ if (maj != GSS_S_COMPLETE) {
+ CWARN("failed to sign bulk data: %x\n", maj);
+ RETURN(-EACCES);
+ }
+ } else {
+ /* privacy mode */
+ if (desc->bd_iov_count == 0)
+ RETURN(0);
+
+ rc = sptlrpc_enc_pool_get_pages(desc);
+ if (rc) {
+ CERROR("bulk write: failed to allocate "
+ "encryption pages: %d\n", rc);
+ RETURN(rc);
+ }
- gctx = container_of(ctx, struct gss_cli_ctx, gc_base);
- LASSERT(gctx->gc_mechctx);
+ token.data = bsd->bsd_data;
+ token.len = lustre_msg_buflen(msg, offset) -
+ sizeof(*bsd);
- rc = do_bulk_privacy(gctx->gc_mechctx, desc, 1,
- req->rq_flvr.sf_bulk_ciph, bsdr);
- if (rc)
- CERROR("bulk write: client failed to encrypt pages\n");
+ maj = lgss_wrap_bulk(gctx->gc_mechctx, desc, &token, 0);
+ if (maj != GSS_S_COMPLETE) {
+ CWARN("fail to encrypt bulk data: %x\n", maj);
+ RETURN(-EACCES);
+ }
+ }
+ }
- RETURN(rc);
+ RETURN(0);
}
int gss_cli_ctx_unwrap_bulk(struct ptlrpc_cli_ctx *ctx,
struct gss_cli_ctx *gctx;
struct lustre_msg *rmsg, *vmsg;
struct ptlrpc_bulk_sec_desc *bsdr, *bsdv;
- int roff, voff, rc;
+ rawobj_t token;
+ __u32 maj;
+ int roff, voff;
ENTRY;
LASSERT(req->rq_pack_bulk);
LASSERT(req->rq_bulk_read || req->rq_bulk_write);
- switch (RPC_FLVR_SVC(req->rq_flvr.sf_rpc)) {
+ switch (SPTLRPC_FLVR_SVC(req->rq_flvr.sf_rpc)) {
case SPTLRPC_SVC_NULL:
vmsg = req->rq_repdata;
voff = vmsg->lm_bufcount - 1;
LBUG();
}
- if (req->rq_bulk_read) {
- bsdr = lustre_msg_buf(rmsg, roff, 0);
- if (bsdr->bsd_ciph_alg == BULK_CIPH_ALG_NULL)
- goto verify_csum;
-
- bsdv = lustre_msg_buf(vmsg, voff, 0);
- if (bsdr->bsd_ciph_alg != bsdv->bsd_ciph_alg) {
- CERROR("bulk read: cipher algorithm mismatch: client "
- "request %s but server reply with %s. try to "
- "use the new one for decryption\n",
- sptlrpc_get_ciph_name(bsdr->bsd_ciph_alg),
- sptlrpc_get_ciph_name(bsdv->bsd_ciph_alg));
+ bsdr = lustre_msg_buf(rmsg, roff, sizeof(*bsdr));
+ bsdv = lustre_msg_buf(vmsg, voff, sizeof(*bsdv));
+ LASSERT(bsdr && bsdv);
+
+ if (bsdr->bsd_version != bsdv->bsd_version ||
+ bsdr->bsd_type != bsdv->bsd_type ||
+ bsdr->bsd_svc != bsdv->bsd_svc) {
+ CERROR("bulk security descriptor mismatch: "
+ "(%u,%u,%u) != (%u,%u,%u)\n",
+ bsdr->bsd_version, bsdr->bsd_type, bsdr->bsd_svc,
+ bsdv->bsd_version, bsdv->bsd_type, bsdv->bsd_svc);
+ RETURN(-EPROTO);
+ }
+
+ LASSERT(bsdv->bsd_svc == SPTLRPC_BULK_SVC_NULL ||
+ bsdv->bsd_svc == SPTLRPC_BULK_SVC_INTG ||
+ bsdv->bsd_svc == SPTLRPC_BULK_SVC_PRIV);
+
+ /*
+ * in privacy mode, upon successful return make sure
+ * bd_nob_transferred is the actual size of the clear text,
+ * otherwise the upper layer may be confused.
+ */
+ if (req->rq_bulk_write) {
+ if (bsdv->bsd_flags & BSD_FL_ERR) {
+ CERROR("server reported bulk i/o failure\n");
+ RETURN(-EIO);
}
+ if (bsdv->bsd_svc == SPTLRPC_BULK_SVC_PRIV)
+ desc->bd_nob_transferred = desc->bd_nob;
+ } else {
+ /*
+ * bulk read: upon successful return, bd_nob_transferred is
+ * the size of the plain text actually received.
+ */
gctx = container_of(ctx, struct gss_cli_ctx, gc_base);
LASSERT(gctx->gc_mechctx);
- rc = do_bulk_privacy(gctx->gc_mechctx, desc, 0,
- bsdv->bsd_ciph_alg, bsdv);
- if (rc) {
- CERROR("bulk read: client failed to decrypt data\n");
- RETURN(rc);
+ if (bsdv->bsd_svc == SPTLRPC_BULK_SVC_INTG) {
+ int i, nob;
+
+ /* fix the actual data size */
+ for (i = 0, nob = 0; i < desc->bd_iov_count; i++) {
+ if (desc->bd_iov[i].kiov_len + nob >
+ desc->bd_nob_transferred) {
+ desc->bd_iov[i].kiov_len =
+ desc->bd_nob_transferred - nob;
+ }
+ nob += desc->bd_iov[i].kiov_len;
+ }
+
+ token.data = bsdv->bsd_data;
+ token.len = lustre_msg_buflen(vmsg, voff) -
+ sizeof(*bsdv);
+
+ maj = lgss_verify_mic(gctx->gc_mechctx, 0, NULL,
+ desc->bd_iov_count, desc->bd_iov,
+ &token);
+ if (maj != GSS_S_COMPLETE) {
+ CERROR("failed to verify bulk read: %x\n", maj);
+ RETURN(-EACCES);
+ }
+ } else if (bsdv->bsd_svc == SPTLRPC_BULK_SVC_PRIV) {
+ desc->bd_nob = bsdv->bsd_nob;
+ if (desc->bd_nob == 0)
+ RETURN(0);
+
+ token.data = bsdv->bsd_data;
+ token.len = lustre_msg_buflen(vmsg, voff) -
+ sizeof(*bsdv);
+
+ maj = lgss_unwrap_bulk(gctx->gc_mechctx, desc, &token);
+ if (maj != GSS_S_COMPLETE) {
+ CERROR("failed to decrypt bulk read: %x\n",
+ maj);
+ RETURN(-EACCES);
+ }
+
+ desc->bd_nob_transferred = desc->bd_nob;
}
}
-verify_csum:
- rc = bulk_csum_cli_reply(desc, req->rq_bulk_read,
- rmsg, roff, vmsg, voff);
+ RETURN(0);
+}
+
+static int gss_prep_bulk(struct ptlrpc_bulk_desc *desc,
+ struct gss_ctx *mechctx)
+{
+ int rc;
+
+ if (desc->bd_iov_count == 0)
+ return 0;
+
+ rc = sptlrpc_enc_pool_get_pages(desc);
+ if (rc)
+ return rc;
+
+ if (lgss_prep_bulk(mechctx, desc) != GSS_S_COMPLETE)
+ return -EACCES;
+
+ return 0;
+}
+
+int gss_cli_prep_bulk(struct ptlrpc_request *req,
+ struct ptlrpc_bulk_desc *desc)
+{
+ int rc;
+ ENTRY;
+
+ LASSERT(req->rq_cli_ctx);
+ LASSERT(req->rq_pack_bulk);
+ LASSERT(req->rq_bulk_read);
+
+ if (SPTLRPC_FLVR_BULK_SVC(req->rq_flvr.sf_rpc) != SPTLRPC_BULK_SVC_PRIV)
+ RETURN(0);
+
+ rc = gss_prep_bulk(desc, ctx2gctx(req->rq_cli_ctx)->gc_mechctx);
+ if (rc)
+ CERROR("bulk read: failed to prepare encryption "
+ "pages: %d\n", rc);
+
+ RETURN(rc);
+}
+
+int gss_svc_prep_bulk(struct ptlrpc_request *req,
+ struct ptlrpc_bulk_desc *desc)
+{
+ struct gss_svc_reqctx *grctx;
+ struct ptlrpc_bulk_sec_desc *bsd;
+ int rc;
+ ENTRY;
+
+ LASSERT(req->rq_svc_ctx);
+ LASSERT(req->rq_pack_bulk);
+ LASSERT(req->rq_bulk_write);
+
+ grctx = gss_svc_ctx2reqctx(req->rq_svc_ctx);
+ LASSERT(grctx->src_reqbsd);
+ LASSERT(grctx->src_repbsd);
+ LASSERT(grctx->src_ctx);
+ LASSERT(grctx->src_ctx->gsc_mechctx);
+
+ bsd = grctx->src_reqbsd;
+ if (bsd->bsd_svc != SPTLRPC_BULK_SVC_PRIV)
+ RETURN(0);
+
+ rc = gss_prep_bulk(desc, grctx->src_ctx->gsc_mechctx);
+ if (rc)
+ CERROR("bulk write: failed to prepare encryption "
+ "pages: %d\n", rc);
+
RETURN(rc);
}
struct ptlrpc_bulk_desc *desc)
{
struct gss_svc_reqctx *grctx;
- int rc;
+ struct ptlrpc_bulk_sec_desc *bsdr, *bsdv;
+ rawobj_t token;
+ __u32 maj;
ENTRY;
LASSERT(req->rq_svc_ctx);
LASSERT(grctx->src_ctx);
LASSERT(grctx->src_ctx->gsc_mechctx);
- /* decrypt bulk data if it's encrypted */
- if (grctx->src_reqbsd->bsd_ciph_alg != BULK_CIPH_ALG_NULL) {
- rc = do_bulk_privacy(grctx->src_ctx->gsc_mechctx, desc, 0,
- grctx->src_reqbsd->bsd_ciph_alg,
- grctx->src_reqbsd);
- if (rc) {
- CERROR("bulk write: server failed to decrypt data\n");
- RETURN(rc);
+ bsdr = grctx->src_reqbsd;
+ bsdv = grctx->src_repbsd;
+
+ /* bsdr has been sanity checked during unpacking */
+ bsdv->bsd_version = 0;
+ bsdv->bsd_type = SPTLRPC_BULK_DEFAULT;
+ bsdv->bsd_svc = bsdr->bsd_svc;
+ bsdv->bsd_flags = 0;
+
+ switch (bsdv->bsd_svc) {
+ case SPTLRPC_BULK_SVC_INTG:
+ token.data = bsdr->bsd_data;
+ token.len = grctx->src_reqbsd_size - sizeof(*bsdr);
+
+ maj = lgss_verify_mic(grctx->src_ctx->gsc_mechctx, 0, NULL,
+ desc->bd_iov_count, desc->bd_iov, &token);
+ if (maj != GSS_S_COMPLETE) {
+ bsdv->bsd_flags |= BSD_FL_ERR;
+ CERROR("failed to verify bulk signature: %x\n", maj);
+ RETURN(-EACCES);
+ }
+ break;
+ case SPTLRPC_BULK_SVC_PRIV:
+ if (bsdr->bsd_nob != desc->bd_nob) {
+ bsdv->bsd_flags |= BSD_FL_ERR;
+ CERROR("prepared nob %d doesn't match the actual "
+ "nob %d\n", desc->bd_nob, bsdr->bsd_nob);
+ RETURN(-EPROTO);
}
- }
- /* verify bulk data checksum */
- rc = bulk_csum_svc(desc, req->rq_bulk_read,
- grctx->src_reqbsd, grctx->src_reqbsd_size,
- grctx->src_repbsd, grctx->src_repbsd_size);
+ if (desc->bd_iov_count == 0) {
+ LASSERT(desc->bd_nob == 0);
+ break;
+ }
- RETURN(rc);
+ token.data = bsdr->bsd_data;
+ token.len = grctx->src_reqbsd_size - sizeof(*bsdr);
+
+ maj = lgss_unwrap_bulk(grctx->src_ctx->gsc_mechctx,
+ desc, &token);
+ if (maj != GSS_S_COMPLETE) {
+ bsdv->bsd_flags |= BSD_FL_ERR;
+ CERROR("failed decrypt bulk data: %x\n", maj);
+ RETURN(-EACCES);
+ }
+ break;
+ }
+
+ RETURN(0);
}
int gss_svc_wrap_bulk(struct ptlrpc_request *req,
struct ptlrpc_bulk_desc *desc)
{
struct gss_svc_reqctx *grctx;
+ struct ptlrpc_bulk_sec_desc *bsdr, *bsdv;
+ rawobj_t token;
+ __u32 maj;
int rc;
ENTRY;
LASSERT(grctx->src_ctx);
LASSERT(grctx->src_ctx->gsc_mechctx);
- /* generate bulk data checksum */
- rc = bulk_csum_svc(desc, req->rq_bulk_read,
- grctx->src_reqbsd, grctx->src_reqbsd_size,
- grctx->src_repbsd, grctx->src_repbsd_size);
- if (rc)
- RETURN(rc);
-
- /* encrypt bulk data if required */
- if (grctx->src_reqbsd->bsd_ciph_alg != BULK_CIPH_ALG_NULL) {
- rc = do_bulk_privacy(grctx->src_ctx->gsc_mechctx, desc, 1,
- grctx->src_reqbsd->bsd_ciph_alg,
- grctx->src_repbsd);
- if (rc)
- CERROR("bulk read: server failed to encrypt data: "
- "rc %d\n", rc);
+ bsdr = grctx->src_reqbsd;
+ bsdv = grctx->src_repbsd;
+
+ /* bsdr has been sanity checked during unpacking */
+ bsdv->bsd_version = 0;
+ bsdv->bsd_type = SPTLRPC_BULK_DEFAULT;
+ bsdv->bsd_svc = bsdr->bsd_svc;
+ bsdv->bsd_flags = 0;
+
+ switch (bsdv->bsd_svc) {
+ case SPTLRPC_BULK_SVC_INTG:
+ token.data = bsdv->bsd_data;
+ token.len = grctx->src_repbsd_size - sizeof(*bsdv);
+
+ maj = lgss_get_mic(grctx->src_ctx->gsc_mechctx, 0, NULL,
+ desc->bd_iov_count, desc->bd_iov, &token);
+ if (maj != GSS_S_COMPLETE) {
+ bsdv->bsd_flags |= BSD_FL_ERR;
+ CERROR("failed to sign bulk data: %x\n", maj);
+ RETURN(-EACCES);
+ }
+ break;
+ case SPTLRPC_BULK_SVC_PRIV:
+ bsdv->bsd_nob = desc->bd_nob;
+
+ if (desc->bd_iov_count == 0) {
+ LASSERT(desc->bd_nob == 0);
+ break;
+ }
+
+ rc = sptlrpc_enc_pool_get_pages(desc);
+ if (rc) {
+ bsdv->bsd_flags |= BSD_FL_ERR;
+ CERROR("bulk read: failed to allocate encryption "
+ "pages: %d\n", rc);
+ RETURN(rc);
+ }
+
+ token.data = bsdv->bsd_data;
+ token.len = grctx->src_repbsd_size - sizeof(*bsdv);
+
+ maj = lgss_wrap_bulk(grctx->src_ctx->gsc_mechctx,
+ desc, &token, 1);
+ if (maj != GSS_S_COMPLETE) {
+ bsdv->bsd_flags |= BSD_FL_ERR;
+ CERROR("failed to encrypt bulk data: %x\n", maj);
+ RETURN(-EACCES);
+ }
+ break;
}
- RETURN(rc);
+ RETURN(0);
}
void __exit gss_exit_pipefs(void);
/* gss_bulk.c */
+int gss_cli_prep_bulk(struct ptlrpc_request *req,
+ struct ptlrpc_bulk_desc *desc);
int gss_cli_ctx_wrap_bulk(struct ptlrpc_cli_ctx *ctx,
struct ptlrpc_request *req,
struct ptlrpc_bulk_desc *desc);
int gss_cli_ctx_unwrap_bulk(struct ptlrpc_cli_ctx *ctx,
struct ptlrpc_request *req,
struct ptlrpc_bulk_desc *desc);
+int gss_svc_prep_bulk(struct ptlrpc_request *req,
+ struct ptlrpc_bulk_desc *desc);
int gss_svc_unwrap_bulk(struct ptlrpc_request *req,
struct ptlrpc_bulk_desc *desc);
int gss_svc_wrap_bulk(struct ptlrpc_request *req,
.authorize = gss_svc_authorize,
.free_rs = gss_svc_free_rs,
.free_ctx = gss_svc_free_ctx,
+ .prep_bulk = gss_svc_prep_bulk,
.unwrap_bulk = gss_svc_unwrap_bulk,
.wrap_bulk = gss_svc_wrap_bulk,
.install_rctx = gss_svc_install_rctx_kr,
}
static
-void buf_to_sg(struct scatterlist *sg, char *ptr, int len)
+void buf_to_sg(struct scatterlist *sg, void *ptr, int len)
{
sg->page = virt_to_page(ptr);
sg->offset = offset_in_page(ptr);
return(ret);
}
+#ifdef HAVE_ASYNC_BLOCK_CIPHER
+
static inline
int krb5_digest_hmac(struct ll_crypto_hash *tfm,
rawobj_t *key,
struct krb5_header *khdr,
int msgcnt, rawobj_t *msgs,
+ int iovcnt, lnet_kiov_t *iovs,
rawobj_t *cksum)
-#ifdef HAVE_ASYNC_BLOCK_CIPHER
{
struct hash_desc desc;
struct scatterlist sg[1];
ll_crypto_hash_update(&desc, sg, msgs[i].len);
}
+ for (i = 0; i < iovcnt; i++) {
+ if (iovs[i].kiov_len == 0)
+ continue;
+ sg[0].page = iovs[i].kiov_page;
+ sg[0].offset = iovs[i].kiov_offset;
+ sg[0].length = iovs[i].kiov_len;
+ ll_crypto_hash_update(&desc, sg, iovs[i].kiov_len);
+ }
+
if (khdr) {
buf_to_sg(sg, (char *) khdr, sizeof(*khdr));
ll_crypto_hash_update(&desc, sg, sizeof(*khdr));
return ll_crypto_hash_final(&desc, cksum->data);
}
-#else /* HAVE_ASYNC_BLOCK_CIPHER */
+
+#else /* ! HAVE_ASYNC_BLOCK_CIPHER */
+
+static inline
+int krb5_digest_hmac(struct ll_crypto_hash *tfm,
+ rawobj_t *key,
+ struct krb5_header *khdr,
+ int msgcnt, rawobj_t *msgs,
+ int iovcnt, lnet_kiov_t *iovs,
+ rawobj_t *cksum)
{
struct scatterlist sg[1];
__u32 keylen = key->len, i;
crypto_hmac_update(tfm, sg, 1);
}
+ for (i = 0; i < iovcnt; i++) {
+ if (iovs[i].kiov_len == 0)
+ continue;
+ sg[0].page = iovs[i].kiov_page;
+ sg[0].offset = iovs[i].kiov_offset;
+ sg[0].length = iovs[i].kiov_len;
+ crypto_hmac_update(tfm, sg, 1);
+ }
+
if (khdr) {
buf_to_sg(sg, (char *) khdr, sizeof(*khdr));
crypto_hmac_update(tfm, sg, 1);
crypto_hmac_final(tfm, key->data, &keylen, cksum->data);
return 0;
}
+
#endif /* HAVE_ASYNC_BLOCK_CIPHER */
static inline
struct krb5_keyblock *kb,
struct krb5_header *khdr,
int msgcnt, rawobj_t *msgs,
+ int iovcnt, lnet_kiov_t *iovs,
rawobj_t *cksum)
{
struct hash_desc desc;
ll_crypto_hash_update(&desc, sg, msgs[i].len);
}
+ for (i = 0; i < iovcnt; i++) {
+ if (iovs[i].kiov_len == 0)
+ continue;
+ sg[0].page = iovs[i].kiov_page;
+ sg[0].offset = iovs[i].kiov_offset;
+ sg[0].length = iovs[i].kiov_len;
+ ll_crypto_hash_update(&desc, sg, iovs[i].kiov_len);
+ }
+
if (khdr) {
buf_to_sg(sg, (char *) khdr, sizeof(*khdr));
ll_crypto_hash_update(&desc, sg, sizeof(*khdr));
struct krb5_keyblock *kb,
struct krb5_header *khdr,
int msgcnt, rawobj_t *msgs,
+ int iovcnt, lnet_kiov_t *iovs,
rawobj_t *cksum)
{
struct krb5_enctype *ke = &enctypes[enctype];
if (ke->ke_hash_hmac)
rc = krb5_digest_hmac(tfm, &kb->kb_key,
- khdr, msgcnt, msgs, cksum);
+ khdr, msgcnt, msgs, iovcnt, iovs, cksum);
else
rc = krb5_digest_norm(tfm, kb,
- khdr, msgcnt, msgs, cksum);
+ khdr, msgcnt, msgs, iovcnt, iovs, cksum);
if (rc == 0)
code = GSS_S_COMPLETE;
return code;
}
+static void fill_krb5_header(struct krb5_ctx *kctx,
+ struct krb5_header *khdr,
+ int privacy)
+{
+ unsigned char acceptor_flag;
+
+ acceptor_flag = kctx->kc_initiate ? 0 : FLAG_SENDER_IS_ACCEPTOR;
+
+ if (privacy) {
+ khdr->kh_tok_id = cpu_to_be16(KG_TOK_WRAP_MSG);
+ khdr->kh_flags = acceptor_flag | FLAG_WRAP_CONFIDENTIAL;
+ khdr->kh_ec = cpu_to_be16(0);
+ khdr->kh_rrc = cpu_to_be16(0);
+ } else {
+ khdr->kh_tok_id = cpu_to_be16(KG_TOK_MIC_MSG);
+ khdr->kh_flags = acceptor_flag;
+ khdr->kh_ec = cpu_to_be16(0xffff);
+ khdr->kh_rrc = cpu_to_be16(0xffff);
+ }
+
+ khdr->kh_filler = 0xff;
+ spin_lock(&krb5_seq_lock);
+ khdr->kh_seq = cpu_to_be64(kctx->kc_seq_send++);
+ spin_unlock(&krb5_seq_lock);
+}
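
For reference, the header layout produced by fill_krb5_header() above (all multi-byte fields are written big-endian; this summary is derived from the code, and the token ids appear to match the krb5 CFX token format of RFC 4121):

    /*
     *          kh_tok_id        kh_flags                  kh_ec    kh_rrc
     *   MIC:   KG_TOK_MIC_MSG   acceptor flag             0xffff   0xffff
     *   wrap:  KG_TOK_WRAP_MSG  acceptor | CONFIDENTIAL   0x0000   0x0000
     *
     * kh_filler is always 0xff and kh_seq takes the next send sequence.
     */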
+
+static __u32 verify_krb5_header(struct krb5_ctx *kctx,
+ struct krb5_header *khdr,
+ int privacy)
+{
+ unsigned char acceptor_flag;
+ __u16 tok_id, ec_rrc;
+
+ acceptor_flag = kctx->kc_initiate ? FLAG_SENDER_IS_ACCEPTOR : 0;
+
+ if (privacy) {
+ tok_id = KG_TOK_WRAP_MSG;
+ ec_rrc = 0x0;
+ } else {
+ tok_id = KG_TOK_MIC_MSG;
+ ec_rrc = 0xffff;
+ }
+
+ /* sanity checks */
+ if (be16_to_cpu(khdr->kh_tok_id) != tok_id) {
+ CERROR("bad token id\n");
+ return GSS_S_DEFECTIVE_TOKEN;
+ }
+ if ((khdr->kh_flags & FLAG_SENDER_IS_ACCEPTOR) != acceptor_flag) {
+ CERROR("bad direction flag\n");
+ return GSS_S_BAD_SIG;
+ }
+ if (privacy && (khdr->kh_flags & FLAG_WRAP_CONFIDENTIAL) == 0) {
+ CERROR("missing confidential flag\n");
+ return GSS_S_BAD_SIG;
+ }
+ if (khdr->kh_filler != 0xff) {
+ CERROR("bad filler\n");
+ return GSS_S_DEFECTIVE_TOKEN;
+ }
+ if (be16_to_cpu(khdr->kh_ec) != ec_rrc ||
+ be16_to_cpu(khdr->kh_rrc) != ec_rrc) {
+ CERROR("bad EC or RRC\n");
+ return GSS_S_DEFECTIVE_TOKEN;
+ }
+ return GSS_S_COMPLETE;
+}
+
static
__u32 gss_get_mic_kerberos(struct gss_ctx *gctx,
int msgcnt,
rawobj_t *msgs,
+ int iovcnt,
+ lnet_kiov_t *iovs,
rawobj_t *token)
{
struct krb5_ctx *kctx = gctx->internal_ctx_id;
struct krb5_enctype *ke = &enctypes[kctx->kc_enctype];
struct krb5_header *khdr;
- unsigned char acceptor_flag;
rawobj_t cksum = RAWOBJ_EMPTY;
- __u32 rc = GSS_S_FAILURE;
-
- acceptor_flag = kctx->kc_initiate ? 0 : FLAG_SENDER_IS_ACCEPTOR;
/* fill krb5 header */
LASSERT(token->len >= sizeof(*khdr));
khdr = (struct krb5_header *) token->data;
-
- khdr->kh_tok_id = cpu_to_be16(KG_TOK_MIC_MSG);
- khdr->kh_flags = acceptor_flag;
- khdr->kh_filler = 0xff;
- khdr->kh_ec = cpu_to_be16(0xffff);
- khdr->kh_rrc = cpu_to_be16(0xffff);
- spin_lock(&krb5_seq_lock);
- khdr->kh_seq = cpu_to_be64(kctx->kc_seq_send++);
- spin_unlock(&krb5_seq_lock);
+ fill_krb5_header(kctx, khdr, 0);
/* checksum */
if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyc,
- khdr, msgcnt, msgs, &cksum))
- goto out_err;
+ khdr, msgcnt, msgs, iovcnt, iovs, &cksum))
+ return GSS_S_FAILURE;
LASSERT(cksum.len >= ke->ke_hash_size);
LASSERT(token->len >= sizeof(*khdr) + ke->ke_hash_size);
ke->ke_hash_size);
token->len = sizeof(*khdr) + ke->ke_hash_size;
- rc = GSS_S_COMPLETE;
-out_err:
rawobj_free(&cksum);
- return rc;
+ return GSS_S_COMPLETE;
}
static
__u32 gss_verify_mic_kerberos(struct gss_ctx *gctx,
int msgcnt,
rawobj_t *msgs,
+ int iovcnt,
+ lnet_kiov_t *iovs,
rawobj_t *token)
{
struct krb5_ctx *kctx = gctx->internal_ctx_id;
struct krb5_enctype *ke = &enctypes[kctx->kc_enctype];
struct krb5_header *khdr;
- unsigned char acceptor_flag;
rawobj_t cksum = RAWOBJ_EMPTY;
- __u32 rc = GSS_S_FAILURE;
-
- acceptor_flag = kctx->kc_initiate ? FLAG_SENDER_IS_ACCEPTOR : 0;
+ __u32 major;
if (token->len < sizeof(*khdr)) {
CERROR("short signature: %u\n", token->len);
khdr = (struct krb5_header *) token->data;
- /* sanity checks */
- if (be16_to_cpu(khdr->kh_tok_id) != KG_TOK_MIC_MSG) {
- CERROR("bad token id\n");
- return GSS_S_DEFECTIVE_TOKEN;
- }
- if ((khdr->kh_flags & FLAG_SENDER_IS_ACCEPTOR) != acceptor_flag) {
- CERROR("bad direction flag\n");
- return GSS_S_BAD_SIG;
- }
- if (khdr->kh_filler != 0xff) {
- CERROR("bad filler\n");
- return GSS_S_DEFECTIVE_TOKEN;
- }
- if (be16_to_cpu(khdr->kh_ec) != 0xffff ||
- be16_to_cpu(khdr->kh_rrc) != 0xffff) {
- CERROR("bad EC or RRC\n");
- return GSS_S_DEFECTIVE_TOKEN;
+ major = verify_krb5_header(kctx, khdr, 0);
+ if (major != GSS_S_COMPLETE) {
+ CERROR("bad krb5 header\n");
+ return major;
}
if (token->len < sizeof(*khdr) + ke->ke_hash_size) {
CERROR("short signature: %u, require %d\n",
token->len, (int) sizeof(*khdr) + ke->ke_hash_size);
- goto out;
+ return GSS_S_FAILURE;
}
if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyc,
- khdr, msgcnt, msgs, &cksum))
+ khdr, msgcnt, msgs, iovcnt, iovs, &cksum)) {
+ CERROR("failed to make checksum\n");
return GSS_S_FAILURE;
+ }
LASSERT(cksum.len >= ke->ke_hash_size);
if (memcmp(khdr + 1, cksum.data + cksum.len - ke->ke_hash_size,
ke->ke_hash_size)) {
CERROR("checksum mismatch\n");
- rc = GSS_S_BAD_SIG;
- goto out;
+ rawobj_free(&cksum);
+ return GSS_S_BAD_SIG;
}
- rc = GSS_S_COMPLETE;
-out:
rawobj_free(&cksum);
- return rc;
+ return GSS_S_COMPLETE;
}
static
}
static
+int krb5_encrypt_bulk(struct ll_crypto_cipher *tfm,
+ struct krb5_header *khdr,
+ char *confounder,
+ struct ptlrpc_bulk_desc *desc,
+ rawobj_t *cipher,
+ int adj_nob)
+{
+ struct blkcipher_desc ciph_desc;
+ __u8 local_iv[16] = {0};
+ struct scatterlist src, dst;
+ int blocksize, i, rc, nob = 0;
+
+ LASSERT(desc->bd_iov_count);
+ LASSERT(desc->bd_enc_iov);
+
+ blocksize = ll_crypto_blkcipher_blocksize(tfm);
+ LASSERT(blocksize > 1);
+ LASSERT(cipher->len == blocksize + sizeof(*khdr));
+
+ ciph_desc.tfm = tfm;
+ ciph_desc.info = local_iv;
+ ciph_desc.flags = 0;
+
+ /* encrypt confounder */
+ buf_to_sg(&src, confounder, blocksize);
+ buf_to_sg(&dst, cipher->data, blocksize);
+
+ rc = ll_crypto_blkcipher_encrypt_iv(&ciph_desc, &dst, &src, blocksize);
+ if (rc) {
+ CERROR("error to encrypt confounder: %d\n", rc);
+ return rc;
+ }
+
+ /* encrypt clear pages */
+ for (i = 0; i < desc->bd_iov_count; i++) {
+ src.page = desc->bd_iov[i].kiov_page;
+ src.offset = desc->bd_iov[i].kiov_offset;
+ src.length = (desc->bd_iov[i].kiov_len + blocksize - 1) &
+ (~(blocksize - 1));
+
+ if (adj_nob)
+ nob += src.length;
+
+ dst.page = desc->bd_enc_iov[i].kiov_page;
+ dst.offset = src.offset;
+ dst.length = src.length;
+
+ desc->bd_enc_iov[i].kiov_offset = dst.offset;
+ desc->bd_enc_iov[i].kiov_len = dst.length;
+
+ rc = ll_crypto_blkcipher_encrypt_iv(&ciph_desc, &dst, &src,
+ src.length);
+ if (rc) {
+ CERROR("error to encrypt page: %d\n", rc);
+ return rc;
+ }
+ }
+
+ /* encrypt krb5 header */
+ buf_to_sg(&src, khdr, sizeof(*khdr));
+ buf_to_sg(&dst, cipher->data + blocksize, sizeof(*khdr));
+
+ rc = ll_crypto_blkcipher_encrypt_iv(&ciph_desc,
+ &dst, &src, sizeof(*khdr));
+ if (rc) {
+ CERROR("error to encrypt krb5 header: %d\n", rc);
+ return rc;
+ }
+
+ if (adj_nob)
+ desc->bd_nob = nob;
+
+ return 0;
+}
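
Before the matching decrypt path, a sketch of the cipher layout the function above leaves behind (inferred from the code, not stated in the patch):

    /*
     *   cipher->data : [ E(confounder) | E(krb5 header) ]
     *                    blocksize       sizeof(*khdr)
     *   bd_enc_iov   : [ E(page 0) | E(page 1) | ... ]
     *                    each kiov_len rounded up to a blocksize multiple
     *
     * with adj_nob set, desc->bd_nob grows to the rounded total.
     */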
+
+/*
+ * desc->bd_nob_transferred is the size of cipher text received.
+ * desc->bd_nob is the target size of the plain text to be recovered.
+ */
+static
+int krb5_decrypt_bulk(struct ll_crypto_cipher *tfm,
+ struct krb5_header *khdr,
+ struct ptlrpc_bulk_desc *desc,
+ rawobj_t *cipher,
+ rawobj_t *plain)
+{
+ struct blkcipher_desc ciph_desc;
+ __u8 local_iv[16] = {0};
+ struct scatterlist src, dst;
+ int ct_nob = 0, pt_nob = 0;
+ int blocksize, i, rc;
+
+ LASSERT(desc->bd_iov_count);
+ LASSERT(desc->bd_enc_iov);
+ LASSERT(desc->bd_nob_transferred);
+
+ blocksize = ll_crypto_blkcipher_blocksize(tfm);
+ LASSERT(blocksize > 1);
+ LASSERT(cipher->len == blocksize + sizeof(*khdr));
+
+ ciph_desc.tfm = tfm;
+ ciph_desc.info = local_iv;
+ ciph_desc.flags = 0;
+
+ if (desc->bd_nob_transferred % blocksize) {
+ CERROR("odd transferred nob: %d\n", desc->bd_nob_transferred);
+ return -EPROTO;
+ }
+
+ /* decrypt head (confounder) */
+ buf_to_sg(&src, cipher->data, blocksize);
+ buf_to_sg(&dst, plain->data, blocksize);
+
+ rc = ll_crypto_blkcipher_decrypt_iv(&ciph_desc, &dst, &src, blocksize);
+ if (rc) {
+ CERROR("error to decrypt confounder: %d\n", rc);
+ return rc;
+ }
+
+ /*
+ * decrypt the data pages. note the enc_iov was prepared by
+ * prep_bulk(), which already did some sanity checking.
+ *
+ * desc->bd_nob is the plain text size supposed to be transferred;
+ * desc->bd_nob_transferred is the size of the cipher text received.
+ */
+ for (i = 0; i < desc->bd_iov_count && ct_nob < desc->bd_nob_transferred;
+ i++) {
+ if (desc->bd_enc_iov[i].kiov_len == 0)
+ continue;
+
+ if (ct_nob + desc->bd_enc_iov[i].kiov_len >
+ desc->bd_nob_transferred)
+ desc->bd_enc_iov[i].kiov_len =
+ desc->bd_nob_transferred - ct_nob;
+
+ desc->bd_iov[i].kiov_len = desc->bd_enc_iov[i].kiov_len;
+ if (pt_nob + desc->bd_enc_iov[i].kiov_len > desc->bd_nob)
+ desc->bd_iov[i].kiov_len = desc->bd_nob - pt_nob;
+
+ src.page = desc->bd_enc_iov[i].kiov_page;
+ src.offset = desc->bd_enc_iov[i].kiov_offset;
+ src.length = desc->bd_enc_iov[i].kiov_len;
+
+ dst = src;
+
+ if (desc->bd_iov[i].kiov_offset % blocksize == 0)
+ dst.page = desc->bd_iov[i].kiov_page;
+
+ rc = ll_crypto_blkcipher_decrypt_iv(&ciph_desc, &dst, &src,
+ src.length);
+ if (rc) {
+ CERROR("error to decrypt page: %d\n", rc);
+ return rc;
+ }
+
+ if (desc->bd_iov[i].kiov_offset % blocksize) {
+ memcpy(cfs_page_address(desc->bd_iov[i].kiov_page) +
+ desc->bd_iov[i].kiov_offset,
+ cfs_page_address(desc->bd_enc_iov[i].kiov_page) +
+ desc->bd_iov[i].kiov_offset,
+ desc->bd_iov[i].kiov_len);
+ }
+
+ ct_nob += desc->bd_enc_iov[i].kiov_len;
+ pt_nob += desc->bd_iov[i].kiov_len;
+ }
+
+ /* decrypt tail (krb5 header) */
+ buf_to_sg(&src, cipher->data + blocksize, sizeof(*khdr));
+ buf_to_sg(&dst, cipher->data + blocksize, sizeof(*khdr));
+
+ rc = ll_crypto_blkcipher_decrypt_iv(&ciph_desc,
+ &dst, &src, sizeof(*khdr));
+ if (rc) {
+ CERROR("error to decrypt tail: %d\n", rc);
+ return rc;
+ }
+
+ if (memcmp(cipher->data + blocksize, khdr, sizeof(*khdr))) {
+ CERROR("krb5 header doesn't match\n");
+ return -EACCES;
+ }
+
+ return 0;
+}
+
+static
__u32 gss_wrap_kerberos(struct gss_ctx *gctx,
rawobj_t *gsshdr,
rawobj_t *msg,
struct krb5_ctx *kctx = gctx->internal_ctx_id;
struct krb5_enctype *ke = &enctypes[kctx->kc_enctype];
struct krb5_header *khdr;
- unsigned char acceptor_flag;
int blocksize;
rawobj_t cksum = RAWOBJ_EMPTY;
- rawobj_t data_desc[4], cipher;
+ rawobj_t data_desc[3], cipher;
__u8 conf[GSS_MAX_CIPHER_BLOCK];
- int enc_rc = 0;
+ int rc = 0;
LASSERT(ke);
LASSERT(ke->ke_conf_size <= GSS_MAX_CIPHER_BLOCK);
/* fill krb5 header */
LASSERT(token->len >= sizeof(*khdr));
khdr = (struct krb5_header *) token->data;
- acceptor_flag = kctx->kc_initiate ? 0 : FLAG_SENDER_IS_ACCEPTOR;
-
- khdr->kh_tok_id = cpu_to_be16(KG_TOK_WRAP_MSG);
- khdr->kh_flags = acceptor_flag | FLAG_WRAP_CONFIDENTIAL;
- khdr->kh_filler = 0xff;
- khdr->kh_ec = cpu_to_be16(0);
- khdr->kh_rrc = cpu_to_be16(0);
- spin_lock(&krb5_seq_lock);
- khdr->kh_seq = cpu_to_be64(kctx->kc_seq_send++);
- spin_unlock(&krb5_seq_lock);
+ fill_krb5_header(kctx, khdr, 1);
/* generate confounder */
get_random_bytes(conf, ke->ke_conf_size);
data_desc[1].len = gsshdr->len;
data_desc[2].data = msg->data;
data_desc[2].len = msg->len;
- data_desc[3].data = (__u8 *) khdr;
- data_desc[3].len = sizeof(*khdr);
/* compute checksum */
if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyi,
- khdr, 4, data_desc, &cksum))
+ khdr, 3, data_desc, 0, NULL, &cksum))
return GSS_S_FAILURE;
LASSERT(cksum.len >= ke->ke_hash_size);
struct ll_crypto_cipher *arc4_tfm;
if (krb5_make_checksum(ENCTYPE_ARCFOUR_HMAC, &kctx->kc_keyi,
- NULL, 1, &cksum, &arc4_keye)) {
+ NULL, 1, &cksum, 0, NULL, &arc4_keye)) {
CERROR("failed to obtain arc4 enc key\n");
- GOTO(arc4_out, enc_rc = -EACCES);
+ GOTO(arc4_out, rc = -EACCES);
}
arc4_tfm = ll_crypto_alloc_blkcipher("ecb(arc4)", 0, 0);
if (arc4_tfm == NULL) {
CERROR("failed to alloc tfm arc4 in ECB mode\n");
- GOTO(arc4_out_key, enc_rc = -EACCES);
+ GOTO(arc4_out_key, rc = -EACCES);
}
if (ll_crypto_blkcipher_setkey(arc4_tfm, arc4_keye.data,
arc4_keye.len)) {
CERROR("failed to set arc4 key, len %d\n",
arc4_keye.len);
- GOTO(arc4_out_tfm, enc_rc = -EACCES);
+ GOTO(arc4_out_tfm, rc = -EACCES);
}
- enc_rc = krb5_encrypt_rawobjs(arc4_tfm, 1,
- 3, data_desc, &cipher, 1);
+ rc = krb5_encrypt_rawobjs(arc4_tfm, 1,
+ 3, data_desc, &cipher, 1);
arc4_out_tfm:
ll_crypto_free_blkcipher(arc4_tfm);
arc4_out_key:
arc4_out:
do {} while(0); /* just to avoid compile warning */
} else {
- enc_rc = krb5_encrypt_rawobjs(kctx->kc_keye.kb_tfm, 0,
- 3, data_desc, &cipher, 1);
+ rc = krb5_encrypt_rawobjs(kctx->kc_keye.kb_tfm, 0,
+ 3, data_desc, &cipher, 1);
+ }
+
+ if (rc != 0) {
+ rawobj_free(&cksum);
+ return GSS_S_FAILURE;
+ }
+
+ /* fill in checksum */
+ LASSERT(token->len >= sizeof(*khdr) + cipher.len + ke->ke_hash_size);
+ memcpy((char *)(khdr + 1) + cipher.len,
+ cksum.data + cksum.len - ke->ke_hash_size,
+ ke->ke_hash_size);
+ rawobj_free(&cksum);
+
+ /* final token length */
+ token->len = sizeof(*khdr) + cipher.len + ke->ke_hash_size;
+ return GSS_S_COMPLETE;
+}
+
+static
+__u32 gss_prep_bulk_kerberos(struct gss_ctx *gctx,
+ struct ptlrpc_bulk_desc *desc)
+{
+ struct krb5_ctx *kctx = gctx->internal_ctx_id;
+ int blocksize, i;
+
+ LASSERT(desc->bd_iov_count);
+ LASSERT(desc->bd_enc_iov);
+ LASSERT(kctx->kc_keye.kb_tfm);
+
+ blocksize = ll_crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
+
+ for (i = 0; i < desc->bd_iov_count; i++) {
+ LASSERT(desc->bd_enc_iov[i].kiov_page);
+ /*
+ * the offset should always start at a page boundary, on both
+ * the client and the server side.
+ */
+ if (desc->bd_iov[i].kiov_offset & (blocksize - 1)) {
+ CERROR("odd offset %d in page %d\n",
+ desc->bd_iov[i].kiov_offset, i);
+ return GSS_S_FAILURE;
+ }
+
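+ /* round each fragment up to a full cipher block */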
+ desc->bd_enc_iov[i].kiov_offset = desc->bd_iov[i].kiov_offset;
+ desc->bd_enc_iov[i].kiov_len = (desc->bd_iov[i].kiov_len +
+ blocksize - 1) & (~(blocksize - 1));
+ }
+
+ return GSS_S_COMPLETE;
+}
+
+static
+__u32 gss_wrap_bulk_kerberos(struct gss_ctx *gctx,
+ struct ptlrpc_bulk_desc *desc,
+ rawobj_t *token, int adj_nob)
+{
+ struct krb5_ctx *kctx = gctx->internal_ctx_id;
+ struct krb5_enctype *ke = &enctypes[kctx->kc_enctype];
+ struct krb5_header *khdr;
+ int blocksize;
+ rawobj_t cksum = RAWOBJ_EMPTY;
+ rawobj_t data_desc[1], cipher;
+ __u8 conf[GSS_MAX_CIPHER_BLOCK];
+ int rc = 0;
+
+ LASSERT(ke);
+ LASSERT(ke->ke_conf_size <= GSS_MAX_CIPHER_BLOCK);
+
+ /*
+ * final token format:
+ * --------------------------------------------------
+ * | krb5 header | head/tail cipher text | checksum |
+ * --------------------------------------------------
+ */
+
+ /* fill krb5 header */
+ LASSERT(token->len >= sizeof(*khdr));
+ khdr = (struct krb5_header *) token->data;
+ fill_krb5_header(kctx, khdr, 1);
+
+ /* generate confounder */
+ get_random_bytes(conf, ke->ke_conf_size);
+
+ /* get encryption blocksize. note kc_keye might not be associated
+ * with a tfm; currently that is only the case for arcfour-hmac */
+ if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
+ LASSERT(kctx->kc_keye.kb_tfm == NULL);
+ blocksize = 1;
+ } else {
+ LASSERT(kctx->kc_keye.kb_tfm);
+ blocksize = ll_crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
+ }
+
+ /*
+ * we assume the size of krb5_header (16 bytes) is a multiple of
+ * blocksize. the bulk token size would then be exactly
+ * (sizeof(krb5_header) + blocksize + sizeof(krb5_header) + hashsize)
+ */
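+ /*
+ * for example (assuming a 16 byte cipher block and a 16 byte
+ * hash): 16 + 16 + 16 + 16 = 64 bytes of token.
+ */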
+ LASSERT(blocksize <= ke->ke_conf_size);
+ LASSERT(sizeof(*khdr) >= blocksize && sizeof(*khdr) % blocksize == 0);
+ LASSERT(token->len >= sizeof(*khdr) + blocksize + sizeof(*khdr) + 16);
+
+ /*
+ * clear text layout for checksum:
+ * ------------------------------------------
+ * | confounder | clear pages | krb5 header |
+ * ------------------------------------------
+ */
+ data_desc[0].data = conf;
+ data_desc[0].len = ke->ke_conf_size;
+
+ /* compute checksum */
+ if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyi,
+ khdr, 1, data_desc,
+ desc->bd_iov_count, desc->bd_iov,
+ &cksum))
+ return GSS_S_FAILURE;
+ LASSERT(cksum.len >= ke->ke_hash_size);
+
+ /*
+ * clear text layout for encryption:
+ * ------------------------------------------
+ * | confounder | clear pages | krb5 header |
+ * ------------------------------------------
+ * | | |
+ * ---------- (cipher pages) |
+ * result token: | |
+ * -------------------------------------------
+ * | krb5 header | cipher text | cipher text |
+ * -------------------------------------------
+ */
+ data_desc[0].data = conf;
+ data_desc[0].len = ke->ke_conf_size;
+
+ cipher.data = (__u8 *) (khdr + 1);
+ cipher.len = blocksize + sizeof(*khdr);
+
+ if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
+ LBUG();
+ rc = 0;
+ } else {
+ rc = krb5_encrypt_bulk(kctx->kc_keye.kb_tfm, khdr,
+ conf, desc, &cipher, adj_nob);
}
- if (enc_rc != 0) {
+ if (rc != 0) {
rawobj_free(&cksum);
return GSS_S_FAILURE;
}
struct krb5_ctx *kctx = gctx->internal_ctx_id;
struct krb5_enctype *ke = &enctypes[kctx->kc_enctype];
struct krb5_header *khdr;
- unsigned char acceptor_flag;
unsigned char *tmpbuf;
int blocksize, bodysize;
rawobj_t cksum = RAWOBJ_EMPTY;
rawobj_t cipher_in, plain_out;
rawobj_t hash_objs[3];
- __u32 rc = GSS_S_FAILURE, enc_rc = 0;
+ int rc = 0;
+ __u32 major;
LASSERT(ke);
- acceptor_flag = kctx->kc_initiate ? FLAG_SENDER_IS_ACCEPTOR : 0;
-
if (token->len < sizeof(*khdr)) {
CERROR("short signature: %u\n", token->len);
return GSS_S_DEFECTIVE_TOKEN;
khdr = (struct krb5_header *) token->data;
- /* sanity check header */
- if (be16_to_cpu(khdr->kh_tok_id) != KG_TOK_WRAP_MSG) {
- CERROR("bad token id\n");
- return GSS_S_DEFECTIVE_TOKEN;
- }
- if ((khdr->kh_flags & FLAG_SENDER_IS_ACCEPTOR) != acceptor_flag) {
- CERROR("bad direction flag\n");
- return GSS_S_BAD_SIG;
- }
- if ((khdr->kh_flags & FLAG_WRAP_CONFIDENTIAL) == 0) {
- CERROR("missing confidential flag\n");
- return GSS_S_BAD_SIG;
- }
- if (khdr->kh_filler != 0xff) {
- CERROR("bad filler\n");
- return GSS_S_DEFECTIVE_TOKEN;
- }
- if (be16_to_cpu(khdr->kh_ec) != 0x0 ||
- be16_to_cpu(khdr->kh_rrc) != 0x0) {
- CERROR("bad EC or RRC\n");
- return GSS_S_DEFECTIVE_TOKEN;
+ major = verify_krb5_header(kctx, khdr, 1);
+ if (major != GSS_S_COMPLETE) {
+ CERROR("bad krb5 header\n");
+ return major;
}
/* block size */
if (!tmpbuf)
return GSS_S_FAILURE;
+ major = GSS_S_FAILURE;
+
cipher_in.data = (__u8 *) (khdr + 1);
cipher_in.len = bodysize;
plain_out.data = tmpbuf;
cksum.len = ke->ke_hash_size;
if (krb5_make_checksum(ENCTYPE_ARCFOUR_HMAC, &kctx->kc_keyi,
- NULL, 1, &cksum, &arc4_keye)) {
+ NULL, 1, &cksum, 0, NULL, &arc4_keye)) {
CERROR("failed to obtain arc4 enc key\n");
- GOTO(arc4_out, enc_rc = -EACCES);
+ GOTO(arc4_out, rc = -EACCES);
}
arc4_tfm = ll_crypto_alloc_blkcipher("ecb(arc4)", 0, 0);
if (arc4_tfm == NULL) {
CERROR("failed to alloc tfm arc4 in ECB mode\n");
- GOTO(arc4_out_key, enc_rc = -EACCES);
+ GOTO(arc4_out_key, rc = -EACCES);
}
if (ll_crypto_blkcipher_setkey(arc4_tfm,
arc4_keye.data, arc4_keye.len)) {
CERROR("failed to set arc4 key, len %d\n",
arc4_keye.len);
- GOTO(arc4_out_tfm, enc_rc = -EACCES);
+ GOTO(arc4_out_tfm, rc = -EACCES);
}
- enc_rc = krb5_encrypt_rawobjs(arc4_tfm, 1,
- 1, &cipher_in, &plain_out, 0);
+ rc = krb5_encrypt_rawobjs(arc4_tfm, 1,
+ 1, &cipher_in, &plain_out, 0);
arc4_out_tfm:
ll_crypto_free_blkcipher(arc4_tfm);
arc4_out_key:
arc4_out:
cksum = RAWOBJ_EMPTY;
} else {
- enc_rc = krb5_encrypt_rawobjs(kctx->kc_keye.kb_tfm, 0,
- 1, &cipher_in, &plain_out, 0);
+ rc = krb5_encrypt_rawobjs(kctx->kc_keye.kb_tfm, 0,
+ 1, &cipher_in, &plain_out, 0);
}
- if (enc_rc != 0) {
+ if (rc != 0) {
CERROR("error decrypt\n");
goto out_free;
}
hash_objs[0].data = plain_out.data;
hash_objs[1].len = gsshdr->len;
hash_objs[1].data = gsshdr->data;
- hash_objs[2].len = plain_out.len - ke->ke_conf_size;
+ hash_objs[2].len = plain_out.len - ke->ke_conf_size - sizeof(*khdr);
hash_objs[2].data = plain_out.data + ke->ke_conf_size;
if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyi,
- khdr, 3, hash_objs, &cksum))
+ khdr, 3, hash_objs, 0, NULL, &cksum))
goto out_free;
LASSERT(cksum.len >= ke->ke_hash_size);
if (memcmp((char *)(khdr + 1) + bodysize,
cksum.data + cksum.len - ke->ke_hash_size,
ke->ke_hash_size)) {
- CERROR("cksum mismatch\n");
+ CERROR("checksum mismatch\n");
goto out_free;
}
msg->len = bodysize - ke->ke_conf_size - sizeof(*khdr);
memcpy(msg->data, tmpbuf + ke->ke_conf_size, msg->len);
- rc = GSS_S_COMPLETE;
+ major = GSS_S_COMPLETE;
out_free:
OBD_FREE(tmpbuf, bodysize);
rawobj_free(&cksum);
- return rc;
+ return major;
}
static
-__u32 gss_plain_encrypt_kerberos(struct gss_ctx *ctx,
- int decrypt,
- int length,
- void *in_buf,
- void *out_buf)
+__u32 gss_unwrap_bulk_kerberos(struct gss_ctx *gctx,
+ struct ptlrpc_bulk_desc *desc,
+ rawobj_t *token)
{
- struct krb5_ctx *kctx = ctx->internal_ctx_id;
- __u32 rc;
+ struct krb5_ctx *kctx = gctx->internal_ctx_id;
+ struct krb5_enctype *ke = &enctypes[kctx->kc_enctype];
+ struct krb5_header *khdr;
+ int blocksize;
+ rawobj_t cksum = RAWOBJ_EMPTY;
+ rawobj_t cipher, plain;
+ rawobj_t data_desc[1];
+ int rc;
+ __u32 major;
+
+ LASSERT(ke);
+
+ if (token->len < sizeof(*khdr)) {
+ CERROR("short signature: %u\n", token->len);
+ return GSS_S_DEFECTIVE_TOKEN;
+ }
+
+ khdr = (struct krb5_header *) token->data;
+
+ major = verify_krb5_header(kctx, khdr, 1);
+ if (major != GSS_S_COMPLETE) {
+ CERROR("bad krb5 header\n");
+ return major;
+ }
+
+ /* block size */
+ if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
+ LASSERT(kctx->kc_keye.kb_tfm == NULL);
+ blocksize = 1;
+ LBUG();
+ } else {
+ LASSERT(kctx->kc_keye.kb_tfm);
+ blocksize = ll_crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
+ }
+ LASSERT(sizeof(*khdr) >= blocksize && sizeof(*khdr) % blocksize == 0);
+
+ /*
+ * token format is expected as:
+ * -----------------------------------------------
+ * | krb5 header | head/tail cipher text | cksum |
+ * -----------------------------------------------
+ */
+ if (token->len < sizeof(*khdr) + blocksize + sizeof(*khdr) +
+ ke->ke_hash_size) {
+ CERROR("short token size: %u\n", token->len);
+ return GSS_S_DEFECTIVE_TOKEN;
+ }
+
+ cipher.data = (__u8 *) (khdr + 1);
+ cipher.len = blocksize + sizeof(*khdr);
+ plain.data = cipher.data;
+ plain.len = cipher.len;
- rc = krb5_encrypt(kctx->kc_keye.kb_tfm, decrypt,
- NULL, in_buf, out_buf, length);
+ rc = krb5_decrypt_bulk(kctx->kc_keye.kb_tfm, khdr,
+ desc, &cipher, &plain);
if (rc)
- CERROR("plain encrypt error: %d\n", rc);
+ return GSS_S_DEFECTIVE_TOKEN;
+
+ /*
+ * verify the checksum; the clear text is composed with this layout:
+ * ------------------------------------------
+ * | confounder | clear pages | krb5 header |
+ * ------------------------------------------
+ */
+ data_desc[0].data = plain.data;
+ data_desc[0].len = blocksize;
+
+ if (krb5_make_checksum(kctx->kc_enctype, &kctx->kc_keyi,
+ khdr, 1, data_desc,
+ desc->bd_iov_count, desc->bd_iov,
+ &cksum))
+ return GSS_S_FAILURE;
+ LASSERT(cksum.len >= ke->ke_hash_size);
+
+ if (memcmp(plain.data + blocksize + sizeof(*khdr),
+ cksum.data + cksum.len - ke->ke_hash_size,
+ ke->ke_hash_size)) {
+ CERROR("checksum mismatch\n");
+ rawobj_free(&cksum);
+ return GSS_S_BAD_SIG;
+ }
- return rc;
+ rawobj_free(&cksum);
+ return GSS_S_COMPLETE;
}
int gss_display_kerberos(struct gss_ctx *ctx,
.gss_verify_mic = gss_verify_mic_kerberos,
.gss_wrap = gss_wrap_kerberos,
.gss_unwrap = gss_unwrap_kerberos,
- .gss_plain_encrypt = gss_plain_encrypt_kerberos,
+ .gss_prep_bulk = gss_prep_bulk_kerberos,
+ .gss_wrap_bulk = gss_wrap_bulk_kerberos,
+ .gss_unwrap_bulk = gss_unwrap_bulk_kerberos,
.gss_delete_sec_context = gss_delete_sec_context_kerberos,
.gss_display = gss_display_kerberos,
};
__u32 lgss_get_mic(struct gss_ctx *context_handle,
int msgcnt,
rawobj_t *msg,
+ int iovcnt,
+ lnet_kiov_t *iovs,
rawobj_t *mic_token)
{
LASSERT(context_handle);
->gss_get_mic(context_handle,
msgcnt,
msg,
+ iovcnt,
+ iovs,
mic_token);
}
__u32 lgss_verify_mic(struct gss_ctx *context_handle,
int msgcnt,
rawobj_t *msg,
+ int iovcnt,
+ lnet_kiov_t *iovs,
rawobj_t *mic_token)
{
LASSERT(context_handle);
->gss_verify_mic(context_handle,
msgcnt,
msg,
+ iovcnt,
+ iovs,
mic_token);
}
}
-__u32 lgss_plain_encrypt(struct gss_ctx *ctx,
- int decrypt,
- int length,
- void *in_buf,
- void *out_buf)
+__u32 lgss_prep_bulk(struct gss_ctx *context_handle,
+ struct ptlrpc_bulk_desc *desc)
{
- LASSERT(ctx);
- LASSERT(ctx->mech_type);
- LASSERT(ctx->mech_type->gm_ops);
- LASSERT(ctx->mech_type->gm_ops->gss_plain_encrypt);
+ LASSERT(context_handle);
+ LASSERT(context_handle->mech_type);
+ LASSERT(context_handle->mech_type->gm_ops);
+ LASSERT(context_handle->mech_type->gm_ops->gss_prep_bulk);
- return ctx->mech_type->gm_ops
- ->gss_plain_encrypt(ctx, decrypt, length, in_buf, out_buf);
+ return context_handle->mech_type->gm_ops
+ ->gss_prep_bulk(context_handle, desc);
+}
+
+__u32 lgss_wrap_bulk(struct gss_ctx *context_handle,
+ struct ptlrpc_bulk_desc *desc,
+ rawobj_t *token,
+ int adj_nob)
+{
+ LASSERT(context_handle);
+ LASSERT(context_handle->mech_type);
+ LASSERT(context_handle->mech_type->gm_ops);
+ LASSERT(context_handle->mech_type->gm_ops->gss_wrap_bulk);
+
+ return context_handle->mech_type->gm_ops
+ ->gss_wrap_bulk(context_handle, desc, token, adj_nob);
+}
+
+__u32 lgss_unwrap_bulk(struct gss_ctx *context_handle,
+ struct ptlrpc_bulk_desc *desc,
+ rawobj_t *token)
+{
+ LASSERT(context_handle);
+ LASSERT(context_handle->mech_type);
+ LASSERT(context_handle->mech_type->gm_ops);
+ LASSERT(context_handle->mech_type->gm_ops->gss_unwrap_bulk);
+
+ return context_handle->mech_type->gm_ops
+ ->gss_unwrap_bulk(context_handle, desc, token);
}
/* gss_delete_sec_context: free all resources associated with context_handle.
rawobj_t *handle)
{
struct gss_header *ghdr;
- rawobj_t text[3], mic;
+ rawobj_t text[4], mic;
int textcnt, max_textcnt, mic_idx;
__u32 major;
mic.len = msg->lm_buflens[mic_idx];
mic.data = lustre_msg_buf(msg, mic_idx, 0);
- major = lgss_get_mic(mechctx, textcnt, text, &mic);
+ major = lgss_get_mic(mechctx, textcnt, text, 0, NULL, &mic);
if (major != GSS_S_COMPLETE) {
CERROR("fail to generate MIC: %08x\n", major);
return -EPERM;
struct gss_ctx *mechctx,
__u32 svc)
{
- rawobj_t text[3], mic;
+ rawobj_t text[4], mic;
int textcnt, max_textcnt;
int mic_idx;
__u32 major;
mic.len = msg->lm_buflens[mic_idx];
mic.data = lustre_msg_buf(msg, mic_idx, 0);
- major = lgss_verify_mic(mechctx, textcnt, text, &mic);
+ major = lgss_verify_mic(mechctx, textcnt, text, 0, NULL, &mic);
if (major != GSS_S_COMPLETE)
CERROR("mic verify error: %08x\n", major);
return gss_mech_payload(NULL, msgsize, privacy);
}
+static int gss_cli_bulk_payload(struct ptlrpc_cli_ctx *ctx,
+ struct sptlrpc_flavor *flvr,
+ int reply, int read)
+{
+ int payload = sizeof(struct ptlrpc_bulk_sec_desc);
+
+ LASSERT(SPTLRPC_FLVR_BULK_TYPE(flvr->sf_rpc) == SPTLRPC_BULK_DEFAULT);
+
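+ /*
+ * only a bulk write request and a bulk read reply carry the
+ * integrity/privacy data, hence the extra payload below.
+ */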
+ if ((!reply && !read) || (reply && read)) {
+ switch (SPTLRPC_FLVR_BULK_SVC(flvr->sf_rpc)) {
+ case SPTLRPC_BULK_SVC_NULL:
+ break;
+ case SPTLRPC_BULK_SVC_INTG:
+ payload += gss_cli_payload(ctx, 0, 0);
+ break;
+ case SPTLRPC_BULK_SVC_PRIV:
+ payload += gss_cli_payload(ctx, 0, 1);
+ break;
+ case SPTLRPC_BULK_SVC_AUTH:
+ default:
+ LBUG();
+ }
+ }
+
+ return payload;
+}
+
int gss_cli_ctx_match(struct ptlrpc_cli_ctx *ctx, struct vfs_cred *vcred)
{
return (ctx->cc_vcred.vc_uid == vcred->vc_uid);
if (req->rq_ctx_init)
RETURN(0);
- svc = RPC_FLVR_SVC(req->rq_flvr.sf_rpc);
+ svc = SPTLRPC_FLVR_SVC(req->rq_flvr.sf_rpc);
if (req->rq_pack_bulk)
flags |= LUSTRE_GSS_PACK_BULK;
if (req->rq_pack_udesc)
gss_header_swabber(ghdr);
major = gss_verify_msg(msg, gctx->gc_mechctx, reqhdr->gh_svc);
- if (major != GSS_S_COMPLETE)
+ if (major != GSS_S_COMPLETE) {
+ CERROR("failed to verify reply: %x\n", major);
RETURN(-EPERM);
+ }
if (req->rq_early && reqhdr->gh_svc == SPTLRPC_SVC_NULL) {
__u32 cksum;
major = gss_unseal_msg(gctx->gc_mechctx, msg,
&msglen, req->rq_repdata_len);
if (major != GSS_S_COMPLETE) {
+ CERROR("failed to unwrap reply: %x\n", major);
rc = -EPERM;
break;
}
}
/* bulk checksum is the last segment */
- if (bulk_sec_desc_unpack(msg, msg->lm_bufcount-1))
+ if (bulk_sec_desc_unpack(msg, msg->lm_bufcount - 1))
RETURN(-EPROTO);
}
struct ptlrpc_sec *sec;
LASSERT(imp);
- LASSERT(RPC_FLVR_POLICY(sf->sf_rpc) == SPTLRPC_POLICY_GSS);
+ LASSERT(SPTLRPC_FLVR_POLICY(sf->sf_rpc) == SPTLRPC_POLICY_GSS);
- gsec->gs_mech = lgss_subflavor_to_mech(RPC_FLVR_SUB(sf->sf_rpc));
+ gsec->gs_mech = lgss_subflavor_to_mech(
+ SPTLRPC_FLVR_BASE_SUB(sf->sf_rpc));
if (!gsec->gs_mech) {
CERROR("gss backend 0x%x not found\n",
- RPC_FLVR_SUB(sf->sf_rpc));
+ SPTLRPC_FLVR_BASE_SUB(sf->sf_rpc));
return -EOPNOTSUPP;
}
sec->ps_gc_interval = 0;
}
- if (sec->ps_flvr.sf_bulk_ciph != BULK_CIPH_ALG_NULL &&
- sec->ps_flvr.sf_flags & PTLRPC_SEC_FL_BULK)
+ if (SPTLRPC_FLVR_BULK_SVC(sec->ps_flvr.sf_rpc) == SPTLRPC_BULK_SVC_PRIV)
sptlrpc_enc_pool_add_user();
CDEBUG(D_SEC, "create %s%s@%p\n", (svcctx ? "reverse " : ""),
class_import_put(sec->ps_import);
- if (sec->ps_flvr.sf_bulk_ciph != BULK_CIPH_ALG_NULL &&
- sec->ps_flvr.sf_flags & PTLRPC_SEC_FL_BULK)
+ if (SPTLRPC_FLVR_BULK_SVC(sec->ps_flvr.sf_rpc) == SPTLRPC_BULK_SVC_PRIV)
sptlrpc_enc_pool_del_user();
EXIT;
}
if (req->rq_pack_bulk) {
- buflens[bufcnt] = bulk_sec_desc_size(
- req->rq_flvr.sf_bulk_hash, 1,
- req->rq_bulk_read);
+ buflens[bufcnt] = gss_cli_bulk_payload(req->rq_cli_ctx,
+ &req->rq_flvr,
+ 0, req->rq_bulk_read);
if (svc == SPTLRPC_SVC_INTG)
txtsize += buflens[bufcnt];
bufcnt++;
if (req->rq_pack_udesc)
ibuflens[ibufcnt++] = sptlrpc_current_user_desc_size();
if (req->rq_pack_bulk)
- ibuflens[ibufcnt++] = bulk_sec_desc_size(
- req->rq_flvr.sf_bulk_hash, 1,
- req->rq_bulk_read);
+ ibuflens[ibufcnt++] = gss_cli_bulk_payload(req->rq_cli_ctx,
+ &req->rq_flvr, 0,
+ req->rq_bulk_read);
clearsize = lustre_msg_size_v2(ibufcnt, ibuflens);
/* to allow append padding during encryption */
struct ptlrpc_request *req,
int msgsize)
{
- int svc = RPC_FLVR_SVC(req->rq_flvr.sf_rpc);
+ int svc = SPTLRPC_FLVR_SVC(req->rq_flvr.sf_rpc);
LASSERT(!req->rq_pack_bulk ||
(req->rq_bulk_read || req->rq_bulk_write));
ENTRY;
LASSERT(!req->rq_pool || req->rq_reqbuf);
- privacy = RPC_FLVR_SVC(req->rq_flvr.sf_rpc) == SPTLRPC_SVC_PRIV;
+ privacy = SPTLRPC_FLVR_SVC(req->rq_flvr.sf_rpc) == SPTLRPC_SVC_PRIV;
if (!req->rq_clrbuf)
goto release_reqbuf;
txtsize += buflens[1];
if (req->rq_pack_bulk) {
- buflens[bufcnt] = bulk_sec_desc_size(
- req->rq_flvr.sf_bulk_hash, 0,
- req->rq_bulk_read);
+ buflens[bufcnt] = gss_cli_bulk_payload(req->rq_cli_ctx,
+ &req->rq_flvr,
+ 1, req->rq_bulk_read);
if (svc == SPTLRPC_SVC_INTG)
txtsize += buflens[bufcnt];
bufcnt++;
buflens[0] = msgsize;
if (req->rq_pack_bulk)
- buflens[bufcnt++] = bulk_sec_desc_size(
- req->rq_flvr.sf_bulk_hash, 0,
- req->rq_bulk_read);
+ buflens[bufcnt++] = gss_cli_bulk_payload(req->rq_cli_ctx,
+ &req->rq_flvr,
+ 1, req->rq_bulk_read);
txtsize = lustre_msg_size_v2(bufcnt, buflens);
txtsize += GSS_MAX_CIPHER_BLOCK;
struct ptlrpc_request *req,
int msgsize)
{
- int svc = RPC_FLVR_SVC(req->rq_flvr.sf_rpc);
+ int svc = SPTLRPC_FLVR_SVC(req->rq_flvr.sf_rpc);
ENTRY;
LASSERT(!req->rq_pack_bulk ||
struct ptlrpc_request *req,
int segment, int newsize)
{
- int svc = RPC_FLVR_SVC(req->rq_flvr.sf_rpc);
+ int svc = SPTLRPC_FLVR_SVC(req->rq_flvr.sf_rpc);
LASSERT(!req->rq_ctx_init && !req->rq_ctx_fini);
}
*major = gss_verify_msg(msg, gctx->gsc_mechctx, gw->gw_svc);
- if (*major != GSS_S_COMPLETE)
+ if (*major != GSS_S_COMPLETE) {
+ CERROR("failed to verify request: %x\n", *major);
RETURN(-EACCES);
+ }
if (gctx->gsc_reverse == 0 &&
gss_check_seq_num(&gctx->gsc_seqdata, gw->gw_seq, 1)) {
offset++;
}
- /* check bulk cksum data */
+ /* check bulk_sec_desc data */
if (gw->gw_flags & LUSTRE_GSS_PACK_BULK) {
if (msg->lm_bufcount < (offset + 1)) {
- CERROR("no bulk checksum included\n");
+ CERROR("missing bulk sec descriptor\n");
RETURN(-EINVAL);
}
*major = gss_unseal_msg(gctx->gsc_mechctx, msg,
&msglen, req->rq_reqdata_len);
- if (*major != GSS_S_COMPLETE)
+ if (*major != GSS_S_COMPLETE) {
+ CERROR("failed to unwrap request: %x\n", *major);
RETURN(-EACCES);
+ }
if (gss_check_seq_num(&gctx->gsc_seqdata, gw->gw_seq, 1)) {
CERROR("phase 1+: discard replayed req: seq %u\n", gw->gw_seq);
return gss_mech_payload(NULL, msgsize, privacy);
}
+static int gss_svc_bulk_payload(struct gss_svc_ctx *gctx,
+ struct sptlrpc_flavor *flvr,
+ int read)
+{
+ int payload = sizeof(struct ptlrpc_bulk_sec_desc);
+
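+ /* the server attaches the bulk token only in bulk read replies */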
+ if (read) {
+ switch (SPTLRPC_FLVR_BULK_SVC(flvr->sf_rpc)) {
+ case SPTLRPC_BULK_SVC_NULL:
+ break;
+ case SPTLRPC_BULK_SVC_INTG:
+ payload += gss_mech_payload(NULL, 0, 0);
+ break;
+ case SPTLRPC_BULK_SVC_PRIV:
+ payload += gss_mech_payload(NULL, 0, 1);
+ break;
+ case SPTLRPC_BULK_SVC_AUTH:
+ default:
+ LBUG();
+ }
+ }
+
+ return payload;
+}
+
int gss_svc_alloc_rs(struct ptlrpc_request *req, int msglen)
{
struct gss_svc_reqctx *grctx;
RETURN(-EPROTO);
}
- svc = RPC_FLVR_SVC(req->rq_flvr.sf_rpc);
+ svc = SPTLRPC_FLVR_SVC(req->rq_flvr.sf_rpc);
early = (req->rq_packed_final == 0);
grctx = gss_svc_ctx2reqctx(req->rq_svc_ctx);
LASSERT(grctx->src_reqbsd);
bsd_off = ibufcnt;
- ibuflens[ibufcnt++] = bulk_sec_desc_size(
- grctx->src_reqbsd->bsd_hash_alg,
- 0, req->rq_bulk_read);
+ ibuflens[ibufcnt++] = gss_svc_bulk_payload(
+ grctx->src_ctx,
+ &req->rq_flvr,
+ req->rq_bulk_read);
}
txtsize = lustre_msg_size_v2(ibufcnt, ibuflens);
LASSERT(grctx->src_reqbsd);
bsd_off = bufcnt;
- buflens[bufcnt] = bulk_sec_desc_size(
- grctx->src_reqbsd->bsd_hash_alg,
- 0, req->rq_bulk_read);
+ buflens[bufcnt] = gss_svc_bulk_payload(
+ grctx->src_ctx,
+ &req->rq_flvr,
+ req->rq_bulk_read);
if (svc == SPTLRPC_SVC_INTG)
txtsize += buflens[bufcnt];
bufcnt++;
lustre_msghdr_set_flags(request->rq_reqmsg,
request->rq_import->imp_msghdr_flags);
+ if (request->rq_resend)
+ lustre_msg_add_flags(request->rq_reqmsg, MSG_RESENT);
+
rc = sptlrpc_cli_wrap_request(request);
if (rc)
RETURN(rc);
RETURN(rc);
}
- if (request->rq_resend)
- lustre_msg_add_flags(request->rq_reqmsg, MSG_RESENT);
-
if (!noreply) {
LASSERT (request->rq_replen != 0);
if (request->rq_repbuf == NULL) {
LASSERT (!(md->options & (LNET_MD_IOVEC | LNET_MD_KIOV | LNET_MD_PHYS)));
md->options |= LNET_MD_KIOV;
- md->start = &desc->bd_iov[0];
md->length = desc->bd_iov_count;
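+ /*
+ * when the encryption iov is present (bulk privacy), transfer
+ * the encrypted pages instead of the clear ones.
+ */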
+ if (desc->bd_enc_iov)
+ md->start = desc->bd_enc_iov;
+ else
+ md->start = desc->bd_iov;
}
void ptlrpc_add_bulk_page(struct ptlrpc_bulk_desc *desc, cfs_page_t *page,
EXPORT_SYMBOL(sptlrpc_unregister_policy);
static
-struct ptlrpc_sec_policy * sptlrpc_rpcflavor2policy(__u16 flavor)
+struct ptlrpc_sec_policy * sptlrpc_wireflavor2policy(__u32 flavor)
{
static DECLARE_MUTEX(load_mutex);
static atomic_t loaded = ATOMIC_INIT(0);
struct ptlrpc_sec_policy *policy;
- __u16 number = RPC_FLVR_POLICY(flavor), flag = 0;
+ __u16 number = SPTLRPC_FLVR_POLICY(flavor);
+ __u16 flag = 0;
if (number >= SPTLRPC_POLICY_MAX)
return NULL;
return policy;
}
-__u16 sptlrpc_name2rpcflavor(const char *name)
+__u32 sptlrpc_name2flavor_base(const char *name)
{
if (!strcmp(name, "null"))
return SPTLRPC_FLVR_NULL;
return SPTLRPC_FLVR_INVALID;
}
-EXPORT_SYMBOL(sptlrpc_name2rpcflavor);
+EXPORT_SYMBOL(sptlrpc_name2flavor_base);
-const char *sptlrpc_rpcflavor2name(__u16 flavor)
+const char *sptlrpc_flavor2name_base(__u32 flvr)
{
- switch (flavor) {
- case SPTLRPC_FLVR_NULL:
+ __u32 base = SPTLRPC_FLVR_BASE(flvr);
+
+ if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_NULL))
return "null";
- case SPTLRPC_FLVR_PLAIN:
+ else if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_PLAIN))
return "plain";
- case SPTLRPC_FLVR_KRB5N:
+ else if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_KRB5N))
return "krb5n";
- case SPTLRPC_FLVR_KRB5A:
+ else if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_KRB5A))
return "krb5a";
- case SPTLRPC_FLVR_KRB5I:
+ else if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_KRB5I))
return "krb5i";
- case SPTLRPC_FLVR_KRB5P:
+ else if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_KRB5P))
return "krb5p";
- default:
- CERROR("invalid rpc flavor 0x%x(p%u,s%u,v%u)\n", flavor,
- RPC_FLVR_POLICY(flavor), RPC_FLVR_MECH(flavor),
- RPC_FLVR_SVC(flavor));
- }
- return "unknown";
+
+ CERROR("invalid wire flavor 0x%x\n", flvr);
+ return "invalid";
}
-EXPORT_SYMBOL(sptlrpc_rpcflavor2name);
+EXPORT_SYMBOL(sptlrpc_flavor2name_base);
-int sptlrpc_flavor2name(struct sptlrpc_flavor *sf, char *buf, int bufsize)
+char *sptlrpc_flavor2name_bulk(struct sptlrpc_flavor *sf,
+ char *buf, int bufsize)
{
- char *bulk;
-
- if (sf->sf_bulk_ciph != BULK_CIPH_ALG_NULL)
- bulk = "bulkp";
- else if (sf->sf_bulk_hash != BULK_HASH_ALG_NULL)
- bulk = "bulki";
+ if (SPTLRPC_FLVR_POLICY(sf->sf_rpc) == SPTLRPC_POLICY_PLAIN)
+ snprintf(buf, bufsize, "hash:%s",
+ sptlrpc_get_hash_name(sf->u_bulk.hash.hash_alg));
else
- bulk = "bulkn";
+ snprintf(buf, bufsize, "%s",
+ sptlrpc_flavor2name_base(sf->sf_rpc));
- snprintf(buf, bufsize, "%s-%s:%s/%s",
- sptlrpc_rpcflavor2name(sf->sf_rpc), bulk,
- sptlrpc_get_hash_name(sf->sf_bulk_hash),
- sptlrpc_get_ciph_name(sf->sf_bulk_ciph));
- return 0;
+ buf[bufsize - 1] = '\0';
+ return buf;
+}
+EXPORT_SYMBOL(sptlrpc_flavor2name_bulk);
+
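+/*
+ * compose a human readable flavor name, e.g. "krb5p", or
+ * "plain-hash:<alg>" when the plain policy carries a bulk hash spec.
+ */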
+char *sptlrpc_flavor2name(struct sptlrpc_flavor *sf, char *buf, int bufsize)
+{
+ snprintf(buf, bufsize, "%s", sptlrpc_flavor2name_base(sf->sf_rpc));
+
+ /*
+ * currently we don't support customized bulk specification for
+ * flavors other than plain
+ */
+ if (SPTLRPC_FLVR_POLICY(sf->sf_rpc) == SPTLRPC_POLICY_PLAIN) {
+ char bspec[16];
+
+ bspec[0] = '-';
+ sptlrpc_flavor2name_bulk(sf, &bspec[1], sizeof(bspec) - 1);
+ strncat(buf, bspec, bufsize - strlen(buf) - 1);
+ }
+
+ buf[bufsize - 1] = '\0';
+ return buf;
}
EXPORT_SYMBOL(sptlrpc_flavor2name);
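+/*
+ * render the sec flags as a comma-terminated list, e.g.
+ * "reverse,rootonly,"; "-," is returned when no flag is set.
+ */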
+char *sptlrpc_secflags2str(__u32 flags, char *buf, int bufsize)
+{
+ buf[0] = '\0';
+
+ if (flags & PTLRPC_SEC_FL_REVERSE)
+ strncat(buf, "reverse,", bufsize - strlen(buf) - 1);
+ if (flags & PTLRPC_SEC_FL_ROOTONLY)
+ strncat(buf, "rootonly,", bufsize - strlen(buf) - 1);
+ if (flags & PTLRPC_SEC_FL_UDESC)
+ strncat(buf, "udesc,", bufsize - strlen(buf) - 1);
+ if (flags & PTLRPC_SEC_FL_BULK)
+ strncat(buf, "bulk,", bufsize - strlen(buf) - 1);
+ if (buf[0] == '\0')
+ strncat(buf, "-,", bufsize - strlen(buf) - 1);
+
+ buf[bufsize - 1] = '\0';
+ return buf;
+}
+EXPORT_SYMBOL(sptlrpc_secflags2str);
+
/**************************************************
* client context APIs *
**************************************************/
/* special security flags according to opcode */
switch (opcode) {
case OST_READ:
+ case MDS_READPAGE:
req->rq_bulk_read = 1;
break;
case OST_WRITE:
+ case MDS_WRITEPAGE:
req->rq_bulk_write = 1;
break;
case SEC_CTX_INIT:
/* force SVC_NULL for context initiation rpc, SVC_INTG for context
* destruction rpc */
if (unlikely(req->rq_ctx_init))
- rpc_flvr_set_svc(&req->rq_flvr.sf_rpc, SPTLRPC_SVC_NULL);
+ flvr_set_svc(&req->rq_flvr.sf_rpc, SPTLRPC_SVC_NULL);
else if (unlikely(req->rq_ctx_fini))
- rpc_flvr_set_svc(&req->rq_flvr.sf_rpc, SPTLRPC_SVC_INTG);
+ flvr_set_svc(&req->rq_flvr.sf_rpc, SPTLRPC_SVC_INTG);
/* user descriptor flag, null security can't do it anyway */
if ((sec->ps_flvr.sf_flags & PTLRPC_SEC_FL_UDESC) &&
/* bulk security flag */
if ((req->rq_bulk_read || req->rq_bulk_write) &&
- (req->rq_flvr.sf_bulk_ciph != BULK_CIPH_ALG_NULL ||
- req->rq_flvr.sf_bulk_hash != BULK_HASH_ALG_NULL))
+ sptlrpc_flavor_has_bulk(&req->rq_flvr))
req->rq_pack_bulk = 1;
}
void sptlrpc_request_out_callback(struct ptlrpc_request *req)
{
- if (RPC_FLVR_SVC(req->rq_flvr.sf_rpc) != SPTLRPC_SVC_PRIV)
+ if (SPTLRPC_FLVR_SVC(req->rq_flvr.sf_rpc) != SPTLRPC_SVC_PRIV)
return;
LASSERT(req->rq_clrbuf);
RETURN(rc);
}
- switch (RPC_FLVR_SVC(req->rq_flvr.sf_rpc)) {
+ switch (SPTLRPC_FLVR_SVC(req->rq_flvr.sf_rpc)) {
case SPTLRPC_SVC_NULL:
case SPTLRPC_SVC_AUTH:
case SPTLRPC_SVC_INTG:
{
struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx;
int rc;
- __u16 rpc_flvr;
+ __u32 flvr;
ENTRY;
LASSERT(ctx);
}
/* v2 message, check request/reply policy match */
- rpc_flvr = WIRE_FLVR_RPC(req->rq_repdata->lm_secflvr);
+ flvr = WIRE_FLVR(req->rq_repdata->lm_secflvr);
if (req->rq_repdata->lm_magic == LUSTRE_MSG_MAGIC_V2_SWABBED)
- __swab16s(&rpc_flvr);
+ __swab32s(&flvr);
- if (RPC_FLVR_POLICY(rpc_flvr) !=
- RPC_FLVR_POLICY(req->rq_flvr.sf_rpc)) {
+ if (SPTLRPC_FLVR_POLICY(flvr) !=
+ SPTLRPC_FLVR_POLICY(req->rq_flvr.sf_rpc)) {
CERROR("request policy was %u while reply with %u\n",
- RPC_FLVR_POLICY(req->rq_flvr.sf_rpc),
- RPC_FLVR_POLICY(rpc_flvr));
+ SPTLRPC_FLVR_POLICY(req->rq_flvr.sf_rpc),
+ SPTLRPC_FLVR_POLICY(flvr));
RETURN(-EPROTO);
}
/* do nothing if it's null policy; otherwise unpack the
* wrapper message */
- if (RPC_FLVR_POLICY(rpc_flvr) != SPTLRPC_POLICY_NULL &&
+ if (SPTLRPC_FLVR_POLICY(flvr) != SPTLRPC_POLICY_NULL &&
lustre_unpack_msg(req->rq_repdata, req->rq_repdata_len))
RETURN(-EPROTO);
- switch (RPC_FLVR_SVC(req->rq_flvr.sf_rpc)) {
+ switch (SPTLRPC_FLVR_SVC(req->rq_flvr.sf_rpc)) {
case SPTLRPC_SVC_NULL:
case SPTLRPC_SVC_AUTH:
case SPTLRPC_SVC_INTG:
EXPORT_SYMBOL(sptlrpc_sec_put);
/*
- * it's policy module responsible for taking refrence of import
+ * the policy module is responsible for taking a reference on the import
*/
static
struct ptlrpc_sec * sptlrpc_sec_create(struct obd_import *imp,
{
struct ptlrpc_sec_policy *policy;
struct ptlrpc_sec *sec;
+ char str[32];
ENTRY;
if (svc_ctx) {
CDEBUG(D_SEC, "%s %s: reverse sec using flavor %s\n",
imp->imp_obd->obd_type->typ_name,
imp->imp_obd->obd_name,
- sptlrpc_rpcflavor2name(sf->sf_rpc));
+ sptlrpc_flavor2name(sf, str, sizeof(str)));
policy = sptlrpc_policy_get(svc_ctx->sc_policy);
sf->sf_flags |= PTLRPC_SEC_FL_REVERSE | PTLRPC_SEC_FL_ROOTONLY;
CDEBUG(D_SEC, "%s %s: select security flavor %s\n",
imp->imp_obd->obd_type->typ_name,
imp->imp_obd->obd_name,
- sptlrpc_rpcflavor2name(sf->sf_rpc));
+ sptlrpc_flavor2name(sf, str, sizeof(str)));
- policy = sptlrpc_rpcflavor2policy(sf->sf_rpc);
+ policy = sptlrpc_wireflavor2policy(sf->sf_rpc);
if (!policy) {
CERROR("invalid flavor 0x%x\n", sf->sf_rpc);
RETURN(NULL);
}
}
+static inline
+int flavor_equal(struct sptlrpc_flavor *sf1, struct sptlrpc_flavor *sf2)
+{
+ return (memcmp(sf1, sf2, sizeof(*sf1)) == 0);
+}
+
+static inline
+void flavor_copy(struct sptlrpc_flavor *dst, struct sptlrpc_flavor *src)
+{
+ *dst = *src;
+}
+
static void sptlrpc_import_sec_adapt_inplace(struct obd_import *imp,
struct ptlrpc_sec *sec,
struct sptlrpc_flavor *sf)
{
- if (sf->sf_bulk_ciph != sec->ps_flvr.sf_bulk_ciph ||
- sf->sf_bulk_hash != sec->ps_flvr.sf_bulk_hash) {
- CWARN("imp %p (%s->%s): changing bulk flavor %s/%s -> %s/%s\n",
- imp, imp->imp_obd->obd_name,
- obd_uuid2str(&imp->imp_connection->c_remote_uuid),
- sptlrpc_get_ciph_name(sec->ps_flvr.sf_bulk_ciph),
- sptlrpc_get_hash_name(sec->ps_flvr.sf_bulk_hash),
- sptlrpc_get_ciph_name(sf->sf_bulk_ciph),
- sptlrpc_get_hash_name(sf->sf_bulk_hash));
-
- spin_lock(&sec->ps_lock);
- sec->ps_flvr.sf_bulk_ciph = sf->sf_bulk_ciph;
- sec->ps_flvr.sf_bulk_hash = sf->sf_bulk_hash;
- spin_unlock(&sec->ps_lock);
- }
+ char str1[32], str2[32];
- if (!equi(sf->sf_flags & PTLRPC_SEC_FL_UDESC,
- sec->ps_flvr.sf_flags & PTLRPC_SEC_FL_UDESC)) {
- CWARN("imp %p (%s->%s): %s shipping user descriptor\n",
- imp, imp->imp_obd->obd_name,
- obd_uuid2str(&imp->imp_connection->c_remote_uuid),
- (sf->sf_flags & PTLRPC_SEC_FL_UDESC) ? "start" : "stop");
+ if (sec->ps_flvr.sf_flags != sf->sf_flags)
+ CWARN("changing sec flags: %s -> %s\n",
+ sptlrpc_secflags2str(sec->ps_flvr.sf_flags,
+ str1, sizeof(str1)),
+ sptlrpc_secflags2str(sf->sf_flags,
+ str2, sizeof(str2)));
- spin_lock(&sec->ps_lock);
- sec->ps_flvr.sf_flags &= ~PTLRPC_SEC_FL_UDESC;
- sec->ps_flvr.sf_flags |= sf->sf_flags & PTLRPC_SEC_FL_UDESC;
- spin_unlock(&sec->ps_lock);
- }
+ spin_lock(&sec->ps_lock);
+ flavor_copy(&sec->ps_flvr, sf);
+ spin_unlock(&sec->ps_lock);
}
/*
- * for normal import, @svc_ctx should be NULL and @rpc_flavor is ignored;
- * for reverse import, @svc_ctx and @rpc_flavor is from incoming request.
+ * for normal import, @svc_ctx should be NULL and @flvr is ignored;
+ * for reverse import, @svc_ctx and @flvr are from the incoming request.
*/
int sptlrpc_import_sec_adapt(struct obd_import *imp,
struct ptlrpc_svc_ctx *svc_ctx,
- __u16 rpc_flavor)
+ struct sptlrpc_flavor *flvr)
{
struct ptlrpc_connection *conn;
struct sptlrpc_flavor sf;
struct ptlrpc_sec *sec, *newsec;
enum lustre_sec_part sp;
+ char str[24];
int rc;
might_sleep();
sp = imp->imp_obd->u.cli.cl_sp_me;
} else {
/* reverse import, determine flavor from incoming request */
- sf.sf_rpc = rpc_flavor;
- sf.sf_bulk_ciph = BULK_CIPH_ALG_NULL;
- sf.sf_bulk_hash = BULK_HASH_ALG_NULL;
- sf.sf_flags = PTLRPC_SEC_FL_REVERSE | PTLRPC_SEC_FL_ROOTONLY;
+ sf = *flvr;
+
+ if (sf.sf_rpc != SPTLRPC_FLVR_NULL)
+ sf.sf_flags = PTLRPC_SEC_FL_REVERSE |
+ PTLRPC_SEC_FL_ROOTONLY;
sp = sptlrpc_target_sec_part(imp->imp_obd);
}
sec = sptlrpc_import_sec_ref(imp);
if (sec) {
- if (svc_ctx == NULL) {
- /* normal import, only check rpc flavor, if just bulk
- * flavor or flags changed, we can handle it on the fly
- * without switching sec. */
- if (sf.sf_rpc == sec->ps_flvr.sf_rpc) {
- sptlrpc_import_sec_adapt_inplace(imp, sec, &sf);
-
- rc = 0;
- goto out;
- }
- } else {
- /* reverse import, do not compare bulk flavor */
- if (sf.sf_rpc == sec->ps_flvr.sf_rpc) {
- rc = 0;
- goto out;
- }
- }
+ char str2[24];
+
+ if (flavor_equal(&sf, &sec->ps_flvr))
+ goto out;
CWARN("%simport %p (%s%s%s): changing flavor "
- "(%s, %s/%s) -> (%s, %s/%s)\n",
- svc_ctx ? "reverse " : "",
+ "%s -> %s\n", svc_ctx ? "reverse " : "",
imp, imp->imp_obd->obd_name,
svc_ctx == NULL ? "->" : "<-",
obd_uuid2str(&conn->c_remote_uuid),
- sptlrpc_rpcflavor2name(sec->ps_flvr.sf_rpc),
- sptlrpc_get_hash_name(sec->ps_flvr.sf_bulk_hash),
- sptlrpc_get_ciph_name(sec->ps_flvr.sf_bulk_ciph),
- sptlrpc_rpcflavor2name(sf.sf_rpc),
- sptlrpc_get_hash_name(sf.sf_bulk_hash),
- sptlrpc_get_ciph_name(sf.sf_bulk_ciph));
+ sptlrpc_flavor2name(&sec->ps_flvr, str, sizeof(str)),
+ sptlrpc_flavor2name(&sf, str2, sizeof(str2)));
+
+ if (SPTLRPC_FLVR_POLICY(sf.sf_rpc) ==
+ SPTLRPC_FLVR_POLICY(sec->ps_flvr.sf_rpc) &&
+ SPTLRPC_FLVR_MECH(sf.sf_rpc) ==
+ SPTLRPC_FLVR_MECH(sec->ps_flvr.sf_rpc)) {
+ sptlrpc_import_sec_adapt_inplace(imp, sec, &sf);
+ goto out;
+ }
} else {
- CWARN("%simport %p (%s%s%s) netid %x: "
- "select initial flavor (%s, %s/%s)\n",
+ CWARN("%simport %p (%s%s%s) netid %x: select flavor %s\n",
svc_ctx == NULL ? "" : "reverse ",
imp, imp->imp_obd->obd_name,
svc_ctx == NULL ? "->" : "<-",
obd_uuid2str(&conn->c_remote_uuid),
LNET_NIDNET(conn->c_self),
- sptlrpc_rpcflavor2name(sf.sf_rpc),
- sptlrpc_get_hash_name(sf.sf_bulk_hash),
- sptlrpc_get_ciph_name(sf.sf_bulk_ciph));
+ sptlrpc_flavor2name(&sf, str, sizeof(str)));
}
mutex_down(&imp->imp_sec_mutex);
return 1;
if ((req->rq_ctx_init || req->rq_ctx_fini) &&
- RPC_FLVR_POLICY(exp->sf_rpc) == RPC_FLVR_POLICY(flvr->sf_rpc) &&
- RPC_FLVR_MECH(exp->sf_rpc) == RPC_FLVR_MECH(flvr->sf_rpc))
+ SPTLRPC_FLVR_POLICY(exp->sf_rpc) ==
+ SPTLRPC_FLVR_POLICY(flvr->sf_rpc) &&
+ SPTLRPC_FLVR_MECH(exp->sf_rpc) == SPTLRPC_FLVR_MECH(flvr->sf_rpc))
return 1;
return 0;
spin_unlock(&exp->exp_lock);
return sptlrpc_import_sec_adapt(exp->exp_imp_reverse,
- req->rq_svc_ctx, flavor.sf_rpc);
+ req->rq_svc_ctx, &flavor);
}
/* if it equals to the current flavor, we accept it, but need to
return sptlrpc_import_sec_adapt(exp->exp_imp_reverse,
req->rq_svc_ctx,
- flavor.sf_rpc);
+ &flavor);
} else {
CDEBUG(D_SEC, "exp %p (%x|%x|%x): is current flavor, "
"install rvs ctx\n", exp, exp->exp_flvr.sf_rpc,
exp->exp_connection->c_peer.nid,
&new_flvr);
if (exp->exp_flvr_changed ||
- memcmp(&new_flvr, &exp->exp_flvr, sizeof(new_flvr))) {
+ !flavor_equal(&new_flvr, &exp->exp_flvr)) {
exp->exp_flvr_old[1] = new_flvr;
exp->exp_flvr_expire[1] = 0;
exp->exp_flvr_changed = 1;
int sptlrpc_svc_unwrap_request(struct ptlrpc_request *req)
{
struct ptlrpc_sec_policy *policy;
- struct lustre_msg *msg = req->rq_reqbuf;
- int rc;
+ struct lustre_msg *msg = req->rq_reqbuf;
+ int rc;
ENTRY;
LASSERT(msg);
LASSERT(req->rq_reqmsg == NULL);
LASSERT(req->rq_repmsg == NULL);
+ LASSERT(req->rq_svc_ctx == NULL);
req->rq_sp_from = LUSTRE_SP_ANY;
req->rq_auth_uid = INVALID_UID;
}
/*
- * v2 message.
+ * only expect v2 message.
*/
- if (msg->lm_magic == LUSTRE_MSG_MAGIC_V2)
- req->rq_flvr.sf_rpc = WIRE_FLVR_RPC(msg->lm_secflvr);
- else
- req->rq_flvr.sf_rpc = WIRE_FLVR_RPC(__swab32(msg->lm_secflvr));
+ switch (msg->lm_magic) {
+ case LUSTRE_MSG_MAGIC_V2:
+ req->rq_flvr.sf_rpc = WIRE_FLVR(msg->lm_secflvr);
+ break;
+ case LUSTRE_MSG_MAGIC_V2_SWABBED:
+ req->rq_flvr.sf_rpc = WIRE_FLVR(__swab32(msg->lm_secflvr));
+ break;
+ default:
+ CERROR("invalid magic %x\n", msg->lm_magic);
+ RETURN(SECSVC_DROP);
+ }
/* unpack the wrapper message if the policy is not null */
- if ((RPC_FLVR_POLICY(req->rq_flvr.sf_rpc) != SPTLRPC_POLICY_NULL) &&
- lustre_unpack_msg(msg, req->rq_reqdata_len))
+ if (SPTLRPC_FLVR_POLICY(req->rq_flvr.sf_rpc) != SPTLRPC_POLICY_NULL &&
+ lustre_unpack_msg(msg, req->rq_reqdata_len)) {
+ CERROR("invalid wrapper msg format\n");
RETURN(SECSVC_DROP);
+ }
- policy = sptlrpc_rpcflavor2policy(req->rq_flvr.sf_rpc);
+ policy = sptlrpc_wireflavor2policy(req->rq_flvr.sf_rpc);
if (!policy) {
CERROR("unsupported rpc flavor %x\n", req->rq_flvr.sf_rpc);
RETURN(SECSVC_DROP);
rc = policy->sp_sops->accept(req);
LASSERT(req->rq_reqmsg || rc != SECSVC_OK);
+ LASSERT(req->rq_svc_ctx || rc == SECSVC_DROP);
sptlrpc_policy_put(policy);
/* sanity check for the request source */
rc = sptlrpc_svc_check_from(req, rc);
-
- /* FIXME move to proper place */
- if (rc == SECSVC_OK) {
- __u32 opc = lustre_msg_get_opc(req->rq_reqmsg);
-
- if (opc == OST_WRITE)
- req->rq_bulk_write = 1;
- else if (opc == OST_READ)
- req->rq_bulk_read = 1;
- }
-
- LASSERT(req->rq_svc_ctx || rc == SECSVC_DROP);
RETURN(rc);
}
{
struct ptlrpc_cli_ctx *ctx;
+ LASSERT(req->rq_bulk_read || req->rq_bulk_write);
+
if (!req->rq_pack_bulk)
return 0;
- LASSERT(req->rq_bulk_read || req->rq_bulk_write);
-
ctx = req->rq_cli_ctx;
if (ctx->cc_ops->wrap_bulk)
return ctx->cc_ops->wrap_bulk(ctx, req, desc);
}
EXPORT_SYMBOL(sptlrpc_cli_wrap_bulk);
-static
-void pga_to_bulk_desc(int nob, obd_count pg_count, struct brw_page **pga,
- struct ptlrpc_bulk_desc *desc)
-{
- int i;
-
- LASSERT(pga);
- LASSERT(*pga);
-
- for (i = 0; i < pg_count && nob > 0; i++) {
-#ifdef __KERNEL__
- desc->bd_iov[i].kiov_page = pga[i]->pg;
- desc->bd_iov[i].kiov_len = pga[i]->count > nob ?
- nob : pga[i]->count;
- desc->bd_iov[i].kiov_offset = pga[i]->off & ~CFS_PAGE_MASK;
-#else
- /* FIXME currently liblustre doesn't support bulk encryption.
- * if we do, check again following may not be right. */
- LASSERTF(0, "Bulk encryption not implemented for liblustre\n");
- desc->bd_iov[i].iov_base = pga[i]->pg->addr;
- desc->bd_iov[i].iov_len = pga[i]->count > nob ?
- nob : pga[i]->count;
-#endif
-
- desc->bd_iov_count++;
- nob -= pga[i]->count;
- }
-}
-
+/*
+ * return the actual plain text size received (nob), or an error code.
+ */
int sptlrpc_cli_unwrap_bulk_read(struct ptlrpc_request *req,
- int nob, obd_count pg_count,
- struct brw_page **pga)
+ struct ptlrpc_bulk_desc *desc,
+ int nob)
{
- struct ptlrpc_bulk_desc *desc;
- struct ptlrpc_cli_ctx *ctx;
- int rc = 0;
-
- if (!req->rq_pack_bulk)
- return 0;
+ struct ptlrpc_cli_ctx *ctx;
+ int rc;
LASSERT(req->rq_bulk_read && !req->rq_bulk_write);
- OBD_ALLOC(desc, offsetof(struct ptlrpc_bulk_desc, bd_iov[pg_count]));
- if (desc == NULL) {
- CERROR("out of memory, can't verify bulk read data\n");
- return -ENOMEM;
- }
-
- pga_to_bulk_desc(nob, pg_count, pga, desc);
+ if (!req->rq_pack_bulk)
+ return desc->bd_nob_transferred;
ctx = req->rq_cli_ctx;
- if (ctx->cc_ops->unwrap_bulk)
+ if (ctx->cc_ops->unwrap_bulk) {
rc = ctx->cc_ops->unwrap_bulk(ctx, req, desc);
-
- OBD_FREE(desc, offsetof(struct ptlrpc_bulk_desc, bd_iov[pg_count]));
-
- return rc;
+ if (rc < 0)
+ return rc;
+ }
+ return desc->bd_nob_transferred;
}
EXPORT_SYMBOL(sptlrpc_cli_unwrap_bulk_read);
+/*
+ * return 0 on success, or an error code.
+ */
int sptlrpc_cli_unwrap_bulk_write(struct ptlrpc_request *req,
struct ptlrpc_bulk_desc *desc)
{
- struct ptlrpc_cli_ctx *ctx;
+ struct ptlrpc_cli_ctx *ctx;
+ int rc;
+
+ LASSERT(!req->rq_bulk_read && req->rq_bulk_write);
if (!req->rq_pack_bulk)
return 0;
- LASSERT(!req->rq_bulk_read && req->rq_bulk_write);
-
ctx = req->rq_cli_ctx;
- if (ctx->cc_ops->unwrap_bulk)
- return ctx->cc_ops->unwrap_bulk(ctx, req, desc);
+ if (ctx->cc_ops->unwrap_bulk) {
+ rc = ctx->cc_ops->unwrap_bulk(ctx, req, desc);
+ if (rc < 0)
+ return rc;
+ }
+
+ /*
+ * if everything went right, nob should equal nob_transferred.
+ * in privacy mode, nob_transferred needs to be adjusted first.
+ */
+ if (desc->bd_nob != desc->bd_nob_transferred) {
+ CERROR("nob %d doesn't match transferred nob %d",
+ desc->bd_nob, desc->bd_nob_transferred);
+ return -EPROTO;
+ }
return 0;
}
{
struct ptlrpc_svc_ctx *ctx;
+ LASSERT(req->rq_bulk_read);
+
if (!req->rq_pack_bulk)
return 0;
- LASSERT(req->rq_bulk_read || req->rq_bulk_write);
-
ctx = req->rq_svc_ctx;
if (ctx->sc_policy->sp_sops->wrap_bulk)
return ctx->sc_policy->sp_sops->wrap_bulk(req, desc);
struct ptlrpc_bulk_desc *desc)
{
struct ptlrpc_svc_ctx *ctx;
+ int rc;
+
+ LASSERT(req->rq_bulk_write);
+
+ if (desc->bd_nob_transferred != desc->bd_nob &&
+ SPTLRPC_FLVR_BULK_SVC(req->rq_flvr.sf_rpc) !=
+ SPTLRPC_BULK_SVC_PRIV) {
+ DEBUG_REQ(D_ERROR, req, "truncated bulk GET %d(%d)",
+ desc->bd_nob_transferred, desc->bd_nob);
+ return -ETIMEDOUT;
+ }
if (!req->rq_pack_bulk)
return 0;
- LASSERT(req->rq_bulk_read || req->rq_bulk_write);
-
ctx = req->rq_svc_ctx;
- if (ctx->sc_policy->sp_sops->unwrap_bulk);
- return ctx->sc_policy->sp_sops->unwrap_bulk(req, desc);
+ if (ctx->sc_policy->sp_sops->unwrap_bulk) {
+ rc = ctx->sc_policy->sp_sops->unwrap_bulk(req, desc);
+ if (rc)
+ CERROR("error unwrap bulk: %d\n", rc);
+ }
+ /* return 0 to allow the reply to be sent */
return 0;
}
EXPORT_SYMBOL(sptlrpc_svc_unwrap_bulk);
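+/*
+ * let the policy prepare the bulk descriptor (e.g. set up the
+ * encryption iov) before the server pulls a bulk write.
+ */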
+int sptlrpc_svc_prep_bulk(struct ptlrpc_request *req,
+ struct ptlrpc_bulk_desc *desc)
+{
+ struct ptlrpc_svc_ctx *ctx;
+
+ LASSERT(req->rq_bulk_write);
+
+ if (!req->rq_pack_bulk)
+ return 0;
+
+ ctx = req->rq_svc_ctx;
+ if (ctx->sc_policy->sp_sops->prep_bulk)
+ return ctx->sc_policy->sp_sops->prep_bulk(req, desc);
+
+ return 0;
+}
+EXPORT_SYMBOL(sptlrpc_svc_prep_bulk);
/****************************************
* user descriptor helpers *
}
EXPORT_SYMBOL(sec2target_str);
+/*
+ * return true if the bulk data is protected
+ */
+int sptlrpc_flavor_has_bulk(struct sptlrpc_flavor *flvr)
+{
+ switch (SPTLRPC_FLVR_BULK_SVC(flvr->sf_rpc)) {
+ case SPTLRPC_BULK_SVC_INTG:
+ case SPTLRPC_BULK_SVC_PRIV:
+ return 1;
+ default:
+ return 0;
+ }
+}
+EXPORT_SYMBOL(sptlrpc_flavor_has_bulk);
+
/****************************************
* crypto API helper/alloc blkciper *
****************************************/
static inline void enc_pools_wakeup(void)
{
+ LASSERT_SPIN_LOCKED(&page_pools.epp_lock);
+ LASSERT(page_pools.epp_waitqlen >= 0);
+
if (unlikely(page_pools.epp_waitqlen)) {
- LASSERT(page_pools.epp_waitqlen > 0);
LASSERT(cfs_waitq_active(&page_pools.epp_waitq));
cfs_waitq_broadcast(&page_pools.epp_waitq);
}
if (page_pools.epp_total_pages < page_needed)
return 1;
- /* if we just did a shrink due to memory tight, we'd better
- * wait a while to grow again.
+ /*
+ * we would like to return 0 here if a shrink happened just a
+ * moment ago, but that may cause a deadlock if both the client
+ * and the ost live on a single node.
*/
+#if 0
if (now - page_pools.epp_last_shrink < 2)
return 0;
+#endif
/*
* here perhaps we need to consider other factors, like the wait queue
int p_idx, g_idx;
int i;
- LASSERT(desc->bd_max_iov > 0);
- LASSERT(desc->bd_max_iov <= page_pools.epp_max_pages);
+ LASSERT(desc->bd_iov_count > 0);
+ LASSERT(desc->bd_iov_count <= page_pools.epp_max_pages);
- /* resent bulk, enc pages might have been allocated previously */
- if (desc->bd_enc_pages != NULL)
+ /* resent bulk, enc iov might have been allocated previously */
+ if (desc->bd_enc_iov != NULL)
return 0;
- OBD_ALLOC(desc->bd_enc_pages,
- desc->bd_max_iov * sizeof(*desc->bd_enc_pages));
- if (desc->bd_enc_pages == NULL)
+ OBD_ALLOC(desc->bd_enc_iov,
+ desc->bd_iov_count * sizeof(*desc->bd_enc_iov));
+ if (desc->bd_enc_iov == NULL)
return -ENOMEM;
spin_lock(&page_pools.epp_lock);
page_pools.epp_st_access++;
again:
- if (unlikely(page_pools.epp_free_pages < desc->bd_max_iov)) {
+ if (unlikely(page_pools.epp_free_pages < desc->bd_iov_count)) {
if (tick == 0)
tick = cfs_time_current();
now = cfs_time_current_sec();
page_pools.epp_st_missings++;
- page_pools.epp_pages_short += desc->bd_max_iov;
+ page_pools.epp_pages_short += desc->bd_iov_count;
- if (enc_pools_should_grow(desc->bd_max_iov, now)) {
+ if (enc_pools_should_grow(desc->bd_iov_count, now)) {
page_pools.epp_growing = 1;
spin_unlock(&page_pools.epp_lock);
spin_lock(&page_pools.epp_lock);
page_pools.epp_growing = 0;
+
+ enc_pools_wakeup();
} else {
if (++page_pools.epp_waitqlen >
page_pools.epp_st_max_wqlen)
spin_unlock(&page_pools.epp_lock);
cfs_waitq_wait(&waitlink, CFS_TASK_UNINT);
cfs_waitq_del(&page_pools.epp_waitq, &waitlink);
- spin_lock(&page_pools.epp_lock);
-
LASSERT(page_pools.epp_waitqlen > 0);
+ spin_lock(&page_pools.epp_lock);
page_pools.epp_waitqlen--;
}
- LASSERT(page_pools.epp_pages_short >= desc->bd_max_iov);
- page_pools.epp_pages_short -= desc->bd_max_iov;
+ LASSERT(page_pools.epp_pages_short >= desc->bd_iov_count);
+ page_pools.epp_pages_short -= desc->bd_iov_count;
this_idle = 0;
goto again;
}
/* proceed with rest of allocation */
- page_pools.epp_free_pages -= desc->bd_max_iov;
+ page_pools.epp_free_pages -= desc->bd_iov_count;
p_idx = page_pools.epp_free_pages / PAGES_PER_POOL;
g_idx = page_pools.epp_free_pages % PAGES_PER_POOL;
- for (i = 0; i < desc->bd_max_iov; i++) {
+ for (i = 0; i < desc->bd_iov_count; i++) {
LASSERT(page_pools.epp_pools[p_idx][g_idx] != NULL);
- desc->bd_enc_pages[i] = page_pools.epp_pools[p_idx][g_idx];
+ desc->bd_enc_iov[i].kiov_page =
+ page_pools.epp_pools[p_idx][g_idx];
page_pools.epp_pools[p_idx][g_idx] = NULL;
if (++g_idx == PAGES_PER_POOL) {
int p_idx, g_idx;
int i;
- if (desc->bd_enc_pages == NULL)
- return;
- if (desc->bd_max_iov == 0)
+ if (desc->bd_enc_iov == NULL)
return;
+ LASSERT(desc->bd_iov_count > 0);
+
spin_lock(&page_pools.epp_lock);
p_idx = page_pools.epp_free_pages / PAGES_PER_POOL;
g_idx = page_pools.epp_free_pages % PAGES_PER_POOL;
- LASSERT(page_pools.epp_free_pages + desc->bd_max_iov <=
+ LASSERT(page_pools.epp_free_pages + desc->bd_iov_count <=
page_pools.epp_total_pages);
LASSERT(page_pools.epp_pools[p_idx]);
- for (i = 0; i < desc->bd_max_iov; i++) {
- LASSERT(desc->bd_enc_pages[i] != NULL);
+ for (i = 0; i < desc->bd_iov_count; i++) {
+ LASSERT(desc->bd_enc_iov[i].kiov_page != NULL);
LASSERT(g_idx != 0 || page_pools.epp_pools[p_idx]);
LASSERT(page_pools.epp_pools[p_idx][g_idx] == NULL);
- page_pools.epp_pools[p_idx][g_idx] = desc->bd_enc_pages[i];
+ page_pools.epp_pools[p_idx][g_idx] =
+ desc->bd_enc_iov[i].kiov_page;
if (++g_idx == PAGES_PER_POOL) {
p_idx++;
}
}
- page_pools.epp_free_pages += desc->bd_max_iov;
+ page_pools.epp_free_pages += desc->bd_iov_count;
enc_pools_wakeup();
spin_unlock(&page_pools.epp_lock);
- OBD_FREE(desc->bd_enc_pages,
- desc->bd_max_iov * sizeof(*desc->bd_enc_pages));
- desc->bd_enc_pages = NULL;
+ OBD_FREE(desc->bd_enc_iov,
+ desc->bd_iov_count * sizeof(*desc->bd_enc_iov));
+ desc->bd_enc_iov = NULL;
}
EXPORT_SYMBOL(sptlrpc_enc_pool_put_pages);
spin_unlock(&page_pools.epp_lock);
if (need_grow) {
- enc_pools_add_pages(PTLRPC_MAX_BRW_PAGES);
+ enc_pools_add_pages(2 * PTLRPC_MAX_BRW_PAGES);
spin_lock(&page_pools.epp_lock);
page_pools.epp_growing = 0;
[BULK_HASH_ALG_SHA256] = { "sha256", "sha256", 32 },
[BULK_HASH_ALG_SHA384] = { "sha384", "sha384", 48 },
[BULK_HASH_ALG_SHA512] = { "sha512", "sha512", 64 },
- [BULK_HASH_ALG_WP256] = { "wp256", "wp256", 32 },
- [BULK_HASH_ALG_WP384] = { "wp384", "wp384", 48 },
- [BULK_HASH_ALG_WP512] = { "wp512", "wp512", 64 },
};
const struct sptlrpc_hash_type *sptlrpc_get_hash_type(__u8 hash_alg)
}
EXPORT_SYMBOL(sptlrpc_get_hash_name);
-int bulk_sec_desc_size(__u8 hash_alg, int request, int read)
+__u8 sptlrpc_get_hash_alg(const char *algname)
{
- int size = sizeof(struct ptlrpc_bulk_sec_desc);
-
- LASSERT(hash_alg < BULK_HASH_ALG_MAX);
-
- /* read request don't need extra data */
- if (!(read && request))
- size += hash_types[hash_alg].sht_size;
+ int i;
- return size;
+ for (i = 0; i < BULK_HASH_ALG_MAX; i++)
+ if (!strcmp(hash_types[i].sht_name, algname))
+ break;
+ return i;
}
-EXPORT_SYMBOL(bulk_sec_desc_size);
+EXPORT_SYMBOL(sptlrpc_get_hash_alg);
int bulk_sec_desc_unpack(struct lustre_msg *msg, int offset)
{
struct ptlrpc_bulk_sec_desc *bsd;
- int size = msg->lm_buflens[offset];
+ int size = msg->lm_buflens[offset];
bsd = lustre_msg_buf(msg, offset, sizeof(*bsd));
if (bsd == NULL) {
return -EINVAL;
}
- /* nothing to swab */
+ if (lustre_msg_swabbed(msg)) {
+ __swab32s(&bsd->bsd_nob);
+ }
if (unlikely(bsd->bsd_version != 0)) {
CERROR("Unexpected version %u\n", bsd->bsd_version);
return -EPROTO;
}
- if (unlikely(bsd->bsd_flags != 0)) {
- CERROR("Unexpected flags %x\n", bsd->bsd_flags);
+ if (unlikely(bsd->bsd_type >= SPTLRPC_BULK_MAX)) {
+ CERROR("Invalid type %u\n", bsd->bsd_type);
return -EPROTO;
}
- if (unlikely(!sptlrpc_get_hash_type(bsd->bsd_hash_alg))) {
- CERROR("Unsupported checksum algorithm %u\n",
- bsd->bsd_hash_alg);
- return -EINVAL;
- }
+ /* FIXME more sanity check here */
- if (unlikely(!sptlrpc_get_ciph_type(bsd->bsd_ciph_alg))) {
- CERROR("Unsupported cipher algorithm %u\n",
- bsd->bsd_ciph_alg);
- return -EINVAL;
- }
-
- if (unlikely(size > sizeof(*bsd)) &&
- size < sizeof(*bsd) + hash_types[bsd->bsd_hash_alg].sht_size) {
- CERROR("Mal-formed checksum data: csum alg %u, size %d\n",
- bsd->bsd_hash_alg, size);
- return -EINVAL;
+ if (unlikely(bsd->bsd_svc != SPTLRPC_BULK_SVC_NULL &&
+ bsd->bsd_svc != SPTLRPC_BULK_SVC_INTG &&
+ bsd->bsd_svc != SPTLRPC_BULK_SVC_PRIV)) {
+ CERROR("Invalid svc %u\n", bsd->bsd_svc);
+ return -EPROTO;
}
return 0;
return 0;
}
-static int do_bulk_checksum(struct ptlrpc_bulk_desc *desc, __u32 alg, void *buf)
+int sptlrpc_get_bulk_checksum(struct ptlrpc_bulk_desc *desc, __u8 alg,
+ void *buf, int buflen)
{
struct hash_desc hdesc;
- struct scatterlist *sl;
- int i, rc = 0, bytes = 0;
+ int hashsize;
+ char hashbuf[64];
+ struct scatterlist sl;
+ int i;
- LASSERT(alg > BULK_HASH_ALG_NULL &&
- alg < BULK_HASH_ALG_MAX);
+ LASSERT(alg > BULK_HASH_ALG_NULL && alg < BULK_HASH_ALG_MAX);
+ LASSERT(buflen >= 4);
switch (alg) {
case BULK_HASH_ALG_ADLER32:
CERROR("Unable to allocate TFM %s\n", hash_types[alg].sht_name);
return -ENOMEM;
}
+
hdesc.flags = 0;
+ ll_crypto_hash_init(&hdesc);
- OBD_ALLOC(sl, sizeof(*sl) * desc->bd_iov_count);
- if (sl == NULL) {
- rc = -ENOMEM;
- goto out_tfm;
- }
+ hashsize = ll_crypto_hash_digestsize(hdesc.tfm);
for (i = 0; i < desc->bd_iov_count; i++) {
- sl[i].page = desc->bd_iov[i].kiov_page;
- sl[i].offset = desc->bd_iov[i].kiov_offset & ~CFS_PAGE_MASK;
- sl[i].length = desc->bd_iov[i].kiov_len;
- bytes += desc->bd_iov[i].kiov_len;
+ sl.page = desc->bd_iov[i].kiov_page;
+ sl.offset = desc->bd_iov[i].kiov_offset;
+ sl.length = desc->bd_iov[i].kiov_len;
+ ll_crypto_hash_update(&hdesc, &sl, sl.length);
}
- ll_crypto_hash_init(&hdesc);
- ll_crypto_hash_update(&hdesc, sl, bytes);
- ll_crypto_hash_final(&hdesc, buf);
-
- OBD_FREE(sl, sizeof(*sl) * desc->bd_iov_count);
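+ /*
+ * the caller's buffer may be smaller than the digest, in which
+ * case the digest is truncated to buflen bytes.
+ */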
+ if (hashsize > buflen) {
+ ll_crypto_hash_final(&hdesc, hashbuf);
+ memcpy(buf, hashbuf, buflen);
+ } else {
+ ll_crypto_hash_final(&hdesc, buf);
+ }
-out_tfm:
ll_crypto_free_hash(hdesc.tfm);
- return rc;
+ return 0;
}
+EXPORT_SYMBOL(sptlrpc_get_bulk_checksum);
#else /* !__KERNEL__ */
-static int do_bulk_checksum(struct ptlrpc_bulk_desc *desc, __u32 alg, void *buf)
+int sptlrpc_get_bulk_checksum(struct ptlrpc_bulk_desc *desc, __u8 alg,
+ void *buf, int buflen)
{
__u32 csum32;
int i;
}
#endif /* __KERNEL__ */
-
-/*
- * perform algorithm @alg checksum on @desc, store result in @buf.
- * if anything goes wrong, leave 'alg' be BULK_HASH_ALG_NULL.
- */
-static
-int generate_bulk_csum(struct ptlrpc_bulk_desc *desc, __u32 alg,
- struct ptlrpc_bulk_sec_desc *bsd, int bsdsize)
-{
- int rc;
-
- LASSERT(bsd);
- LASSERT(alg < BULK_HASH_ALG_MAX);
-
- bsd->bsd_hash_alg = BULK_HASH_ALG_NULL;
-
- if (alg == BULK_HASH_ALG_NULL)
- return 0;
-
- LASSERT(bsdsize >= sizeof(*bsd) + hash_types[alg].sht_size);
-
- rc = do_bulk_checksum(desc, alg, bsd->bsd_csum);
- if (rc == 0)
- bsd->bsd_hash_alg = alg;
-
- return rc;
-}
-
-static
-int verify_bulk_csum(struct ptlrpc_bulk_desc *desc, int read,
- struct ptlrpc_bulk_sec_desc *bsdv, int bsdvsize,
- struct ptlrpc_bulk_sec_desc *bsdr, int bsdrsize)
-{
- char *csum_p;
- char *buf = NULL;
- int csum_size, rc = 0;
-
- LASSERT(bsdv);
- LASSERT(bsdv->bsd_hash_alg < BULK_HASH_ALG_MAX);
-
- if (bsdr)
- bsdr->bsd_hash_alg = BULK_HASH_ALG_NULL;
-
- if (bsdv->bsd_hash_alg == BULK_HASH_ALG_NULL)
- return 0;
-
- /* for all supported algorithms */
- csum_size = hash_types[bsdv->bsd_hash_alg].sht_size;
-
- if (bsdvsize < sizeof(*bsdv) + csum_size) {
- CERROR("verifier size %d too small, require %d\n",
- bsdvsize, (int) sizeof(*bsdv) + csum_size);
- return -EINVAL;
- }
-
- if (bsdr) {
- LASSERT(bsdrsize >= sizeof(*bsdr) + csum_size);
- csum_p = (char *) bsdr->bsd_csum;
- } else {
- OBD_ALLOC(buf, csum_size);
- if (buf == NULL)
- return -EINVAL;
- csum_p = buf;
- }
-
- rc = do_bulk_checksum(desc, bsdv->bsd_hash_alg, csum_p);
-
- if (memcmp(bsdv->bsd_csum, csum_p, csum_size)) {
- CERROR("BAD %s CHECKSUM (%s), data mutated during "
- "transfer!\n", read ? "READ" : "WRITE",
- hash_types[bsdv->bsd_hash_alg].sht_name);
- rc = -EINVAL;
- } else {
- CDEBUG(D_SEC, "bulk %s checksum (%s) verified\n",
- read ? "read" : "write",
- hash_types[bsdv->bsd_hash_alg].sht_name);
- }
-
- if (bsdr) {
- bsdr->bsd_hash_alg = bsdv->bsd_hash_alg;
- memcpy(bsdr->bsd_csum, csum_p, csum_size);
- } else {
- LASSERT(buf);
- OBD_FREE(buf, csum_size);
- }
-
- return rc;
-}
-
-int bulk_csum_cli_request(struct ptlrpc_bulk_desc *desc, int read,
- __u32 alg, struct lustre_msg *rmsg, int roff)
-{
- struct ptlrpc_bulk_sec_desc *bsdr;
- int rsize, rc = 0;
-
- rsize = rmsg->lm_buflens[roff];
- bsdr = lustre_msg_buf(rmsg, roff, sizeof(*bsdr));
-
- LASSERT(bsdr);
- LASSERT(rsize >= sizeof(*bsdr));
- LASSERT(alg < BULK_HASH_ALG_MAX);
-
- if (read) {
- bsdr->bsd_hash_alg = alg;
- } else {
- rc = generate_bulk_csum(desc, alg, bsdr, rsize);
- if (rc)
- CERROR("bulk write: client failed to compute "
- "checksum: %d\n", rc);
-
- /* For sending we only compute the wrong checksum instead
- * of corrupting the data so it is still correct on a redo */
- if (rc == 0 && OBD_FAIL_CHECK(OBD_FAIL_OSC_CHECKSUM_SEND) &&
- bsdr->bsd_hash_alg != BULK_HASH_ALG_NULL)
- bsdr->bsd_csum[0] ^= 0x1;
- }
-
- return rc;
-}
-EXPORT_SYMBOL(bulk_csum_cli_request);
-
-int bulk_csum_cli_reply(struct ptlrpc_bulk_desc *desc, int read,
- struct lustre_msg *rmsg, int roff,
- struct lustre_msg *vmsg, int voff)
-{
- struct ptlrpc_bulk_sec_desc *bsdv, *bsdr;
- int rsize, vsize;
-
- rsize = rmsg->lm_buflens[roff];
- vsize = vmsg->lm_buflens[voff];
- bsdr = lustre_msg_buf(rmsg, roff, 0);
- bsdv = lustre_msg_buf(vmsg, voff, 0);
-
- if (bsdv == NULL || vsize < sizeof(*bsdv)) {
- CERROR("Invalid checksum verifier from server: size %d\n",
- vsize);
- return -EINVAL;
- }
-
- LASSERT(bsdr);
- LASSERT(rsize >= sizeof(*bsdr));
- LASSERT(vsize >= sizeof(*bsdv));
-
- if (bsdr->bsd_hash_alg != bsdv->bsd_hash_alg) {
- CERROR("bulk %s: checksum algorithm mismatch: client request "
- "%s but server reply with %s. try to use the new one "
- "for checksum verification\n",
- read ? "read" : "write",
- hash_types[bsdr->bsd_hash_alg].sht_name,
- hash_types[bsdv->bsd_hash_alg].sht_name);
- }
-
- if (read)
- return verify_bulk_csum(desc, 1, bsdv, vsize, NULL, 0);
- else {
- char *cli, *srv, *new = NULL;
- int csum_size = hash_types[bsdr->bsd_hash_alg].sht_size;
-
- LASSERT(bsdr->bsd_hash_alg < BULK_HASH_ALG_MAX);
- if (bsdr->bsd_hash_alg == BULK_HASH_ALG_NULL)
- return 0;
-
- if (vsize < sizeof(*bsdv) + csum_size) {
- CERROR("verifier size %d too small, require %d\n",
- vsize, (int) sizeof(*bsdv) + csum_size);
- return -EINVAL;
- }
-
- cli = (char *) (bsdr + 1);
- srv = (char *) (bsdv + 1);
-
- if (!memcmp(cli, srv, csum_size)) {
- /* checksum confirmed */
- CDEBUG(D_SEC, "bulk write checksum (%s) confirmed\n",
- hash_types[bsdr->bsd_hash_alg].sht_name);
- return 0;
- }
-
- /* checksum mismatch, re-compute a new one and compare with
- * others, give out proper warnings. */
- OBD_ALLOC(new, csum_size);
- if (new == NULL)
- return -ENOMEM;
-
- do_bulk_checksum(desc, bsdr->bsd_hash_alg, new);
-
- if (!memcmp(new, srv, csum_size)) {
- CERROR("BAD WRITE CHECKSUM (%s): pages were mutated "
- "on the client after we checksummed them\n",
- hash_types[bsdr->bsd_hash_alg].sht_name);
- } else if (!memcmp(new, cli, csum_size)) {
- CERROR("BAD WRITE CHECKSUM (%s): pages were mutated "
- "in transit\n",
- hash_types[bsdr->bsd_hash_alg].sht_name);
- } else {
- CERROR("BAD WRITE CHECKSUM (%s): pages were mutated "
- "in transit, and the current page contents "
- "don't match the originals and what the server "
- "received\n",
- hash_types[bsdr->bsd_hash_alg].sht_name);
- }
- OBD_FREE(new, csum_size);
-
- return -EINVAL;
- }
-}
-EXPORT_SYMBOL(bulk_csum_cli_reply);
-
-#ifdef __KERNEL__
-static void corrupt_bulk_data(struct ptlrpc_bulk_desc *desc)
-{
- char *ptr;
- unsigned int off, i;
-
- for (i = 0; i < desc->bd_iov_count; i++) {
- if (desc->bd_iov[i].kiov_len == 0)
- continue;
-
- ptr = cfs_kmap(desc->bd_iov[i].kiov_page);
- off = desc->bd_iov[i].kiov_offset & ~CFS_PAGE_MASK;
- ptr[off] ^= 0x1;
- cfs_kunmap(desc->bd_iov[i].kiov_page);
- return;
- }
-}
-#else
-static void corrupt_bulk_data(struct ptlrpc_bulk_desc *desc)
-{
-}
-#endif /* __KERNEL__ */
-
-int bulk_csum_svc(struct ptlrpc_bulk_desc *desc, int read,
- struct ptlrpc_bulk_sec_desc *bsdv, int vsize,
- struct ptlrpc_bulk_sec_desc *bsdr, int rsize)
-{
- int rc;
-
- LASSERT(vsize >= sizeof(*bsdv));
- LASSERT(rsize >= sizeof(*bsdr));
- LASSERT(bsdv && bsdr);
-
- if (read) {
- rc = generate_bulk_csum(desc, bsdv->bsd_hash_alg, bsdr, rsize);
- if (rc)
- CERROR("bulk read: server failed to generate %s "
- "checksum: %d\n",
- hash_types[bsdv->bsd_hash_alg].sht_name, rc);
-
- /* corrupt the data after we compute the checksum, to
- * simulate an OST->client data error */
- if (rc == 0 && OBD_FAIL_CHECK(OBD_FAIL_OSC_CHECKSUM_RECEIVE))
- corrupt_bulk_data(desc);
- } else {
- rc = verify_bulk_csum(desc, 0, bsdv, vsize, bsdr, rsize);
- }
-
- return rc;
-}
-EXPORT_SYMBOL(bulk_csum_svc);
-
-/****************************************
- * Helpers to assist policy modules to *
- * implement encryption funcationality *
- ****************************************/
-
-/* FIXME */
-#ifndef __KERNEL__
-#define CRYPTO_TFM_MODE_ECB (0)
-#define CRYPTO_TFM_MODE_CBC (1)
-#endif
-
-static struct sptlrpc_ciph_type cipher_types[] = {
- [BULK_CIPH_ALG_NULL] = {
- "null", "null", 0, 0, 0
- },
- [BULK_CIPH_ALG_ARC4] = {
- "arc4", "ecb(arc4)", 0, 0, 16
- },
- [BULK_CIPH_ALG_AES128] = {
- "aes128", "cbc(aes)", 0, 16, 16
- },
- [BULK_CIPH_ALG_AES192] = {
- "aes192", "cbc(aes)", 0, 16, 24
- },
- [BULK_CIPH_ALG_AES256] = {
- "aes256", "cbc(aes)", 0, 16, 32
- },
- [BULK_CIPH_ALG_CAST128] = {
- "cast128", "cbc(cast5)", 0, 8, 16
- },
- [BULK_CIPH_ALG_CAST256] = {
- "cast256", "cbc(cast6)", 0, 16, 32
- },
- [BULK_CIPH_ALG_TWOFISH128] = {
- "twofish128", "cbc(twofish)", 0, 16, 16
- },
- [BULK_CIPH_ALG_TWOFISH256] = {
- "twofish256", "cbc(twofish)", 0, 16, 32
- },
-};
-
-const struct sptlrpc_ciph_type *sptlrpc_get_ciph_type(__u8 ciph_alg)
-{
- struct sptlrpc_ciph_type *ct;
-
- if (ciph_alg < BULK_CIPH_ALG_MAX) {
- ct = &cipher_types[ciph_alg];
- if (ct->sct_tfm_name)
- return ct;
- }
- return NULL;
-}
-EXPORT_SYMBOL(sptlrpc_get_ciph_type);
-
-const char *sptlrpc_get_ciph_name(__u8 ciph_alg)
-{
- const struct sptlrpc_ciph_type *ct;
-
- ct = sptlrpc_get_ciph_type(ciph_alg);
- if (ct)
- return ct->sct_name;
- else
- return "unknown";
-}
-EXPORT_SYMBOL(sptlrpc_get_ciph_name);
* user supplied flavor string parsing *
****************************************/
-#ifdef HAVE_ADLER
-#define BULK_HASH_ALG_DEFAULT BULK_HASH_ALG_ADLER32
-#else
-#define BULK_HASH_ALG_DEFAULT BULK_HASH_ALG_CRC32
-#endif
-
-typedef enum {
- BULK_TYPE_N = 0,
- BULK_TYPE_I = 1,
- BULK_TYPE_P = 2
-} bulk_type_t;
-
-static void get_default_flavor(struct sptlrpc_flavor *sf)
-{
- sf->sf_rpc = SPTLRPC_FLVR_NULL;
- sf->sf_bulk_ciph = BULK_CIPH_ALG_NULL;
- sf->sf_bulk_hash = BULK_HASH_ALG_NULL;
- sf->sf_flags = 0;
-}
-
-static void get_flavor_by_rpc(struct sptlrpc_flavor *flvr, __u16 rpc_flavor)
-{
- get_default_flavor(flvr);
-
- flvr->sf_rpc = rpc_flavor;
-
- switch (rpc_flavor) {
- case SPTLRPC_FLVR_NULL:
- break;
- case SPTLRPC_FLVR_PLAIN:
- case SPTLRPC_FLVR_KRB5N:
- case SPTLRPC_FLVR_KRB5A:
- flvr->sf_bulk_hash = BULK_HASH_ALG_DEFAULT;
- break;
- case SPTLRPC_FLVR_KRB5P:
- flvr->sf_bulk_ciph = BULK_CIPH_ALG_AES128;
- /* fall through */
- case SPTLRPC_FLVR_KRB5I:
- flvr->sf_bulk_hash = BULK_HASH_ALG_SHA1;
- break;
- default:
- LBUG();
- }
-}
-
-static void get_flavor_by_bulk(struct sptlrpc_flavor *flvr,
- __u16 rpc_flavor, bulk_type_t bulk_type)
-{
- switch (bulk_type) {
- case BULK_TYPE_N:
- flvr->sf_bulk_hash = BULK_HASH_ALG_NULL;
- flvr->sf_bulk_ciph = BULK_CIPH_ALG_NULL;
- break;
- case BULK_TYPE_I:
- switch (rpc_flavor) {
- case SPTLRPC_FLVR_PLAIN:
- case SPTLRPC_FLVR_KRB5N:
- case SPTLRPC_FLVR_KRB5A:
- flvr->sf_bulk_hash = BULK_HASH_ALG_DEFAULT;
- break;
- case SPTLRPC_FLVR_KRB5I:
- case SPTLRPC_FLVR_KRB5P:
- flvr->sf_bulk_hash = BULK_HASH_ALG_SHA1;
- break;
- default:
- LBUG();
- }
- flvr->sf_bulk_ciph = BULK_CIPH_ALG_NULL;
- break;
- case BULK_TYPE_P:
- flvr->sf_bulk_hash = BULK_HASH_ALG_SHA1;
- flvr->sf_bulk_ciph = BULK_CIPH_ALG_AES128;
- break;
- default:
- LBUG();
- }
-}
-
-static __u16 __flavors[] = {
- SPTLRPC_FLVR_NULL,
- SPTLRPC_FLVR_PLAIN,
- SPTLRPC_FLVR_KRB5N,
- SPTLRPC_FLVR_KRB5A,
- SPTLRPC_FLVR_KRB5I,
- SPTLRPC_FLVR_KRB5P,
-};
-
-#define __nflavors ARRAY_SIZE(__flavors)
-
/*
- * flavor string format: rpc[-bulk{n|i|p}[:cksum/enc]]
- * for examples:
- * null
- * plain-bulki
- * krb5p-bulkn
- * krb5i-bulkp
- * krb5i-bulkp:sha512/arc4
+ * format: <base_flavor>[-<bulk_type>:<alg_spec>]
*/
int sptlrpc_parse_flavor(const char *str, struct sptlrpc_flavor *flvr)
{
- const char *f;
- char *bulk, *alg, *enc;
- char buf[64];
- bulk_type_t bulk_type;
- __u8 i;
- ENTRY;
+ char buf[32];
+ char *bulk, *alg;
+
+ memset(flvr, 0, sizeof(*flvr));
if (str == NULL || str[0] == '\0') {
flvr->sf_rpc = SPTLRPC_FLVR_INVALID;
- goto out;
+ return 0;
}
- for (i = 0; i < __nflavors; i++) {
- f = sptlrpc_rpcflavor2name(__flavors[i]);
- if (strncmp(str, f, strlen(f)) == 0)
- break;
- }
-
- if (i >= __nflavors)
- GOTO(invalid, -EINVAL);
+ strncpy(buf, str, sizeof(buf));
+ buf[sizeof(buf) - 1] = '\0';
- /* prepare local buffer thus we can modify it as we want */
- strncpy(buf, str, 64);
- buf[64 - 1] = '\0';
-
- /* find bulk string */
bulk = strchr(buf, '-');
if (bulk)
*bulk++ = '\0';
- /* now the first part must equal to rpc flavor name */
- if (strcmp(buf, f) != 0)
- GOTO(invalid, -EINVAL);
-
- get_flavor_by_rpc(flvr, __flavors[i]);
-
- if (bulk == NULL)
- goto out;
-
- /* find bulk algorithm string */
- alg = strchr(bulk, ':');
- if (alg)
- *alg++ = '\0';
-
- /* verify bulk section */
- if (strcmp(bulk, "bulkn") == 0) {
- flvr->sf_bulk_hash = BULK_HASH_ALG_NULL;
- flvr->sf_bulk_ciph = BULK_CIPH_ALG_NULL;
- bulk_type = BULK_TYPE_N;
- } else if (strcmp(bulk, "bulki") == 0)
- bulk_type = BULK_TYPE_I;
- else if (strcmp(bulk, "bulkp") == 0)
- bulk_type = BULK_TYPE_P;
- else
- GOTO(invalid, -EINVAL);
-
- /* null flavor don't support bulk i/p */
- if (__flavors[i] == SPTLRPC_FLVR_NULL && bulk_type != BULK_TYPE_N)
- GOTO(invalid, -EINVAL);
-
- /* plain policy dosen't support bulk p */
- if (__flavors[i] == SPTLRPC_FLVR_PLAIN && bulk_type == BULK_TYPE_P)
- GOTO(invalid, -EINVAL);
-
- get_flavor_by_bulk(flvr, __flavors[i], bulk_type);
-
- if (alg == NULL)
- goto out;
-
- /* find encryption algorithm string */
- enc = strchr(alg, '/');
- if (enc)
- *enc++ = '\0';
-
- /* checksum algorithm */
- for (i = 0; i < BULK_HASH_ALG_MAX; i++) {
- if (strcmp(alg, sptlrpc_get_hash_name(i)) == 0) {
- flvr->sf_bulk_hash = i;
- break;
- }
- }
- if (i >= BULK_HASH_ALG_MAX)
- GOTO(invalid, -EINVAL);
-
- /* privacy algorithm */
- if (enc) {
- for (i = 0; i < BULK_CIPH_ALG_MAX; i++) {
- if (strcmp(enc, sptlrpc_get_ciph_name(i)) == 0) {
- flvr->sf_bulk_ciph = i;
- break;
- }
- }
- if (i >= BULK_CIPH_ALG_MAX)
- GOTO(invalid, -EINVAL);
- }
+ flvr->sf_rpc = sptlrpc_name2flavor_base(buf);
+ if (flvr->sf_rpc == SPTLRPC_FLVR_INVALID)
+ goto err_out;
/*
- * bulk combination sanity checks
+ * currently only the base flavor "plain" can carry a bulk specification.
*/
- if (bulk_type == BULK_TYPE_P &&
- flvr->sf_bulk_ciph == BULK_CIPH_ALG_NULL)
- GOTO(invalid, -EINVAL);
-
- if (bulk_type == BULK_TYPE_I &&
- (flvr->sf_bulk_hash == BULK_HASH_ALG_NULL ||
- flvr->sf_bulk_ciph != BULK_CIPH_ALG_NULL))
- GOTO(invalid, -EINVAL);
+ if (flvr->sf_rpc == SPTLRPC_FLVR_PLAIN) {
+ flvr->u_bulk.hash.hash_alg = BULK_HASH_ALG_ADLER32;
+ if (bulk) {
+ /*
+ * format: plain-hash:<hash_alg>
+ */
+ alg = strchr(bulk, ':');
+ if (alg == NULL)
+ goto err_out;
+ *alg++ = '\0';
+
+ if (strcmp(bulk, "hash"))
+ goto err_out;
+
+ flvr->u_bulk.hash.hash_alg = sptlrpc_get_hash_alg(alg);
+ if (flvr->u_bulk.hash.hash_alg >= BULK_HASH_ALG_MAX)
+ goto err_out;
+ }
- if (bulk_type == BULK_TYPE_N &&
- (flvr->sf_bulk_hash != BULK_HASH_ALG_NULL ||
- flvr->sf_bulk_ciph != BULK_CIPH_ALG_NULL))
- GOTO(invalid, -EINVAL);
+ if (flvr->u_bulk.hash.hash_alg == BULK_HASH_ALG_NULL)
+ flvr_set_bulk_svc(&flvr->sf_rpc, SPTLRPC_BULK_SVC_NULL);
+ else
+ flvr_set_bulk_svc(&flvr->sf_rpc, SPTLRPC_BULK_SVC_INTG);
+ } else {
+ if (bulk)
+ goto err_out;
+ }
-out:
+ flvr->sf_flags = 0;
return 0;
-invalid:
+
+err_out:
CERROR("invalid flavor string: %s\n", str);
return -EINVAL;
}
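
A usage sketch for the parser above (illustrative; the algorithm names assume the hash table registers "null", "adler32", "crc32", etc.):

	struct sptlrpc_flavor flvr;

	sptlrpc_parse_flavor("plain", &flvr);
	/* hash_alg defaults to BULK_HASH_ALG_ADLER32, bulk svc = INTG */

	sptlrpc_parse_flavor("plain-hash:null", &flvr);
	/* hash_alg = BULK_HASH_ALG_NULL, bulk svc = NULL */

	sptlrpc_parse_flavor("krb5i-hash:sha1", &flvr);
	/* -EINVAL: only the "plain" base flavor takes a bulk spec */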
* configure rules *
****************************************/
+static void get_default_flavor(struct sptlrpc_flavor *sf)
+{
+ memset(sf, 0, sizeof(*sf));
+
+ sf->sf_rpc = SPTLRPC_FLVR_NULL;
+ sf->sf_flags = 0;
+}
+
static void sptlrpc_rule_init(struct sptlrpc_rule *rule)
{
rule->sr_netid = LNET_NIDNET(LNET_NID_ANY);
/*
* return 0 if the rule set could accommodate one more rule.
- * if @expand != 0, the rule set might be expanded.
*/
-int sptlrpc_rule_set_expand(struct sptlrpc_rule_set *rset, int expand)
+int sptlrpc_rule_set_expand(struct sptlrpc_rule_set *rset)
{
struct sptlrpc_rule *rules;
int nslot;
+ might_sleep();
+
if (rset->srs_nrule < rset->srs_nslot)
return 0;
- if (expand == 0)
- return -E2BIG;
-
nslot = rset->srs_nslot + 8;
/* better use realloc() if available */
/*
* merge @rule into @rset.
- * if @expand != 0 then @rset slots might be expanded.
+ * the @rset slots might be expanded.
*/
int sptlrpc_rule_set_merge(struct sptlrpc_rule_set *rset,
- struct sptlrpc_rule *rule,
- int expand)
+ struct sptlrpc_rule *rule)
{
struct sptlrpc_rule *p = rset->srs_rules;
int spec_dir, spec_net;
int rc, n, match = 0;
+ might_sleep();
+
spec_net = rule_spec_net(rule);
spec_dir = rule_spec_dir(rule);
LASSERT(n >= 0 && n <= rset->srs_nrule);
if (rule->sr_flvr.sf_rpc != SPTLRPC_FLVR_INVALID) {
- rc = sptlrpc_rule_set_expand(rset, expand);
+ rc = sptlrpc_rule_set_expand(rset);
if (rc)
return rc;
struct sptlrpc_rule *rule;
int i, n, rc;
+ might_sleep();
+
/* merge general rules firstly, then target-specific rules */
for (i = 0; i < 2; i++) {
if (src[i] == NULL)
rule->sr_to != to)
continue;
- rc = sptlrpc_rule_set_merge(rset, rule, 1);
+ rc = sptlrpc_rule_set_merge(rset, rule);
if (rc) {
CERROR("can't merge: %d\n", rc);
return rc;
}
}
- return sptlrpc_rule_set_merge(rule_set, rule, 1);
+ return sptlrpc_rule_set_merge(rule_set, rule);
}
/**
RETURN(-EINVAL);
}
- CDEBUG(D_SEC, "got one rule: %s.%s\n", target, param);
+ CDEBUG(D_SEC, "processing rule: %s.%s\n", target, param);
/* parse rule to make sure the format is correct */
if (strncmp(param, PARAM_SRPC_FLVR, sizeof(PARAM_SRPC_FLVR) - 1) != 0) {
enum lustre_sec_part to,
unsigned int fl_udesc)
{
+ /*
+ * the null flavor doesn't need any flavor to be set here, and in
+ * fact we'd better not do so, because everybody shares a single sec.
+ */
+ if (sf->sf_rpc == SPTLRPC_FLVR_NULL)
+ return;
+
if (from == LUSTRE_SP_MDT) {
/* MDT->MDT; MDT->OST */
sf->sf_flags |= PTLRPC_SEC_FL_ROOTONLY;
struct proc_dir_entry *sptlrpc_proc_root = NULL;
EXPORT_SYMBOL(sptlrpc_proc_root);
-void sec_flags2str(unsigned long flags, char *buf, int bufsize)
+char *sec_flags2str(unsigned long flags, char *buf, int bufsize)
{
buf[0] = '\0';
strncat(buf, "-,", bufsize);
buf[strlen(buf) - 1] = '\0';
-
+ return buf;
}
static int sptlrpc_info_lprocfs_seq_show(struct seq_file *seq, void *v)
struct obd_device *dev = seq->private;
struct client_obd *cli = &dev->u.cli;
struct ptlrpc_sec *sec = NULL;
- char flags_str[32];
+ char str[32];
LASSERT(strcmp(dev->obd_type->typ_name, LUSTRE_OSC_NAME) == 0 ||
strcmp(dev->obd_type->typ_name, LUSTRE_MDC_NAME) == 0 ||
if (sec == NULL)
goto out;
- sec_flags2str(sec->ps_flvr.sf_flags, flags_str, sizeof(flags_str));
seq_printf(seq, "rpc flavor: %s\n",
- sptlrpc_rpcflavor2name(sec->ps_flvr.sf_rpc));
- seq_printf(seq, "bulk flavor: %s/%s\n",
- sptlrpc_get_hash_name(sec->ps_flvr.sf_bulk_hash),
- sptlrpc_get_ciph_name(sec->ps_flvr.sf_bulk_ciph));
- seq_printf(seq, "flags: %s\n", flags_str);
+ sptlrpc_flavor2name_base(sec->ps_flvr.sf_rpc));
+ seq_printf(seq, "bulk flavor: %s\n",
+ sptlrpc_flavor2name_bulk(&sec->ps_flvr, str, sizeof(str)));
+ seq_printf(seq, "flags: %s\n",
+ sec_flags2str(sec->ps_flvr.sf_flags, str, sizeof(str)));
seq_printf(seq, "id: %d\n", sec->ps_id);
seq_printf(seq, "refcount: %d\n", atomic_read(&sec->ps_refcount));
seq_printf(seq, "nctx: %d\n", atomic_read(&sec->ps_nctx));
static struct ptlrpc_svc_ctx null_svc_ctx;
/*
- * null sec temporarily use the third byte of lm_secflvr to identify
+ * we can temporarily use the topmost 8 bits of lm_secflvr to identify
* the source sec part.
*/
static inline
void null_encode_sec_part(struct lustre_msg *msg, enum lustre_sec_part sp)
{
- msg->lm_secflvr |= (((__u32) sp) & 0xFF) << 16;
+ msg->lm_secflvr |= (((__u32) sp) & 0xFF) << 24;
}
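+
+/*
+ * e.g. sp = 2 packs as lm_secflvr |= 0x02 << 24 -> 0x02xxxxxx; a native
+ * reader takes (lm_secflvr >> 24) & 0xFF, while a swabbed reader finds
+ * the same byte at bits 0-7, hence the bare 0xFF mask below.
+ */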
static inline
{
switch (msg->lm_magic) {
case LUSTRE_MSG_MAGIC_V2:
- return (msg->lm_secflvr >> 16) & 0xFF;
+ return (msg->lm_secflvr >> 24) & 0xFF;
case LUSTRE_MSG_MAGIC_V2_SWABBED:
- return (msg->lm_secflvr >> 8) & 0xFF;
+ return (msg->lm_secflvr) & 0xFF;
default:
return LUSTRE_SP_ANY;
}
struct ptlrpc_svc_ctx *svc_ctx,
struct sptlrpc_flavor *sf)
{
- LASSERT(RPC_FLVR_POLICY(sf->sf_rpc) == SPTLRPC_POLICY_NULL);
-
- if (sf->sf_bulk_ciph != BULK_CIPH_ALG_NULL ||
- sf->sf_bulk_hash != BULK_HASH_ALG_NULL) {
- CERROR("null sec don't support bulk algorithm: %u/%u\n",
- sf->sf_bulk_ciph, sf->sf_bulk_hash);
- return NULL;
- }
+ LASSERT(SPTLRPC_FLVR_POLICY(sf->sf_rpc) == SPTLRPC_POLICY_NULL);
/* general layer has taken a module reference for us, because we never
* really destroy the sec, simply release the reference here.
static
int null_accept(struct ptlrpc_request *req)
{
- LASSERT(RPC_FLVR_POLICY(req->rq_flvr.sf_rpc) == SPTLRPC_POLICY_NULL);
+ LASSERT(SPTLRPC_FLVR_POLICY(req->rq_flvr.sf_rpc) ==
+ SPTLRPC_POLICY_NULL);
if (req->rq_flvr.sf_rpc != SPTLRPC_FLVR_NULL) {
CERROR("Invalid rpc flavor 0x%x\n", req->rq_flvr.sf_rpc);
null_sec.ps_id = -1;
null_sec.ps_import = NULL;
null_sec.ps_flvr.sf_rpc = SPTLRPC_FLVR_NULL;
- null_sec.ps_flvr.sf_bulk_ciph = BULK_CIPH_ALG_NULL;
- null_sec.ps_flvr.sf_bulk_hash = BULK_HASH_ALG_NULL;
null_sec.ps_flvr.sf_flags = 0;
null_sec.ps_part = LUSTRE_SP_ANY;
null_sec.ps_dying = 0;
static unsigned int plain_at_offset;
/*
- * flavor flags (maximum 8 flags)
+ * for simplicity, plain policy RPCs use a fixed buffer layout.
*/
-#define PLAIN_WFLVR_FLAGS_OFFSET (12)
-#define PLAIN_WFLVR_FLAG_BULK (1 << (0 + PLAIN_WFLVR_FLAGS_OFFSET))
-#define PLAIN_WFLVR_FLAG_USER (1 << (1 + PLAIN_WFLVR_FLAGS_OFFSET))
+#define PLAIN_PACK_SEGMENTS (4)
+
+#define PLAIN_PACK_HDR_OFF (0)
+#define PLAIN_PACK_MSG_OFF (1)
+#define PLAIN_PACK_USER_OFF (2)
+#define PLAIN_PACK_BULK_OFF (3)
+
+#define PLAIN_FL_USER (0x01)
+#define PLAIN_FL_BULK (0x02)
+
+struct plain_header {
+ __u8 ph_ver; /* 0 */
+ __u8 ph_flags;
+ __u8 ph_sp; /* source */
+ __u8 ph_bulk_hash_alg; /* complete flavor desc */
+ __u8 ph_pad[4];
+};
-#define PLAIN_WFLVR_HAS_BULK(wflvr) \
- (((wflvr) & PLAIN_WFLVR_FLAG_BULK) != 0)
-#define PLAIN_WFLVR_HAS_USER(wflvr) \
- (((wflvr) & PLAIN_WFLVR_FLAG_USER) != 0)
+struct plain_bulk_token {
+ __u8 pbt_hash[8];
+};
-#define PLAIN_WFLVR_TO_RPC(wflvr) \
- ((wflvr) & ((1 << PLAIN_WFLVR_FLAGS_OFFSET) - 1))
+#define PLAIN_BSD_SIZE \
+ (sizeof(struct ptlrpc_bulk_sec_desc) + sizeof(struct plain_bulk_token))
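+
+/*
+ * resulting message layout:
+ *   seg 0 PLAIN_PACK_HDR_OFF:  struct plain_header
+ *   seg 1 PLAIN_PACK_MSG_OFF:  the lustre message itself
+ *   seg 2 PLAIN_PACK_USER_OFF: user descriptor, iff PLAIN_FL_USER is set
+ *   seg 3 PLAIN_PACK_BULK_OFF: bulk sec desc + token (PLAIN_BSD_SIZE),
+ *                              iff PLAIN_FL_BULK is set
+ */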
-/*
- * similar to null sec, temporarily use the third byte of lm_secflvr to identify
- * the source sec part.
- */
-static inline
-void plain_encode_sec_part(struct lustre_msg *msg, enum lustre_sec_part sp)
+/****************************************
+ * bulk checksum helpers *
+ ****************************************/
+
+static int plain_unpack_bsd(struct lustre_msg *msg)
{
- msg->lm_secflvr |= (((__u32) sp) & 0xFF) << 16;
+ struct ptlrpc_bulk_sec_desc *bsd;
+
+ if (bulk_sec_desc_unpack(msg, PLAIN_PACK_BULK_OFF))
+ return -EPROTO;
+
+ bsd = lustre_msg_buf(msg, PLAIN_PACK_BULK_OFF, PLAIN_BSD_SIZE);
+ if (bsd == NULL) {
+ CERROR("bulk sec desc has short size %d\n",
+ lustre_msg_buflen(msg, PLAIN_PACK_BULK_OFF));
+ return -EPROTO;
+ }
+
+ if (bsd->bsd_svc != SPTLRPC_BULK_SVC_NULL &&
+ bsd->bsd_svc != SPTLRPC_BULK_SVC_INTG) {
+ CERROR("invalid bulk svc %u\n", bsd->bsd_svc);
+ return -EPROTO;
+ }
+
+ return 0;
}
-static inline
-enum lustre_sec_part plain_decode_sec_part(struct lustre_msg *msg)
+static int plain_generate_bulk_csum(struct ptlrpc_bulk_desc *desc,
+ __u8 hash_alg,
+ struct plain_bulk_token *token)
{
- return (msg->lm_secflvr >> 16) & 0xFF;
+ if (hash_alg == BULK_HASH_ALG_NULL)
+ return 0;
+
+ memset(token->pbt_hash, 0, sizeof(token->pbt_hash));
+ return sptlrpc_get_bulk_checksum(desc, hash_alg, token->pbt_hash,
+ sizeof(token->pbt_hash));
}
-/*
- * for simplicity, plain policy rpc use fixed layout.
- */
-#define PLAIN_PACK_SEGMENTS (3)
+static int plain_verify_bulk_csum(struct ptlrpc_bulk_desc *desc,
+ __u8 hash_alg,
+ struct plain_bulk_token *tokenr)
+{
+ struct plain_bulk_token tokenv;
+ int rc;
+
+ if (hash_alg == BULK_HASH_ALG_NULL)
+ return 0;
-#define PLAIN_PACK_MSG_OFF (0)
-#define PLAIN_PACK_USER_OFF (1)
-#define PLAIN_PACK_BULK_OFF (2)
+ memset(&tokenv.pbt_hash, 0, sizeof(tokenv.pbt_hash));
+ rc = sptlrpc_get_bulk_checksum(desc, hash_alg, tokenv.pbt_hash,
+ sizeof(tokenv.pbt_hash));
+ if (rc)
+ return rc;
+
+ if (memcmp(tokenr->pbt_hash, tokenv.pbt_hash, sizeof(tokenr->pbt_hash)))
+ return -EACCES;
+ return 0;
+}
+
+#ifdef __KERNEL__
+static void corrupt_bulk_data(struct ptlrpc_bulk_desc *desc)
+{
+ char *ptr;
+ unsigned int off, i;
+
+ for (i = 0; i < desc->bd_iov_count; i++) {
+ if (desc->bd_iov[i].kiov_len == 0)
+ continue;
+
+ ptr = cfs_kmap(desc->bd_iov[i].kiov_page);
+ off = desc->bd_iov[i].kiov_offset & ~CFS_PAGE_MASK;
+ ptr[off] ^= 0x1;
+ cfs_kunmap(desc->bd_iov[i].kiov_page);
+ return;
+ }
+}
+#else
+static void corrupt_bulk_data(struct ptlrpc_bulk_desc *desc)
+{
+ unsigned int i;
+
+ for (i = 0; i < desc->bd_iov_count; i++) {
+ if (desc->bd_iov[i].iov_len == 0)
+ continue;
+
+ /* flip one byte in the first non-empty iov */
+ ((char *)desc->bd_iov[i].iov_base)[0] ^= 0x1;
+ return;
+ }
+}
+#endif /* __KERNEL__ */
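
A round-trip sketch of the two checksum helpers above (illustrative only; plain_bulk_csum_selftest is a hypothetical name): generate a token over the descriptor's pages, then verify it against the same pages.

	static int plain_bulk_csum_selftest(struct ptlrpc_bulk_desc *desc)
	{
		struct plain_bulk_token token;
		int rc;

		rc = plain_generate_bulk_csum(desc, BULK_HASH_ALG_ADLER32,
		                              &token);
		if (rc)
			return rc;
		/* same pages, same algorithm: -EACCES means a mismatch */
		return plain_verify_bulk_csum(desc, BULK_HASH_ALG_ADLER32,
		                              &token);
	}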
/****************************************
* cli_ctx apis *
static
int plain_ctx_sign(struct ptlrpc_cli_ctx *ctx, struct ptlrpc_request *req)
{
- struct lustre_msg_v2 *msg = req->rq_reqbuf;
+ struct lustre_msg *msg = req->rq_reqbuf;
+ struct plain_header *phdr;
ENTRY;
msg->lm_secflvr = req->rq_flvr.sf_rpc;
- if (req->rq_pack_bulk)
- msg->lm_secflvr |= PLAIN_WFLVR_FLAG_BULK;
- if (req->rq_pack_udesc)
- msg->lm_secflvr |= PLAIN_WFLVR_FLAG_USER;
- plain_encode_sec_part(msg, ctx->cc_sec->ps_part);
+ phdr = lustre_msg_buf(msg, PLAIN_PACK_HDR_OFF, 0);
+ phdr->ph_ver = 0;
+ phdr->ph_flags = 0;
+ phdr->ph_sp = ctx->cc_sec->ps_part;
+ phdr->ph_bulk_hash_alg = req->rq_flvr.u_bulk.hash.hash_alg;
+
+ if (req->rq_pack_udesc)
+ phdr->ph_flags |= PLAIN_FL_USER;
+ if (req->rq_pack_bulk)
+ phdr->ph_flags |= PLAIN_FL_BULK;
req->rq_reqdata_len = lustre_msg_size_v2(msg->lm_bufcount,
msg->lm_buflens);
static
int plain_ctx_verify(struct ptlrpc_cli_ctx *ctx, struct ptlrpc_request *req)
{
- struct lustre_msg *msg = req->rq_repdata;
- __u32 cksum;
+ struct lustre_msg *msg = req->rq_repdata;
+ struct plain_header *phdr;
+ __u32 cksum;
ENTRY;
if (msg->lm_bufcount != PLAIN_PACK_SEGMENTS) {
RETURN(-EPROTO);
}
+ phdr = lustre_msg_buf(msg, PLAIN_PACK_HDR_OFF, sizeof(*phdr));
+ if (phdr == NULL) {
+ CERROR("missing plain header\n");
+ RETURN(-EPROTO);
+ }
+
+ if (phdr->ph_ver != 0) {
+ CERROR("Invalid header version\n");
+ RETURN(-EPROTO);
+ }
+
/* expect no user desc in reply */
- if (PLAIN_WFLVR_HAS_USER(msg->lm_secflvr)) {
+ if (phdr->ph_flags & PLAIN_FL_USER) {
CERROR("Unexpected udesc flag in reply\n");
RETURN(-EPROTO);
}
+ if (phdr->ph_bulk_hash_alg != req->rq_flvr.u_bulk.hash.hash_alg) {
+ CERROR("reply bulk flavor %u != %u\n", phdr->ph_bulk_hash_alg,
+ req->rq_flvr.u_bulk.hash.hash_alg);
+ RETURN(-EPROTO);
+ }
+
if (unlikely(req->rq_early)) {
cksum = crc32_le(!(__u32) 0,
lustre_msg_buf(msg, PLAIN_PACK_MSG_OFF, 0),
* in reply, except for early reply */
if (!req->rq_early &&
!equi(req->rq_pack_bulk == 1,
- PLAIN_WFLVR_HAS_BULK(msg->lm_secflvr))) {
+ phdr->ph_flags & PLAIN_FL_BULK)) {
CERROR("%s bulk checksum in reply\n",
req->rq_pack_bulk ? "Missing" : "Unexpected");
RETURN(-EPROTO);
}
- if (PLAIN_WFLVR_HAS_BULK(msg->lm_secflvr) &&
- bulk_sec_desc_unpack(msg, PLAIN_PACK_BULK_OFF)) {
- CERROR("Mal-formed bulk checksum reply\n");
- RETURN(-EINVAL);
+ if (phdr->ph_flags & PLAIN_FL_BULK) {
+ if (plain_unpack_bsd(msg))
+ RETURN(-EPROTO);
}
}
struct ptlrpc_request *req,
struct ptlrpc_bulk_desc *desc)
{
+ struct ptlrpc_bulk_sec_desc *bsd;
+ struct plain_bulk_token *token;
+ int rc;
+
LASSERT(req->rq_pack_bulk);
LASSERT(req->rq_reqbuf->lm_bufcount == PLAIN_PACK_SEGMENTS);
- return bulk_csum_cli_request(desc, req->rq_bulk_read,
- req->rq_flvr.sf_bulk_hash,
- req->rq_reqbuf,
- PLAIN_PACK_BULK_OFF);
+ bsd = lustre_msg_buf(req->rq_reqbuf, PLAIN_PACK_BULK_OFF, 0);
+ token = (struct plain_bulk_token *) bsd->bsd_data;
+
+ bsd->bsd_version = 0;
+ bsd->bsd_flags = 0;
+ bsd->bsd_type = SPTLRPC_BULK_DEFAULT;
+ bsd->bsd_svc = SPTLRPC_FLVR_BULK_SVC(req->rq_flvr.sf_rpc);
+
+ if (bsd->bsd_svc == SPTLRPC_BULK_SVC_NULL)
+ RETURN(0);
+
+ if (req->rq_bulk_read)
+ RETURN(0);
+
+ rc = plain_generate_bulk_csum(desc, req->rq_flvr.u_bulk.hash.hash_alg,
+ token);
+ if (rc) {
+ CERROR("bulk write: failed to compute checksum: %d\n", rc);
+ } else {
+ /*
+ * for sending we only compute the wrong checksum instead
+ * of corrupting the data so it is still correct on a redo
+ */
+ if (OBD_FAIL_CHECK(OBD_FAIL_OSC_CHECKSUM_SEND) &&
+ req->rq_flvr.u_bulk.hash.hash_alg != BULK_HASH_ALG_NULL)
+ token->pbt_hash[0] ^= 0x1;
+ }
+
+ return rc;
}
static
struct ptlrpc_request *req,
struct ptlrpc_bulk_desc *desc)
{
+ struct ptlrpc_bulk_sec_desc *bsdr, *bsdv;
+ struct plain_bulk_token *tokenr, *tokenv;
+ int rc;
+#ifdef __KERNEL__
+ int i, nob;
+#endif
+
LASSERT(req->rq_pack_bulk);
LASSERT(req->rq_reqbuf->lm_bufcount == PLAIN_PACK_SEGMENTS);
LASSERT(req->rq_repdata->lm_bufcount == PLAIN_PACK_SEGMENTS);
- return bulk_csum_cli_reply(desc, req->rq_bulk_read,
- req->rq_reqbuf, PLAIN_PACK_BULK_OFF,
- req->rq_repdata, PLAIN_PACK_BULK_OFF);
+ bsdr = lustre_msg_buf(req->rq_reqbuf, PLAIN_PACK_BULK_OFF, 0);
+ tokenr = (struct plain_bulk_token *) bsdr->bsd_data;
+ bsdv = lustre_msg_buf(req->rq_repdata, PLAIN_PACK_BULK_OFF, 0);
+ tokenv = (struct plain_bulk_token *) bsdv->bsd_data;
+
+ if (req->rq_bulk_write) {
+ if (bsdv->bsd_flags & BSD_FL_ERR)
+ return -EIO;
+ return 0;
+ }
+
+#ifdef __KERNEL__
+ /* fix the actual data size: clip kiov lengths to bd_nob_transferred;
+ * e.g. three 4096-byte kiovs with 9000 bytes transferred become
+ * 4096, 4096 and 808 bytes */
+ for (i = 0, nob = 0; i < desc->bd_iov_count; i++) {
+ if (desc->bd_iov[i].kiov_len + nob > desc->bd_nob_transferred) {
+ desc->bd_iov[i].kiov_len =
+ desc->bd_nob_transferred - nob;
+ }
+ nob += desc->bd_iov[i].kiov_len;
+ }
+#endif
+
+ rc = plain_verify_bulk_csum(desc, req->rq_flvr.u_bulk.hash.hash_alg,
+ tokenv);
+ if (rc)
+ CERROR("bulk read: client verify failed: %d\n", rc);
+
+ return rc;
}
/****************************************
struct ptlrpc_cli_ctx *ctx;
ENTRY;
- LASSERT(RPC_FLVR_POLICY(sf->sf_rpc) == SPTLRPC_POLICY_PLAIN);
-
- if (sf->sf_bulk_ciph != BULK_CIPH_ALG_NULL) {
- CERROR("plain policy don't support bulk cipher: %u\n",
- sf->sf_bulk_ciph);
- RETURN(NULL);
- }
+ LASSERT(SPTLRPC_FLVR_POLICY(sf->sf_rpc) == SPTLRPC_POLICY_PLAIN);
OBD_ALLOC_PTR(plsec);
if (plsec == NULL)
int msgsize)
{
__u32 buflens[PLAIN_PACK_SEGMENTS] = { 0, };
- int alloc_len;
+ int alloc_len;
ENTRY;
+ buflens[PLAIN_PACK_HDR_OFF] = sizeof(struct plain_header);
buflens[PLAIN_PACK_MSG_OFF] = msgsize;
if (req->rq_pack_udesc)
if (req->rq_pack_bulk) {
LASSERT(req->rq_bulk_read || req->rq_bulk_write);
-
- buflens[PLAIN_PACK_BULK_OFF] = bulk_sec_desc_size(
- req->rq_flvr.sf_bulk_hash, 1,
- req->rq_bulk_read);
+ buflens[PLAIN_PACK_BULK_OFF] = PLAIN_BSD_SIZE;
}
alloc_len = lustre_msg_size_v2(PLAIN_PACK_SEGMENTS, buflens);
}
lustre_init_msg_v2(req->rq_reqbuf, PLAIN_PACK_SEGMENTS, buflens, NULL);
- req->rq_reqmsg = lustre_msg_buf_v2(req->rq_reqbuf, 0, 0);
+ req->rq_reqmsg = lustre_msg_buf(req->rq_reqbuf, PLAIN_PACK_MSG_OFF, 0);
if (req->rq_pack_udesc)
sptlrpc_pack_user_desc(req->rq_reqbuf, PLAIN_PACK_USER_OFF);
int alloc_len;
ENTRY;
+ buflens[PLAIN_PACK_HDR_OFF] = sizeof(struct plain_header);
buflens[PLAIN_PACK_MSG_OFF] = msgsize;
if (req->rq_pack_bulk) {
LASSERT(req->rq_bulk_read || req->rq_bulk_write);
- buflens[PLAIN_PACK_BULK_OFF] = bulk_sec_desc_size(
- req->rq_flvr.sf_bulk_hash, 0,
- req->rq_bulk_read);
+ buflens[PLAIN_PACK_BULK_OFF] = PLAIN_BSD_SIZE;
}
alloc_len = lustre_msg_size_v2(PLAIN_PACK_SEGMENTS, buflens);
static
int plain_accept(struct ptlrpc_request *req)
{
- struct lustre_msg *msg = req->rq_reqbuf;
+ struct lustre_msg *msg = req->rq_reqbuf;
+ struct plain_header *phdr;
ENTRY;
- LASSERT(RPC_FLVR_POLICY(req->rq_flvr.sf_rpc) == SPTLRPC_POLICY_PLAIN);
+ LASSERT(SPTLRPC_FLVR_POLICY(req->rq_flvr.sf_rpc) ==
+ SPTLRPC_POLICY_PLAIN);
+
+ if (SPTLRPC_FLVR_BASE(req->rq_flvr.sf_rpc) !=
+ SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_PLAIN) ||
+ SPTLRPC_FLVR_BULK_TYPE(req->rq_flvr.sf_rpc) !=
+ SPTLRPC_FLVR_BULK_TYPE(SPTLRPC_FLVR_PLAIN)) {
+ CERROR("Invalid rpc flavor %x\n", req->rq_flvr.sf_rpc);
+ RETURN(SECSVC_DROP);
+ }
if (msg->lm_bufcount < PLAIN_PACK_SEGMENTS) {
CERROR("unexpected request buf count %u\n", msg->lm_bufcount);
RETURN(SECSVC_DROP);
}
- if (req->rq_flvr.sf_rpc != SPTLRPC_FLVR_PLAIN) {
- CERROR("Invalid rpc flavor %x\n", req->rq_flvr.sf_rpc);
- RETURN(SECSVC_DROP);
+ phdr = lustre_msg_buf(msg, PLAIN_PACK_HDR_OFF, sizeof(*phdr));
+ if (phdr == NULL) {
+ CERROR("missing plain header\n");
+ RETURN(-EPROTO);
}
- req->rq_sp_from = plain_decode_sec_part(msg);
+ if (phdr->ph_ver != 0) {
+ CERROR("Invalid header version\n");
+ RETURN(-EPROTO);
+ }
- if (PLAIN_WFLVR_HAS_USER(msg->lm_secflvr)) {
+ if (phdr->ph_bulk_hash_alg >= BULK_HASH_ALG_MAX) {
+ CERROR("invalid hash algorithm: %u\n", phdr->ph_bulk_hash_alg);
+ RETURN(-EPROTO);
+ }
+
+ req->rq_sp_from = phdr->ph_sp;
+ req->rq_flvr.u_bulk.hash.hash_alg = phdr->ph_bulk_hash_alg;
+
+ if (phdr->ph_flags & PLAIN_FL_USER) {
if (sptlrpc_unpack_user_desc(msg, PLAIN_PACK_USER_OFF)) {
CERROR("Mal-formed user descriptor\n");
RETURN(SECSVC_DROP);
req->rq_user_desc = lustre_msg_buf(msg, PLAIN_PACK_USER_OFF, 0);
}
- if (PLAIN_WFLVR_HAS_BULK(msg->lm_secflvr)) {
- if (bulk_sec_desc_unpack(msg, PLAIN_PACK_BULK_OFF)) {
- CERROR("Mal-formed bulk checksum request\n");
+ if (phdr->ph_flags & PLAIN_FL_BULK) {
+ if (plain_unpack_bsd(msg))
RETURN(SECSVC_DROP);
- }
req->rq_pack_bulk = 1;
}
int plain_alloc_rs(struct ptlrpc_request *req, int msgsize)
{
struct ptlrpc_reply_state *rs;
- struct ptlrpc_bulk_sec_desc *bsd;
__u32 buflens[PLAIN_PACK_SEGMENTS] = { 0, };
int rs_size = sizeof(*rs);
ENTRY;
LASSERT(msgsize % 8 == 0);
+ buflens[PLAIN_PACK_HDR_OFF] = sizeof(struct plain_header);
buflens[PLAIN_PACK_MSG_OFF] = msgsize;
- if (req->rq_pack_bulk && (req->rq_bulk_read || req->rq_bulk_write)) {
- bsd = lustre_msg_buf(req->rq_reqbuf,
- PLAIN_PACK_BULK_OFF, sizeof(*bsd));
- LASSERT(bsd);
+ if (req->rq_pack_bulk && (req->rq_bulk_read || req->rq_bulk_write))
+ buflens[PLAIN_PACK_BULK_OFF] = PLAIN_BSD_SIZE;
- buflens[PLAIN_PACK_BULK_OFF] = bulk_sec_desc_size(
- bsd->bsd_hash_alg, 0,
- req->rq_bulk_read);
- }
rs_size += lustre_msg_size_v2(PLAIN_PACK_SEGMENTS, buflens);
rs = req->rq_reply_state;
{
struct ptlrpc_reply_state *rs = req->rq_reply_state;
struct lustre_msg_v2 *msg = rs->rs_repbuf;
+ struct plain_header *phdr;
int len;
ENTRY;
len = lustre_msg_size_v2(msg->lm_bufcount, msg->lm_buflens);
msg->lm_secflvr = req->rq_flvr.sf_rpc;
+
+ phdr = lustre_msg_buf(msg, PLAIN_PACK_HDR_OFF, 0);
+ phdr->ph_ver = 0;
+ phdr->ph_flags = 0;
+ phdr->ph_bulk_hash_alg = req->rq_flvr.u_bulk.hash.hash_alg;
+
if (req->rq_pack_bulk)
- msg->lm_secflvr |= PLAIN_WFLVR_FLAG_BULK;
+ phdr->ph_flags |= PLAIN_FL_BULK;
rs->rs_repdata_len = len;
int plain_svc_unwrap_bulk(struct ptlrpc_request *req,
struct ptlrpc_bulk_desc *desc)
{
- struct ptlrpc_reply_state *rs = req->rq_reply_state;
+ struct ptlrpc_reply_state *rs = req->rq_reply_state;
+ struct ptlrpc_bulk_sec_desc *bsdr, *bsdv;
+ struct plain_bulk_token *tokenr, *tokenv;
+ int rc;
- LASSERT(rs);
+ LASSERT(req->rq_bulk_write);
LASSERT(req->rq_pack_bulk);
- LASSERT(req->rq_reqbuf->lm_bufcount >= PLAIN_PACK_SEGMENTS);
- LASSERT(rs->rs_repbuf->lm_bufcount == PLAIN_PACK_SEGMENTS);
- return bulk_csum_svc(desc, req->rq_bulk_read,
- lustre_msg_buf(req->rq_reqbuf,
- PLAIN_PACK_BULK_OFF, 0),
- lustre_msg_buflen(req->rq_reqbuf,
- PLAIN_PACK_BULK_OFF),
- lustre_msg_buf(rs->rs_repbuf,
- PLAIN_PACK_BULK_OFF, 0),
- lustre_msg_buflen(rs->rs_repbuf,
- PLAIN_PACK_BULK_OFF));
+ bsdr = lustre_msg_buf(req->rq_reqbuf, PLAIN_PACK_BULK_OFF, 0);
+ tokenr = (struct plain_bulk_token *) bsdr->bsd_data;
+ bsdv = lustre_msg_buf(rs->rs_repbuf, PLAIN_PACK_BULK_OFF, 0);
+ tokenv = (struct plain_bulk_token *) bsdv->bsd_data;
+
+ bsdv->bsd_version = 0;
+ bsdv->bsd_type = SPTLRPC_BULK_DEFAULT;
+ bsdv->bsd_svc = bsdr->bsd_svc;
+ bsdv->bsd_flags = 0;
+
+ if (bsdr->bsd_svc == SPTLRPC_BULK_SVC_NULL)
+ return 0;
+
+ rc = plain_verify_bulk_csum(desc, req->rq_flvr.u_bulk.hash.hash_alg,
+ tokenr);
+ if (rc) {
+ bsdv->bsd_flags |= BSD_FL_ERR;
+ CERROR("bulk write: server verify failed: %d\n", rc);
+ }
+
+ return rc;
}
static
int plain_svc_wrap_bulk(struct ptlrpc_request *req,
struct ptlrpc_bulk_desc *desc)
{
- struct ptlrpc_reply_state *rs = req->rq_reply_state;
+ struct ptlrpc_reply_state *rs = req->rq_reply_state;
+ struct ptlrpc_bulk_sec_desc *bsdr, *bsdv;
+ struct plain_bulk_token *tokenr, *tokenv;
+ int rc;
- LASSERT(rs);
+ LASSERT(req->rq_bulk_read);
LASSERT(req->rq_pack_bulk);
- LASSERT(req->rq_reqbuf->lm_bufcount >= PLAIN_PACK_SEGMENTS);
- LASSERT(rs->rs_repbuf->lm_bufcount == PLAIN_PACK_SEGMENTS);
- return bulk_csum_svc(desc, req->rq_bulk_read,
- lustre_msg_buf(req->rq_reqbuf,
- PLAIN_PACK_BULK_OFF, 0),
- lustre_msg_buflen(req->rq_reqbuf,
- PLAIN_PACK_BULK_OFF),
- lustre_msg_buf(rs->rs_repbuf,
- PLAIN_PACK_BULK_OFF, 0),
- lustre_msg_buflen(rs->rs_repbuf,
- PLAIN_PACK_BULK_OFF));
+ bsdr = lustre_msg_buf(req->rq_reqbuf, PLAIN_PACK_BULK_OFF, 0);
+ tokenr = (struct plain_bulk_token *) bsdr->bsd_data;
+ bsdv = lustre_msg_buf(rs->rs_repbuf, PLAIN_PACK_BULK_OFF, 0);
+ tokenv = (struct plain_bulk_token *) bsdv->bsd_data;
+
+ bsdv->bsd_version = 0;
+ bsdv->bsd_type = SPTLRPC_BULK_DEFAULT;
+ bsdv->bsd_svc = bsdr->bsd_svc;
+ bsdv->bsd_flags = 0;
+
+ if (bsdr->bsd_svc == SPTLRPC_BULK_SVC_NULL)
+ return 0;
+
+ rc = plain_generate_bulk_csum(desc, req->rq_flvr.u_bulk.hash.hash_alg,
+ tokenv);
+ if (rc) {
+ CERROR("bulk read: server failed to compute "
+ "checksum: %d\n", rc);
+ } else {
+ if (OBD_FAIL_CHECK(OBD_FAIL_OSC_CHECKSUM_RECEIVE))
+ corrupt_bulk_data(desc);
+ }
+
+ return rc;
}
static struct ptlrpc_ctx_ops plain_ctx_ops = {
.release_ctx = plain_release_ctx,
.flush_ctx_cache = plain_flush_ctx_cache,
.alloc_reqbuf = plain_alloc_reqbuf,
- .alloc_repbuf = plain_alloc_repbuf,
.free_reqbuf = plain_free_reqbuf,
+ .alloc_repbuf = plain_alloc_repbuf,
.free_repbuf = plain_free_repbuf,
.enlarge_reqbuf = plain_enlarge_reqbuf,
};
goto err_req;
}
+ switch (lustre_msg_get_opc(req->rq_reqmsg)) {
+ case MDS_WRITEPAGE:
+ case OST_WRITE:
+ req->rq_bulk_write = 1;
+ break;
+ case MDS_READPAGE:
+ case OST_READ:
+ req->rq_bulk_read = 1;
+ break;
+ }
+
CDEBUG(D_NET, "got req "LPD64"\n", req->rq_xid);
req->rq_export = class_conn2export(
rm -rf $DIR/[df][0-9]*
-check_runas_id $RUNAS_ID $RUNAS
+check_runas_id $RUNAS_ID $RUNAS_ID $RUNAS
build_test_filter
test_8()
{
- sleep $TIMEOUT
+ local ATHISTORY=$(do_facet mds "find /sys/ -name at_history")
+ local ATOLDBASE=$(do_facet mds "cat $ATHISTORY")
+ do_facet mds "echo 8 >> $ATHISTORY"
+
$LCTL dk > /dev/null
debugsave
sysctl -w lnet.debug="+other"
+ mkdir -p $DIR/d8
+ chmod a+w $DIR/d8
+
+ REQ_DELAY=`lctl get_param -n mdc.${FSNAME}-MDT0000-mdc-*.timeouts |
+ awk '/portal 12/ {print $5}' | tail -1`
+ REQ_DELAY=$((${REQ_DELAY} + ${REQ_DELAY} / 4 + 5))
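+ # e.g. a measured portal 12 delay of 20s gives 20 + 20/4 + 5 = 30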
+
# sleep sometime in ctx handle
- do_facet mds lctl set_param fail_val=30
+ do_facet mds lctl set_param fail_val=$REQ_DELAY
#define OBD_FAIL_SEC_CTX_HDL_PAUSE 0x1204
do_facet mds lctl set_param fail_loc=0x1204
$RUNAS $LFS flushctx || error "can't flush ctx"
- $RUNAS df $DIR &
- DFPID=$!
- echo "waiting df (pid $TOUCHPID) to finish..."
- sleep 2 # give df a chance to really trigger context init rpc
+ $RUNAS touch $DIR/d8/f &
+ TOUCHPID=$!
+ echo "waiting for touch (pid $TOUCHPID) to finish..."
+ sleep 2 # give it a chance to really trigger context init rpc
do_facet mds sysctl -w lustre.fail_loc=0
- wait $DFPID || error "df should have succeeded"
+ wait $TOUCHPID || error "touch should have succeeded"
$LCTL dk | grep "Early reply #" || error "No early reply"
+
debugrestore
+ do_facet mds "echo $ATOLDBASE >> $ATHISTORY" || true
}
run_test 8 "Early reply sent for slow gss context negotiation"
# so each test should not assume any start flavor.
#
-test_50() {
- local sample=$TMP/sanity-gss-8
- local tdir=$MOUNT/dir8
- local iosize="256K"
- local hash_algs="adler32 crc32 md5 sha1 sha256 sha384 sha512 wp256 wp384 wp512"
-
- # create sample file with aligned size for direct i/o
- dd if=/dev/zero of=$sample bs=$iosize count=1 || error
- dd conv=notrunc if=/etc/termcap of=$sample bs=$iosize count=1 || error
-
- rm -rf $tdir
- mkdir $tdir || error "create dir $tdir"
-
- restore_to_default_flavor
-
- for alg in $hash_algs; do
- echo "Testing $alg..."
- flavor=krb5i-bulki:$alg/null
- set_rule $FSNAME any cli2ost $flavor
- wait_flavor cli2ost $flavor $cnt_cli2ost
-
- dd if=$sample of=$tdir/$alg oflag=direct,dsync bs=$iosize || error "$alg write"
- diff $sample $tdir/$alg || error "$alg read"
- done
-
- rm -rf $tdir
- rm -f $sample
-}
-run_test 50 "verify bulk hash algorithms works"
-
-test_51() {
- local s1=$TMP/sanity-gss-9.1
- local s2=$TMP/sanity-gss-9.2
- local s3=$TMP/sanity-gss-9.3
- local s4=$TMP/sanity-gss-9.4
- local tdir=$MOUNT/dir9
- local s1_size=4194304 # n * pagesize (4M)
- local s2_size=512 # n * blksize
- local s3_size=111 # n * blksize + m
- local s4_size=5 # m
- local cipher_algs="arc4 aes128 aes192 aes256 cast128 cast256 twofish128 twofish256"
-
- # create sample files for each situation
- rm -f $s1 $s2 $s2 $s4
- dd if=/dev/urandom of=$s1 bs=1M count=4 || error
- dd if=/dev/urandom of=$s2 bs=$s2_size count=1 || error
- dd if=/dev/urandom of=$s3 bs=$s3_size count=1 || error
- dd if=/dev/urandom of=$s4 bs=$s4_size count=1 || error
-
- rm -rf $tdir
- mkdir $tdir || error "create dir $tdir"
-
- restore_to_default_flavor
-
- #
- # different bulk data alignment will lead to different behavior of
- # the implementation: (n > 0; 0 < m < encryption_block_size)
- # - full page i/o
- # - partial page, size = n * encryption_block_size
- # - partial page, size = n * encryption_block_size + m
- # - partial page, size = m
- #
- for alg in $cipher_algs; do
- echo "Testing $alg..."
- flavor=krb5p-bulkp:sha1/$alg
- set_rule $FSNAME any cli2ost $flavor
- wait_flavor cli2ost $flavor $cnt_cli2ost
-
- # sync write
- dd if=$s1 of=$tdir/$alg.1 oflag=dsync bs=1M || error "write $alg.1"
- dd if=$s2 of=$tdir/$alg.2 oflag=dsync || error "write $alg.2"
- dd if=$s3 of=$tdir/$alg.3 oflag=dsync || error "write $alg.3"
- dd if=$s4 of=$tdir/$alg.4 oflag=dsync || error "write $alg.4"
-
- # remount client
- umount_client $MOUNT
- umount_client $MOUNT2
- mount_client $MOUNT
- mount_client $MOUNT2
-
- # read & compare
- diff $tdir/$alg.1 $s1 || error "read $alg.1"
- diff $tdir/$alg.2 $s2 || error "read $alg.2"
- diff $tdir/$alg.3 $s3 || error "read $alg.3"
- diff $tdir/$alg.4 $s4 || error "read $alg.4"
- done
-
- rm -rf $tdir
- rm -f $sample
-}
-run_test 51 "bulk data alignment test under encryption mode"
-
test_90() {
if [ "$SLOW" = "no" ]; then
total=10
}
test_77a() { # bug 10889
+ $GSS && skip "cannot run with gss" && return
[ ! -f $F77_TMP ] && setup_f77
set_checksums 1
dd if=$F77_TMP of=$DIR/$tfile bs=1M count=$F77SZ || error "dd error"
run_test 77a "normal checksum read/write operation ============="
test_77b() { # bug 10889
+ $GSS && skip "cannot run with gss" && return
[ ! -f $F77_TMP ] && setup_f77
#define OBD_FAIL_OSC_CHECKSUM_SEND 0x409
lctl set_param fail_loc=0x80000409
run_test 77b "checksum error on client write ===================="
test_77c() { # bug 10889
+ $GSS && skip "cannot run with gss" && return
[ ! -f $DIR/f77b ] && skip "requires 77b - skipping" && return
set_checksums 1
for algo in $CKSUM_TYPES; do
run_test 77c "checksum error on client read ==================="
test_77d() { # bug 10889
+ $GSS && skip "cannot run with gss" && return
#define OBD_FAIL_OSC_CHECKSUM_SEND 0x409
lctl set_param fail_loc=0x80000409
set_checksums 1
run_test 77d "checksum error on OST direct write ==============="
test_77e() { # bug 10889
+ $GSS && skip "cannot run with gss" && return
[ ! -f $DIR/f77 ] && skip "requires 77d - skipping" && return
#define OBD_FAIL_OSC_CHECKSUM_RECEIVE 0x408
lctl set_param fail_loc=0x80000408
run_test 77e "checksum error on OST direct read ================"
test_77f() { # bug 10889
+ $GSS && skip "cannot run with gss" && return
set_checksums 1
for algo in $CKSUM_TYPES; do
cancel_lru_locks osc
run_test 77f "repeat checksum error on write (expect error) ===="
test_77g() { # bug 10889
+ $GSS && skip "cannot run with gss" && return
remote_ost_nodsh && skip "remote OST with nodsh" && return
[ ! -f $F77_TMP ] && setup_f77
run_test 77g "checksum error on OST write ======================"
test_77h() { # bug 10889
+ $GSS && skip "cannot run with gss" && return
remote_ost_nodsh && skip "remote OST with nodsh" && return
[ ! -f $DIR/f77g ] && skip "requires 77g - skipping" && return
run_test 77h "checksum error on OST read ======================="
test_77i() { # bug 13805
+ $GSS && skip "cannot run with gss" && return
#define OBD_FAIL_OSC_CONNECT_CKSUM 0x40b
lctl set_param fail_loc=0x40b
remount_client $MOUNT
run_test 77i "client not supporting OSD_CONNECT_CKSUM =========="
test_77j() { # bug 13805
+ $GSS && skip "cannot run with gss" && return
#define OBD_FAIL_OSC_CKSUM_ADLER_ONLY 0x40c
lctl set_param fail_loc=0x40c
remount_client $MOUNT
init_gss() {
if $GSS; then
start_gss_daemons
+
+ if [ -n "$LGSS_KEYRING_DEBUG" ]; then
+ echo $LGSS_KEYRING_DEBUG > /proc/fs/lustre/sptlrpc/gss/lgss_keyring/debug_level
+ fi
fi
}