AC_DEFUN([LC_CONFIG_SRCDIR],
[AC_CONFIG_SRCDIR([lustre/obdclass/obdo.c])
])
-
+
#
# LC_PATH_DEFAULTS
#
LC_CONFIG_GSS_KEYRING
LC_CONFIG_SUNRPC
- LB_LINUX_CONFIG_IM([CRYPTO_DES],[],
- [AC_MSG_WARN([kernel DES support is recommended when using GSS.])])
LB_LINUX_CONFIG_IM([CRYPTO_MD5],[],
[AC_MSG_WARN([kernel MD5 support is recommended when using GSS.])])
+ LB_LINUX_CONFIG_IM([CRYPTO_SHA1],[],
+ [AC_MSG_WARN([kernel SHA1 support is recommended when using GSS.])])
LB_LINUX_CONFIG_IM([CRYPTO_SHA256],[],
[AC_MSG_WARN([kernel SHA256 support is recommended when using GSS.])])
LB_LINUX_CONFIG_IM([CRYPTO_SHA512],[],
[AC_MSG_WARN([kernel SHA512 support is recommended when using GSS.])])
+ LB_LINUX_CONFIG_IM([CRYPTO_WP512],[],
+ [AC_MSG_WARN([kernel WP512 support is recommended when using GSS.])])
LB_LINUX_CONFIG_IM([CRYPTO_ARC4],[],
[AC_MSG_WARN([kernel ARC4 support is recommended when using GSS.])])
-
+ LB_LINUX_CONFIG_IM([CRYPTO_DES],[],
+ [AC_MSG_WARN([kernel DES support is recommended when using GSS.])])
+ LB_LINUX_CONFIG_IM([CRYPTO_TWOFISH],[],
+ [AC_MSG_WARN([kernel TWOFISH support is recommended when using GSS.])])
+ LB_LINUX_CONFIG_IM([CRYPTO_CAST6],[],
+ [AC_MSG_WARN([kernel CAST6 support is recommended when using GSS.])])
dnl FIXME
dnl the AES symbol is usually tied to the arch, e.g. CRYPTO_AES_586
dnl FIXME
# utils/llverfs.c
AC_CHECK_HEADERS([ext2fs/ext2fs.h])
+# include/linux/obd_support.h
+AC_CHECK_HEADERS([zlib.h])
+
+# check for -lz support
+AC_CHECK_LIB(z, [adler32],
+ [
+ ZLIB="-lz"
+ AC_DEFINE([HAVE_ADLER], 1, [support adler32 checksum type])
+ ],
+ [
+ ZLIB=""
+ AC_MSG_WARN([No zlib-devel package found, unable to use adler32 checksum])
+ ])
+AC_SUBST(ZLIB)
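
A minimal userspace sketch (illustrative only, not part of the patch) of how code can gate on the HAVE_ADLER define produced by the configure check above; the fallback message here is hypothetical:

    #include <stdio.h>
    #ifdef HAVE_ADLER
    # include <zlib.h>
    #endif

    int main(void)
    {
            #ifdef HAVE_ADLER
            /* adler32() comes from -lz; seed with 1 per zlib convention */
            printf("adler32 = %lu\n",
                   adler32(1L, (const unsigned char *)"abc", 3));
            #else
            /* configure found no zlib-devel; callers fall back to crc32 */
            printf("adler32 unavailable, falling back to crc32\n");
            #endif
            return 0;
    }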
+
# Super safe df
AC_ARG_ENABLE([mindf],
AC_HELP_STRING([--enable-mindf],
spin_unlock(lock);
}
+#if defined(__KERNEL__) && !defined(HAVE_ADLER)
+/* zlib_adler32() is an inline function defined in zutil.h */
+#define HAVE_ADLER
+#endif
#endif /* __LINUX_OBD_H */
#endif
#ifdef __KERNEL__
+# include <linux/zutil.h>
+# ifndef HAVE_ADLER
+# define HAVE_ADLER
+# endif
+#else /* ! __KERNEL__ */
+# ifdef HAVE_ADLER
+# include <zlib.h>
+
+static inline __u32 zlib_adler32(__u32 adler, unsigned char const *p,
+ size_t len)
+{
+ return adler32(adler, p, len);
+}
+# endif
+#endif /* __KERNEL__ */
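
An aside on the wrapper above (a standalone sketch, assuming userspace zlib): adler32 is seeded with 1, unlike crc32's ~0, and folds incrementally across buffers, which is what lets the bulk checksum code walk the i/o pages one at a time:

    #include <stdio.h>
    #include <string.h>
    #include <zlib.h>

    int main(void)
    {
            const unsigned char buf[] = "bulk page payload";
            size_t half = sizeof(buf) / 2;

            /* one-shot digest; the adler32 seed must be 1 */
            uLong whole = adler32(1L, buf, sizeof(buf));

            /* incremental digest over two chunks must match */
            uLong part = adler32(1L, buf, half);
            part = adler32(part, buf + half, sizeof(buf) - half);

            printf("digests match: %d\n", whole == part);
            return 0;
    }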
+
+#ifdef __KERNEL__
# include <linux/types.h>
# include <linux/blkdev.h>
# include <lvfs.h>
struct sptlrpc_flavor {
__u16 sf_rpc; /* rpc flavor */
- __u8 sf_bulk_priv; /* bulk encrypt alg */
- __u8 sf_bulk_csum; /* bulk checksum alg */
+ __u8 sf_bulk_ciph; /* bulk cipher alg */
+ __u8 sf_bulk_hash; /* bulk hash alg */
__u32 sf_flags; /* general flags */
};
/*
* bulk flavors
*/
-enum bulk_checksum_alg {
- BULK_CSUM_ALG_NULL = 0,
- BULK_CSUM_ALG_CRC32,
- BULK_CSUM_ALG_MD5,
- BULK_CSUM_ALG_SHA1,
- BULK_CSUM_ALG_SHA256,
- BULK_CSUM_ALG_SHA384,
- BULK_CSUM_ALG_SHA512,
- BULK_CSUM_ALG_MAX
+enum sptlrpc_bulk_hash_alg {
+ BULK_HASH_ALG_NULL = 0,
+ BULK_HASH_ALG_ADLER32,
+ BULK_HASH_ALG_CRC32,
+ BULK_HASH_ALG_MD5,
+ BULK_HASH_ALG_SHA1,
+ BULK_HASH_ALG_SHA256,
+ BULK_HASH_ALG_SHA384,
+ BULK_HASH_ALG_SHA512,
+ BULK_HASH_ALG_WP256,
+ BULK_HASH_ALG_WP384,
+ BULK_HASH_ALG_WP512,
+ BULK_HASH_ALG_MAX
};
-enum bulk_encrypt_alg {
- BULK_PRIV_ALG_NULL = 0,
- BULK_PRIV_ALG_ARC4,
- BULK_PRIV_ALG_MAX
+enum sptlrpc_bulk_cipher_alg {
+ BULK_CIPH_ALG_NULL = 0,
+ BULK_CIPH_ALG_ARC4,
+ BULK_CIPH_ALG_AES128,
+ BULK_CIPH_ALG_AES192,
+ BULK_CIPH_ALG_AES256,
+ BULK_CIPH_ALG_CAST128,
+ BULK_CIPH_ALG_CAST256,
+ BULK_CIPH_ALG_TWOFISH128,
+ BULK_CIPH_ALG_TWOFISH256,
+ BULK_CIPH_ALG_MAX
};
+struct sptlrpc_hash_type {
+ char *sht_name;
+ char *sht_tfm_name;
+ unsigned int sht_size;
+};
+
+struct sptlrpc_ciph_type {
+ char *sct_name;
+ char *sct_tfm_name;
+ __u32 sct_tfm_flags;
+ unsigned int sct_ivsize;
+ unsigned int sct_keysize;
+};
+
+const struct sptlrpc_hash_type *sptlrpc_get_hash_type(__u8 hash_alg);
+const char * sptlrpc_get_hash_name(__u8 hash_alg);
+const struct sptlrpc_ciph_type *sptlrpc_get_ciph_type(__u8 ciph_alg);
+const char *sptlrpc_get_ciph_name(__u8 ciph_alg);
+
+#define CIPHER_MAX_BLKSIZE (16)
+#define CIPHER_MAX_KEYSIZE (64)
+
struct ptlrpc_bulk_sec_desc {
- __u32 bsd_version;
- __u8 bsd_csum_alg; /* checksum algorithm */
- __u8 bsd_priv_alg; /* encrypt algorithm */
- __u16 bsd_pad;
- __u8 bsd_iv[16]; /* encrypt iv */
+ __u8 bsd_version;
+ __u8 bsd_flags;
+ __u8 bsd_pad[4];
+ __u8 bsd_hash_alg; /* hash algorithm */
+ __u8 bsd_ciph_alg; /* cipher algorithm */
+ __u8 bsd_key[CIPHER_MAX_KEYSIZE]; /* encrypt key seed */
__u8 bsd_csum[0];
};
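
As a sanity check on the new layout (a standalone sketch restating the struct above, not patch code): every member is byte-sized, so the fixed header is exactly 72 bytes with no implicit padding, and bsd_csum[] is a flexible tail holding the hash value:

    #include <stdio.h>

    typedef unsigned char __u8;

    #define CIPHER_MAX_KEYSIZE (64)

    struct ptlrpc_bulk_sec_desc {
            __u8 bsd_version;
            __u8 bsd_flags;
            __u8 bsd_pad[4];
            __u8 bsd_hash_alg;                /* hash algorithm */
            __u8 bsd_ciph_alg;                /* cipher algorithm */
            __u8 bsd_key[CIPHER_MAX_KEYSIZE]; /* encrypt key seed */
            __u8 bsd_csum[0];
    };

    int main(void)
    {
            /* 1 + 1 + 4 + 1 + 1 + 64 = 72 bytes, nothing hidden */
            printf("fixed size: %zu\n",
                   sizeof(struct ptlrpc_bulk_sec_desc));
            return 0;
    }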
-const char * sptlrpc_bulk_csum_alg2name(__u8 csum_alg);
-const char * sptlrpc_bulk_priv_alg2name(__u8 priv_alg);
-__u32 sptlrpc_bulk_priv_alg2flags(__u8 priv_alg);
/*
* lprocfs
int sptlrpc_unpack_user_desc(struct lustre_msg *msg, int offset);
/* bulk helpers (internal use only by policies) */
-int bulk_sec_desc_size(__u8 csum_alg, int request, int read);
+int bulk_sec_desc_size(__u8 hash_alg, int request, int read);
int bulk_sec_desc_unpack(struct lustre_msg *msg, int offset);
int bulk_csum_cli_request(struct ptlrpc_bulk_desc *desc, int read,
LASSERT(exp->exp_connection);
if (data) {
- LASSERT((ocd->ocd_connect_flags & data->ocd_connect_flags) ==
- ocd->ocd_connect_flags);
+ LASSERTF((ocd->ocd_connect_flags & data->ocd_connect_flags) ==
+ ocd->ocd_connect_flags, "old "LPX64", new "LPX64"\n",
+ data->ocd_connect_flags, ocd->ocd_connect_flags);
data->ocd_connect_flags = ocd->ocd_connect_flags;
}
if test x$OS = xAIX; then
$LD -shared -o $CWD/liblustre.so $ALL_OBJS -lpthread -Xlinker -bnoipath ../../libsyscall.so
else
-$LD -shared -nostdlib -o $CWD/liblustre.so $ALL_OBJS $CAP_LIBS $PTHREAD_LIBS
+$LD -shared -nostdlib -o $CWD/liblustre.so $ALL_OBJS $CAP_LIBS $PTHREAD_LIBS $ZLIB
fi
rm -rf $sysio_tmp
AM_CFLAGS = $(LLCFLAGS)
AM_LIBS = $(LIBEFENCE) $(LIBREADLINE)
-LLIB_EXEC = $(top_builddir)/lustre/utils/liblustreapi.a $(top_builddir)/lustre/liblustre/liblustre.a $(CAP_LIBS) $(PTHREAD_LIBS)
+LLIB_EXEC = $(top_builddir)/lustre/utils/liblustreapi.a $(top_builddir)/lustre/liblustre/liblustre.a $(CAP_LIBS) $(PTHREAD_LIBS) $(ZLIB)
if LIBLUSTRE
noinst_LIBRARIES = libtestcommon.a
echo_test_SOURCES = echo_test.c $(top_srcdir)/lustre/utils/parser.c $(top_srcdir)/lustre/utils/obd.c $(top_srcdir)/lustre/utils/lustre_cfg.c
echo_test_CFLAGS = $(LL_CFLAGS)
-echo_test_LDADD = $(top_builddir)/lustre/liblustre/liblsupport.a $(LIBREADLINE) $(CAP_LIBS) $(PTHREAD_LIBS)
+echo_test_LDADD = $(top_builddir)/lustre/liblustre/liblsupport.a $(LIBREADLINE) $(CAP_LIBS) $(PTHREAD_LIBS) $(ZLIB)
echo_test_DEPENDENCIES=$(top_builddir)/lustre/liblustre/liblsupport.a
sanity_SOURCES = sanity.c
/* size[REQ_REC_OFF] still sizeof (*body) */
if (opc == OST_WRITE) {
if (unlikely(cli->cl_checksum) &&
- req->rq_flvr.sf_bulk_csum == BULK_CSUM_ALG_NULL) {
+ req->rq_flvr.sf_bulk_hash == BULK_HASH_ALG_NULL) {
body->oa.o_valid |= OBD_MD_FLCKSUM;
body->oa.o_cksum = osc_checksum_bulk(requested_nob,
page_count, pga,
sizeof(__u32) * niocount);
} else {
if (unlikely(cli->cl_checksum) &&
- req->rq_flvr.sf_bulk_csum == BULK_CSUM_ALG_NULL)
+ req->rq_flvr.sf_bulk_hash == BULK_HASH_ALG_NULL)
body->oa.o_valid |= OBD_MD_FLCKSUM;
req_capsule_set_size(pill, &RMF_NIOBUF_REMOTE, RCL_SERVER, 0);
/* 1 RC for the whole I/O */
/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
* vim:expandtab:shiftwidth=8:tabstop=8:
*
- * Copyright (C) 2006 Cluster File Systems, Inc.
+ * Copyright (C) 2008 Sun Microsystems, Inc.
+ * Author: Eric Mei <eric.mei@sun.com>
+ * Copyright (C) 2006,2007 Cluster File Systems, Inc.
* Author: Eric Mei <ericm@clusterfs.com>
*
* This file is part of Lustre, http://www.lustre.org.
#include "gss_internal.h"
#include "gss_api.h"
-static
-int do_bulk_privacy(struct gss_ctx *gctx,
- struct ptlrpc_bulk_desc *desc,
- int encrypt, __u32 alg,
- struct ptlrpc_bulk_sec_desc *bsd)
+static __u8 zero_iv[CIPHER_MAX_BLKSIZE] = { 0, };
+
+static void buf_to_sl(struct scatterlist *sl,
+ void *buf, unsigned int len)
+{
+ sl->page = virt_to_page(buf);
+ sl->offset = offset_in_page(buf);
+ sl->length = len;
+}
+
+/*
+ * CTS CBC encryption:
+ * 1. X(n-1) = P(n-1)
+ * 2. E(n-1) = Encrypt(K, X(n-1))
+ * 3. C(n) = HEAD(E(n-1))
+ * 4. P = P(n) | 0
+ * 5. D(n) = E(n-1) XOR P
+ * 6. C(n-1) = Encrypt(K, D(n))
+ *
+ * CTS encryption using standard CBC interface:
+ * 1. pad the last partial block with 0.
+ * 2. do CBC encryption.
+ * 3. swap the last two ciphertext blocks.
+ * 4. truncate to original plaintext size.
+ */
+static int cbc_cts_encrypt(struct crypto_tfm *tfm,
+ struct scatterlist *sld,
+ struct scatterlist *sls)
+{
+ struct scatterlist slst, sldt;
+ void *data;
+ __u8 sbuf[CIPHER_MAX_BLKSIZE];
+ __u8 dbuf[CIPHER_MAX_BLKSIZE];
+ unsigned int blksize, blks, tail;
+ int rc;
+
+ blksize = crypto_tfm_alg_blocksize(tfm);
+ blks = sls->length / blksize;
+ tail = sls->length % blksize;
+ LASSERT(blks > 0 && tail > 0);
+
+ /* pad tail block with 0, copy to sbuf */
+ data = cfs_kmap(sls->page);
+ memcpy(sbuf, data + sls->offset + blks * blksize, tail);
+ memset(sbuf + tail, 0, blksize - tail);
+ cfs_kunmap(sls->page);
+
+ buf_to_sl(&slst, sbuf, blksize);
+ buf_to_sl(&sldt, dbuf, blksize);
+
+ /* encrypt head */
+ rc = crypto_cipher_encrypt(tfm, sld, sls, sls->length - tail);
+ if (unlikely(rc)) {
+ CERROR("encrypt head (%u) data: %d\n", sls->length - tail, rc);
+ return rc;
+ }
+ /* encrypt tail */
+ rc = crypto_cipher_encrypt(tfm, &sldt, &slst, blksize);
+ if (unlikely(rc)) {
+ CERROR("encrypt tail (%u) data: %d\n", slst.length, rc);
+ return rc;
+ }
+
+ /* swap C(n) and C(n-1); if n == 1, then C(n-1) is the IV */
+ data = cfs_kmap(sld->page);
+
+ memcpy(data + sld->offset + blks * blksize,
+ data + sld->offset + (blks - 1) * blksize, tail);
+ memcpy(data + sld->offset + (blks - 1) * blksize, dbuf, blksize);
+ cfs_kunmap(sld->page);
+
+ return 0;
+}
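
To make the index arithmetic above concrete (an illustrative userspace sketch with no kernel crypto): for blksize = 16 and a 20-byte payload, blks = 1 and tail = 4, so the zero-padded tail is encrypted as one extra CBC block, the last two ciphertext blocks are swapped, and only tail bytes of the final block are kept, leaving the ciphertext exactly as long as the plaintext:

    #include <stdio.h>

    int main(void)
    {
            unsigned int blksize = 16;               /* e.g. AES block size */
            unsigned int length  = 20;               /* unaligned payload */
            unsigned int blks    = length / blksize; /* full blocks: 1 */
            unsigned int tail    = length % blksize; /* leftover bytes: 4 */

            /* bytes run through CBC, including the padded tail block */
            unsigned int enc_bytes = (blks + 1) * blksize;
            /* bytes kept after the swap and truncate steps */
            unsigned int out_bytes = blks * blksize + tail;

            printf("encrypted %u bytes, transmitted %u bytes\n",
                   enc_bytes, out_bytes);
            return 0;
    }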
+
+/*
+ * CTS CBC decryption:
+ * 1. D(n) = Decrypt(K, C(n-1))
+ * 2. C = C(n) | 0
+ * 3. X(n) = D(n) XOR C
+ * 4. P(n) = HEAD(X(n))
+ * 5. E(n-1) = C(n) | TAIL(X(n))
+ * 6. X(n-1) = Decrypt(K, E(n-1))
+ * 7. P(n-1) = X(n-1) XOR C(n-2)
+ *
+ * CTS decryption using standard CBC interface:
+ * 1. D(n) = Decrypt(K, C(n-1))
+ * 2. C(n) = C(n) | TAIL(D(n))
+ * 3. swap the last two ciphertext blocks.
+ * 4. do CBC decryption.
+ * 5. truncate to original ciphertext size.
+ */
+static int cbc_cts_decrypt(struct crypto_tfm *tfm,
+ struct scatterlist *sld,
+ struct scatterlist *sls)
+{
+ struct scatterlist slst, sldt;
+ void *data;
+ __u8 sbuf[CIPHER_MAX_BLKSIZE];
+ __u8 dbuf[CIPHER_MAX_BLKSIZE];
+ unsigned int blksize, blks, tail;
+ int rc;
+
+ blksize = crypto_tfm_alg_blocksize(tfm);
+ blks = sls->length / blksize;
+ tail = sls->length % blksize;
+ LASSERT(blks > 0 && tail > 0);
+
+ /* save current IV, and set IV to zero */
+ crypto_cipher_get_iv(tfm, sbuf, blksize);
+ crypto_cipher_set_iv(tfm, zero_iv, blksize);
+
+ /* D(n) = Decrypt(K, C(n-1)) */
+ slst = *sls;
+ slst.offset += (blks - 1) * blksize;
+ slst.length = blksize;
+
+ buf_to_sl(&sldt, dbuf, blksize);
+
+ rc = crypto_cipher_decrypt(tfm, &sldt, &slst, blksize);
+ if (unlikely(rc)) {
+ CERROR("decrypt C(n-1) (%u): %d\n", slst.length, rc);
+ return rc;
+ }
+
+ /* restore IV */
+ crypto_cipher_set_iv(tfm, sbuf, blksize);
+
+ data = cfs_kmap(sls->page);
+ /* C(n) = C(n) | TAIL(D(n)) */
+ memcpy(dbuf, data + sls->offset + blks * blksize, tail);
+ /* swap C(n) and C(n-1) */
+ memcpy(sbuf, data + sls->offset + (blks - 1) * blksize, blksize);
+ memcpy(data + sls->offset + (blks - 1) * blksize, dbuf, blksize);
+ cfs_kunmap(sls->page);
+
+ /* do cbc decrypt */
+ buf_to_sl(&slst, sbuf, blksize);
+ buf_to_sl(&sldt, dbuf, blksize);
+
+ /* decrypt head */
+ rc = crypto_cipher_decrypt(tfm, sld, sls, sls->length - tail);
+ if (unlikely(rc)) {
+ CERROR("decrypt head (%u) data: %d\n", sls->length - tail, rc);
+ return rc;
+ }
+ /* decrypt tail */
+ rc = crypto_cipher_decrypt(tfm, &sldt, &slst, blksize);
+ if (unlikely(rc)) {
+ CERROR("decrypt tail (%u) data: %d\n", slst.length, rc);
+ return rc;
+ }
+
+ /* truncate to original ciphertext size */
+ data = cfs_kmap(sld->page);
+ memcpy(data + sld->offset + blks * blksize, dbuf, tail);
+ cfs_kunmap(sld->page);
+
+ return 0;
+}
+
+static inline int do_cts_tfm(struct crypto_tfm *tfm,
+ int encrypt,
+ struct scatterlist *sld,
+ struct scatterlist *sls)
+{
+ LASSERT(tfm->crt_cipher.cit_mode == CRYPTO_TFM_MODE_CBC);
+
+ if (encrypt)
+ return cbc_cts_encrypt(tfm, sld, sls);
+ else
+ return cbc_cts_decrypt(tfm, sld, sls);
+}
+
+/*
+ * normal encrypt/decrypt of data of even blocksize
+ */
+static inline int do_cipher_tfm(struct crypto_tfm *tfm,
+ int encrypt,
+ struct scatterlist *sld,
+ struct scatterlist *sls)
+{
+ if (encrypt)
+ return crypto_cipher_encrypt(tfm, sld, sls, sls->length);
+ else
+ return crypto_cipher_decrypt(tfm, sld, sls, sls->length);
+}
+
+static struct crypto_tfm *get_stream_cipher(__u8 *key, unsigned int keylen)
+{
+ const struct sptlrpc_ciph_type *ct;
+ struct crypto_tfm *tfm;
+ int rc;
+
+ /* using ARC4, the only stream cipher in linux for now */
+ ct = sptlrpc_get_ciph_type(BULK_CIPH_ALG_ARC4);
+ LASSERT(ct);
+
+ tfm = crypto_alloc_tfm(ct->sct_tfm_name, ct->sct_tfm_flags);
+ if (tfm == NULL) {
+ CERROR("Failed to allocate stream TFM %s\n", ct->sct_name);
+ return NULL;
+ }
+ LASSERT(crypto_tfm_alg_blocksize(tfm));
+
+ if (keylen > ct->sct_keysize)
+ keylen = ct->sct_keysize;
+
+ LASSERT(keylen >= crypto_tfm_alg_min_keysize(tfm));
+ LASSERT(keylen <= crypto_tfm_alg_max_keysize(tfm));
+
+ rc = crypto_cipher_setkey(tfm, key, keylen);
+ if (rc) {
+ CERROR("Failed to set key for TFM %s: %d\n", ct->sct_name, rc);
+ crypto_free_tfm(tfm);
+ return NULL;
+ }
+
+ return tfm;
+}
+
+static int do_bulk_privacy(struct gss_ctx *gctx,
+ struct ptlrpc_bulk_desc *desc,
+ int encrypt, __u32 alg,
+ struct ptlrpc_bulk_sec_desc *bsd)
{
+ const struct sptlrpc_ciph_type *ct = sptlrpc_get_ciph_type(alg);
struct crypto_tfm *tfm;
- struct scatterlist sg, sg2, *sgd;
- unsigned int blksize;
+ struct crypto_tfm *stfm = NULL; /* backup stream cipher */
+ struct scatterlist sls, sld, *sldp;
+ unsigned int blksize, keygen_size;
int i, rc;
- __u8 local_iv[sizeof(bsd->bsd_iv)];
+ __u8 key[CIPHER_MAX_KEYSIZE];
- LASSERT(alg < BULK_PRIV_ALG_MAX);
+ LASSERT(ct);
if (encrypt)
- bsd->bsd_priv_alg = BULK_PRIV_ALG_NULL;
+ bsd->bsd_ciph_alg = BULK_CIPH_ALG_NULL;
- if (alg == BULK_PRIV_ALG_NULL)
+ if (alg == BULK_CIPH_ALG_NULL)
return 0;
- tfm = crypto_alloc_tfm(sptlrpc_bulk_priv_alg2name(alg),
- sptlrpc_bulk_priv_alg2flags(alg));
+ if (desc->bd_iov_count <= 0) {
+ if (encrypt)
+ bsd->bsd_ciph_alg = alg;
+ return 0;
+ }
+
+ tfm = crypto_alloc_tfm(ct->sct_tfm_name, ct->sct_tfm_flags);
if (tfm == NULL) {
- CERROR("Failed to allocate TFM %s\n",
- sptlrpc_bulk_priv_alg2name(alg));
+ CERROR("Failed to allocate TFM %s\n", ct->sct_name);
return -ENOMEM;
}
-
blksize = crypto_tfm_alg_blocksize(tfm);
- LASSERT(blksize <= sizeof(local_iv));
- if (encrypt)
- get_random_bytes(bsd->bsd_iv, sizeof(bsd->bsd_iv));
+ LASSERT(crypto_tfm_alg_max_keysize(tfm) >= ct->sct_keysize);
+ LASSERT(crypto_tfm_alg_min_keysize(tfm) <= ct->sct_keysize);
+ LASSERT(ct->sct_ivsize == 0 ||
+ crypto_tfm_alg_ivsize(tfm) == ct->sct_ivsize);
+ LASSERT(ct->sct_keysize <= CIPHER_MAX_KEYSIZE);
+ LASSERT(blksize <= CIPHER_MAX_BLKSIZE);
+
+ /* generate a random key seed and compute the secret key based on it.
+ * note that depending on the algorithm lgss_plain_encrypt() uses, the
+ * key size might need to be a multiple of its block size. so for
+ * simplicity we round it up to n * CIPHER_MAX_BLKSIZE, padding with 0
+ * (e.g. a 20-byte key seed becomes 32 bytes with a 16-byte max block
+ * size) */
+ keygen_size = (ct->sct_keysize + CIPHER_MAX_BLKSIZE - 1) &
+ ~(CIPHER_MAX_BLKSIZE - 1);
+ if (encrypt) {
+ get_random_bytes(bsd->bsd_key, ct->sct_keysize);
+ if (ct->sct_keysize < keygen_size)
+ memset(bsd->bsd_key + ct->sct_keysize, 0,
+ keygen_size - ct->sct_keysize);
+ }
- /* compute the secret iv */
- rc = lgss_plain_encrypt(gctx, 0,
- sizeof(local_iv), bsd->bsd_iv, local_iv);
+ rc = lgss_plain_encrypt(gctx, 0, keygen_size, bsd->bsd_key, key);
if (rc) {
- CERROR("failed to compute secret iv: %d\n", rc);
+ CERROR("failed to compute secret key: %d\n", rc);
goto out;
}
- rc = crypto_cipher_setkey(tfm, local_iv, sizeof(local_iv));
+ rc = crypto_cipher_setkey(tfm, key, ct->sct_keysize);
if (rc) {
- CERROR("Failed to set key for TFM %s: %d\n",
- sptlrpc_bulk_priv_alg2name(alg), rc);
+ CERROR("Failed to set key for TFM %s: %d\n", ct->sct_name, rc);
goto out;
}
+ /* stream cipher doesn't need iv */
+ if (blksize > 1)
+ crypto_cipher_set_iv(tfm, zero_iv, blksize);
+
for (i = 0; i < desc->bd_iov_count; i++) {
- sg.page = desc->bd_iov[i].kiov_page;
- sg.offset = desc->bd_iov[i].kiov_offset;
- sg.length = desc->bd_iov[i].kiov_len;
+ sls.page = desc->bd_iov[i].kiov_page;
+ sls.offset = desc->bd_iov[i].kiov_offset;
+ sls.length = desc->bd_iov[i].kiov_len;
+
+ if (unlikely(sls.length == 0)) {
+ CWARN("page %d with 0 length data?\n", i);
+ continue;
+ }
+
+ if (unlikely(sls.offset % blksize)) {
+ CERROR("page %d with odd offset %u, TFM %s\n",
+ i, sls.offset, ct->sct_name);
+ rc = -EINVAL;
+ goto out;
+ }
if (desc->bd_enc_pages) {
- sg2.page = desc->bd_enc_pages[i];
- sg2.offset = desc->bd_iov[i].kiov_offset;
- sg2.length = desc->bd_iov[i].kiov_len;
+ sld.page = desc->bd_enc_pages[i];
+ sld.offset = desc->bd_iov[i].kiov_offset;
+ sld.length = desc->bd_iov[i].kiov_len;
- sgd = &sg2;
- } else
- sgd = &sg;
+ sldp = &sld;
+ } else {
+ sldp = &sls;
+ }
- if (encrypt)
- rc = crypto_cipher_encrypt(tfm, sgd, &sg, sg.length);
- else
- rc = crypto_cipher_decrypt(tfm, sgd, &sg, sg.length);
+ if (likely(sls.length % blksize == 0)) {
+ /* data length is n * blocksize, do the normal tfm */
+ rc = do_cipher_tfm(tfm, encrypt, sldp, &sls);
+ } else if (sls.length < blksize) {
+ /* odd data length smaller than 1 block: CTS
+ * doesn't work in this case because it would require
+ * transferring a modified IV to the peer. here we use
+ * a "backup" stream cipher to do the tfm */
+ if (stfm == NULL) {
+ stfm = get_stream_cipher(key, ct->sct_keysize);
+ if (stfm == NULL) {
+ rc = -ENOMEM;
+ goto out;
+ }
+ }
+ rc = do_cipher_tfm(stfm, encrypt, sldp, &sls);
+ } else {
+ /* odd data length but > 1 block, do CTS tfm */
+ rc = do_cts_tfm(tfm, encrypt, sldp, &sls);
+ }
- LASSERT(rc == 0);
+ if (unlikely(rc)) {
+ CERROR("error %s page %d/%d: %d\n",
+ encrypt ? "encrypt" : "decrypt",
+ i + 1, desc->bd_iov_count, rc);
+ goto out;
+ }
if (desc->bd_enc_pages)
desc->bd_iov[i].kiov_page = desc->bd_enc_pages[i];
-
- /* although the procedure might be lengthy, the crypto functions
- * internally called cond_resched() from time to time.
- */
}
if (encrypt)
- bsd->bsd_priv_alg = alg;
+ bsd->bsd_ciph_alg = alg;
out:
+ if (stfm)
+ crypto_free_tfm(stfm);
+
crypto_free_tfm(tfm);
return rc;
}
/* make checksum */
rc = bulk_csum_cli_request(desc, req->rq_bulk_read,
- req->rq_flvr.sf_bulk_csum, msg, offset);
+ req->rq_flvr.sf_bulk_hash, msg, offset);
if (rc) {
CERROR("client bulk %s: failed to generate checksum: %d\n",
req->rq_bulk_read ? "read" : "write", rc);
RETURN(rc);
}
- if (req->rq_flvr.sf_bulk_priv == BULK_PRIV_ALG_NULL)
+ if (req->rq_flvr.sf_bulk_ciph == BULK_CIPH_ALG_NULL)
RETURN(0);
/* previous bulk_csum_cli_request() has verified bsdr is good */
bsdr = lustre_msg_buf(msg, offset, 0);
if (req->rq_bulk_read) {
- bsdr->bsd_priv_alg = req->rq_flvr.sf_bulk_priv;
+ bsdr->bsd_ciph_alg = req->rq_flvr.sf_bulk_ciph;
RETURN(0);
}
LASSERT(gctx->gc_mechctx);
rc = do_bulk_privacy(gctx->gc_mechctx, desc, 1,
- req->rq_flvr.sf_bulk_priv, bsdr);
+ req->rq_flvr.sf_bulk_ciph, bsdr);
if (rc)
CERROR("bulk write: client failed to encrypt pages\n");
if (req->rq_bulk_read) {
bsdr = lustre_msg_buf(rmsg, roff, 0);
- if (bsdr->bsd_priv_alg == BULK_PRIV_ALG_NULL)
+ if (bsdr->bsd_ciph_alg == BULK_CIPH_ALG_NULL)
goto verify_csum;
bsdv = lustre_msg_buf(vmsg, voff, 0);
- if (bsdr->bsd_priv_alg != bsdv->bsd_priv_alg) {
+ if (bsdr->bsd_ciph_alg != bsdv->bsd_ciph_alg) {
CERROR("bulk read: cipher algorithm mismatch: client "
"request %s but server reply with %s. try to "
"use the new one for decryption\n",
- sptlrpc_bulk_priv_alg2name(bsdr->bsd_priv_alg),
- sptlrpc_bulk_priv_alg2name(bsdv->bsd_priv_alg));
+ sptlrpc_get_ciph_name(bsdr->bsd_ciph_alg),
+ sptlrpc_get_ciph_name(bsdv->bsd_ciph_alg));
}
gctx = container_of(ctx, struct gss_cli_ctx, gc_base);
LASSERT(gctx->gc_mechctx);
rc = do_bulk_privacy(gctx->gc_mechctx, desc, 0,
- bsdv->bsd_priv_alg, bsdv);
+ bsdv->bsd_ciph_alg, bsdv);
if (rc) {
CERROR("bulk read: client failed to decrypt data\n");
RETURN(rc);
LASSERT(grctx->src_ctx->gsc_mechctx);
/* decrypt bulk data if it's encrypted */
- if (grctx->src_reqbsd->bsd_priv_alg != BULK_PRIV_ALG_NULL) {
+ if (grctx->src_reqbsd->bsd_ciph_alg != BULK_CIPH_ALG_NULL) {
rc = do_bulk_privacy(grctx->src_ctx->gsc_mechctx, desc, 0,
- grctx->src_reqbsd->bsd_priv_alg,
+ grctx->src_reqbsd->bsd_ciph_alg,
grctx->src_reqbsd);
if (rc) {
CERROR("bulk write: server failed to decrypt data\n");
RETURN(rc);
/* encrypt bulk data if required */
- if (grctx->src_reqbsd->bsd_priv_alg != BULK_PRIV_ALG_NULL) {
+ if (grctx->src_reqbsd->bsd_ciph_alg != BULK_CIPH_ALG_NULL) {
rc = do_bulk_privacy(grctx->src_ctx->gsc_mechctx, desc, 1,
- grctx->src_reqbsd->bsd_priv_alg,
+ grctx->src_reqbsd->bsd_ciph_alg,
grctx->src_repbsd);
if (rc)
CERROR("bulk read: server failed to encrypt data: "
req->rq_phase = RQ_PHASE_RPC;
rc = ptl_send_rpc(req, 1);
- if (rc) {
- CWARN("ctx %p(%u->%s): rpc error %d, destroy locally\n",
- ctx, ctx->cc_vcred.vc_uid, sec2target_str(ctx->cc_sec),
- rc);
- }
+ if (rc)
+ CWARN("ctx %p(%u->%s): rpc error %d, destroy locally\n", ctx,
+ ctx->cc_vcred.vc_uid, sec2target_str(ctx->cc_sec), rc);
out_ref:
ptlrpc_req_finished(req);
{
struct hlist_node *pos, *next;
struct ptlrpc_cli_ctx *ctx;
+ struct gss_cli_ctx *gctx;
hlist_for_each_entry_safe(ctx, pos, next, freelist, cc_cache) {
hlist_del_init(&ctx->cc_cache);
+ /* reverse ctx: update current seq to buddy svcctx if it exists.
+ * ideally this should be done at gss_cli_ctx_finalize(), but
+ * the ctx destroy could be delayed by:
+ * 1) ctx still has reference;
+ * 2) ctx destroy is asynchronous;
+ * and the reverse import's call to inval_all_ctx() requires this be
+ * done _immediately_, otherwise a newly created reverse ctx might
+ * copy the very old sequence number from svcctx. */
+ gctx = ctx2gctx(ctx);
+ if (!rawobj_empty(&gctx->gc_svc_handle) &&
+ sec_is_reverse(gctx->gc_base.cc_sec)) {
+ gss_svc_upcall_update_sequence(&gctx->gc_svc_handle,
+ (__u32) atomic_read(&gctx->gc_seq));
+ }
+
/* we need to wakeup waiting reqs here. the context might
* be forced released before upcall finished, then the
* late-arrived downcall can't find the ctx even. */
LASSERT(atomic_read(&ctx->cc_refcount) > 0);
LASSERT(ctx->cc_sec);
- CWARN("ctx %p(%d)\n", ctx, atomic_read(&ctx->cc_refcount));
cli_ctx_expire(ctx);
kill_ctx_kr(ctx);
}
}
if (!rawobj_empty(&gctx->gc_svc_handle)) {
- /* forward ctx: mark buddy reverse svcctx soon-expire.
- * reverse ctx: update current seq to buddy svcctx. */
- if (!sec_is_reverse(gctx->gc_base.cc_sec))
+ /* forward ctx: mark buddy reverse svcctx soon-expire. */
+ if (!sec_is_reverse(gctx->gc_base.cc_sec) &&
+ !rawobj_empty(&gctx->gc_svc_handle))
gss_svc_upcall_expire_rvs_ctx(&gctx->gc_svc_handle);
- else
- gss_svc_upcall_update_sequence(&gctx->gc_svc_handle,
- (__u32) atomic_read(&gctx->gc_seq));
rawobj_free(&gctx->gc_svc_handle);
}
errhdr = (struct gss_err_header *) ghdr;
+ CWARN("req x"LPU64"/t"LPU64", ctx %p idx "LPX64"(%u->%s): "
+ "%sserver respond (%08x/%08x)\n",
+ req->rq_xid, req->rq_transno, ctx,
+ gss_handle_to_u64(&ctx2gctx(ctx)->gc_handle),
+ ctx->cc_vcred.vc_uid, sec2target_str(ctx->cc_sec),
+ sec_is_reverse(ctx->cc_sec) ? "reverse" : "",
+ errhdr->gh_major, errhdr->gh_minor);
+
+ /* context fini rpc, let it fail */
+ if (req->rq_ctx_fini) {
+ CWARN("context fini rpc failed\n");
+ return -EINVAL;
+ }
+
+ /* reverse sec, just return error, don't expire this ctx because it's
+ * crucial to callback rpcs. note that if the callback rpc fails
+ * because of a bit flip during network transfer, the client will be
+ * evicted directly. to be more graceful we probably want to let it
+ * retry a number of times. */
+ if (sec_is_reverse(ctx->cc_sec))
+ return -EINVAL;
+
+ if (errhdr->gh_major != GSS_S_NO_CONTEXT &&
+ errhdr->gh_major != GSS_S_BAD_SIG)
+ return -EACCES;
+
/* a server return of NO_CONTEXT might be caused by context expiry
- * or server reboot/failover. we refresh the cred transparently
- * to upper layer.
+ * or server reboot/failover. we try to refresh a new ctx, which
+ * is transparent to the upper layer.
+ *
* In some cases it is possible for our gss handle to be incidentally
* identical to another handle, since the handle itself is not
* fully random. In the krb5 case, GSS_S_BAD_SIG will be
* returned, or maybe another gss error for another mechanism.
*
* if we add a new mechanism, make sure the correct errors are
- * returned in this case.
- *
- * but in any cases, don't resend ctx destroying rpc, don't resend
- * reverse rpc. */
- if (req->rq_ctx_fini) {
- CWARN("server respond error (%08x/%08x) for ctx fini\n",
- errhdr->gh_major, errhdr->gh_minor);
- rc = -EINVAL;
- } else if (sec_is_reverse(ctx->cc_sec)) {
- CWARN("reverse server respond error (%08x/%08x)\n",
- errhdr->gh_major, errhdr->gh_minor);
- sptlrpc_cli_ctx_expire(ctx);
- rc = -EINVAL;
- } else if (errhdr->gh_major == GSS_S_NO_CONTEXT ||
- errhdr->gh_major == GSS_S_BAD_SIG) {
- CWARN("req x"LPU64"/t"LPU64": server respond ctx %p(%u->%s) "
- "%s, server might lost the context.\n",
- req->rq_xid, req->rq_transno, ctx, ctx->cc_vcred.vc_uid,
- sec2target_str(ctx->cc_sec),
- errhdr->gh_major == GSS_S_NO_CONTEXT ?
- "NO_CONTEXT" : "BAD_SIG");
-
- sptlrpc_cli_ctx_expire(ctx);
-
- /* we need replace the ctx right here, otherwise during
- * resent we'll hit the logic in sptlrpc_req_refresh_ctx()
- * which keep the ctx with RESEND flag, thus we'll never
- * get rid of this ctx. */
- rc = sptlrpc_req_replace_dead_ctx(req);
- if (rc == 0)
- req->rq_resend = 1;
- } else {
- CERROR("req %p: server report gss error (%x/%x)\n",
- req, errhdr->gh_major, errhdr->gh_minor);
- rc = -EACCES;
- }
+ * returned in this case. */
+ CWARN("%s: server might lost the context, retrying\n",
+ errhdr->gh_major == GSS_S_NO_CONTEXT ? "NO_CONTEXT" : "BAD_SIG");
+
+ sptlrpc_cli_ctx_expire(ctx);
+
+ /* we need to replace the ctx right here, otherwise during
+ * resend we'll hit the logic in sptlrpc_req_refresh_ctx()
+ * which keeps the ctx with the RESEND flag, thus we'll never
+ * get rid of this ctx. */
+ rc = sptlrpc_req_replace_dead_ctx(req);
+ if (rc == 0)
+ req->rq_resend = 1;
return rc;
}
req->rq_replen = msg->lm_buflens[1];
if (req->rq_pack_bulk) {
- if (msg->lm_bufcount < 4) {
+ /* FIXME */
+ /* bulk checksum is right after the lustre msg */
+ if (msg->lm_bufcount < 3) {
CERROR("Invalid reply bufcount %u\n",
msg->lm_bufcount);
RETURN(-EPROTO);
}
- /* bulk checksum is the second last segment */
- rc = bulk_sec_desc_unpack(msg, msg->lm_bufcount - 2);
+ rc = bulk_sec_desc_unpack(msg, 2);
}
break;
case PTLRPC_GSS_PROC_ERR:
sec->ps_gc_interval = 0;
}
- if (sec->ps_flvr.sf_bulk_priv != BULK_PRIV_ALG_NULL &&
+ if (sec->ps_flvr.sf_bulk_ciph != BULK_CIPH_ALG_NULL &&
sec->ps_flvr.sf_flags & PTLRPC_SEC_FL_BULK)
sptlrpc_enc_pool_add_user();
class_import_put(sec->ps_import);
- if (sec->ps_flvr.sf_bulk_priv != BULK_PRIV_ALG_NULL &&
+ if (sec->ps_flvr.sf_bulk_ciph != BULK_CIPH_ALG_NULL &&
sec->ps_flvr.sf_flags & PTLRPC_SEC_FL_BULK)
sptlrpc_enc_pool_del_user();
if (req->rq_pack_bulk) {
buflens[bufcnt] = bulk_sec_desc_size(
- req->rq_flvr.sf_bulk_csum, 1,
+ req->rq_flvr.sf_bulk_hash, 1,
req->rq_bulk_read);
if (svc == SPTLRPC_SVC_INTG)
txtsize += buflens[bufcnt];
ibuflens[ibufcnt++] = sptlrpc_current_user_desc_size();
if (req->rq_pack_bulk)
ibuflens[ibufcnt++] = bulk_sec_desc_size(
- req->rq_flvr.sf_bulk_csum, 1,
+ req->rq_flvr.sf_bulk_hash, 1,
req->rq_bulk_read);
clearsize = lustre_msg_size_v2(ibufcnt, ibuflens);
if (req->rq_pack_bulk) {
buflens[bufcnt] = bulk_sec_desc_size(
- req->rq_flvr.sf_bulk_csum, 0,
+ req->rq_flvr.sf_bulk_hash, 0,
req->rq_bulk_read);
if (svc == SPTLRPC_SVC_INTG)
txtsize += buflens[bufcnt];
if (req->rq_pack_bulk) {
buflens[bufcnt++] = bulk_sec_desc_size(
- req->rq_flvr.sf_bulk_csum, 0,
+ req->rq_flvr.sf_bulk_hash, 0,
req->rq_bulk_read);
}
txtsize = lustre_msg_size_v2(bufcnt, buflens);
if (rc == 0)
RETURN(SECSVC_OK);
- CERROR("svc %u failed: major 0x%08x: ctx %p(%u->%s)\n",
- gw->gw_svc, major, grctx->src_ctx, grctx->src_ctx->gsc_uid,
- libcfs_nid2str(req->rq_peer.nid));
+ CERROR("svc %u failed: major 0x%08x: req xid "LPU64" ctx %p idx "
+ LPX64"(%u->%s)\n", gw->gw_svc, major, req->rq_xid,
+ grctx->src_ctx, gss_handle_to_u64(&gw->gw_handle),
+ grctx->src_ctx->gsc_uid, libcfs_nid2str(req->rq_peer.nid));
error:
/* we only notify client in case of NO_CONTEXT/BAD_SIG, which
* might happen after server reboot, to allow recovery. */
bsd_off = ibufcnt;
ibuflens[ibufcnt++] = bulk_sec_desc_size(
- grctx->src_reqbsd->bsd_csum_alg,
+ grctx->src_reqbsd->bsd_hash_alg,
0, req->rq_bulk_read);
}
bsd_off = bufcnt;
buflens[bufcnt] = bulk_sec_desc_size(
- grctx->src_reqbsd->bsd_csum_alg,
+ grctx->src_reqbsd->bsd_hash_alg,
0, req->rq_bulk_read);
if (svc == SPTLRPC_SVC_INTG)
txtsize += buflens[bufcnt];
{
char *bulk;
- if (sf->sf_bulk_priv != BULK_PRIV_ALG_NULL)
+ if (sf->sf_bulk_ciph != BULK_CIPH_ALG_NULL)
bulk = "bulkp";
- else if (sf->sf_bulk_csum != BULK_CSUM_ALG_NULL)
+ else if (sf->sf_bulk_hash != BULK_HASH_ALG_NULL)
bulk = "bulki";
else
bulk = "bulkn";
snprintf(buf, bufsize, "%s-%s:%s/%s",
sptlrpc_rpcflavor2name(sf->sf_rpc), bulk,
- sptlrpc_bulk_csum_alg2name(sf->sf_bulk_csum),
- sptlrpc_bulk_priv_alg2name(sf->sf_bulk_priv));
+ sptlrpc_get_hash_name(sf->sf_bulk_hash),
+ sptlrpc_get_ciph_name(sf->sf_bulk_ciph));
return 0;
}
EXPORT_SYMBOL(sptlrpc_flavor2name);
/* bulk security flag */
if ((req->rq_bulk_read || req->rq_bulk_write) &&
- (req->rq_flvr.sf_bulk_priv != BULK_PRIV_ALG_NULL ||
- req->rq_flvr.sf_bulk_csum != BULK_CSUM_ALG_NULL))
+ (req->rq_flvr.sf_bulk_ciph != BULK_CIPH_ALG_NULL ||
+ req->rq_flvr.sf_bulk_hash != BULK_HASH_ALG_NULL))
req->rq_pack_bulk = 1;
}
struct ptlrpc_sec *sec,
struct sptlrpc_flavor *sf)
{
- if (sf->sf_bulk_priv != sec->ps_flvr.sf_bulk_priv ||
- sf->sf_bulk_csum != sec->ps_flvr.sf_bulk_csum) {
+ if (sf->sf_bulk_ciph != sec->ps_flvr.sf_bulk_ciph ||
+ sf->sf_bulk_hash != sec->ps_flvr.sf_bulk_hash) {
CWARN("imp %p (%s->%s): changing bulk flavor %s/%s -> %s/%s\n",
imp, imp->imp_obd->obd_name,
obd_uuid2str(&imp->imp_connection->c_remote_uuid),
- sptlrpc_bulk_priv_alg2name(sec->ps_flvr.sf_bulk_priv),
- sptlrpc_bulk_csum_alg2name(sec->ps_flvr.sf_bulk_csum),
- sptlrpc_bulk_priv_alg2name(sf->sf_bulk_priv),
- sptlrpc_bulk_csum_alg2name(sf->sf_bulk_csum));
+ sptlrpc_get_ciph_name(sec->ps_flvr.sf_bulk_ciph),
+ sptlrpc_get_hash_name(sec->ps_flvr.sf_bulk_hash),
+ sptlrpc_get_ciph_name(sf->sf_bulk_ciph),
+ sptlrpc_get_hash_name(sf->sf_bulk_hash));
spin_lock(&sec->ps_lock);
- sec->ps_flvr.sf_bulk_priv = sf->sf_bulk_priv;
- sec->ps_flvr.sf_bulk_csum = sf->sf_bulk_csum;
+ sec->ps_flvr.sf_bulk_ciph = sf->sf_bulk_ciph;
+ sec->ps_flvr.sf_bulk_hash = sf->sf_bulk_hash;
spin_unlock(&sec->ps_lock);
}
} else {
/* reverse import, determine flavor from incoming request */
sf.sf_rpc = rpc_flavor;
- sf.sf_bulk_priv = BULK_PRIV_ALG_NULL;
- sf.sf_bulk_csum = BULK_CSUM_ALG_NULL;
+ sf.sf_bulk_ciph = BULK_CIPH_ALG_NULL;
+ sf.sf_bulk_hash = BULK_HASH_ALG_NULL;
sf.sf_flags = PTLRPC_SEC_FL_REVERSE | PTLRPC_SEC_FL_ROOTONLY;
sp = sptlrpc_target_sec_part(imp->imp_obd);
svc_ctx == NULL ? "->" : "<-",
obd_uuid2str(&conn->c_remote_uuid),
sptlrpc_rpcflavor2name(sec->ps_flvr.sf_rpc),
- sptlrpc_bulk_csum_alg2name(sec->ps_flvr.sf_bulk_csum),
- sptlrpc_bulk_priv_alg2name(sec->ps_flvr.sf_bulk_priv),
+ sptlrpc_get_hash_name(sec->ps_flvr.sf_bulk_hash),
+ sptlrpc_get_ciph_name(sec->ps_flvr.sf_bulk_ciph),
sptlrpc_rpcflavor2name(sf.sf_rpc),
- sptlrpc_bulk_csum_alg2name(sf.sf_bulk_csum),
- sptlrpc_bulk_priv_alg2name(sf.sf_bulk_priv));
+ sptlrpc_get_hash_name(sf.sf_bulk_hash),
+ sptlrpc_get_ciph_name(sf.sf_bulk_ciph));
} else {
CWARN("%simport %p (%s%s%s) netid %x: "
"select initial flavor (%s, %s/%s)\n",
obd_uuid2str(&conn->c_remote_uuid),
LNET_NIDNET(conn->c_self),
sptlrpc_rpcflavor2name(sf.sf_rpc),
- sptlrpc_bulk_csum_alg2name(sf.sf_bulk_csum),
- sptlrpc_bulk_priv_alg2name(sf.sf_bulk_priv));
+ sptlrpc_get_hash_name(sf.sf_bulk_hash),
+ sptlrpc_get_ciph_name(sf.sf_bulk_ciph));
}
mutex_down(&imp->imp_sec_mutex);
return 0;
}
+ /* if the flavor just changed, we should not proceed; just leave
+ * it alone, the current flavor will be discovered and replaced
+ * shortly, and let _this_ rpc pass through */
+ if (exp->exp_flvr_changed) {
+ LASSERT(exp->exp_flvr_adapt);
+ spin_unlock(&exp->exp_lock);
+ return 0;
+ }
+
if (exp->exp_flvr_adapt) {
exp->exp_flvr_adapt = 0;
CDEBUG(D_SEC, "exp %p (%x|%x|%x): do delayed adapt\n",
#ifndef __KERNEL__
#include <liblustre.h>
#include <libcfs/list.h>
+#include <zlib.h>
#else
#include <linux/crypto.h>
+#include <linux/zutil.h>
#endif
#include <obd.h>
* implement checksum functionality *
****************************************/
-static struct {
- char *name;
- int size;
-} csum_types[] = {
- [BULK_CSUM_ALG_NULL] = { "null", 0 },
- [BULK_CSUM_ALG_CRC32] = { "crc32", 4 },
- [BULK_CSUM_ALG_MD5] = { "md5", 16 },
- [BULK_CSUM_ALG_SHA1] = { "sha1", 20 },
- [BULK_CSUM_ALG_SHA256] = { "sha256", 32 },
- [BULK_CSUM_ALG_SHA384] = { "sha384", 48 },
- [BULK_CSUM_ALG_SHA512] = { "sha512", 64 },
+static struct sptlrpc_hash_type hash_types[] = {
+ [BULK_HASH_ALG_NULL] = { "null", "null", 0 },
+ [BULK_HASH_ALG_ADLER32] = { "adler32", "adler32", 4 },
+ [BULK_HASH_ALG_CRC32] = { "crc32", "crc32", 4 },
+ [BULK_HASH_ALG_MD5] = { "md5", "md5", 16 },
+ [BULK_HASH_ALG_SHA1] = { "sha1", "sha1", 20 },
+ [BULK_HASH_ALG_SHA256] = { "sha256", "sha256", 32 },
+ [BULK_HASH_ALG_SHA384] = { "sha384", "sha384", 48 },
+ [BULK_HASH_ALG_SHA512] = { "sha512", "sha512", 64 },
+ [BULK_HASH_ALG_WP256] = { "wp256", "wp256", 32 },
+ [BULK_HASH_ALG_WP384] = { "wp384", "wp384", 48 },
+ [BULK_HASH_ALG_WP512] = { "wp512", "wp512", 64 },
};
-const char * sptlrpc_bulk_csum_alg2name(__u8 csum_alg)
+const struct sptlrpc_hash_type *sptlrpc_get_hash_type(__u8 hash_alg)
{
- if (csum_alg < BULK_CSUM_ALG_MAX)
- return csum_types[csum_alg].name;
- return "unknown";
+ struct sptlrpc_hash_type *ht;
+
+ if (hash_alg < BULK_HASH_ALG_MAX) {
+ ht = &hash_types[hash_alg];
+ if (ht->sht_tfm_name)
+ return ht;
+ }
+ return NULL;
+}
+EXPORT_SYMBOL(sptlrpc_get_hash_type);
+
+const char * sptlrpc_get_hash_name(__u8 hash_alg)
+{
+ const struct sptlrpc_hash_type *ht;
+
+ ht = sptlrpc_get_hash_type(hash_alg);
+ if (ht)
+ return ht->sht_name;
+ else
+ return "unknown";
}
-EXPORT_SYMBOL(sptlrpc_bulk_csum_alg2name);
+EXPORT_SYMBOL(sptlrpc_get_hash_name);
-int bulk_sec_desc_size(__u8 csum_alg, int request, int read)
+int bulk_sec_desc_size(__u8 hash_alg, int request, int read)
{
int size = sizeof(struct ptlrpc_bulk_sec_desc);
- LASSERT(csum_alg < BULK_CSUM_ALG_MAX);
+ LASSERT(hash_alg < BULK_HASH_ALG_MAX);
/* read requests don't need extra data */
if (!(read && request))
- size += csum_types[csum_alg].size;
+ size += hash_types[hash_alg].sht_size;
return size;
}
return -EINVAL;
}
- if (lustre_msg_swabbed(msg)) {
- __swab32s(&bsd->bsd_version);
- __swab16s(&bsd->bsd_pad);
- }
+ /* nothing to swab */
- if (bsd->bsd_version != 0) {
+ if (unlikely(bsd->bsd_version != 0)) {
CERROR("Unexpected version %u\n", bsd->bsd_version);
return -EPROTO;
}
- if (bsd->bsd_csum_alg >= BULK_CSUM_ALG_MAX) {
+ if (unlikely(bsd->bsd_flags != 0)) {
+ CERROR("Unexpected flags %x\n", bsd->bsd_flags);
+ return -EPROTO;
+ }
+
+ if (unlikely(!sptlrpc_get_hash_type(bsd->bsd_hash_alg))) {
CERROR("Unsupported checksum algorithm %u\n",
- bsd->bsd_csum_alg);
+ bsd->bsd_hash_alg);
return -EINVAL;
}
- if (bsd->bsd_priv_alg >= BULK_PRIV_ALG_MAX) {
+
+ if (unlikely(!sptlrpc_get_ciph_type(bsd->bsd_ciph_alg))) {
CERROR("Unsupported cipher algorithm %u\n",
- bsd->bsd_priv_alg);
+ bsd->bsd_ciph_alg);
return -EINVAL;
}
- if (size > sizeof(*bsd) &&
- size < sizeof(*bsd) + csum_types[bsd->bsd_csum_alg].size) {
+ if (unlikely(size > sizeof(*bsd)) &&
+ size < sizeof(*bsd) + hash_types[bsd->bsd_hash_alg].sht_size) {
CERROR("Mal-formed checksum data: csum alg %u, size %d\n",
- bsd->bsd_csum_alg, size);
+ bsd->bsd_hash_alg, size);
return -EINVAL;
}
EXPORT_SYMBOL(bulk_sec_desc_unpack);
#ifdef __KERNEL__
-static
-int do_bulk_checksum_crc32(struct ptlrpc_bulk_desc *desc, void *buf)
+
+static int do_bulk_checksum_adler32(struct ptlrpc_bulk_desc *desc, void *buf)
{
- struct page *page;
- int off;
- char *ptr;
- __u32 crc32 = ~0;
- int len, i;
+ struct page *page;
+ int off;
+ char *ptr;
+ __u32 adler32 = 1;
+ int len, i;
+
+ for (i = 0; i < desc->bd_iov_count; i++) {
+ page = desc->bd_iov[i].kiov_page;
+ off = desc->bd_iov[i].kiov_offset & ~CFS_PAGE_MASK;
+ ptr = cfs_kmap(page) + off;
+ len = desc->bd_iov[i].kiov_len;
+
+ adler32 = zlib_adler32(adler32, ptr, len);
+
+ cfs_kunmap(page);
+ }
+
+ adler32 = cpu_to_le32(adler32);
+ memcpy(buf, &adler32, sizeof(adler32));
+ return 0;
+}
+
+static int do_bulk_checksum_crc32(struct ptlrpc_bulk_desc *desc, void *buf)
+{
+ struct page *page;
+ int off;
+ char *ptr;
+ __u32 crc32 = ~0;
+ int len, i;
for (i = 0; i < desc->bd_iov_count; i++) {
page = desc->bd_iov[i].kiov_page;
cfs_kunmap(page);
}
- *((__u32 *) buf) = crc32;
+ crc32 = cpu_to_le32(crc32);
+ memcpy(buf, &crc32, sizeof(crc32));
return 0;
}
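
The switch above from a raw __u32 store to cpu_to_le32() plus memcpy is worth a note (a hedged standalone illustration using a stand-in for the kernel helper): memcpy tolerates an unaligned destination, and fixing the byte order makes the on-wire checksum identical between big- and little-endian peers:

    #include <stdio.h>
    #include <string.h>
    #include <stdint.h>

    /* stand-in for the kernel's cpu_to_le32() in this sketch */
    static uint32_t to_le32(uint32_t v)
    {
            unsigned char b[4] = { v & 0xff, (v >> 8) & 0xff,
                                   (v >> 16) & 0xff, (v >> 24) & 0xff };
            uint32_t out;

            memcpy(&out, b, sizeof(b));
            return out;
    }

    int main(void)
    {
            unsigned char wire[4];
            uint32_t csum = 0x11223344;
            uint32_t le = to_le32(csum);

            /* memcpy is safe even if `wire` were unaligned; the bytes
             * are 44 33 22 11 regardless of host endianness */
            memcpy(wire, &le, sizeof(le));
            printf("%02x %02x %02x %02x\n",
                   wire[0], wire[1], wire[2], wire[3]);
            return 0;
    }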
-static
-int do_bulk_checksum(struct ptlrpc_bulk_desc *desc, __u32 alg, void *buf)
+static int do_bulk_checksum(struct ptlrpc_bulk_desc *desc, __u32 alg, void *buf)
{
struct crypto_tfm *tfm;
struct scatterlist *sl;
int i, rc = 0;
- LASSERT(alg > BULK_CSUM_ALG_NULL &&
- alg < BULK_CSUM_ALG_MAX);
+ LASSERT(alg > BULK_HASH_ALG_NULL &&
+ alg < BULK_HASH_ALG_MAX);
- if (alg == BULK_CSUM_ALG_CRC32)
+ if (alg == BULK_HASH_ALG_ADLER32)
+ return do_bulk_checksum_adler32(desc, buf);
+ if (alg == BULK_HASH_ALG_CRC32)
return do_bulk_checksum_crc32(desc, buf);
- tfm = crypto_alloc_tfm(csum_types[alg].name, 0);
+ tfm = crypto_alloc_tfm(hash_types[alg].sht_tfm_name, 0);
if (tfm == NULL) {
- CERROR("Unable to allocate tfm %s\n", csum_types[alg].name);
+ CERROR("Unable to allocate TFM %s\n", hash_types[alg].sht_name);
return -ENOMEM;
}
crypto_free_tfm(tfm);
return rc;
}
-
+
#else /* !__KERNEL__ */
-static
-int do_bulk_checksum(struct ptlrpc_bulk_desc *desc, __u32 alg, void *buf)
+
+static int do_bulk_checksum(struct ptlrpc_bulk_desc *desc, __u32 alg, void *buf)
{
- __u32 crc32 = ~0;
- int i;
+ __u32 csum32 = ~0;
+ int i;
+
+ LASSERT(alg == BULK_HASH_ALG_ADLER32 || alg == BULK_HASH_ALG_CRC32);
- LASSERT(alg == BULK_CSUM_ALG_CRC32);
+ if (alg == BULK_HASH_ALG_ADLER32)
+ csum32 = 1;
+ else
+ csum32 = ~0;
for (i = 0; i < desc->bd_iov_count; i++) {
char *ptr = desc->bd_iov[i].iov_base;
int len = desc->bd_iov[i].iov_len;
- crc32 = crc32_le(crc32, ptr, len);
+ if (alg == BULK_HASH_ALG_ADLER32)
+ csum32 = zlib_adler32(csum32, ptr, len);
+ else
+ csum32 = crc32_le(csum32, ptr, len);
}
- *((__u32 *) buf) = crc32;
+ *((__u32 *) buf) = csum32;
return 0;
}
+
#endif
/*
* perform algorithm @alg checksum on @desc, store result in @buf.
- * if anything goes wrong, leave 'alg' be BULK_CSUM_ALG_NULL.
+ * if anything goes wrong, leave 'alg' as BULK_HASH_ALG_NULL.
*/
static
int generate_bulk_csum(struct ptlrpc_bulk_desc *desc, __u32 alg,
int rc;
LASSERT(bsd);
- LASSERT(alg < BULK_CSUM_ALG_MAX);
+ LASSERT(alg < BULK_HASH_ALG_MAX);
- bsd->bsd_csum_alg = BULK_CSUM_ALG_NULL;
+ bsd->bsd_hash_alg = BULK_HASH_ALG_NULL;
- if (alg == BULK_CSUM_ALG_NULL)
+ if (alg == BULK_HASH_ALG_NULL)
return 0;
- LASSERT(bsdsize >= sizeof(*bsd) + csum_types[alg].size);
+ LASSERT(bsdsize >= sizeof(*bsd) + hash_types[alg].sht_size);
rc = do_bulk_checksum(desc, alg, bsd->bsd_csum);
if (rc == 0)
- bsd->bsd_csum_alg = alg;
+ bsd->bsd_hash_alg = alg;
return rc;
}
int csum_size, rc = 0;
LASSERT(bsdv);
- LASSERT(bsdv->bsd_csum_alg < BULK_CSUM_ALG_MAX);
+ LASSERT(bsdv->bsd_hash_alg < BULK_HASH_ALG_MAX);
if (bsdr)
- bsdr->bsd_csum_alg = BULK_CSUM_ALG_NULL;
+ bsdr->bsd_hash_alg = BULK_HASH_ALG_NULL;
- if (bsdv->bsd_csum_alg == BULK_CSUM_ALG_NULL)
+ if (bsdv->bsd_hash_alg == BULK_HASH_ALG_NULL)
return 0;
/* for all supported algorithms */
- csum_size = csum_types[bsdv->bsd_csum_alg].size;
+ csum_size = hash_types[bsdv->bsd_hash_alg].sht_size;
if (bsdvsize < sizeof(*bsdv) + csum_size) {
CERROR("verifier size %d too small, require %d\n",
csum_p = buf;
}
- rc = do_bulk_checksum(desc, bsdv->bsd_csum_alg, csum_p);
+ rc = do_bulk_checksum(desc, bsdv->bsd_hash_alg, csum_p);
if (memcmp(bsdv->bsd_csum, csum_p, csum_size)) {
CERROR("BAD %s CHECKSUM (%s), data mutated during "
"transfer!\n", read ? "READ" : "WRITE",
- csum_types[bsdv->bsd_csum_alg].name);
+ hash_types[bsdv->bsd_hash_alg].sht_name);
rc = -EINVAL;
} else {
CDEBUG(D_SEC, "bulk %s checksum (%s) verified\n",
read ? "read" : "write",
- csum_types[bsdv->bsd_csum_alg].name);
+ hash_types[bsdv->bsd_hash_alg].sht_name);
}
if (bsdr) {
- bsdr->bsd_csum_alg = bsdv->bsd_csum_alg;
+ bsdr->bsd_hash_alg = bsdv->bsd_hash_alg;
memcpy(bsdr->bsd_csum, csum_p, csum_size);
} else {
LASSERT(buf);
LASSERT(bsdr);
LASSERT(rsize >= sizeof(*bsdr));
- LASSERT(alg < BULK_CSUM_ALG_MAX);
+ LASSERT(alg < BULK_HASH_ALG_MAX);
if (read) {
- bsdr->bsd_csum_alg = alg;
+ bsdr->bsd_hash_alg = alg;
} else {
rc = generate_bulk_csum(desc, alg, bsdr, rsize);
if (rc)
/* For sending we only compute the wrong checksum instead
* of corrupting the data so it is still correct on a redo */
if (rc == 0 && OBD_FAIL_CHECK(OBD_FAIL_OSC_CHECKSUM_SEND) &&
- bsdr->bsd_csum_alg != BULK_CSUM_ALG_NULL)
+ bsdr->bsd_hash_alg != BULK_HASH_ALG_NULL)
bsdr->bsd_csum[0] ^= 0x1;
}
LASSERT(rsize >= sizeof(*bsdr));
LASSERT(vsize >= sizeof(*bsdv));
- if (bsdr->bsd_csum_alg != bsdv->bsd_csum_alg) {
+ if (bsdr->bsd_hash_alg != bsdv->bsd_hash_alg) {
CERROR("bulk %s: checksum algorithm mismatch: client request "
"%s but server reply with %s. try to use the new one "
"for checksum verification\n",
read ? "read" : "write",
- csum_types[bsdr->bsd_csum_alg].name,
- csum_types[bsdv->bsd_csum_alg].name);
+ hash_types[bsdr->bsd_hash_alg].sht_name,
+ hash_types[bsdv->bsd_hash_alg].sht_name);
}
if (read)
return verify_bulk_csum(desc, 1, bsdv, vsize, NULL, 0);
else {
char *cli, *srv, *new = NULL;
- int csum_size = csum_types[bsdr->bsd_csum_alg].size;
+ int csum_size = hash_types[bsdr->bsd_hash_alg].sht_size;
- LASSERT(bsdr->bsd_csum_alg < BULK_CSUM_ALG_MAX);
- if (bsdr->bsd_csum_alg == BULK_CSUM_ALG_NULL)
+ LASSERT(bsdr->bsd_hash_alg < BULK_HASH_ALG_MAX);
+ if (bsdr->bsd_hash_alg == BULK_HASH_ALG_NULL)
return 0;
if (vsize < sizeof(*bsdv) + csum_size) {
if (!memcmp(cli, srv, csum_size)) {
/* checksum confirmed */
CDEBUG(D_SEC, "bulk write checksum (%s) confirmed\n",
- csum_types[bsdr->bsd_csum_alg].name);
+ hash_types[bsdr->bsd_hash_alg].sht_name);
return 0;
}
if (new == NULL)
return -ENOMEM;
- do_bulk_checksum(desc, bsdr->bsd_csum_alg, new);
+ do_bulk_checksum(desc, bsdr->bsd_hash_alg, new);
if (!memcmp(new, srv, csum_size)) {
CERROR("BAD WRITE CHECKSUM (%s): pages were mutated "
"on the client after we checksummed them\n",
- csum_types[bsdr->bsd_csum_alg].name);
+ hash_types[bsdr->bsd_hash_alg].sht_name);
} else if (!memcmp(new, cli, csum_size)) {
CERROR("BAD WRITE CHECKSUM (%s): pages were mutated "
"in transit\n",
- csum_types[bsdr->bsd_csum_alg].name);
+ hash_types[bsdr->bsd_hash_alg].sht_name);
} else {
CERROR("BAD WRITE CHECKSUM (%s): pages were mutated "
"in transit, and the current page contents "
"don't match the originals and what the server "
"received\n",
- csum_types[bsdr->bsd_csum_alg].name);
+ hash_types[bsdr->bsd_hash_alg].sht_name);
}
OBD_FREE(new, csum_size);
LASSERT(bsdv && bsdr);
if (read) {
- rc = generate_bulk_csum(desc, bsdv->bsd_csum_alg, bsdr, rsize);
+ rc = generate_bulk_csum(desc, bsdv->bsd_hash_alg, bsdr, rsize);
if (rc)
CERROR("bulk read: server failed to generate %s "
"checksum: %d\n",
- csum_types[bsdv->bsd_csum_alg].name, rc);
+ hash_types[bsdv->bsd_hash_alg].sht_name, rc);
/* corrupt the data after we compute the checksum, to
* simulate an OST->client data error */
* implement encryption functionality *
****************************************/
-/*
- * NOTE: These algorithms must be stream cipher!
- */
-static struct {
- char *name;
- __u32 flags;
-} priv_types[] = {
- [BULK_PRIV_ALG_NULL] = { "null", 0 },
- [BULK_PRIV_ALG_ARC4] = { "arc4", 0 },
+/* FIXME */
+#ifndef __KERNEL__
+#define CRYPTO_TFM_MODE_ECB (0)
+#define CRYPTO_TFM_MODE_CBC (1)
+#endif
+
+static struct sptlrpc_ciph_type cipher_types[] = {
+ [BULK_CIPH_ALG_NULL] = {
+ "null", "null", 0, 0, 0
+ },
+ [BULK_CIPH_ALG_ARC4] = {
+ "arc4", "arc4", CRYPTO_TFM_MODE_ECB, 0, 16
+ },
+ [BULK_CIPH_ALG_AES128] = {
+ "aes128", "aes", CRYPTO_TFM_MODE_CBC, 16, 16
+ },
+ [BULK_CIPH_ALG_AES192] = {
+ "aes192", "aes", CRYPTO_TFM_MODE_CBC, 16, 24
+ },
+ [BULK_CIPH_ALG_AES256] = {
+ "aes256", "aes", CRYPTO_TFM_MODE_CBC, 16, 32
+ },
+ [BULK_CIPH_ALG_CAST128] = {
+ "cast128", "cast5", CRYPTO_TFM_MODE_CBC, 8, 16
+ },
+ [BULK_CIPH_ALG_CAST256] = {
+ "cast256", "cast6", CRYPTO_TFM_MODE_CBC, 16, 32
+ },
+ [BULK_CIPH_ALG_TWOFISH128] = {
+ "twofish128", "twofish", CRYPTO_TFM_MODE_CBC, 16, 16
+ },
+ [BULK_CIPH_ALG_TWOFISH256] = {
+ "twofish256", "twofish", CRYPTO_TFM_MODE_CBC, 16, 32
+ },
};
-const char * sptlrpc_bulk_priv_alg2name(__u8 priv_alg)
+const struct sptlrpc_ciph_type *sptlrpc_get_ciph_type(__u8 ciph_alg)
{
- if (priv_alg < BULK_PRIV_ALG_MAX)
- return priv_types[priv_alg].name;
- return "unknown";
+ struct sptlrpc_ciph_type *ct;
+
+ if (ciph_alg < BULK_CIPH_ALG_MAX) {
+ ct = &cipher_types[ciph_alg];
+ if (ct->sct_tfm_name)
+ return ct;
+ }
+ return NULL;
}
-EXPORT_SYMBOL(sptlrpc_bulk_priv_alg2name);
+EXPORT_SYMBOL(sptlrpc_get_ciph_type);
-__u32 sptlrpc_bulk_priv_alg2flags(__u8 priv_alg)
+const char *sptlrpc_get_ciph_name(__u8 ciph_alg)
{
- if (priv_alg < BULK_PRIV_ALG_MAX)
- return priv_types[priv_alg].flags;
- return 0;
+ const struct sptlrpc_ciph_type *ct;
+
+ ct = sptlrpc_get_ciph_type(ciph_alg);
+ if (ct)
+ return ct->sct_name;
+ else
+ return "unknown";
}
-EXPORT_SYMBOL(sptlrpc_bulk_priv_alg2flags);
+EXPORT_SYMBOL(sptlrpc_get_ciph_name);
static void get_default_flavor(struct sptlrpc_flavor *sf)
{
sf->sf_rpc = SPTLRPC_FLVR_NULL;
- sf->sf_bulk_priv = BULK_PRIV_ALG_NULL;
- sf->sf_bulk_csum = BULK_CSUM_ALG_NULL;
+ sf->sf_bulk_ciph = BULK_CIPH_ALG_NULL;
+ sf->sf_bulk_hash = BULK_HASH_ALG_NULL;
sf->sf_flags = 0;
}
switch (rpc_flavor) {
case SPTLRPC_FLVR_NULL:
+ break;
case SPTLRPC_FLVR_PLAIN:
case SPTLRPC_FLVR_KRB5N:
case SPTLRPC_FLVR_KRB5A:
+ rule->sr_flvr.sf_bulk_hash = BULK_HASH_ALG_ADLER32;
break;
case SPTLRPC_FLVR_KRB5P:
- rule->sr_flvr.sf_bulk_priv = BULK_PRIV_ALG_ARC4;
+ rule->sr_flvr.sf_bulk_ciph = BULK_CIPH_ALG_AES128;
/* fall through */
case SPTLRPC_FLVR_KRB5I:
- rule->sr_flvr.sf_bulk_csum = BULK_CSUM_ALG_SHA1;
+ rule->sr_flvr.sf_bulk_hash = BULK_HASH_ALG_SHA1;
break;
default:
LBUG();
}
}
-static void get_flavor_by_bulk(struct sptlrpc_rule *rule, bulk_type_t bulk_type)
+static void get_flavor_by_bulk(struct sptlrpc_rule *rule,
+ __u16 rpc_flavor, bulk_type_t bulk_type)
{
switch (bulk_type) {
case BULK_TYPE_N:
- rule->sr_flvr.sf_bulk_csum = BULK_CSUM_ALG_NULL;
- rule->sr_flvr.sf_bulk_priv = BULK_PRIV_ALG_NULL;
+ rule->sr_flvr.sf_bulk_hash = BULK_HASH_ALG_NULL;
+ rule->sr_flvr.sf_bulk_ciph = BULK_CIPH_ALG_NULL;
break;
case BULK_TYPE_I:
- rule->sr_flvr.sf_bulk_csum = BULK_CSUM_ALG_SHA1;
- rule->sr_flvr.sf_bulk_priv = BULK_PRIV_ALG_NULL;
+ switch (rpc_flavor) {
+ case SPTLRPC_FLVR_PLAIN:
+ case SPTLRPC_FLVR_KRB5N:
+ case SPTLRPC_FLVR_KRB5A:
+ rule->sr_flvr.sf_bulk_hash = BULK_HASH_ALG_ADLER32;
+ break;
+ case SPTLRPC_FLVR_KRB5I:
+ case SPTLRPC_FLVR_KRB5P:
+ rule->sr_flvr.sf_bulk_hash = BULK_HASH_ALG_SHA1;
+ break;
+ default:
+ LBUG();
+ }
+ rule->sr_flvr.sf_bulk_ciph = BULK_CIPH_ALG_NULL;
break;
case BULK_TYPE_P:
- rule->sr_flvr.sf_bulk_csum = BULK_CSUM_ALG_SHA1;
- rule->sr_flvr.sf_bulk_priv = BULK_PRIV_ALG_ARC4;
+ rule->sr_flvr.sf_bulk_hash = BULK_HASH_ALG_SHA1;
+ rule->sr_flvr.sf_bulk_ciph = BULK_CIPH_ALG_AES128;
break;
default:
LBUG();
/* verify bulk section */
if (strcmp(bulk, "bulkn") == 0) {
- rule->sr_flvr.sf_bulk_csum = BULK_CSUM_ALG_NULL;
- rule->sr_flvr.sf_bulk_priv = BULK_PRIV_ALG_NULL;
+ rule->sr_flvr.sf_bulk_hash = BULK_HASH_ALG_NULL;
+ rule->sr_flvr.sf_bulk_ciph = BULK_CIPH_ALG_NULL;
bulk_type = BULK_TYPE_N;
} else if (strcmp(bulk, "bulki") == 0)
bulk_type = BULK_TYPE_I;
if (__flavors[i] == SPTLRPC_FLVR_PLAIN && bulk_type == BULK_TYPE_P)
GOTO(invalid, -EINVAL);
- get_flavor_by_bulk(rule, bulk_type);
+ get_flavor_by_bulk(rule, __flavors[i], bulk_type);
if (alg == NULL)
goto out;
*enc++ = '\0';
/* checksum algorithm */
- for (i = 0; i < BULK_CSUM_ALG_MAX; i++) {
- if (strcmp(alg, sptlrpc_bulk_csum_alg2name(i)) == 0) {
- rule->sr_flvr.sf_bulk_csum = i;
+ for (i = 0; i < BULK_HASH_ALG_MAX; i++) {
+ if (strcmp(alg, sptlrpc_get_hash_name(i)) == 0) {
+ rule->sr_flvr.sf_bulk_hash = i;
break;
}
}
- if (i >= BULK_CSUM_ALG_MAX)
+ if (i >= BULK_HASH_ALG_MAX)
GOTO(invalid, -EINVAL);
/* privacy algorithm */
if (enc) {
- for (i = 0; i < BULK_PRIV_ALG_MAX; i++) {
- if (strcmp(enc, sptlrpc_bulk_priv_alg2name(i)) == 0) {
- rule->sr_flvr.sf_bulk_priv = i;
+ for (i = 0; i < BULK_CIPH_ALG_MAX; i++) {
+ if (strcmp(enc, sptlrpc_get_ciph_name(i)) == 0) {
+ rule->sr_flvr.sf_bulk_ciph = i;
break;
}
}
- if (i >= BULK_PRIV_ALG_MAX)
+ if (i >= BULK_CIPH_ALG_MAX)
GOTO(invalid, -EINVAL);
}
* bulk combination sanity checks
*/
if (bulk_type == BULK_TYPE_P &&
- rule->sr_flvr.sf_bulk_priv == BULK_PRIV_ALG_NULL)
+ rule->sr_flvr.sf_bulk_ciph == BULK_CIPH_ALG_NULL)
GOTO(invalid, -EINVAL);
if (bulk_type == BULK_TYPE_I &&
- (rule->sr_flvr.sf_bulk_csum == BULK_CSUM_ALG_NULL ||
- rule->sr_flvr.sf_bulk_priv != BULK_PRIV_ALG_NULL))
+ (rule->sr_flvr.sf_bulk_hash == BULK_HASH_ALG_NULL ||
+ rule->sr_flvr.sf_bulk_ciph != BULK_CIPH_ALG_NULL))
GOTO(invalid, -EINVAL);
if (bulk_type == BULK_TYPE_N &&
- (rule->sr_flvr.sf_bulk_csum != BULK_CSUM_ALG_NULL ||
- rule->sr_flvr.sf_bulk_priv != BULK_PRIV_ALG_NULL))
+ (rule->sr_flvr.sf_bulk_hash != BULK_HASH_ALG_NULL ||
+ rule->sr_flvr.sf_bulk_ciph != BULK_CIPH_ALG_NULL))
GOTO(invalid, -EINVAL);
out:
seq_printf(seq, "rpc flavor: %s\n",
sptlrpc_rpcflavor2name(sec->ps_flvr.sf_rpc));
seq_printf(seq, "bulk flavor: %s/%s\n",
- sptlrpc_bulk_csum_alg2name(sec->ps_flvr.sf_bulk_csum),
- sptlrpc_bulk_priv_alg2name(sec->ps_flvr.sf_bulk_priv));
+ sptlrpc_get_hash_name(sec->ps_flvr.sf_bulk_hash),
+ sptlrpc_get_ciph_name(sec->ps_flvr.sf_bulk_ciph));
seq_printf(seq, "flags: %s\n", flags_str);
seq_printf(seq, "id: %d\n", sec->ps_id);
seq_printf(seq, "refcount: %d\n", atomic_read(&sec->ps_refcount));
{
LASSERT(RPC_FLVR_POLICY(sf->sf_rpc) == SPTLRPC_POLICY_NULL);
- if (sf->sf_bulk_priv != BULK_PRIV_ALG_NULL ||
- sf->sf_bulk_csum != BULK_CSUM_ALG_NULL) {
+ if (sf->sf_bulk_ciph != BULK_CIPH_ALG_NULL ||
+ sf->sf_bulk_hash != BULK_HASH_ALG_NULL) {
CERROR("null sec don't support bulk algorithm: %u/%u\n",
- sf->sf_bulk_priv, sf->sf_bulk_csum);
+ sf->sf_bulk_ciph, sf->sf_bulk_hash);
return NULL;
}
null_sec.ps_id = -1;
null_sec.ps_import = NULL;
null_sec.ps_flvr.sf_rpc = SPTLRPC_FLVR_NULL;
- null_sec.ps_flvr.sf_bulk_priv = BULK_PRIV_ALG_NULL;
- null_sec.ps_flvr.sf_bulk_csum = BULK_CSUM_ALG_NULL;
+ null_sec.ps_flvr.sf_bulk_ciph = BULK_CIPH_ALG_NULL;
+ null_sec.ps_flvr.sf_bulk_hash = BULK_HASH_ALG_NULL;
null_sec.ps_flvr.sf_flags = 0;
null_sec.ps_part = LUSTRE_SP_ANY;
null_sec.ps_dying = 0;
int plain_ctx_verify(struct ptlrpc_cli_ctx *ctx, struct ptlrpc_request *req)
{
struct lustre_msg *msg = req->rq_repbuf;
- __u16 wflvr;
ENTRY;
if (msg->lm_bufcount != PLAIN_PACK_SEGMENTS) {
RETURN(-EPROTO);
}
- wflvr = WIRE_FLVR_RPC(msg->lm_secflvr);
-
/* expect no user desc in reply */
- if (PLAIN_WFLVR_HAS_USER(wflvr)) {
+ if (PLAIN_WFLVR_HAS_USER(msg->lm_secflvr)) {
CERROR("Unexpected udesc flag in reply\n");
RETURN(-EPROTO);
}
/* whether we sent with bulk or not, we expect the same in reply */
- if (!equi(req->rq_pack_bulk == 1, PLAIN_WFLVR_HAS_BULK(wflvr))) {
+ if (!equi(req->rq_pack_bulk == 1,
+ PLAIN_WFLVR_HAS_BULK(msg->lm_secflvr))) {
CERROR("%s bulk checksum in reply\n",
req->rq_pack_bulk ? "Missing" : "Unexpected");
RETURN(-EPROTO);
LASSERT(req->rq_reqbuf->lm_bufcount == PLAIN_PACK_SEGMENTS);
return bulk_csum_cli_request(desc, req->rq_bulk_read,
- req->rq_flvr.sf_bulk_csum,
+ req->rq_flvr.sf_bulk_hash,
req->rq_reqbuf,
PLAIN_PACK_BULK_OFF);
}
LASSERT(RPC_FLVR_POLICY(sf->sf_rpc) == SPTLRPC_POLICY_PLAIN);
- if (sf->sf_bulk_priv != BULK_PRIV_ALG_NULL) {
- CERROR("plain policy don't support bulk encryption: %u\n",
- sf->sf_bulk_priv);
+ if (sf->sf_bulk_ciph != BULK_CIPH_ALG_NULL) {
+ CERROR("plain policy don't support bulk cipher: %u\n",
+ sf->sf_bulk_ciph);
RETURN(NULL);
}
LASSERT(req->rq_bulk_read || req->rq_bulk_write);
buflens[PLAIN_PACK_BULK_OFF] = bulk_sec_desc_size(
- req->rq_flvr.sf_bulk_csum, 1,
+ req->rq_flvr.sf_bulk_hash, 1,
req->rq_bulk_read);
}
LASSERT(req->rq_bulk_read || req->rq_bulk_write);
buflens[PLAIN_PACK_BULK_OFF] = bulk_sec_desc_size(
- req->rq_flvr.sf_bulk_csum, 0,
+ req->rq_flvr.sf_bulk_hash, 0,
req->rq_bulk_read);
}
LASSERT(bsd);
buflens[PLAIN_PACK_BULK_OFF] = bulk_sec_desc_size(
- bsd->bsd_csum_alg, 0,
+ bsd->bsd_hash_alg, 0,
req->rq_bulk_read);
}
rs_size += lustre_msg_size_v2(PLAIN_PACK_SEGMENTS, buflens);
break;
case SECSVC_COMPLETE:
target_send_reply(request, 0, OBD_FAIL_MDS_ALL_REPLY_NET);
- goto put_conn;
+ goto out_stat;
case SECSVC_DROP:
goto out_req;
default:
lu_context_exit(&request->rq_session);
lu_context_fini(&request->rq_session);
-
+out_stat:
reply = request->rq_reply_state && request->rq_repmsg; /* bug 11169 */
do_gettimeofday(&work_end);
echo Client mount with a running ost
start_ost
+ if $GSS; then
+ # if gss is enabled, wait the full time to let the connection
+ # from mds to ost be established, due to the mismatch between
+ # the initial connect timeout and the gss context negotiation
+ # timeout. This perhaps could be removed after AT lands.
+ echo "sleep $((TIMEOUT + TIMEOUT + TIMEOUT))s"
+ sleep $((TIMEOUT + TIMEOUT + TIMEOUT))
+ fi
mount_client $MOUNT
check_mount || return 41
pass
ALWAYS_EXCEPT=${ALWAYS_EXCEPT:-"$SANITY_GSS_EXCEPT"}
# UPDATE THE COMMENT ABOVE WITH BUG NUMBERS WHEN CHANGING ALWAYS_EXCEPT!
-[ "$SLOW" = "no" ] && EXCEPT_SLOW="100 101"
-
# Tests that fail on uml
CPU=`awk '/model/ {print $4}' /proc/cpuinfo`
[ "$CPU" = "UML" ] && EXCEPT="$EXCEPT"
init_test_env $@
. ${CONFIG:=$LUSTRE/tests/cfg/$NAME.sh}
-if [ $UID -ne 0 ]; then
- echo "Warning: running as non-root uid $UID"
- RUNAS_ID="$UID"
- RUNAS=""
-else
- RUNAS_ID=${RUNAS_ID:-500}
- RUNAS=${RUNAS:-"runas -u $RUNAS_ID"}
+[ "$SLOW" = "no" ] && EXCEPT_SLOW="100 101"
- # $RUNAS_ID may get set incorrectly somewhere else
- [ $RUNAS_ID -eq 0 ] && error "\$RUNAS_ID set to 0, but \$UID is also 0!"
-fi
+# $RUNAS_ID may get set incorrectly somewhere else
+[ $UID -eq 0 -a $RUNAS_ID -eq 0 ] && error "\$RUNAS_ID set to 0, but \$UID is also 0!"
# remove $SEC, we'd like to control everything by ourselves
unset SEC
GSS=true
GSS_KRB5=true
+prepare_krb5_creds() {
+ echo prepare krb5 cred
+ rm -f $KRB5_CRED_SAVE
+ echo RUNAS=$RUNAS
+ $RUNAS krb5_login.sh || exit 1
+ [ -f $KRB5_CRED ] || exit 2
+ echo CRED=$KRB5_CRED
+ cp $KRB5_CRED $KRB5_CRED_SAVE
+}
+
+prepare_krb5_creds
+
# we want double mount
MOUNT_2=${MOUNT_2:-"yes"}
cleanup_and_setup_lustre
-rm -rf $DIR/${TESTSUITE}/[df][0-9]*
rm -rf $DIR/[df][0-9]*
check_runas_id $RUNAS_ID $RUNAS
build_test_filter
-prepare_krb5_creds() {
- rm -f $KRB5_CRED_SAVE
- $RUNAS krb5_login.sh || exit 1
- [ -f $KRB5_CRED ] || exit 2
- cp $KRB5_CRED $KRB5_CRED_SAVE
-}
-
combination()
{
local M=$1
count_flvr()
{
- output=$1
- flavor=$2
+ local output=$1
+ local flavor=$2
+ local count=0
+
+ rpc_flvr=`echo $flavor | awk -F - '{ print $1 }'`
+ bulkspec=`echo $flavor | awk -F - '{ print $2 }'`
+
+ count=`echo "$output" | grep "rpc flavor" | grep $rpc_flvr | wc -l`
+
+ if [ "x$bulkspec" != "x" ]; then
+ algs=`echo $bulkspec | awk -F : '{ print $2 }'`
+
+ if [ "x$algs" != "x" ]; then
+ bulk_count=`echo "$output" | grep "bulk flavor" | grep $algs | wc -l`
+ else
+ bulk=`echo $bulkspec | awk -F : '{ print $1 }'`
+ if [ $bulk == "bulkn" ]; then
+ bulk_count=`echo "$output" | grep "bulk flavor" \
+ | grep "null/null" | wc -l`
+ elif [ $bulk == "bulki" ]; then
+ bulk_count=`echo "$output" | grep "bulk flavor" \
+ | grep "/null" | grep -v "null/" | wc -l`
+ else
+ bulk_count=`echo "$output" | grep "bulk flavor" \
+ | grep -v "/null" | grep -v "null/" | wc -l`
+ fi
+ fi
+
+ [ $bulk_count -lt $count ] && count=$bulk_count
+ fi
- echo "$output" | grep rpc | grep $flavor | wc -l
+ echo $count
}
flvr_cnt_cli2mdt()
fi
}
-prepare_krb5_creds
calc_connection_cnt
umask 077
}
run_test 7 "exercise enlarge_reqbuf()"
+test_8() {
+ local sample=$TMP/sanity-gss-8
+ local tdir=$MOUNT/dir8
+ local iosize="256K"
+ local hash_algs="adler32 crc32 md5 sha1 sha256 sha384 sha512 wp256 wp384 wp512"
+
+ # create sample file with aligned size for direct i/o
+ dd if=/dev/zero of=$sample bs=$iosize count=1 || error
+ dd conv=notrunc if=/etc/termcap of=$sample bs=$iosize count=1 || error
+
+ rm -rf $tdir
+ mkdir $tdir || error "create dir $tdir"
+
+ restore_to_default_flavor
+
+ for alg in $hash_algs; do
+ echo "Testing $alg..."
+ flavor=krb5i-bulki:$alg/null
+ set_rule $FSNAME any cli2ost $flavor
+ wait_flavor cli2ost $flavor $cnt_cli2ost
+
+ dd if=$sample of=$tdir/$alg oflag=direct,dsync bs=$iosize || error "$alg write"
+ diff $sample $tdir/$alg || error "$alg read"
+ done
+
+ rm -rf $tdir
+ rm -f $sample
+}
+run_test 8 "verify bulk hash algorithms works"
+
+test_9() {
+ local s1=$TMP/sanity-gss-9.1
+ local s2=$TMP/sanity-gss-9.2
+ local s3=$TMP/sanity-gss-9.3
+ local s4=$TMP/sanity-gss-9.4
+ local tdir=$MOUNT/dir9
+ local s1_size=4194304 # n * pagesize (4M)
+ local s2_size=512 # n * blksize
+ local s3_size=111 # n * blksize + m
+ local s4_size=5 # m
+ local cipher_algs="arc4 aes128 aes192 aes256 cast128 cast256 twofish128 twofish256"
+
+ # create sample files for each situation
+ rm -f $s1 $s2 $s3 $s4
+ dd if=/dev/urandom of=$s1 bs=1M count=4 || error
+ dd if=/dev/urandom of=$s2 bs=$s2_size count=1 || error
+ dd if=/dev/urandom of=$s3 bs=$s3_size count=1 || error
+ dd if=/dev/urandom of=$s4 bs=$s4_size count=1 || error
+
+ rm -rf $tdir
+ mkdir $tdir || error "create dir $tdir"
+
+ restore_to_default_flavor
+
+ #
+ # different bulk data alignment will lead to different behavior of
+ # the implementation: (n > 0; 0 < m < encryption_block_size)
+ # - full page i/o
+ # - partial page, size = n * encryption_block_size
+ # - partial page, size = n * encryption_block_size + m
+ # - partial page, size = m
+ #
+ for alg in $cipher_algs; do
+ echo "Testing $alg..."
+ flavor=krb5p-bulkp:sha1/$alg
+ set_rule $FSNAME any cli2ost $flavor
+ wait_flavor cli2ost $flavor $cnt_cli2ost
+
+ # sync write
+ dd if=$s1 of=$tdir/$alg.1 oflag=dsync bs=1M || error "write $alg.1"
+ dd if=$s2 of=$tdir/$alg.2 oflag=dsync || error "write $alg.2"
+ dd if=$s3 of=$tdir/$alg.3 oflag=dsync || error "write $alg.3"
+ dd if=$s4 of=$tdir/$alg.4 oflag=dsync || error "write $alg.4"
+
+ # remount client
+ umount_client $MOUNT
+ umount_client $MOUNT2
+ mount_client $MOUNT
+ mount_client $MOUNT2
+
+ # read & compare
+ diff $tdir/$alg.1 $s1 || error "read $alg.1"
+ diff $tdir/$alg.2 $s2 || error "read $alg.2"
+ diff $tdir/$alg.3 $s3 || error "read $alg.3"
+ diff $tdir/$alg.4 $s4 || error "read $alg.4"
+ done
+
+ rm -rf $tdir
+ rm -f $s1 $s2 $s3 $s4
+}
+run_test 9 "bulk data alignment test under encryption mode"
+
test_90() {
if [ "$SLOW" = "no" ]; then
total=10
total=60
fi
+ restore_to_default_flavor
+ set_rule $FSNAME any any krb5p
+ wait_flavor all2all krb5p $cnt_all2all
+
start_dbench
for ((n=0;n<$total;n++)); do
[ "$SLOW" = "no" ] && EXCEPT_SLOW="24o 27m 36f 36g 51b 51c 60c 63 64b 68 71 73 77f 78 101 103 115 120g 124b"
-if $GSS_KRB5; then
- $RUNAS krb5_login.sh || exit 1
- $RUNAS -u $(($RUNAS_ID + 1)) krb5_login.sh || exit 1
-fi
-
-[ "$SLOW" = "no" ] && EXCEPT_SLOW="24o 27m 36f 36g 51b 51c 60c 63 64b 68 71 73 77f 78 101 103 115 120g 124b"
-
SANITYLOG=${TESTSUITELOG:-$TMP/$(basename $0 .sh).log}
FAIL_ON_ERROR=false
[ $UID -eq 0 -a $RUNAS_ID -eq 0 ] && error "\$RUNAS_ID set to 0, but \$UID is also 0!"
check_runas_id $RUNAS_ID $RUNAS
+check_runas_id $(($RUNAS_ID + 1)) "$RUNAS -u $(($RUNAS_ID + 1))"
build_test_filter
test_69() {
[ $(grep -c obdfilter $LPROC/devices) -eq 0 ] && \
skip "skipping test for remote OST" && return
- $GSS && skip "gss with bulk security will triger oops. re-enable this after b10091 get fixed" && return
f="$DIR/$tfile"
touch $f
SANITYLOG=${TESTSUITELOG:-$TMP/$(basename $0 .sh).log}
FAIL_ON_ERROR=false
-if $GSS_KRB5; then
- $RUNAS krb5_login.sh || exit 1
-fi
-
SETUP=${SETUP:-:}
TRACE=${TRACE:-""}
grep " $1 " /proc/mounts || zconf_mount $HOSTNAME $*
}
+umount_client() {
+ grep " $1 " /proc/mounts && zconf_umount `hostname` $*
+}
+
# return value:
# 0: success, the old identity set already.
# 1: success, the old identity does not set.
if [ "$MOUNT_2" ]; then
mount_client $MOUNT2
fi
- sleep 5
+
+ # by remounting mdt before ost, the initial connect from mdt to ost may
+ # time out because the ost is not ready yet. wait some time for its full
+ # recovery. the initial obd_connect timeout is 5s; in the GSS case it is
+ # preceded by a context negotiation rpc with $TIMEOUT.
+ # FIXME better by monitoring import status.
+ if $GSS; then
+ sleep $((TIMEOUT + 5))
+ else
+ sleep 5
+ fi
}
mounted_lustre_filesystems() {
local myRUNAS_ID=$1
shift
local myRUNAS=$@
+
+ if $GSS_KRB5; then
+ $myRUNAS krb5_login.sh || \
+ error "Failed to refresh Kerberos V5 TGT for UID $myRUNAS_ID."
+ fi
+
mkdir $DIR/d0_runas_test
chmod 0755 $DIR
chown $myRUNAS_ID:$myRUNAS_ID $DIR/d0_runas_test