/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2012, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 */
/* Number of buckets actually initialized in the capability hash table. */
#define NR_CAPAHASH 32
#define CAPA_HASH_SIZE 3000 /* for MDS & OSS */

/* Slab cache used for struct obd_capa allocations; set up elsewhere. */
struct kmem_cache *capa_cachep;

#ifdef __KERNEL__
/* lock for capa hash/capa_list/fo_capa_keys */
cfs_hlist_head_t *init_capa_hash(void)
{
- cfs_hlist_head_t *hash;
- int nr_hash, i;
+ cfs_hlist_head_t *hash;
+ int nr_hash, i;
- OBD_ALLOC(hash, CFS_PAGE_SIZE);
- if (!hash)
- return NULL;
+ OBD_ALLOC(hash, PAGE_CACHE_SIZE);
+ if (!hash)
+ return NULL;
- nr_hash = CFS_PAGE_SIZE / sizeof(cfs_hlist_head_t);
- LASSERT(nr_hash > NR_CAPAHASH);
+ nr_hash = PAGE_CACHE_SIZE / sizeof(cfs_hlist_head_t);
+ LASSERT(nr_hash > NR_CAPAHASH);
- for (i = 0; i < NR_CAPAHASH; i++)
- CFS_INIT_HLIST_HEAD(hash + i);
- return hash;
+ for (i = 0; i < NR_CAPAHASH; i++)
+ CFS_INIT_HLIST_HEAD(hash + i);
+ return hash;
}
EXPORT_SYMBOL(init_capa_hash);
}
/* NOTE(review): the lines below are fragments -- the tail of
 * cleanup_capa_hash() and part of a capability-LRU free loop whose
 * function headers are not visible in this chunk.  The '-'/'+' pairs
 * are unresolved patch residue and are left byte-identical here;
 * resolve them to the '+' side before this file can compile. */
spin_unlock(&capa_lock);
- OBD_FREE(hash, CFS_PAGE_SIZE);
+ OBD_FREE(hash, PAGE_CACHE_SIZE);
}
EXPORT_SYMBOL(cleanup_capa_hash);
/* Scan up to LRU_CAPA_DELETE_COUNT entries, skipping any still referenced. */
while (count++ < LRU_CAPA_DELETE_COUNT) {
ocapa = cfs_list_entry(node, struct obd_capa, c_list);
node = node->next;
- if (cfs_atomic_read(&ocapa->c_refc))
+ if (atomic_read(&ocapa->c_refc))
continue;
DEBUG_CAPA(D_SEC, &ocapa->c_capa, "free lru");
/**
 * Compute the HMAC over a capability.
 *
 * Hashes everything in \a capa up to (but excluding) the lc_hmac field,
 * keyed with \a key, and writes the digest to \a hmac.
 *
 * \param hmac  output buffer for the digest
 * \param capa  capability to sign, hashed in place via its page
 * \param key   HMAC key; length taken from the algorithm descriptor
 *
 * \retval 0 on success, negative errno if the hash transform cannot
 *         be allocated.
 */
int capa_hmac(__u8 *hmac, struct lustre_capa *capa, __u8 *key)
{
	struct crypto_hash *tfm;
	struct capa_hmac_alg *alg;
	int keylen;
	struct scatterlist sl;

	alg = &capa_hmac_algs[capa_alg(capa)];

	tfm = crypto_alloc_hash(alg->ha_name, 0, 0);
	if (IS_ERR(tfm)) {
		CERROR("crypto_alloc_tfm failed, check whether your kernel "
		       "has crypto support!\n");
		return PTR_ERR(tfm);
	}
	keylen = alg->ha_keylen;

	/* The capability is hashed where it lives; the offset into its
	 * page is the address modulo the page size. */
	sg_set_page(&sl, virt_to_page(capa),
		    offsetof(struct lustre_capa, lc_hmac),
		    (unsigned long)(capa) % PAGE_CACHE_SIZE);

	ll_crypto_hmac(tfm, key, &keylen, &sl, sl.length, hmac);
	crypto_free_hash(tfm);

	return 0;
}
/*
 * capa_encrypt_id() - AES-encrypt a 16-byte capability id from \a s
 * into \a d using \a key.
 *
 * NOTE(review): this span contains unresolved '-'/'+' patch residue
 * AND is missing elided lines: `alg`, `min` and `rc` are used but not
 * declared here, and RETURN() immediately followed by GOTO() at the
 * tfm-allocation check indicates two separate error branches were
 * collapsed together.  Code is left byte-identical; restore the
 * missing declarations/branches from the original before building.
 * d and s are presumably 16-byte (4 x __u32) buffers -- TODO confirm
 * against callers.
 */
int capa_encrypt_id(__u32 *d, __u32 *s, __u8 *key, int keylen)
{
- struct ll_crypto_cipher *tfm;
+ struct crypto_blkcipher *tfm;
struct scatterlist sd;
struct scatterlist ss;
struct blkcipher_desc desc;
/* passing "aes" in a variable instead of a constant string keeps gcc
* 4.3.2 happy */
- tfm = ll_crypto_alloc_blkcipher(alg, 0, 0 );
+ tfm = crypto_alloc_blkcipher(alg, 0, 0 );
if (IS_ERR(tfm)) {
CERROR("failed to load transform for aes\n");
RETURN(PTR_ERR(tfm));
GOTO(out, rc = -EINVAL);
}
- rc = ll_crypto_blkcipher_setkey(tfm, key, min);
+ rc = crypto_blkcipher_setkey(tfm, key, min);
if (rc) {
CERROR("failed to setting key for aes\n");
GOTO(out, rc);
}
/* Map both 16-byte buffers into scatterlists at their in-page offsets. */
- sg_set_page(&sd, virt_to_page(d), 16,
- (unsigned long)(d) % CFS_PAGE_SIZE);
+ sg_set_page(&sd, virt_to_page(d), 16,
+ (unsigned long)(d) % PAGE_CACHE_SIZE);
- sg_set_page(&ss, virt_to_page(s), 16,
- (unsigned long)(s) % CFS_PAGE_SIZE);
+ sg_set_page(&ss, virt_to_page(s), 16,
+ (unsigned long)(s) % PAGE_CACHE_SIZE);
desc.tfm = tfm;
desc.info = NULL;
desc.flags = 0;
- rc = ll_crypto_blkcipher_encrypt(&desc, &sd, &ss, 16);
+ rc = crypto_blkcipher_encrypt(&desc, &sd, &ss, 16);
if (rc) {
CERROR("failed to encrypt for aes\n");
GOTO(out, rc);
EXIT;
out:
/* Transform is released on both success and error paths. */
- ll_crypto_free_blkcipher(tfm);
+ crypto_free_blkcipher(tfm);
return rc;
}
EXPORT_SYMBOL(capa_encrypt_id);
/*
 * capa_decrypt_id() - AES-decrypt a 16-byte capability id from \a s
 * into \a d using \a key.  Mirror image of capa_encrypt_id().
 *
 * NOTE(review): same condition as capa_encrypt_id() -- unresolved
 * '-'/'+' patch residue plus elided lines (`alg`, `min`, `rc` used
 * but not declared here; RETURN() directly followed by GOTO()).
 * Code left byte-identical; resolve the residue and restore the
 * missing declarations/branches before building.
 */
int capa_decrypt_id(__u32 *d, __u32 *s, __u8 *key, int keylen)
{
- struct ll_crypto_cipher *tfm;
+ struct crypto_blkcipher *tfm;
struct scatterlist sd;
struct scatterlist ss;
struct blkcipher_desc desc;
/* passing "aes" in a variable instead of a constant string keeps gcc
* 4.3.2 happy */
- tfm = ll_crypto_alloc_blkcipher(alg, 0, 0 );
+ tfm = crypto_alloc_blkcipher(alg, 0, 0 );
if (IS_ERR(tfm)) {
CERROR("failed to load transform for aes\n");
RETURN(PTR_ERR(tfm));
GOTO(out, rc = -EINVAL);
}
- rc = ll_crypto_blkcipher_setkey(tfm, key, min);
+ rc = crypto_blkcipher_setkey(tfm, key, min);
if (rc) {
CERROR("failed to setting key for aes\n");
GOTO(out, rc);
}
/* Map both 16-byte buffers into scatterlists at their in-page offsets. */
- sg_set_page(&sd, virt_to_page(d), 16,
- (unsigned long)(d) % CFS_PAGE_SIZE);
+ sg_set_page(&sd, virt_to_page(d), 16,
+ (unsigned long)(d) % PAGE_CACHE_SIZE);
- sg_set_page(&ss, virt_to_page(s), 16,
- (unsigned long)(s) % CFS_PAGE_SIZE);
+ sg_set_page(&ss, virt_to_page(s), 16,
+ (unsigned long)(s) % PAGE_CACHE_SIZE);
desc.tfm = tfm;
desc.info = NULL;
desc.flags = 0;
- rc = ll_crypto_blkcipher_decrypt(&desc, &sd, &ss, 16);
+ rc = crypto_blkcipher_decrypt(&desc, &sd, &ss, 16);
if (rc) {
CERROR("failed to decrypt for aes\n");
GOTO(out, rc);
EXIT;
out:
/* Transform is released on both success and error paths. */
- ll_crypto_free_blkcipher(tfm);
+ crypto_free_blkcipher(tfm);
return rc;
}
EXPORT_SYMBOL(capa_decrypt_id);
/* NOTE(review): tail fragment of _debug_capa(); the function header is
 * not visible in this chunk, so the code is left byte-identical. */
va_end(args);
}
EXPORT_SYMBOL(_debug_capa);
+
+/*
+ * context key constructor/destructor:
+ * lu_capainfo_key_init, lu_capainfo_key_fini
+ */
+LU_KEY_INIT_FINI(lu_capainfo, struct lu_capainfo);
+
+struct lu_context_key lu_capainfo_key = {
+ .lct_tags = LCT_SERVER_SESSION,
+ .lct_init = lu_capainfo_key_init,
+ .lct_fini = lu_capainfo_key_fini
+};
+
+struct lu_capainfo *lu_capainfo_get(const struct lu_env *env)
+{
+ /* NB, in mdt_init0 */
+ if (env->le_ses == NULL)
+ return NULL;
+ return lu_context_key_get(env->le_ses, &lu_capainfo_key);
+}
+EXPORT_SYMBOL(lu_capainfo_get);
+
+/**
+ * Initialization of lu_capainfo_key data.
+ */
+int lu_capainfo_init(void)
+{
+ int rc;
+
+ LU_CONTEXT_KEY_INIT(&lu_capainfo_key);
+ rc = lu_context_key_register(&lu_capainfo_key);
+ return rc;
+}
+
+/**
+ * Dual to lu_capainfo_init().
+ */
+void lu_capainfo_fini(void)
+{
+ lu_context_key_degister(&lu_capainfo_key);
+}