X-Git-Url: https://git.whamcloud.com/?a=blobdiff_plain;f=lustre%2Fobdclass%2Fcapa.c;h=ee6da16f77ac87cd35afde1e90708d19722f3036;hb=f9920b4924edce1bd341622eee4281fdcd41845a;hp=02fb53d68a866e0ad87d67e4917c43a0ea8d908a;hpb=08aa217ce49aba1ded52e0f7adb8a607035123fd;p=fs%2Flustre-release.git

diff --git a/lustre/obdclass/capa.c b/lustre/obdclass/capa.c
index 02fb53d..ee6da16 100644
--- a/lustre/obdclass/capa.c
+++ b/lustre/obdclass/capa.c
@@ -27,7 +27,7 @@
  * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
  * Use is subject to license terms.
  *
- * Copyright (c) 2012, Intel Corporation.
+ * Copyright (c) 2012, 2014, Intel Corporation.
  */
 /*
  * This file is part of Lustre, http://www.lustre.org/
@@ -42,7 +42,6 @@
 
 #define DEBUG_SUBSYSTEM S_SEC
 
-#ifdef __KERNEL__
 #include 
 #include 
 #include 
@@ -53,9 +52,6 @@
 #include 
 #include 
 #include 
-#else
-#include 
-#endif
 
 #include 
 #include 
@@ -63,18 +59,16 @@
 #define NR_CAPAHASH 32
 #define CAPA_HASH_SIZE 3000 /* for MDS & OSS */
 
-cfs_mem_cache_t *capa_cachep = NULL;
+struct kmem_cache *capa_cachep;
 
-#ifdef __KERNEL__
 /* lock for capa hash/capa_list/fo_capa_keys */
 DEFINE_SPINLOCK(capa_lock);
 
-cfs_list_t capa_list[CAPA_SITE_MAX];
+struct list_head capa_list[CAPA_SITE_MAX];
 
 static struct capa_hmac_alg capa_hmac_algs[] = {
 	DEF_CAPA_HMAC_ALG("sha1", SHA1, 20, 20),
 };
-#endif
 
 /* capa count */
 int capa_count[CAPA_SITE_MAX] = { 0, };
@@ -83,25 +77,24 @@ EXPORT_SYMBOL(capa_list);
 EXPORT_SYMBOL(capa_lock);
 EXPORT_SYMBOL(capa_count);
 
-cfs_hlist_head_t *init_capa_hash(void)
+struct hlist_head *init_capa_hash(void)
 {
-        cfs_hlist_head_t *hash;
-        int nr_hash, i;
+	struct hlist_head *hash;
+	int nr_hash, i;
 
-        OBD_ALLOC(hash, CFS_PAGE_SIZE);
-        if (!hash)
-                return NULL;
+	OBD_ALLOC(hash, PAGE_CACHE_SIZE);
+	if (!hash)
+		return NULL;
 
-        nr_hash = CFS_PAGE_SIZE / sizeof(cfs_hlist_head_t);
-        LASSERT(nr_hash > NR_CAPAHASH);
+	nr_hash = PAGE_CACHE_SIZE / sizeof(struct hlist_head);
+	LASSERT(nr_hash > NR_CAPAHASH);
 
-        for (i = 0; i < NR_CAPAHASH; i++)
-                CFS_INIT_HLIST_HEAD(hash + i);
-        return hash;
+	for (i = 0; i < NR_CAPAHASH; i++)
+		INIT_HLIST_HEAD(hash + i);
+	return hash;
 }
 EXPORT_SYMBOL(init_capa_hash);
 
-#ifdef __KERNEL__
 static inline int capa_on_server(struct obd_capa *ocapa)
 {
 	return ocapa->c_site == CAPA_SITE_SERVER;
@@ -109,19 +102,20 @@ static inline int capa_on_server(struct obd_capa *ocapa)
 
 static inline void capa_delete(struct obd_capa *ocapa)
 {
-        LASSERT(capa_on_server(ocapa));
-        cfs_hlist_del_init(&ocapa->u.tgt.c_hash);
-        cfs_list_del_init(&ocapa->c_list);
-        capa_count[ocapa->c_site]--;
-        /* release the ref when alloc */
-        capa_put(ocapa);
+	LASSERT(capa_on_server(ocapa));
+	hlist_del_init(&ocapa->u.tgt.c_hash);
+	list_del_init(&ocapa->c_list);
+	capa_count[ocapa->c_site]--;
+	/* release the ref when alloc */
+	capa_put(ocapa);
 }
 
-void cleanup_capa_hash(cfs_hlist_head_t *hash)
+void cleanup_capa_hash(struct hlist_head *hash)
 {
-        int i;
-        cfs_hlist_node_t *pos, *next;
+	struct hlist_node __maybe_unused *pos;
+	struct hlist_node *next;
 	struct obd_capa *oc;
+	int i;
 
 	spin_lock(&capa_lock);
 	for (i = 0; i < NR_CAPAHASH; i++) {
@@ -131,7 +125,7 @@ void cleanup_capa_hash(cfs_hlist_head_t *hash)
 	}
 	spin_unlock(&capa_lock);
 
-	OBD_FREE(hash, CFS_PAGE_SIZE);
+	OBD_FREE(hash, PAGE_CACHE_SIZE);
 }
 EXPORT_SYMBOL(cleanup_capa_hash);
 
@@ -151,18 +145,18 @@ static inline int capa_is_to_expire(struct obd_capa *oc)
 }
 
 static struct obd_capa *find_capa(struct lustre_capa *capa,
-                                  cfs_hlist_head_t *head, int alive)
+				  struct hlist_head *head, int alive)
 {
-        cfs_hlist_node_t *pos;
-        struct obd_capa *ocapa;
-        int len = alive ? offsetof(struct lustre_capa, lc_keyid):sizeof(*capa);
+	struct hlist_node __maybe_unused *pos;
+	struct obd_capa *ocapa;
+	int len = alive ? offsetof(struct lustre_capa, lc_keyid):sizeof(*capa);
 
-        cfs_hlist_for_each_entry(ocapa, pos, head, u.tgt.c_hash) {
-                if (memcmp(&ocapa->c_capa, capa, len))
-                        continue;
-                /* don't return one that will expire soon in this case */
-                if (alive && capa_is_to_expire(ocapa))
-                        continue;
+	cfs_hlist_for_each_entry(ocapa, pos, head, u.tgt.c_hash) {
+		if (memcmp(&ocapa->c_capa, capa, len))
+			continue;
+		/* don't return one that will expire soon in this case */
+		if (alive && capa_is_to_expire(ocapa))
+			continue;
 
 		LASSERT(capa_on_server(ocapa));
 
@@ -174,30 +168,30 @@ static struct obd_capa *find_capa(struct lustre_capa *capa,
 }
 
 #define LRU_CAPA_DELETE_COUNT 12
-static inline void capa_delete_lru(cfs_list_t *head)
+static inline void capa_delete_lru(struct list_head *head)
 {
-        struct obd_capa *ocapa;
-        cfs_list_t *node = head->next;
-        int count = 0;
-
-        /* free LRU_CAPA_DELETE_COUNT unused capa from head */
-        while (count++ < LRU_CAPA_DELETE_COUNT) {
-                ocapa = cfs_list_entry(node, struct obd_capa, c_list);
-                node = node->next;
-                if (cfs_atomic_read(&ocapa->c_refc))
-                        continue;
-
-                DEBUG_CAPA(D_SEC, &ocapa->c_capa, "free lru");
-                capa_delete(ocapa);
-        }
+	struct obd_capa *ocapa;
+	struct list_head *node = head->next;
+	int count = 0;
+
+	/* free LRU_CAPA_DELETE_COUNT unused capa from head */
+	while (count++ < LRU_CAPA_DELETE_COUNT) {
+		ocapa = list_entry(node, struct obd_capa, c_list);
+		node = node->next;
+		if (atomic_read(&ocapa->c_refc))
+			continue;
+
+		DEBUG_CAPA(D_SEC, &ocapa->c_capa, "free lru");
+		capa_delete(ocapa);
+	}
 }
 
 /* add or update */
-struct obd_capa *capa_add(cfs_hlist_head_t *hash, struct lustre_capa *capa)
+struct obd_capa *capa_add(struct hlist_head *hash, struct lustre_capa *capa)
 {
-        cfs_hlist_head_t *head = hash + capa_hashfn(&capa->lc_fid);
+	struct hlist_head *head = hash + capa_hashfn(&capa->lc_fid);
 	struct obd_capa *ocapa, *old = NULL;
-        cfs_list_t *list = &capa_list[CAPA_SITE_SERVER];
+	struct list_head *list = &capa_list[CAPA_SITE_SERVER];
 
 	ocapa = alloc_capa(CAPA_SITE_SERVER);
 	if (IS_ERR(ocapa))
@@ -208,8 +202,8 @@ struct obd_capa *capa_add(cfs_hlist_head_t *hash, struct lustre_capa *capa)
 	if (!old) {
 		ocapa->c_capa = *capa;
 		set_capa_expiry(ocapa);
-		cfs_hlist_add_head(&ocapa->u.tgt.c_hash, head);
-		cfs_list_add_tail(&ocapa->c_list, list);
+		hlist_add_head(&ocapa->u.tgt.c_hash, head);
+		list_add_tail(&ocapa->c_list, list);
 		capa_get(ocapa);
 		capa_count[CAPA_SITE_SERVER]++;
 		if (capa_count[CAPA_SITE_SERVER] > CAPA_HASH_SIZE)
@@ -225,7 +219,7 @@ struct obd_capa *capa_add(cfs_hlist_head_t *hash, struct lustre_capa *capa)
 }
 EXPORT_SYMBOL(capa_add);
 
-struct obd_capa *capa_lookup(cfs_hlist_head_t *hash, struct lustre_capa *capa,
+struct obd_capa *capa_lookup(struct hlist_head *hash, struct lustre_capa *capa,
 			     int alive)
 {
 	struct obd_capa *ocapa;
@@ -233,8 +227,7 @@ struct obd_capa *capa_lookup(cfs_hlist_head_t *hash, struct lustre_capa *capa,
 	spin_lock(&capa_lock);
 	ocapa = find_capa(capa, hash + capa_hashfn(&capa->lc_fid), alive);
 	if (ocapa) {
-		cfs_list_move_tail(&ocapa->c_list,
-				   &capa_list[CAPA_SITE_SERVER]);
+		list_move_tail(&ocapa->c_list, &capa_list[CAPA_SITE_SERVER]);
 		capa_get(ocapa);
 	}
 	spin_unlock(&capa_lock);
@@ -245,7 +238,7 @@ EXPORT_SYMBOL(capa_lookup);
 
 int capa_hmac(__u8 *hmac, struct lustre_capa *capa, __u8 *key)
 {
-	struct ll_crypto_hash *tfm;
+	struct crypto_hash *tfm;
 	struct capa_hmac_alg *alg;
 	int keylen;
 	struct scatterlist sl;
@@ -257,20 +250,21 @@ int capa_hmac(__u8 *hmac, struct lustre_capa *capa, __u8 *key)
 
 	alg = &capa_hmac_algs[capa_alg(capa)];
 
-	tfm = ll_crypto_alloc_hash(alg->ha_name, 0, 0);
-	if (!tfm) {
+	tfm = crypto_alloc_hash(alg->ha_name, 0, 0);
+	if (IS_ERR(tfm)) {
 		CERROR("crypto_alloc_tfm failed, check whether your kernel"
 		       "has crypto support!\n");
-		return -ENOMEM;
+		return PTR_ERR(tfm);
 	}
 	keylen = alg->ha_keylen;
 
+	sg_init_table(&sl, 1);
 	sg_set_page(&sl, virt_to_page(capa),
 		    offsetof(struct lustre_capa, lc_hmac),
-		    (unsigned long)(capa) % CFS_PAGE_SIZE);
+		    (unsigned long)(capa) % PAGE_CACHE_SIZE);
 
 	ll_crypto_hmac(tfm, key, &keylen, &sl, sl.length, hmac);
-	ll_crypto_free_hash(tfm);
+	crypto_free_hash(tfm);
 
 	return 0;
 }
@@ -278,7 +272,7 @@ EXPORT_SYMBOL(capa_hmac);
 
 int capa_encrypt_id(__u32 *d, __u32 *s, __u8 *key, int keylen)
 {
-	struct ll_crypto_cipher *tfm;
+	struct crypto_blkcipher *tfm;
 	struct scatterlist sd;
 	struct scatterlist ss;
 	struct blkcipher_desc desc;
@@ -289,7 +283,7 @@ int capa_encrypt_id(__u32 *d, __u32 *s, __u8 *key, int keylen)
 
 	/* passing "aes" in a variable instead of a constant string keeps gcc
 	 * 4.3.2 happy */
-	tfm = ll_crypto_alloc_blkcipher(alg, 0, 0 );
+	tfm = crypto_alloc_blkcipher(alg, 0, 0 );
 	if (IS_ERR(tfm)) {
 		CERROR("failed to load transform for aes\n");
 		RETURN(PTR_ERR(tfm));
@@ -301,21 +295,23 @@ int capa_encrypt_id(__u32 *d, __u32 *s, __u8 *key, int keylen)
 		GOTO(out, rc = -EINVAL);
 	}
 
-	rc = ll_crypto_blkcipher_setkey(tfm, key, min);
+	rc = crypto_blkcipher_setkey(tfm, key, min);
 	if (rc) {
 		CERROR("failed to setting key for aes\n");
 		GOTO(out, rc);
 	}
 
-	sg_set_page(&sd, virt_to_page(d), 16,
-		    (unsigned long)(d) % CFS_PAGE_SIZE);
+	sg_init_table(&sd, 1);
+	sg_set_page(&sd, virt_to_page(d), 16,
+		    (unsigned long)(d) % PAGE_CACHE_SIZE);
 
-	sg_set_page(&ss, virt_to_page(s), 16,
-		    (unsigned long)(s) % CFS_PAGE_SIZE);
+	sg_init_table(&ss, 1);
+	sg_set_page(&ss, virt_to_page(s), 16,
+		    (unsigned long)(s) % PAGE_CACHE_SIZE);
 	desc.tfm = tfm;
 	desc.info = NULL;
 	desc.flags = 0;
-	rc = ll_crypto_blkcipher_encrypt(&desc, &sd, &ss, 16);
+	rc = crypto_blkcipher_encrypt(&desc, &sd, &ss, 16);
 	if (rc) {
 		CERROR("failed to encrypt for aes\n");
 		GOTO(out, rc);
@@ -324,14 +320,14 @@ int capa_encrypt_id(__u32 *d, __u32 *s, __u8 *key, int keylen)
 
 	EXIT;
 out:
-	ll_crypto_free_blkcipher(tfm);
+	crypto_free_blkcipher(tfm);
 	return rc;
 }
 EXPORT_SYMBOL(capa_encrypt_id);
 
 int capa_decrypt_id(__u32 *d, __u32 *s, __u8 *key, int keylen)
 {
-	struct ll_crypto_cipher *tfm;
+	struct crypto_blkcipher *tfm;
 	struct scatterlist sd;
 	struct scatterlist ss;
 	struct blkcipher_desc desc;
@@ -342,7 +338,7 @@ int capa_decrypt_id(__u32 *d, __u32 *s, __u8 *key, int keylen)
 
 	/* passing "aes" in a variable instead of a constant string keeps gcc
 	 * 4.3.2 happy */
-	tfm = ll_crypto_alloc_blkcipher(alg, 0, 0 );
+	tfm = crypto_alloc_blkcipher(alg, 0, 0 );
 	if (IS_ERR(tfm)) {
 		CERROR("failed to load transform for aes\n");
 		RETURN(PTR_ERR(tfm));
@@ -354,22 +350,24 @@ int capa_decrypt_id(__u32 *d, __u32 *s, __u8 *key, int keylen)
 		GOTO(out, rc = -EINVAL);
 	}
 
-	rc = ll_crypto_blkcipher_setkey(tfm, key, min);
+	rc = crypto_blkcipher_setkey(tfm, key, min);
 	if (rc) {
 		CERROR("failed to setting key for aes\n");
 		GOTO(out, rc);
 	}
 
-	sg_set_page(&sd, virt_to_page(d), 16,
-		    (unsigned long)(d) % CFS_PAGE_SIZE);
+	sg_init_table(&sd, 1);
+	sg_set_page(&sd, virt_to_page(d), 16,
+		    (unsigned long)(d) % PAGE_CACHE_SIZE);
 
-	sg_set_page(&ss, virt_to_page(s), 16,
-		    (unsigned long)(s) % CFS_PAGE_SIZE);
+	sg_init_table(&ss, 1);
+	sg_set_page(&ss, virt_to_page(s), 16,
+		    (unsigned long)(s) % PAGE_CACHE_SIZE);
 
 	desc.tfm = tfm;
 	desc.info = NULL;
 	desc.flags = 0;
-	rc = ll_crypto_blkcipher_decrypt(&desc, &sd, &ss, 16);
+	rc = crypto_blkcipher_decrypt(&desc, &sd, &ss, 16);
 	if (rc) {
 		CERROR("failed to decrypt for aes\n");
 		GOTO(out, rc);
@@ -378,11 +376,10 @@ int capa_decrypt_id(__u32 *d, __u32 *s, __u8 *key, int keylen)
 
 	EXIT;
 out:
-	ll_crypto_free_blkcipher(tfm);
+	crypto_free_blkcipher(tfm);
 	return rc;
 }
 EXPORT_SYMBOL(capa_decrypt_id);
-#endif
 
 void capa_cpy(void *capa, struct obd_capa *ocapa)
 {
@@ -408,3 +405,44 @@ void _debug_capa(struct lustre_capa *c,
 	va_end(args);
 }
 EXPORT_SYMBOL(_debug_capa);
+
+/*
+ * context key constructor/destructor:
+ * lu_capainfo_key_init, lu_capainfo_key_fini
+ */
+LU_KEY_INIT_FINI(lu_capainfo, struct lu_capainfo);
+
+static struct lu_context_key lu_capainfo_key = {
+	.lct_tags = LCT_SERVER_SESSION,
+	.lct_init = lu_capainfo_key_init,
+	.lct_fini = lu_capainfo_key_fini
+};
+
+struct lu_capainfo *lu_capainfo_get(const struct lu_env *env)
+{
+	/* NB, in mdt_init0 */
+	if (env->le_ses == NULL)
+		return NULL;
+	return lu_context_key_get(env->le_ses, &lu_capainfo_key);
+}
+EXPORT_SYMBOL(lu_capainfo_get);
+
+/**
+ * Initialization of lu_capainfo_key data.
+ */
+int lu_capainfo_init(void)
+{
+	int rc;
+
+	LU_CONTEXT_KEY_INIT(&lu_capainfo_key);
+	rc = lu_context_key_register(&lu_capainfo_key);
+	return rc;
+}
+
+/**
+ * Dual to lu_capainfo_init().
+ */
+void lu_capainfo_fini(void)
+{
+	lu_context_key_degister(&lu_capainfo_key);
+}