diff --git a/lustre/ptlrpc/sec_bulk.c b/lustre/ptlrpc/sec_bulk.c
index 9da60ad..b1828b1 100644
--- a/lustre/ptlrpc/sec_bulk.c
+++ b/lustre/ptlrpc/sec_bulk.c
@@ -27,7 +27,7 @@
  * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
  * Use is subject to license terms.
  *
- * Copyright (c) 2011, 2012, Intel Corporation.
+ * Copyright (c) 2011, 2014, Intel Corporation.
  */
 /*
  * This file is part of Lustre, http://www.lustre.org/
@@ -41,12 +41,6 @@
 #define DEBUG_SUBSYSTEM S_SEC
 
 #include <libcfs/libcfs.h>
-#ifndef __KERNEL__
-#include <liblustre.h>
-#include <libcfs/list.h>
-#else
-#include <linux/crypto.h>
-#endif
 
 #include <obd.h>
 #include <obd_class.h>
@@ -63,7 +57,6 @@
  * bulk encryption page pools           *
  ****************************************/
 
-#ifdef __KERNEL__
 
 #define PTRS_PER_PAGE	(PAGE_CACHE_SIZE / sizeof(void *))
 #define PAGES_PER_POOL	(PTRS_PER_PAGE)
@@ -129,7 +122,7 @@ static struct ptlrpc_enc_page_pool {
 
 /*
  * memory shrinker
 */
-const int pools_shrinker_seeks = DEFAULT_SEEKS;
+static const int pools_shrinker_seeks = DEFAULT_SEEKS;
 static struct shrinker *pools_shrinker;
@@ -160,7 +153,7 @@ int sptlrpc_proc_enc_pool_seq_show(struct seq_file *m, void *v)
 	       "cache missing: %lu\n"
 	       "low free mark: %lu\n"
 	       "max waitqueue depth: %u\n"
-	       "max wait time: "CFS_TIME_T"/%u\n"
+	       "max wait time: "CFS_TIME_T"/%lu\n"
 	       ,
 	       totalram_pages,
 	       PAGES_PER_POOL,
@@ -179,7 +172,8 @@ int sptlrpc_proc_enc_pool_seq_show(struct seq_file *m, void *v)
 	       page_pools.epp_st_missings,
 	       page_pools.epp_st_lowfree,
 	       page_pools.epp_st_max_wqlen,
-	       page_pools.epp_st_max_wait, HZ
+	       page_pools.epp_st_max_wait,
+	       msecs_to_jiffies(MSEC_PER_SEC)
 	       );
 
 	spin_unlock(&page_pools.epp_lock);
@@ -537,16 +531,17 @@ int sptlrpc_enc_pool_get_pages(struct ptlrpc_bulk_desc *desc)
 	int p_idx, g_idx;
 	int i;
 
+	LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type));
 	LASSERT(desc->bd_iov_count > 0);
 	LASSERT(desc->bd_iov_count <= page_pools.epp_max_pages);
 
 	/* resent bulk, enc iov might have been allocated previously */
-	if (desc->bd_enc_iov != NULL)
+	if (GET_ENC_KIOV(desc) != NULL)
 		return 0;
 
-	OBD_ALLOC(desc->bd_enc_iov,
-		  desc->bd_iov_count * sizeof(*desc->bd_enc_iov));
-	if (desc->bd_enc_iov == NULL)
+	OBD_ALLOC(GET_ENC_KIOV(desc),
+		  desc->bd_iov_count * sizeof(*GET_ENC_KIOV(desc)));
+	if (GET_ENC_KIOV(desc) == NULL)
 		return -ENOMEM;
 
 	spin_lock(&page_pools.epp_lock);
@@ -579,11 +574,11 @@ again:
 				page_pools.epp_waitqlen;
 
 		set_current_state(TASK_UNINTERRUPTIBLE);
-		init_waitqueue_entry_current(&waitlink);
+		init_waitqueue_entry(&waitlink, current);
 		add_wait_queue(&page_pools.epp_waitq, &waitlink);
 
 		spin_unlock(&page_pools.epp_lock);
-		waitq_wait(&waitlink, TASK_UNINTERRUPTIBLE);
+		schedule();
 		remove_wait_queue(&page_pools.epp_waitq, &waitlink);
 		LASSERT(page_pools.epp_waitqlen > 0);
 		spin_lock(&page_pools.epp_lock);
@@ -610,17 +605,17 @@ again:
 	p_idx = page_pools.epp_free_pages / PAGES_PER_POOL;
 	g_idx = page_pools.epp_free_pages % PAGES_PER_POOL;
 
-        for (i = 0; i < desc->bd_iov_count; i++) {
-                LASSERT(page_pools.epp_pools[p_idx][g_idx] != NULL);
-                desc->bd_enc_iov[i].kiov_page =
-                        page_pools.epp_pools[p_idx][g_idx];
-                page_pools.epp_pools[p_idx][g_idx] = NULL;
-
-                if (++g_idx == PAGES_PER_POOL) {
-                        p_idx++;
-                        g_idx = 0;
-                }
-        }
+	for (i = 0; i < desc->bd_iov_count; i++) {
+		LASSERT(page_pools.epp_pools[p_idx][g_idx] != NULL);
+		BD_GET_ENC_KIOV(desc, i).kiov_page =
+			page_pools.epp_pools[p_idx][g_idx];
+		page_pools.epp_pools[p_idx][g_idx] = NULL;
+
+		if (++g_idx == PAGES_PER_POOL) {
+			p_idx++;
+			g_idx = 0;
+		}
+	}
 
 	if (page_pools.epp_free_pages < page_pools.epp_st_lowfree)
 		page_pools.epp_st_lowfree = page_pools.epp_free_pages;
@@ -645,48 +640,49 @@ EXPORT_SYMBOL(sptlrpc_enc_pool_get_pages);
 
 void sptlrpc_enc_pool_put_pages(struct ptlrpc_bulk_desc *desc)
 {
-        int p_idx, g_idx;
-        int i;
+	int	p_idx, g_idx;
+	int	i;
 
-        if (desc->bd_enc_iov == NULL)
-                return;
+	LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type));
 
-        LASSERT(desc->bd_iov_count > 0);
+	if (GET_ENC_KIOV(desc) == NULL)
+		return;
+
+	LASSERT(desc->bd_iov_count > 0);
 
 	spin_lock(&page_pools.epp_lock);
 
-        p_idx = page_pools.epp_free_pages / PAGES_PER_POOL;
-        g_idx = page_pools.epp_free_pages % PAGES_PER_POOL;
+	p_idx = page_pools.epp_free_pages / PAGES_PER_POOL;
+	g_idx = page_pools.epp_free_pages % PAGES_PER_POOL;
 
-        LASSERT(page_pools.epp_free_pages + desc->bd_iov_count <=
-                page_pools.epp_total_pages);
-        LASSERT(page_pools.epp_pools[p_idx]);
+	LASSERT(page_pools.epp_free_pages + desc->bd_iov_count <=
+		page_pools.epp_total_pages);
+	LASSERT(page_pools.epp_pools[p_idx]);
 
-        for (i = 0; i < desc->bd_iov_count; i++) {
-                LASSERT(desc->bd_enc_iov[i].kiov_page != NULL);
-                LASSERT(g_idx != 0 || page_pools.epp_pools[p_idx]);
-                LASSERT(page_pools.epp_pools[p_idx][g_idx] == NULL);
+	for (i = 0; i < desc->bd_iov_count; i++) {
+		LASSERT(BD_GET_ENC_KIOV(desc, i).kiov_page != NULL);
+		LASSERT(g_idx != 0 || page_pools.epp_pools[p_idx]);
+		LASSERT(page_pools.epp_pools[p_idx][g_idx] == NULL);
 
-                page_pools.epp_pools[p_idx][g_idx] =
-                        desc->bd_enc_iov[i].kiov_page;
+		page_pools.epp_pools[p_idx][g_idx] =
+			BD_GET_ENC_KIOV(desc, i).kiov_page;
 
-                if (++g_idx == PAGES_PER_POOL) {
-                        p_idx++;
-                        g_idx = 0;
-                }
-        }
+		if (++g_idx == PAGES_PER_POOL) {
+			p_idx++;
+			g_idx = 0;
+		}
+	}
 
-        page_pools.epp_free_pages += desc->bd_iov_count;
+	page_pools.epp_free_pages += desc->bd_iov_count;
 
-        enc_pools_wakeup();
+	enc_pools_wakeup();
 
 	spin_unlock(&page_pools.epp_lock);
 
-        OBD_FREE(desc->bd_enc_iov,
-                 desc->bd_iov_count * sizeof(*desc->bd_enc_iov));
-        desc->bd_enc_iov = NULL;
+	OBD_FREE(GET_ENC_KIOV(desc),
+		 desc->bd_iov_count * sizeof(*GET_ENC_KIOV(desc)));
+	GET_ENC_KIOV(desc) = NULL;
 }
-EXPORT_SYMBOL(sptlrpc_enc_pool_put_pages);
 
 /*
  * we don't do much stuff for add_user/del_user anymore, except adding some
@@ -805,39 +801,20 @@ void sptlrpc_enc_pool_fini(void)
 
 	enc_pools_free();
 
-        if (page_pools.epp_st_access > 0) {
-                CDEBUG(D_SEC,
-                       "max pages %lu, grows %u, grow fails %u, shrinks %u, "
-                       "access %lu, missing %lu, max qlen %u, max wait "
-                       CFS_TIME_T"/%d\n",
-                       page_pools.epp_st_max_pages, page_pools.epp_st_grows,
-                       page_pools.epp_st_grow_fails,
+	if (page_pools.epp_st_access > 0) {
+		CDEBUG(D_SEC,
+		       "max pages %lu, grows %u, grow fails %u, shrinks %u, "
+		       "access %lu, missing %lu, max qlen %u, max wait "
+		       CFS_TIME_T"/%lu\n",
+		       page_pools.epp_st_max_pages, page_pools.epp_st_grows,
+		       page_pools.epp_st_grow_fails,
 		       page_pools.epp_st_shrinks, page_pools.epp_st_access,
 		       page_pools.epp_st_missings, page_pools.epp_st_max_wqlen,
-                       page_pools.epp_st_max_wait, HZ);
+		       page_pools.epp_st_max_wait,
+		       msecs_to_jiffies(MSEC_PER_SEC));
 	}
 }
 
-#else /* !__KERNEL__ */
-
-int sptlrpc_enc_pool_get_pages(struct ptlrpc_bulk_desc *desc)
-{
-	return 0;
-}
-
-void sptlrpc_enc_pool_put_pages(struct ptlrpc_bulk_desc *desc)
-{
-}
-
-int sptlrpc_enc_pool_init(void)
-{
-	return 0;
-}
-
-void sptlrpc_enc_pool_fini(void)
-{
-}
-#endif
 
 static int cfs_hash_alg_id[] = {
 	[BULK_HASH_ALG_NULL]	= CFS_HASH_ALG_NULL,
@@ -853,13 +830,11 @@ const char * sptlrpc_get_hash_name(__u8 hash_alg)
 {
 	return cfs_crypto_hash_name(cfs_hash_alg_id[hash_alg]);
 }
-EXPORT_SYMBOL(sptlrpc_get_hash_name);
 
 __u8 sptlrpc_get_hash_alg(const char *algname)
 {
 	return cfs_crypto_hash_alg(algname);
 }
-EXPORT_SYMBOL(sptlrpc_get_hash_alg);
 
 int bulk_sec_desc_unpack(struct lustre_msg *msg, int offset, int swabbed)
 {
@@ -899,15 +874,21 @@ int bulk_sec_desc_unpack(struct lustre_msg *msg, int offset, int swabbed)
 }
 EXPORT_SYMBOL(bulk_sec_desc_unpack);
 
+/*
+ * Compute the checksum of an RPC buffer payload. If the return \a buflen
+ * is not large enough, truncate the result to fit so that it is possible
+ * to use a hash function with a large hash space, but only use a part of
+ * the resulting hash.
+ */
 int sptlrpc_get_bulk_checksum(struct ptlrpc_bulk_desc *desc, __u8 alg,
 			      void *buf, int buflen)
 {
 	struct cfs_crypto_hash_desc *hdesc;
 	int hashsize;
-	char hashbuf[64];
 	unsigned int bufsize;
 	int i, err;
 
+	LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type));
 	LASSERT(alg > BULK_HASH_ALG_NULL && alg < BULK_HASH_ALG_MAX);
 	LASSERT(buflen >= 4);
 
@@ -921,30 +902,25 @@ int sptlrpc_get_bulk_checksum(struct ptlrpc_bulk_desc *desc, __u8 alg,
 	hashsize = cfs_crypto_hash_digestsize(cfs_hash_alg_id[alg]);
 
 	for (i = 0; i < desc->bd_iov_count; i++) {
-#ifdef __KERNEL__
-		cfs_crypto_hash_update_page(hdesc, desc->bd_iov[i].kiov_page,
-				desc->bd_iov[i].kiov_offset & ~CFS_PAGE_MASK,
-				desc->bd_iov[i].kiov_len);
-#else
-		cfs_crypto_hash_update(hdesc, desc->bd_iov[i].iov_base,
-				desc->bd_iov[i].iov_len);
-#endif
+		cfs_crypto_hash_update_page(hdesc,
+					    BD_GET_KIOV(desc, i).kiov_page,
+					    BD_GET_KIOV(desc, i).kiov_offset &
+					    ~PAGE_MASK,
+					    BD_GET_KIOV(desc, i).kiov_len);
 	}
+
 	if (hashsize > buflen) {
+		unsigned char hashbuf[CFS_CRYPTO_HASH_DIGESTSIZE_MAX];
+
 		bufsize = sizeof(hashbuf);
-		err = cfs_crypto_hash_final(hdesc, (unsigned char *)hashbuf,
-					    &bufsize);
+		LASSERTF(bufsize >= hashsize, "bufsize = %u < hashsize %u\n",
+			 bufsize, hashsize);
+		err = cfs_crypto_hash_final(hdesc, hashbuf, &bufsize);
 		memcpy(buf, hashbuf, buflen);
 	} else {
 		bufsize = buflen;
-		err = cfs_crypto_hash_final(hdesc, (unsigned char *)buf,
-					    &bufsize);
+		err = cfs_crypto_hash_final(hdesc, buf, &bufsize);
 	}
 
-	if (err)
-		cfs_crypto_hash_final(hdesc, NULL, NULL);
 	return err;
 }
-EXPORT_SYMBOL(sptlrpc_get_bulk_checksum);
-
-
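
For readers following the page-pool hunks above: the encryption page pools are a two-level array in which each "pool" is itself one page holding PAGES_PER_POOL page pointers, so a flat free-page count splits into a pool index (p_idx) and an in-pool slot (g_idx). Below is a minimal, standalone userspace sketch of that index arithmetic, not Lustre code; the 4 KB page size and the demo value of free_pages are assumptions for illustration, where the kernel code derives the size from PAGE_CACHE_SIZE.

#include <stdio.h>

#define PAGE_SIZE_SKETCH 4096UL                          /* assumed 4 KB page */
#define PTRS_PER_PAGE    (PAGE_SIZE_SKETCH / sizeof(void *))
#define PAGES_PER_POOL   PTRS_PER_PAGE

int main(void)
{
	unsigned long free_pages = 1234;  /* hypothetical epp_free_pages */

	/* split the flat index into (pool, slot), as the patch does */
	unsigned long p_idx = free_pages / PAGES_PER_POOL;
	unsigned long g_idx = free_pages % PAGES_PER_POOL;

	printf("pool %lu, slot %lu (%lu pages per pool)\n",
	       p_idx, g_idx, (unsigned long)PAGES_PER_POOL);

	/* consuming one page advances the cursor exactly like the loop */
	if (++g_idx == PAGES_PER_POOL) {
		p_idx++;
		g_idx = 0;
	}
	printf("next: pool %lu, slot %lu\n", p_idx, g_idx);
	return 0;
}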
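The new comment on sptlrpc_get_bulk_checksum() describes truncating a large digest to fit the caller's buffer: hash into a stack buffer sized for the largest supported digest, then copy only the first buflen bytes out. The following self-contained sketch mirrors just that truncation branch; compute_digest() is a hypothetical stand-in for the cfs_crypto_hash_init/update/final sequence, and DIGESTSIZE_MAX stands in for CFS_CRYPTO_HASH_DIGESTSIZE_MAX.

#include <stdio.h>
#include <string.h>

#define DIGESTSIZE_MAX 64  /* assumed; 64 bytes covers e.g. SHA-512 */

/* hypothetical stand-in for the crypto-hash sequence: fabricates
 * hashsize pseudo-digest bytes so the sketch runs on its own */
static int compute_digest(unsigned char *out, unsigned int hashsize)
{
	unsigned int i;

	for (i = 0; i < hashsize; i++)
		out[i] = (unsigned char)(i * 31u + 7u);
	return 0;
}

/* mirror of the truncation logic: if the digest is larger than the
 * caller's buffer, hash into a max-sized buffer and truncate */
static int get_checksum(void *buf, int buflen, unsigned int hashsize)
{
	int err;

	if (hashsize > (unsigned int)buflen) {
		unsigned char hashbuf[DIGESTSIZE_MAX];

		err = compute_digest(hashbuf, hashsize);
		memcpy(buf, hashbuf, buflen);   /* keep only buflen bytes */
	} else {
		err = compute_digest(buf, hashsize);
	}
	return err;
}

int main(void)
{
	unsigned char csum[4];  /* caller only has room for 4 bytes */

	/* a 64-byte digest truncated into a 4-byte checksum buffer,
	 * matching the LASSERT(buflen >= 4) minimum in the patch */
	get_checksum(csum, sizeof(csum), DIGESTSIZE_MAX);
	printf("%02x%02x%02x%02x\n", csum[0], csum[1], csum[2], csum[3]);
	return 0;
}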