-/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
- * vim:expandtab:shiftwidth=8:tabstop=8:
+/*
+ * GPL HEADER START
*
- * lustre/obdclass/capa.c
- * Lustre Capability Cache Management
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
- * Copyright (c) 2001-2003 Cluster File Systems, Inc.
- * Author: Lai Siyao<lsy@clusterfs.com>
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
*
- * This file is part of Lustre, http://www.lustre.org.
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
*
- * Lustre is free software; you can redistribute it and/or
- * modify it under the terms of version 2 of the GNU General Public
- * License as published by the Free Software Foundation.
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
*
- * Lustre is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
*
- * You should have received a copy of the GNU General Public License
- * along with Lustre; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * lustre/obdclass/capa.c
+ *
+ * Lustre Capability Hash Management
+ *
+ * Author: Lai Siyao<lsy@clusterfs.com>
*/
-#ifndef EXPORT_SYMTAB
-# define EXPORT_SYMTAB
-#endif
#define DEBUG_SUBSYSTEM S_SEC
#include <linux/module.h>
#include <linux/init.h>
-#include <linux/obd_class.h>
-#include <linux/lustre_debug.h>
-#include <linux/lustre_idl.h>
-#include <linux/lustre_sec.h>
+#include <obd_class.h>
+#include <lustre_debug.h>
+#include <lustre/lustre_idl.h>
#else
#include <liblustre.h>
#endif
#include <libcfs/list.h>
-#include <linux/lustre_sec.h>
+#include <lustre_capa.h>
-kmem_cache_t *capa_cachep = NULL;
+#define NR_CAPAHASH 32
+#define CAPA_HASH_SIZE 3000 /* for MDS & OSS */
-/* capa_lock protect capa hash, list and content. */
-spinlock_t capa_lock = SPIN_LOCK_UNLOCKED;
-struct hlist_head *capa_hash;
-struct list_head capa_list[3];
-static int capa_count[3] = { 0 };
+cfs_mem_cache_t *capa_cachep = NULL;
-/* TODO: mdc and llite all need this, so define it here.
- * in the future it will be moved to ll_sb_info to support multi-
- * mount point */
-struct timer_list ll_capa_timer;
-
-EXPORT_SYMBOL(capa_lock);
-EXPORT_SYMBOL(capa_hash);
-EXPORT_SYMBOL(capa_list);
-EXPORT_SYMBOL(ll_capa_timer);
+#ifdef __KERNEL__
+/* lock for capa hash/capa_list/fo_capa_keys */
+cfs_spinlock_t capa_lock = CFS_SPIN_LOCK_UNLOCKED;
-static inline int const
-capa_hashfn(unsigned int uid, int capa_op, __u64 mdsid, unsigned long ino)
-{
- return (ino ^ uid) * (unsigned long)capa_op * (unsigned long)mdsid %
- NR_CAPAHASH;
-}
+cfs_list_t capa_list[CAPA_SITE_MAX];
-int capa_op(int flags)
-{
- if (flags & (FMODE_WRITE|MDS_OPEN_TRUNC))
- return MAY_WRITE;
- else if (flags & FMODE_READ)
- return MAY_READ;
+static struct capa_hmac_alg capa_hmac_algs[] = {
+ DEF_CAPA_HMAC_ALG("sha1", SHA1, 20, 20),
+};
+#endif
+/* capa count */
+int capa_count[CAPA_SITE_MAX] = { 0, };
- LBUG(); /* should be either MAY_READ or MAY_WRITE */
- return 0;
-}
+EXPORT_SYMBOL(capa_cachep);
+EXPORT_SYMBOL(capa_list);
+EXPORT_SYMBOL(capa_lock);
+EXPORT_SYMBOL(capa_count);
-static struct obd_capa *
-find_capa(struct hlist_head *head, uid_t uid, int capa_op, __u64 mdsid,
- unsigned long ino, int type)
+cfs_hlist_head_t *init_capa_hash(void)
{
- struct hlist_node *pos;
- struct obd_capa *ocapa;
- uid_t ouid;
-
- hlist_for_each_entry(ocapa, pos, head, c_hash) {
- if (ocapa->c_capa.lc_ino != ino)
- continue;
- if (ocapa->c_capa.lc_mdsid != mdsid)
- continue;
- if (ocapa->c_capa.lc_op != capa_op)
- continue;
- if (ocapa->c_type != type)
- continue;
-
- if (ocapa->c_type == CLIENT_CAPA &&
- ocapa->c_capa.lc_flags & CAPA_FL_REMUID)
- ouid = ocapa->c_capa.lc_ruid;
- else
- ouid = ocapa->c_capa.lc_uid;
-
- if (ouid != uid)
- continue;
+ cfs_hlist_head_t *hash;
+ int nr_hash, i;
- DEBUG_CAPA(D_CACHE, &ocapa->c_capa, "found");
+ OBD_ALLOC(hash, CFS_PAGE_SIZE);
+ if (!hash)
+ return NULL;
- return ocapa;
- }
+ nr_hash = CFS_PAGE_SIZE / sizeof(cfs_hlist_head_t);
+ LASSERT(nr_hash > NR_CAPAHASH);
- return NULL;
+ for (i = 0; i < NR_CAPAHASH; i++)
+ CFS_INIT_HLIST_HEAD(hash + i);
+ return hash;
}
-inline void __capa_get(struct obd_capa *ocapa)
+#ifdef __KERNEL__
+static inline int capa_on_server(struct obd_capa *ocapa)
{
- atomic_inc(&ocapa->c_refc);
+ return ocapa->c_site == CAPA_SITE_SERVER;
}
-static struct obd_capa *
-find_capa_locked(struct hlist_head *head, uid_t uid, int capa_op, __u64 mdsid,
- unsigned long ino, int type)
+static inline void capa_delete(struct obd_capa *ocapa)
{
- struct obd_capa *ocapa;
-
- spin_lock(&capa_lock);
- ocapa = find_capa(head, uid, capa_op, mdsid, ino, type);
- if (ocapa)
- __capa_get(ocapa);
- spin_unlock(&capa_lock);
-
- return ocapa;
+ LASSERT(capa_on_server(ocapa));
+ cfs_hlist_del_init(&ocapa->u.tgt.c_hash);
+ cfs_list_del_init(&ocapa->c_list);
+ capa_count[ocapa->c_site]--;
+	/* release the reference taken at allocation time */
+ capa_put(ocapa);
}
-static struct obd_capa *alloc_capa(void)
+void cleanup_capa_hash(cfs_hlist_head_t *hash)
{
- struct obd_capa *ocapa;
-
- OBD_SLAB_ALLOC(ocapa, capa_cachep, SLAB_NOFS, sizeof(*ocapa));
- if (ocapa) {
- INIT_HLIST_NODE(&ocapa->c_hash);
- INIT_LIST_HEAD(&ocapa->c_list);
+ int i;
+ cfs_hlist_node_t *pos, *next;
+ struct obd_capa *oc;
+
+ cfs_spin_lock(&capa_lock);
+ for (i = 0; i < NR_CAPAHASH; i++) {
+ cfs_hlist_for_each_entry_safe(oc, pos, next, hash + i,
+ u.tgt.c_hash)
+ capa_delete(oc);
}
+ cfs_spin_unlock(&capa_lock);
- return ocapa;
+ OBD_FREE(hash, CFS_PAGE_SIZE);
}
-static void destroy_capa(struct obd_capa *ocapa)
+static inline int capa_hashfn(struct lu_fid *fid)
{
- OBD_SLAB_FREE(ocapa, capa_cachep, sizeof(*ocapa));
+ return (fid_oid(fid) ^ fid_ver(fid)) *
+ (unsigned long)(fid_seq(fid) + 1) % NR_CAPAHASH;
}
-int capa_cache_init(void)
+/* capa renewal time check is earlier than that on client, which is to prevent
+ * the client from renewing it right after obtaining it. */
+static inline int capa_is_to_expire(struct obd_capa *oc)
{
- int nr_hash, i;
-
- OBD_ALLOC(capa_hash, PAGE_SIZE);
- if (!capa_hash)
- return -ENOMEM;
-
- nr_hash = PAGE_SIZE / sizeof(struct hlist_head);
- LASSERT(nr_hash > NR_CAPAHASH);
-
- for (i = 0; i < NR_CAPAHASH; i++)
- INIT_HLIST_HEAD(capa_hash + i);
-
- for (i = 0; i < 3; i++)
- INIT_LIST_HEAD(&capa_list[i]);
-
- return 0;
+ return cfs_time_before(cfs_time_sub(oc->c_expiry,
+ cfs_time_seconds(oc->c_capa.lc_timeout)*2/3),
+ cfs_time_current());
}
-void capa_cache_cleanup(void)
+static struct obd_capa *find_capa(struct lustre_capa *capa,
+ cfs_hlist_head_t *head, int alive)
{
+ cfs_hlist_node_t *pos;
struct obd_capa *ocapa;
- struct hlist_node *pos, *n;
+ int len = alive ? offsetof(struct lustre_capa, lc_keyid):sizeof(*capa);
- hlist_for_each_entry_safe(ocapa, pos, n, capa_hash, c_hash) {
- LASSERT(ocapa->c_type != CLIENT_CAPA);
- hlist_del(&ocapa->c_hash);
- list_del(&ocapa->c_list);
- OBD_FREE(ocapa, sizeof(*ocapa));
- }
-
- OBD_FREE(capa_hash, PAGE_SIZE);
-}
+ cfs_hlist_for_each_entry(ocapa, pos, head, u.tgt.c_hash) {
+ if (memcmp(&ocapa->c_capa, capa, len))
+ continue;
+ /* don't return one that will expire soon in this case */
+ if (alive && capa_is_to_expire(ocapa))
+ continue;
+ LASSERT(capa_on_server(ocapa));
-static inline void list_add_capa(struct obd_capa *ocapa, struct list_head *head)
-{
- struct obd_capa *tmp;
-
- /* XXX: capa is sorted in client, this could be optimized */
- if (ocapa->c_type == CLIENT_CAPA) {
- list_for_each_entry_reverse(tmp, head, c_list) {
- if (ocapa->c_capa.lc_expiry > tmp->c_capa.lc_expiry) {
- list_add(&ocapa->c_list, &tmp->c_list);
- return;
- }
- }
+ DEBUG_CAPA(D_SEC, &ocapa->c_capa, "found");
+ return ocapa;
}
- list_add_tail(&ocapa->c_list, head);
+ return NULL;
}
-static inline void do_update_capa(struct obd_capa *ocapa, struct lustre_capa *capa)
+#define LRU_CAPA_DELETE_COUNT 12
+static inline void capa_delete_lru(cfs_list_t *head)
{
- memcpy(&ocapa->c_capa, capa, sizeof(*capa));
+ struct obd_capa *ocapa;
+ cfs_list_t *node = head->next;
+ int count = 0;
+
+ /* free LRU_CAPA_DELETE_COUNT unused capa from head */
+ while (count++ < LRU_CAPA_DELETE_COUNT) {
+ ocapa = cfs_list_entry(node, struct obd_capa, c_list);
+ node = node->next;
+ if (cfs_atomic_read(&ocapa->c_refc))
+ continue;
+
+ DEBUG_CAPA(D_SEC, &ocapa->c_capa, "free lru");
+ capa_delete(ocapa);
+ }
}
-static struct obd_capa *
-get_new_capa_locked(struct hlist_head *head, int type, struct lustre_capa *capa,
- struct inode *inode, struct lustre_handle *handle)
+/* add or update */
+struct obd_capa *capa_add(cfs_hlist_head_t *hash, struct lustre_capa *capa)
{
- uid_t uid = capa->lc_uid;
- int capa_op = capa->lc_op;
- __u64 mdsid = capa->lc_mdsid;
- unsigned long ino = capa->lc_ino;
- struct obd_capa *ocapa, *old;
-
- ocapa = alloc_capa();
- if (!ocapa)
+ cfs_hlist_head_t *head = hash + capa_hashfn(&capa->lc_fid);
+ struct obd_capa *ocapa, *old = NULL;
+ cfs_list_t *list = &capa_list[CAPA_SITE_SERVER];
+
+ ocapa = alloc_capa(CAPA_SITE_SERVER);
+ if (IS_ERR(ocapa))
return NULL;
- spin_lock(&capa_lock);
- old = find_capa(head, uid, capa_op, mdsid, ino, type);
+ cfs_spin_lock(&capa_lock);
+ old = find_capa(capa, head, 0);
if (!old) {
- do_update_capa(ocapa, capa);
- ocapa->c_type = type;
-
- if (type == CLIENT_CAPA) {
- LASSERT(inode);
- LASSERT(handle);
-#ifdef __KERNEL__
- igrab(inode);
-#endif
- ocapa->c_inode = inode;
- memcpy(&ocapa->c_handle, handle, sizeof(*handle));
- }
-
- DEBUG_CAPA(D_CACHE, &ocapa->c_capa, "new");
-
- list_add_capa(ocapa, &capa_list[type]);
- hlist_add_head(&ocapa->c_hash, capa_hash);
- capa_count[type]++;
-
- __capa_get(ocapa);
-
- if (type != CLIENT_CAPA && capa_count[type] > CAPA_CACHE_SIZE) {
- struct list_head *node = capa_list[type].next;
- struct obd_capa *tcapa;
- int count = 0;
-
- /* free 12 unused capa from head */
- while (node->next != &capa_list[type] && count < 12) {
- tcapa = list_entry(node, struct obd_capa, c_list);
- node = node->next;
- if (atomic_read(&tcapa->c_refc) > 0)
- continue;
- list_del(&tcapa->c_list);
- hlist_del(&tcapa->c_hash);
- destroy_capa(tcapa);
- capa_count[type]--;
- count++;
- }
- }
-
- spin_unlock(&capa_lock);
+ ocapa->c_capa = *capa;
+ set_capa_expiry(ocapa);
+ cfs_hlist_add_head(&ocapa->u.tgt.c_hash, head);
+ cfs_list_add_tail(&ocapa->c_list, list);
+ capa_get(ocapa);
+ capa_count[CAPA_SITE_SERVER]++;
+ if (capa_count[CAPA_SITE_SERVER] > CAPA_HASH_SIZE)
+ capa_delete_lru(list);
+ cfs_spin_unlock(&capa_lock);
return ocapa;
+ } else {
+ capa_get(old);
+ cfs_spin_unlock(&capa_lock);
+ capa_put(ocapa);
+ return old;
}
-
- __capa_get(old);
- spin_unlock(&capa_lock);
-
- destroy_capa(ocapa);
- return old;
}
-static struct obd_capa *
-capa_get_locked(uid_t uid, int capa_op,__u64 mdsid, unsigned long ino,
- int type, struct lustre_capa *capa, struct inode *inode,
- struct lustre_handle *handle)
+struct obd_capa *capa_lookup(cfs_hlist_head_t *hash, struct lustre_capa *capa,
+ int alive)
{
- struct hlist_head *head = capa_hash +
- capa_hashfn(uid, capa_op, mdsid, ino);
struct obd_capa *ocapa;
- ocapa = find_capa_locked(head, uid, capa_op, mdsid, ino, type);
- if (ocapa)
- return ocapa;
-
- if (capa)
- ocapa = get_new_capa_locked(head, type, capa, inode, handle);
- return ocapa;
-}
+ cfs_spin_lock(&capa_lock);
+ ocapa = find_capa(capa, hash + capa_hashfn(&capa->lc_fid), alive);
+ if (ocapa) {
+ cfs_list_move_tail(&ocapa->c_list,
+ &capa_list[CAPA_SITE_SERVER]);
+ capa_get(ocapa);
+ }
+ cfs_spin_unlock(&capa_lock);
-struct obd_capa *
-capa_get(uid_t uid, int capa_op, __u64 mdsid, unsigned long ino, int type,
- struct lustre_capa *capa, struct inode *inode,
- struct lustre_handle *handle)
-{
- return capa_get_locked(uid, capa_op, mdsid, ino, type, capa, inode,
- handle);
+ return ocapa;
}
-static void __capa_put(struct obd_capa *ocapa, int type)
+int capa_hmac(__u8 *hmac, struct lustre_capa *capa, __u8 *key)
{
- hlist_del_init(&ocapa->c_hash);
- list_del_init(&ocapa->c_list);
- capa_count[type]--;
-}
+ struct ll_crypto_hash *tfm;
+ struct capa_hmac_alg *alg;
+ int keylen;
+ struct scatterlist sl;
+
+ if (capa_alg(capa) != CAPA_HMAC_ALG_SHA1) {
+ CERROR("unknown capability hmac algorithm!\n");
+ return -EFAULT;
+ }
-void capa_put(struct obd_capa *ocapa, int type)
-{
- ENTRY;
+ alg = &capa_hmac_algs[capa_alg(capa)];
- if (ocapa) {
- if (atomic_dec_and_lock(&ocapa->c_refc, &capa_lock)) {
- if (type == CLIENT_CAPA) {
-#ifdef __KERNEL__
- iput(ocapa->c_inode);
-#endif
- __capa_put(ocapa, type);
- destroy_capa(ocapa);
- }
- spin_unlock(&capa_lock);
- }
+ tfm = ll_crypto_alloc_hash(alg->ha_name, 0, 0);
+ if (!tfm) {
+ CERROR("crypto_alloc_tfm failed, check whether your kernel"
+ "has crypto support!\n");
+ return -ENOMEM;
}
+ keylen = alg->ha_keylen;
- EXIT;
+ sg_set_page(&sl, virt_to_page(capa),
+ offsetof(struct lustre_capa, lc_hmac),
+ (unsigned long)(capa) % CFS_PAGE_SIZE);
+
+ ll_crypto_hmac(tfm, key, &keylen, &sl, sl.length, hmac);
+ ll_crypto_free_hash(tfm);
+
+ return 0;
}
-static int update_capa_locked(struct lustre_capa *capa, int type)
+int capa_encrypt_id(__u32 *d, __u32 *s, __u8 *key, int keylen)
{
- uid_t uid = capa->lc_uid;
- int capa_op = capa->lc_op;
- __u64 mdsid = capa->lc_mdsid;
- unsigned long ino = capa->lc_ino;
- struct hlist_head *head = capa_hash +
- capa_hashfn(uid, capa_op, mdsid, ino);
- struct obd_capa *ocapa;
+ struct ll_crypto_cipher *tfm;
+ struct scatterlist sd;
+ struct scatterlist ss;
+ struct blkcipher_desc desc;
+ unsigned int min;
+ int rc;
+ char alg[CRYPTO_MAX_ALG_NAME+1] = "aes";
ENTRY;
- spin_lock(&capa_lock);
- ocapa = find_capa(head, uid, capa_op, mdsid, ino, type);
- if (ocapa)
- do_update_capa(ocapa, capa);
- spin_unlock(&capa_lock);
+ /* passing "aes" in a variable instead of a constant string keeps gcc
+ * 4.3.2 happy */
+ tfm = ll_crypto_alloc_blkcipher(alg, 0, 0 );
+ if (IS_ERR(tfm)) {
+ CERROR("failed to load transform for aes\n");
+ RETURN(PTR_ERR(tfm));
+ }
+
+ min = ll_crypto_tfm_alg_min_keysize(tfm);
+ if (keylen < min) {
+ CERROR("keylen at least %d bits for aes\n", min * 8);
+ GOTO(out, rc = -EINVAL);
+ }
- if (ocapa == NULL && type == MDS_CAPA) {
- ocapa = get_new_capa_locked(head, type, capa, NULL, NULL);
- capa_put(ocapa, type);
+ rc = ll_crypto_blkcipher_setkey(tfm, key, min);
+ if (rc) {
+ CERROR("failed to setting key for aes\n");
+ GOTO(out, rc);
}
- RETURN(ocapa ? 0 : -ENOENT);
-}
+ sg_set_page(&sd, virt_to_page(d), 16,
+ (unsigned long)(d) % CFS_PAGE_SIZE);
+
+ sg_set_page(&ss, virt_to_page(s), 16,
+ (unsigned long)(s) % CFS_PAGE_SIZE);
+ desc.tfm = tfm;
+ desc.info = NULL;
+ desc.flags = 0;
+ rc = ll_crypto_blkcipher_encrypt(&desc, &sd, &ss, 16);
+ if (rc) {
+ CERROR("failed to encrypt for aes\n");
+ GOTO(out, rc);
+ }
-int capa_renew(struct lustre_capa *capa, int type)
-{
- DEBUG_CAPA(D_INFO, capa, "renew");
+ EXIT;
- return update_capa_locked(capa, type);
+out:
+ ll_crypto_free_blkcipher(tfm);
+ return rc;
}
-void capa_hmac(struct crypto_tfm *tfm, __u8 *key, struct lustre_capa *capa)
+int capa_decrypt_id(__u32 *d, __u32 *s, __u8 *key, int keylen)
{
- int keylen = CAPA_KEY_LEN;
- struct scatterlist sl = {
- .page = virt_to_page(capa),
- .offset = (unsigned long)(capa) % PAGE_SIZE,
- .length = sizeof(struct lustre_capa_data),
- };
-
- LASSERT(tfm);
- crypto_hmac(tfm, key, &keylen, &sl, 1, capa->lc_hmac);
-}
+ struct ll_crypto_cipher *tfm;
+ struct scatterlist sd;
+ struct scatterlist ss;
+ struct blkcipher_desc desc;
+ unsigned int min;
+ int rc;
+ char alg[CRYPTO_MAX_ALG_NAME+1] = "aes";
+ ENTRY;
-void capa_dup(void *dst, struct obd_capa *ocapa)
-{
- spin_lock(&capa_lock);
- memcpy(dst, &ocapa->c_capa, sizeof(ocapa->c_capa));
- spin_unlock(&capa_lock);
-}
+ /* passing "aes" in a variable instead of a constant string keeps gcc
+ * 4.3.2 happy */
+ tfm = ll_crypto_alloc_blkcipher(alg, 0, 0 );
+ if (IS_ERR(tfm)) {
+ CERROR("failed to load transform for aes\n");
+ RETURN(PTR_ERR(tfm));
+ }
+
+ min = ll_crypto_tfm_alg_min_keysize(tfm);
+ if (keylen < min) {
+ CERROR("keylen at least %d bits for aes\n", min * 8);
+ GOTO(out, rc = -EINVAL);
+ }
-void capa_dup2(void *dst, struct lustre_capa *capa)
-{
- spin_lock(&capa_lock);
- memcpy(dst, capa, sizeof(*capa));
- spin_unlock(&capa_lock);
-}
+ rc = ll_crypto_blkcipher_setkey(tfm, key, min);
+ if (rc) {
+ CERROR("failed to setting key for aes\n");
+ GOTO(out, rc);
+ }
-int capa_expired(struct lustre_capa *capa)
-{
- struct timeval tv;
+ sg_set_page(&sd, virt_to_page(d), 16,
+ (unsigned long)(d) % CFS_PAGE_SIZE);
- do_gettimeofday(&tv);
- return (capa->lc_expiry < tv.tv_sec) ? 1 : 0;
-}
+ sg_set_page(&ss, virt_to_page(s), 16,
+ (unsigned long)(s) % CFS_PAGE_SIZE);
-int __capa_is_to_expire(struct obd_capa *ocapa)
-{
- struct timeval tv;
- int pre_expiry = capa_pre_expiry(&ocapa->c_capa);
+ desc.tfm = tfm;
+ desc.info = NULL;
+ desc.flags = 0;
+ rc = ll_crypto_blkcipher_decrypt(&desc, &sd, &ss, 16);
+ if (rc) {
+ CERROR("failed to decrypt for aes\n");
+ GOTO(out, rc);
+ }
+
+ EXIT;
- do_gettimeofday(&tv);
- return (ocapa->c_capa.lc_expiry - pre_expiry - 1 <= tv.tv_sec)? 1 : 0;
+out:
+ ll_crypto_free_blkcipher(tfm);
+ return rc;
}
+#endif
-int capa_is_to_expire(struct obd_capa *ocapa)
+void capa_cpy(void *capa, struct obd_capa *ocapa)
{
- int rc;
-
- spin_lock(&capa_lock);
- rc = __capa_is_to_expire(ocapa);
- spin_unlock(&capa_lock);
+ cfs_spin_lock(&ocapa->c_lock);
+ *(struct lustre_capa *)capa = ocapa->c_capa;
+ cfs_spin_unlock(&ocapa->c_lock);
+}
- return rc;
+void _debug_capa(struct lustre_capa *c,
+ struct libcfs_debug_msg_data *msgdata,
+ const char *fmt, ... )
+{
+ va_list args;
+ va_start(args, fmt);
+ libcfs_debug_vmsg2(msgdata, fmt, args,
+ " capability@%p fid "DFID" opc "LPX64" uid "LPU64
+ " gid "LPU64" flags %u alg %d keyid %u timeout %u "
+ "expiry %u\n", c, PFID(capa_fid(c)), capa_opc(c),
+ capa_uid(c), capa_gid(c), capa_flags(c),
+ capa_alg(c), capa_keyid(c), capa_timeout(c),
+ capa_expiry(c));
+ va_end(args);
}
+EXPORT_SYMBOL(_debug_capa);
-EXPORT_SYMBOL(capa_op);
-EXPORT_SYMBOL(capa_get);
-EXPORT_SYMBOL(capa_put);
-EXPORT_SYMBOL(capa_renew);
-EXPORT_SYMBOL(__capa_get);
+EXPORT_SYMBOL(init_capa_hash);
+EXPORT_SYMBOL(cleanup_capa_hash);
+EXPORT_SYMBOL(capa_add);
+EXPORT_SYMBOL(capa_lookup);
EXPORT_SYMBOL(capa_hmac);
-EXPORT_SYMBOL(capa_dup);
-EXPORT_SYMBOL(capa_dup2);
-EXPORT_SYMBOL(capa_expired);
-EXPORT_SYMBOL(__capa_is_to_expire);
-EXPORT_SYMBOL(capa_is_to_expire);
+EXPORT_SYMBOL(capa_encrypt_id);
+EXPORT_SYMBOL(capa_decrypt_id);
+EXPORT_SYMBOL(capa_cpy);