/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * Copyright (C) 2002 Cluster File Systems, Inc.
 *
 * This file is part of Lustre, http://www.lustre.org.
 *
 * Lustre is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * Lustre is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with Lustre; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#define DEBUG_SUBSYSTEM S_RPC

#include <linux/lustre_net.h>
27 static spinlock_t conn_lock;
28 static struct list_head conn_list;
29 static struct list_head conn_unused_list;
31 struct ptlrpc_connection *ptlrpc_get_connection(struct lustre_peer *peer)
33 struct list_head *tmp, *pos;
34 struct ptlrpc_connection *c;
37 spin_lock(&conn_lock);
38 list_for_each(tmp, &conn_list) {
39 c = list_entry(tmp, struct ptlrpc_connection, c_link);
40 if (memcmp(peer, &c->c_peer, sizeof(*peer)) == 0) {
41 atomic_inc(&c->c_refcount);
46 list_for_each_safe(tmp, pos, &conn_unused_list) {
47 c = list_entry(tmp, struct ptlrpc_connection, c_link);
48 if (memcmp(peer, &c->c_peer, sizeof(*peer)) == 0) {
49 atomic_inc(&c->c_refcount);
51 list_add(&c->c_link, &conn_list);
56 /* FIXME: this should be a slab once we can validate slab addresses
58 OBD_ALLOC(c, sizeof(*c));
62 c->c_level = LUSTRE_CONN_NEW;
68 atomic_set(&c->c_refcount, 1);
69 spin_lock_init(&c->c_lock);
71 memcpy(&c->c_peer, peer, sizeof(c->c_peer));
72 list_add(&c->c_link, &conn_list);
76 spin_unlock(&conn_lock);
80 int ptlrpc_put_connection(struct ptlrpc_connection *c)
85 if (atomic_dec_and_test(&c->c_refcount)) {
86 spin_lock(&conn_lock);
88 list_add(&c->c_link, &conn_unused_list);
89 spin_unlock(&conn_lock);
92 if (atomic_read(&c->c_refcount) < 0)
93 CERROR("refcount < 0!\n");
98 struct ptlrpc_connection *ptlrpc_connection_addref(struct ptlrpc_connection *c)
101 atomic_inc(&c->c_refcount);
105 void ptlrpc_init_connection(void)
107 INIT_LIST_HEAD(&conn_list);
108 INIT_LIST_HEAD(&conn_unused_list);
109 conn_lock = SPIN_LOCK_UNLOCKED;
112 void ptlrpc_cleanup_connection(void)
114 struct list_head *tmp, *pos;
115 struct ptlrpc_connection *c;
117 spin_lock(&conn_lock);
118 list_for_each_safe(tmp, pos, &conn_unused_list) {
119 c = list_entry(tmp, struct ptlrpc_connection, c_link);
120 list_del(&c->c_link);
121 OBD_FREE(c, sizeof(*c));
123 list_for_each_safe(tmp, pos, &conn_list) {
124 c = list_entry(tmp, struct ptlrpc_connection, c_link);
125 CERROR("Connection %p has refcount %d at cleanup (nid=%lu)!\n",
126 c, atomic_read(&c->c_refcount),
127 (unsigned long)c->c_peer.peer_nid);
128 list_del(&c->c_link);
129 OBD_FREE(c, sizeof(*c));
131 spin_unlock(&conn_lock);