lustre/ptlrpc/connection.c (fs/lustre-release.git)
/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 *  Copyright (C) 2002 Cluster File Systems, Inc.
 *
 *   This file is part of Lustre, http://www.lustre.org.
 *
 *   Lustre is free software; you can redistribute it and/or
 *   modify it under the terms of version 2 of the GNU General Public
 *   License as published by the Free Software Foundation.
 *
 *   Lustre is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with Lustre; if not, write to the Free Software
 *   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 */

#define DEBUG_SUBSYSTEM S_RPC

#include <linux/obd_support.h>
#include <linux/lustre_net.h>

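/* Active connections live on conn_list.  Connections whose last
 * reference has been dropped are parked on conn_unused_list so they can
 * be reused by a later lookup for the same peer.  Both lists are
 * protected by conn_lock. */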
static spinlock_t conn_lock;
static struct list_head conn_list;
static struct list_head conn_unused_list;

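/* Find or create a connection to the given peer.
 *
 * The active list is searched first, then the unused list; a match found
 * on the unused list is moved back to the active list.  If nothing
 * matches, a new connection is allocated and initialised.  A NULL uuid
 * matches any uuid for the peer.  The returned connection carries a
 * reference that the caller must drop with ptlrpc_put_connection(). */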
struct ptlrpc_connection *ptlrpc_get_connection(struct lustre_peer *peer,
                                                char *uuid)
{
        struct list_head *tmp, *pos;
        struct ptlrpc_connection *c;
        ENTRY;

        CDEBUG(D_INFO, "peer is %08x %08lx %08lx\n",
               peer->peer_nid, peer->peer_ni.nal_idx, peer->peer_ni.handle_idx);

        spin_lock(&conn_lock);
        list_for_each(tmp, &conn_list) {
                c = list_entry(tmp, struct ptlrpc_connection, c_link);
                if (memcmp(peer, &c->c_peer, sizeof(*peer)) == 0 &&
                    (!uuid || strcmp(c->c_remote_uuid, uuid) == 0)) {
                        ptlrpc_connection_addref(c);
                        GOTO(out, c);
                }
        }

        list_for_each_safe(tmp, pos, &conn_unused_list) {
                c = list_entry(tmp, struct ptlrpc_connection, c_link);
                if (memcmp(peer, &c->c_peer, sizeof(*peer)) == 0 &&
                    (!uuid || strcmp(c->c_remote_uuid, uuid) == 0)) {
                        ptlrpc_connection_addref(c);
                        list_del(&c->c_link);
                        list_add(&c->c_link, &conn_list);
                        GOTO(out, c);
                }
        }

        /* FIXME: this should be a slab once we can validate slab addresses
         * without OOPSing */
        OBD_ALLOC(c, sizeof(*c));
        if (c == NULL)
                GOTO(out, c);

        c->c_level = LUSTRE_CONN_NEW;
        c->c_xid_in = 1;
        c->c_xid_out = 1;
        c->c_generation = 1;
        c->c_epoch = 1;
        c->c_bootcount = 0;
        /* uuid may be NULL (wildcard match above); don't strcpy() NULL */
        if (uuid != NULL)
                strcpy(c->c_remote_uuid, uuid);
        else
                c->c_remote_uuid[0] = '\0';
        INIT_LIST_HEAD(&c->c_delayed_head);
        INIT_LIST_HEAD(&c->c_sending_head);
        INIT_LIST_HEAD(&c->c_dying_head);
        INIT_LIST_HEAD(&c->c_clients);
        INIT_LIST_HEAD(&c->c_exports);
        atomic_set(&c->c_refcount, 0);
        ptlrpc_connection_addref(c);
        spin_lock_init(&c->c_lock);

        memcpy(&c->c_peer, peer, sizeof(c->c_peer));
        list_add(&c->c_link, &conn_list);

        EXIT;
 out:
        spin_unlock(&conn_lock);
        return c;
}

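/* Drop a reference on a connection.  When the last reference goes away
 * the connection is not freed; it is moved to conn_unused_list for
 * possible reuse and only released in ptlrpc_cleanup_connection().
 * Returns 1 if this call dropped the last reference, 0 otherwise. */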
int ptlrpc_put_connection(struct ptlrpc_connection *c)
{
        int rc = 0;
        ENTRY;

        CDEBUG(D_INFO, "connection=%p refcount %d\n",
               c, atomic_read(&c->c_refcount) - 1);
        if (atomic_dec_and_test(&c->c_refcount)) {
                spin_lock(&conn_lock);
                list_del(&c->c_link);
                list_add(&c->c_link, &conn_unused_list);
                spin_unlock(&conn_lock);
                rc = 1;
        }
        if (atomic_read(&c->c_refcount) < 0)
                CERROR("connection %p refcount %d!\n",
                       c, atomic_read(&c->c_refcount));

        RETURN(rc);
}

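/* Take an extra reference on a connection and return it so the call can
 * be chained. */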
struct ptlrpc_connection *ptlrpc_connection_addref(struct ptlrpc_connection *c)
{
        ENTRY;
        CDEBUG(D_INFO, "connection=%p refcount %d\n",
               c, atomic_read(&c->c_refcount) + 1);
        atomic_inc(&c->c_refcount);
        RETURN(c);
}

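/* Initialise the global connection lists and their lock; this must run
 * before any connections are created. */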
void ptlrpc_init_connection(void)
{
        INIT_LIST_HEAD(&conn_list);
        INIT_LIST_HEAD(&conn_unused_list);
        conn_lock = SPIN_LOCK_UNLOCKED;
}

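/* Free every cached connection at cleanup time.  Anything still on the
 * active list here has a leaked reference, so complain before freeing. */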
void ptlrpc_cleanup_connection(void)
{
        struct list_head *tmp, *pos;
        struct ptlrpc_connection *c;

        spin_lock(&conn_lock);
        list_for_each_safe(tmp, pos, &conn_unused_list) {
                c = list_entry(tmp, struct ptlrpc_connection, c_link);
                list_del(&c->c_link);
                OBD_FREE(c, sizeof(*c));
        }
        list_for_each_safe(tmp, pos, &conn_list) {
                c = list_entry(tmp, struct ptlrpc_connection, c_link);
                CERROR("Connection %p has refcount %d at cleanup (nid=%lu)!\n",
                       c, atomic_read(&c->c_refcount),
                       (unsigned long)c->c_peer.peer_nid);
                list_del(&c->c_link);
                OBD_FREE(c, sizeof(*c));
        }
        spin_unlock(&conn_lock);
}