1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
5 * Start up the internal library and clear all structures
6 * Called by the NAL when it initializes. Safe to call multiple times.
8 * Copyright (c) 2001-2003 Cluster File Systems, Inc.
9 * Copyright (c) 2001-2002 Sandia National Laboratories
11 * This file is part of Lustre, http://www.sf.net/projects/lustre/
13 * Lustre is free software; you can redistribute it and/or
14 * modify it under the terms of version 2 of the GNU General Public
15 * License as published by the Free Software Foundation.
17 * Lustre is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
22 * You should have received a copy of the GNU General Public License
23 * along with Lustre; if not, write to the Free Software
24 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
27 # define DEBUG_SUBSYSTEM S_PORTALS
28 #include <portals/lib-p30.h>
31 # include <linux/string.h> /* for memset() */
32 # include <linux/kp30.h>
33 # ifdef KERNEL_ADDR_CACHE
34 # include <compute/OS/addrCache/cache.h>
38 # include <sys/time.h>
/* Slab/atomic-counter variant of descriptor accounting, compiled when
 * PTL_USE_DESC_LISTS is NOT defined.  NOTE(review): this listing appears
 * elided -- the matching #else/#endif are not visible here. */
41 #ifndef PTL_USE_DESC_LISTS
42 static int ptl_slab_users; /* number of NALs sharing the descriptor slabs */
/* Per-type counts of live descriptors; asserted zero by the last
 * kportal_descriptor_cleanup() caller (see LASSERTs below). */
44 atomic_t md_in_use_count = ATOMIC_INIT(0);
45 atomic_t msg_in_use_count = ATOMIC_INIT(0);
46 atomic_t me_in_use_count = ATOMIC_INIT(0);
47 atomic_t eq_in_use_count = ATOMIC_INIT(0);
/* Take one NAL reference on the shared descriptor slabs.
 * NOTE(review): the body is not visible in this listing (lines elided);
 * presumably it increments ptl_slab_users -- confirm against full source. */
50 kportal_descriptor_setup (nal_cb_t *nal)
/* Drop one NAL's reference on the shared descriptor slabs.  Only the
 * last user (refcount reaching zero) runs the sanity checks: every
 * descriptor type must have been freed before shutdown. */
57 kportal_descriptor_cleanup (nal_cb_t *nal)
59 if (--ptl_slab_users != 0) /* other NALs still using the slabs */
62 LASSERT (atomic_read (&md_in_use_count) == 0);
63 LASSERT (atomic_read (&me_in_use_count) == 0);
64 LASSERT (atomic_read (&eq_in_use_count) == 0);
65 LASSERT (atomic_read (&msg_in_use_count) == 0);
/* Pre-allocate a freelist of 'n' objects of 'size' bytes each out of a
 * single cb_malloc()ed arena, chaining every object onto fl->fl_list.
 * NOTE(review): the allocation-failure check, per-object loop header and
 * return statements are elided from this listing -- confirm error paths
 * (PTL_NOSPACE vs PTL_OK) against the full source. */
70 lib_freelist_init (nal_cb_t *nal, lib_freelist_t *fl, int n, int size)
76 size += offsetof (lib_freeobj_t, fo_contents); /* room for the freelist link header */
78 space = nal->cb_malloc (nal, n * size); /* one arena for all n objects */
82 INIT_LIST_HEAD (&fl->fl_list);
85 fl->fl_objsize = size;
89 memset (space, 0, size); /* objects start zeroed */
90 list_add ((struct list_head *)space, &fl->fl_list);
98 lib_freelist_fini (nal_cb_t *nal, lib_freelist_t *fl)
100 struct list_head *el;
103 if (fl->fl_nobjs == 0)
107 for (el = fl->fl_list.next; el != &fl->fl_list; el = el->next)
110 LASSERT (count == fl->fl_nobjs);
112 nal->cb_free (nal, fl->fl_objs, fl->fl_nobjs * fl->fl_objsize);
113 memset (fl, 0, sizeof (fl));
/* Freelist (PTL_USE_DESC_LISTS) variant: carve out the four per-interface
 * descriptor freelists (MEs, msgs, MDs, EQs).  The memsets guarantee each
 * freelist is in a clean state so cleanup is safe even after a partial
 * failure.  NOTE(review): the 'if (rc != PTL_OK) return rc;' lines between
 * the init calls are elided from this listing -- confirm. */
117 kportal_descriptor_setup (nal_cb_t *nal)
119 /* NB on failure caller must still call kportal_descriptor_cleanup */
123 memset (&nal->ni.ni_free_mes, 0, sizeof (nal->ni.ni_free_mes));
124 memset (&nal->ni.ni_free_msgs, 0, sizeof (nal->ni.ni_free_msgs));
125 memset (&nal->ni.ni_free_mds, 0, sizeof (nal->ni.ni_free_mds));
126 memset (&nal->ni.ni_free_eqs, 0, sizeof (nal->ni.ni_free_eqs));
128 rc = lib_freelist_init (nal, &nal->ni.ni_free_mes,
129 MAX_MES, sizeof (lib_me_t));
133 rc = lib_freelist_init (nal, &nal->ni.ni_free_msgs,
134 MAX_MSGS, sizeof (lib_msg_t));
138 rc = lib_freelist_init (nal, &nal->ni.ni_free_mds,
139 MAX_MDS, sizeof (lib_md_t));
143 rc = lib_freelist_init (nal, &nal->ni.ni_free_eqs,
144 MAX_EQS, sizeof (lib_eq_t));
/* Freelist variant of cleanup: release all four descriptor freelists.
 * Safe after a partial setup because setup zeroed each freelist first
 * and lib_freelist_fini() no-ops on an all-zero freelist. */
149 kportal_descriptor_cleanup (nal_cb_t *nal)
151 lib_freelist_fini (nal, &nal->ni.ni_free_mes);
152 lib_freelist_fini (nal, &nal->ni.ni_free_msgs);
153 lib_freelist_fini (nal, &nal->ni.ni_free_mds);
154 lib_freelist_fini (nal, &nal->ni.ni_free_eqs);
/* Generate a (quasi-)unique cookie for this interface instance, derived
 * from the time of day.  NOTE(review): both the userspace gettimeofday()
 * and kernel do_gettimeofday() arms appear here with their #ifdef
 * markers and the final cookie construction elided -- confirm which
 * branch applies and how tv_sec is folded in. */
160 lib_create_interface_cookie (nal_cb_t *nal)
162 /* NB the interface cookie in wire handles guards against delayed
163 * replies and ACKs appearing valid in a new instance of the same
164 * interface. Initialisation time, even if it's only implemented
165 * to millisecond resolution is probably easily good enough. */
169 int rc = gettimeofday (&tv, NULL); /* userspace arm */
172 do_gettimeofday(&tv); /* kernel arm */
176 cookie += tv.tv_usec; /* mix in sub-second resolution */
/* Allocate and initialise the object-handle (cookie) hash table for this
 * interface.  Returns PTL_NOSPACE if the table can't be allocated.
 * NOTE(review): the #ifdef selecting between the PAGE_SIZE-based and the
 * MAX_*-based sizing (kernel vs userspace, presumably) is elided from
 * this listing -- confirm. */
181 lib_setup_handle_hash (nal_cb_t *nal)
183 lib_ni_t *ni = &nal->ni;
186 /* Arbitrary choice of hash table size */
188 ni->ni_lh_hash_size = PAGE_SIZE / sizeof (struct list_head);
190 ni->ni_lh_hash_size = (MAX_MES + MAX_MDS + MAX_EQS)/4;
192 ni->ni_lh_hash_table =
193 (struct list_head *)nal->cb_malloc (nal, ni->ni_lh_hash_size
194 * sizeof (struct list_head));
195 if (ni->ni_lh_hash_table == NULL)
196 return (PTL_NOSPACE);
/* every bucket starts as an empty chain */
198 for (i = 0; i < ni->ni_lh_hash_size; i++)
199 INIT_LIST_HEAD (&ni->ni_lh_hash_table[i]);
/* first cookie leaves the low bits free for the type tag */
201 ni->ni_next_object_cookie = PTL_COOKIE_TYPES;
/* Free the handle hash table; no-op if it was never allocated, so this
 * is safe to call on a partially-initialised interface. */
207 lib_cleanup_handle_hash (nal_cb_t *nal)
209 lib_ni_t *ni = &nal->ni;
211 if (ni->ni_lh_hash_table == NULL)
214 nal->cb_free (nal, ni->ni_lh_hash_table,
215 ni->ni_lh_hash_size * sizeof (struct list_head));
/* Look up the lib_handle_t with the given cookie, checking first that
 * the cookie's low bits carry the expected object type tag.  Walks the
 * cookie's hash bucket; NOTE(review): the found/not-found return
 * statements are elided from this listing. */
219 lib_lookup_cookie (nal_cb_t *nal, __u64 cookie, int type)
221 /* ALWAYS called with statelock held */
222 lib_ni_t *ni = &nal->ni;
223 struct list_head *list;
224 struct list_head *el;
/* type tag lives in the low log2(PTL_COOKIE_TYPES) bits of the cookie */
227 if ((cookie & (PTL_COOKIE_TYPES - 1)) != type)
230 hash = ((unsigned int)cookie) % ni->ni_lh_hash_size;
231 list = &ni->ni_lh_hash_table[hash];
233 list_for_each (el, list) {
234 lib_handle_t *lh = list_entry (el, lib_handle_t, lh_hash_chain);
236 if (lh->lh_cookie == cookie)
/* Assign a fresh cookie (next counter value OR'd with the type tag) to
 * 'lh' and insert it into the handle hash so lib_lookup_cookie() can
 * find it.  Advancing the counter by PTL_COOKIE_TYPES keeps the
 * low-order type bits clear for the tag. */
244 lib_initialise_handle (nal_cb_t *nal, lib_handle_t *lh, int type)
246 /* ALWAYS called with statelock held */
247 lib_ni_t *ni = &nal->ni;
250 LASSERT (type >= 0 && type < PTL_COOKIE_TYPES);
251 lh->lh_cookie = ni->ni_next_object_cookie | type;
252 ni->ni_next_object_cookie += PTL_COOKIE_TYPES;
254 hash = ((unsigned int)lh->lh_cookie) % ni->ni_lh_hash_size;
255 list_add (&lh->lh_hash_chain, &ni->ni_lh_hash_table[hash]);
/* Remove 'lh' from the handle hash so its cookie can no longer be
 * resolved by lib_lookup_cookie(). */
259 lib_invalidate_handle (nal_cb_t *nal, lib_handle_t *lh)
261 list_del (&lh->lh_hash_chain);
/* Start up the internal library state for one interface: descriptor
 * pools, handle hash, interface cookie and the portal table of
 * 'ptl_size' buckets.  Called by the NAL; serialised by PtlNIInit() and
 * refcounted so repeated calls are safe.  NOTE(review): the error
 * checks/returns after kportal_descriptor_setup(), lib_setup_handle_hash()
 * and the tbl allocation, plus the nid/pid/acl_size handling, are elided
 * from this listing -- confirm the failure unwind order against the
 * cleanup calls at the end. */
265 lib_init(nal_cb_t * nal, ptl_nid_t nid, ptl_pid_t pid, int gsize,
266 ptl_pt_index_t ptl_size, ptl_ac_index_t acl_size)
269 lib_ni_t *ni = &nal->ni;
273 /* NB serialised in PtlNIInit() */
275 if (ni->refcnt != 0) { /* already initialised */
280 lib_assert_wire_constants ();
283 * Allocate the portal table for this interface
284 * and all per-interface objects.
286 memset(&ni->counters, 0, sizeof(lib_counters_t));
288 rc = kportal_descriptor_setup (nal);
/* active-object lists start empty */
292 INIT_LIST_HEAD (&ni->ni_active_msgs);
293 INIT_LIST_HEAD (&ni->ni_active_mds);
294 INIT_LIST_HEAD (&ni->ni_active_eqs);
296 INIT_LIST_HEAD (&ni->ni_test_peers);
298 ni->ni_interface_cookie = lib_create_interface_cookie (nal);
299 ni->ni_next_object_cookie = 0;
300 rc = lib_setup_handle_hash (nal);
307 ni->num_nodes = gsize;
308 ni->tbl.size = ptl_size;
310 ni->tbl.tbl = nal->cb_malloc(nal, sizeof(struct list_head) * ptl_size);
311 if (ni->tbl.tbl == NULL) {
316 for (i = 0; i < ptl_size; i++)
317 INIT_LIST_HEAD(&(ni->tbl.tbl[i]));
319 ni->debug = PTL_DEBUG_NONE;
/* failure unwind: release what was set up above, in reverse order */
325 lib_cleanup_handle_hash (nal);
326 kportal_descriptor_cleanup (nal);
/* Tear down the internal library state for one interface: forcibly
 * unlink and free any descriptors still active (logging each one as an
 * error), then release the portal table, handle hash and descriptor
 * pools.  NOTE(review): the refcnt decrement/early-return and loop
 * braces are elided from this listing -- confirm this only runs on the
 * last reference, as the comment below asserts. */
333 lib_fini(nal_cb_t * nal)
335 lib_ni_t *ni = &nal->ni;
343 /* NB no stat_lock() since this is the last reference. The NAL
344 * should have shut down already, so it should be safe to unlink
345 * and free all descriptors, even those that appear committed to a
346 * network op (eg MD with non-zero pending count)
/* drain every portal-table bucket of leftover MEs */
349 for (idx = 0; idx < ni->tbl.size; idx++)
350 while (!list_empty (&ni->tbl.tbl[idx])) {
351 lib_me_t *me = list_entry (ni->tbl.tbl[idx].next,
354 CERROR ("Active me %p on exit\n", me);
355 list_del (&me->me_list);
356 lib_me_free (nal, me);
/* then leftover MDs, EQs and messages in turn */
359 while (!list_empty (&ni->ni_active_mds)) {
360 lib_md_t *md = list_entry (ni->ni_active_mds.next,
363 CERROR ("Active md %p on exit\n", md);
364 list_del (&md->md_list);
365 lib_md_free (nal, md);
368 while (!list_empty (&ni->ni_active_eqs)) {
369 lib_eq_t *eq = list_entry (ni->ni_active_eqs.next,
372 CERROR ("Active eq %p on exit\n", eq);
373 list_del (&eq->eq_list);
374 lib_eq_free (nal, eq);
377 while (!list_empty (&ni->ni_active_msgs)) {
378 lib_msg_t *msg = list_entry (ni->ni_active_msgs.next,
379 lib_msg_t, msg_list);
381 CERROR ("Active msg %p on exit\n", msg);
382 list_del (&msg->msg_list);
383 lib_msg_free (nal, msg);
386 nal->cb_free(nal, ni->tbl.tbl, sizeof(struct list_head) * ni->tbl.size);
389 lib_cleanup_handle_hash (nal);
390 kportal_descriptor_cleanup (nal);