1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
5 * Start up the internal library and clear all structures
6 * Called by the NAL when it initializes. Safe to call multiple times.
8 * Copyright (c) 2001-2003 Cluster File Systems, Inc.
9 * Copyright (c) 2001-2002 Sandia National Laboratories
11 * This file is part of Lustre, http://www.sf.net/projects/lustre/
13 * Lustre is free software; you can redistribute it and/or
14 * modify it under the terms of version 2 of the GNU General Public
15 * License as published by the Free Software Foundation.
17 * Lustre is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
22 * You should have received a copy of the GNU General Public License
23 * along with Lustre; if not, write to the Free Software
24 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
27 # define DEBUG_SUBSYSTEM S_PORTALS
28 #include <portals/lib-p30.h>
31 # include <linux/string.h> /* for memset() */
32 # include <linux/kp30.h>
33 # ifdef KERNEL_ADDR_CACHE
34 # include <compute/OS/addrCache/cache.h>
38 # include <sys/time.h>
41 #ifdef PTL_USE_SLAB_CACHE
/* Reference count of NAL instances sharing the slab caches below;
 * the caches are torn down only when this drops to zero (see
 * kportal_descriptor_cleanup). */
42 static int ptl_slab_users;

/* One set of descriptor slab caches shared by ALL the NALs. */
44 kmem_cache_t *ptl_md_slab;
45 kmem_cache_t *ptl_msg_slab;
46 kmem_cache_t *ptl_me_slab;
47 kmem_cache_t *ptl_eq_slab;

/* Live-descriptor counts per type; asserted to be zero when the
 * last user cleans up. */
49 atomic_t md_in_use_count;
50 atomic_t msg_in_use_count;
51 atomic_t me_in_use_count;
52 atomic_t eq_in_use_count;
54 /* NB zeroing in ctor and on freeing ensures items that
55 * kmem_cache_validate() OK, but haven't been initialised
56 * as an MD/ME/EQ can't have valid handles
/* Slab constructor for lib_md_t: zero the object so a freshly
 * allocated MD can never present a stale-but-valid-looking handle
 * (see the NB comment above). */
59 ptl_md_slab_ctor (void *obj, kmem_cache_t *slab, unsigned long flags)
61 memset (obj, 0, sizeof (lib_md_t));
/* Slab constructor for lib_me_t: zero the object for the same
 * handle-validity reason as the MD ctor above. */
65 ptl_me_slab_ctor (void *obj, kmem_cache_t *slab, unsigned long flags)
67 memset (obj, 0, sizeof (lib_me_t));
/* Slab constructor for lib_eq_t: zero the object for the same
 * handle-validity reason as the MD ctor above. */
71 ptl_eq_slab_ctor (void *obj, kmem_cache_t *slab, unsigned long flags)
73 memset (obj, 0, sizeof (lib_eq_t));
/* Slab-cache variant: create the four global descriptor caches
 * (MD, MSG, ME, EQ) shared by every NAL instance.
 * Returns PTL_NOSPACE if any cache cannot be created.
 * NB on failure the caller must still call kportal_descriptor_cleanup,
 * which tolerates partially-created state via its NULL checks. */
77 kportal_descriptor_setup (nal_cb_t *nal)
79 /* NB on failure caller must still call kportal_descriptor_cleanup */
82 /* We'll have 1 set of slabs for ALL the nals :) */
87 ptl_md_slab = kmem_cache_create("portals_MD",
90 ptl_md_slab_ctor, NULL);
92 CERROR("couldn't allocate ptl_md_t slab");
96 /* NB no ctor for msgs; they don't need handle verification */
97 ptl_msg_slab = kmem_cache_create("portals_MSG",
102 CERROR("couldn't allocate ptl_msg_t slab");
103 RETURN (PTL_NOSPACE);
106 ptl_me_slab = kmem_cache_create("portals_ME",
109 ptl_me_slab_ctor, NULL);
111 CERROR("couldn't allocate ptl_me_t slab");
112 RETURN (PTL_NOSPACE);
115 ptl_eq_slab = kmem_cache_create("portals_EQ",
118 ptl_eq_slab_ctor, NULL);
120 CERROR("couldn't allocate ptl_eq_t slab");
121 RETURN (PTL_NOSPACE);
/* Slab-cache variant: drop one reference on the shared caches.
 * On the last reference, assert that no descriptor of any type is
 * still in use, then destroy whichever caches were created (the NULL
 * checks allow cleanup after a partially-failed setup). */
128 kportal_descriptor_cleanup (nal_cb_t *nal)
130 if (--ptl_slab_users != 0)
133 LASSERT (atomic_read (&md_in_use_count) == 0);
134 LASSERT (atomic_read (&me_in_use_count) == 0);
135 LASSERT (atomic_read (&eq_in_use_count) == 0);
136 LASSERT (atomic_read (&msg_in_use_count) == 0);
138 if (ptl_md_slab != NULL)
139 kmem_cache_destroy(ptl_md_slab);
140 if (ptl_msg_slab != NULL)
141 kmem_cache_destroy(ptl_msg_slab);
142 if (ptl_me_slab != NULL)
143 kmem_cache_destroy(ptl_me_slab);
144 if (ptl_eq_slab != NULL)
145 kmem_cache_destroy(ptl_eq_slab);
/* Preallocate 'n' objects of 'size' bytes for freelist 'fl'.
 * Each object is grown by the lib_freeobj_t header so its list
 * linkage precedes the payload (fo_contents); all objects come from
 * one cb_malloc'd slab of n * size bytes, and each is zeroed and
 * threaded onto fl->fl_list.
 * Returns PTL_NOSPACE if the backing allocation fails. */
150 lib_freelist_init (nal_cb_t *nal, lib_freelist_t *fl, int n, int size)
156 size += offsetof (lib_freeobj_t, fo_contents);
158 space = nal->cb_malloc (nal, n * size);
160 return (PTL_NOSPACE);
162 INIT_LIST_HEAD (&fl->fl_list);
165 fl->fl_objsize = size;
169 memset (space, 0, size);
170 list_add ((struct list_head *)space, &fl->fl_list);
178 lib_freelist_fini (nal_cb_t *nal, lib_freelist_t *fl)
180 struct list_head *el;
183 if (fl->fl_nobjs == 0)
187 for (el = fl->fl_list.next; el != &fl->fl_list; el = el->next)
190 LASSERT (count == fl->fl_nobjs);
192 nal->cb_free (nal, fl->fl_objs, fl->fl_nobjs * fl->fl_objsize);
193 memset (fl, 0, sizeof (fl));
/* Freelist variant (no slab caches): carve the ME/msg/MD/EQ
 * descriptors for this interface out of four preallocated freelists.
 * Each freelist is zeroed first so a failed setup leaves it in a
 * state that lib_freelist_fini can safely handle.
 * NB on failure the caller must still call kportal_descriptor_cleanup. */
197 kportal_descriptor_setup (nal_cb_t *nal)
199 /* NB on failure caller must still call kportal_descriptor_cleanup */
203 memset (&nal->ni.ni_free_mes, 0, sizeof (nal->ni.ni_free_mes));
204 memset (&nal->ni.ni_free_msgs, 0, sizeof (nal->ni.ni_free_msgs));
205 memset (&nal->ni.ni_free_mds, 0, sizeof (nal->ni.ni_free_mds));
206 memset (&nal->ni.ni_free_eqs, 0, sizeof (nal->ni.ni_free_eqs));
208 rc = lib_freelist_init (nal, &nal->ni.ni_free_mes,
209 MAX_MES, sizeof (lib_me_t));
213 rc = lib_freelist_init (nal, &nal->ni.ni_free_msgs,
214 MAX_MSGS, sizeof (lib_msg_t));
218 rc = lib_freelist_init (nal, &nal->ni.ni_free_mds,
219 MAX_MDS, sizeof (lib_md_t));
223 rc = lib_freelist_init (nal, &nal->ni.ni_free_eqs,
224 MAX_EQS, sizeof (lib_eq_t));
/* Freelist variant: release the four per-interface descriptor
 * freelists (safe even if setup failed part-way, since each list was
 * zeroed before use). */
229 kportal_descriptor_cleanup (nal_cb_t *nal)
231 lib_freelist_fini (nal, &nal->ni.ni_free_mes);
232 lib_freelist_fini (nal, &nal->ni.ni_free_msgs);
233 lib_freelist_fini (nal, &nal->ni.ni_free_mds);
234 lib_freelist_fini (nal, &nal->ni.ni_free_eqs);
/* Derive a per-boot unique cookie for this interface from the current
 * time of day.  Both the userspace (gettimeofday) and kernel
 * (do_gettimeofday) paths appear here; presumably they are selected
 * by a preprocessor conditional — NOTE(review): confirm against the
 * full file. */
240 lib_create_interface_cookie (nal_cb_t *nal)
242 /* NB the interface cookie in wire handles guards against delayed
243 * replies and ACKs appearing valid in a new instance of the same
244 * interface. Initialisation time, even if it's only implemented
245 * to millisecond resolution is probably easily good enough. */
249 int rc = gettimeofday (&tv, NULL);
252 do_gettimeofday(&tv);
256 cookie += tv.tv_usec;
/* Allocate and initialise the object-handle hash table used by
 * lib_lookup_cookie/lib_initialise_handle.  Two sizing expressions
 * appear (PAGE_SIZE-based and MAX_*-based); presumably one is chosen
 * by a preprocessor conditional — NOTE(review): confirm against the
 * full file.  Returns PTL_NOSPACE if the table cannot be allocated. */
261 lib_setup_handle_hash (nal_cb_t *nal)
263 lib_ni_t *ni = &nal->ni;
266 /* Arbitrary choice of hash table size */
268 ni->ni_lh_hash_size = PAGE_SIZE / sizeof (struct list_head);
270 ni->ni_lh_hash_size = (MAX_MES + MAX_MDS + MAX_EQS)/4;
272 ni->ni_lh_hash_table =
273 (struct list_head *)nal->cb_malloc (nal, ni->ni_lh_hash_size
274 * sizeof (struct list_head));
275 if (ni->ni_lh_hash_table == NULL)
276 return (PTL_NOSPACE);
278 for (i = 0; i < ni->ni_lh_hash_size; i++)
279 INIT_LIST_HEAD (&ni->ni_lh_hash_table[i]);
/* Start object cookies at PTL_COOKIE_TYPES: the low bits of a cookie
 * encode the object type (see lib_initialise_handle), so this keeps
 * every issued cookie non-zero. */
281 ni->ni_next_object_cookie = PTL_COOKIE_TYPES;
/* Free the handle hash table allocated by lib_setup_handle_hash;
 * a no-op if setup never ran or failed (table still NULL). */
289 lib_cleanup_handle_hash (nal_cb_t *nal)
291 lib_ni_t *ni = &nal->ni;
293 if (ni->ni_lh_hash_table == NULL)
294 nal->cb_free (nal, ni->ni_lh_hash_table,
295 ni->ni_lh_hash_size * sizeof (struct list_head));
/* Look up the lib_handle_t with the given cookie.  The cookie's low
 * bits must match the expected object 'type'; the handle is then
 * found by scanning its hash chain for an exact cookie match. */
299 lib_lookup_cookie (nal_cb_t *nal, __u64 cookie, int type)
301 /* ALWAYS called with statelock held */
302 lib_ni_t *ni = &nal->ni;
303 struct list_head *list;
304 struct list_head *el;
/* Reject immediately if the type encoded in the cookie's low bits
 * doesn't match what the caller expects. */
307 if ((cookie & (PTL_COOKIE_TYPES - 1)) != type)
310 hash = ((unsigned int)cookie) % ni->ni_lh_hash_size;
311 list = &ni->ni_lh_hash_table[hash];
313 list_for_each (el, list) {
314 lib_handle_t *lh = list_entry (el, lib_handle_t, lh_hash_chain);
316 if (lh->lh_cookie == cookie)
/* Assign a fresh cookie to handle 'lh' and insert it into the hash
 * table.  The object type is OR'd into the cookie's low bits (cookies
 * advance in steps of PTL_COOKIE_TYPES, so those bits are always
 * free), letting lib_lookup_cookie type-check handles cheaply. */
324 lib_initialise_handle (nal_cb_t *nal, lib_handle_t *lh, int type)
326 /* ALWAYS called with statelock held */
327 lib_ni_t *ni = &nal->ni;
330 LASSERT (type >= 0 && type < PTL_COOKIE_TYPES);
331 lh->lh_cookie = ni->ni_next_object_cookie | type;
332 ni->ni_next_object_cookie += PTL_COOKIE_TYPES;
334 hash = ((unsigned int)lh->lh_cookie) % ni->ni_lh_hash_size;
335 list_add (&lh->lh_hash_chain, &ni->ni_lh_hash_table[hash]);
/* Remove handle 'lh' from the hash table so its cookie can no longer
 * be looked up. */
339 lib_invalidate_handle (nal_cb_t *nal, lib_handle_t *lh)
341 list_del (&lh->lh_hash_chain);
/* Initialise the library state for one network interface: reset the
 * counters, set up the descriptor pools, the active-object lists, the
 * interface cookie and handle hash, and allocate the portal table of
 * 'ptl_size' match-list heads.  Safe to call more than once: a
 * non-zero refcnt short-circuits re-initialisation.  The error path
 * at the bottom unwinds the hash table and descriptor pools. */
345 lib_init(nal_cb_t * nal, ptl_nid_t nid, ptl_pid_t pid, int gsize,
346 ptl_pt_index_t ptl_size, ptl_ac_index_t acl_size)
349 lib_ni_t *ni = &nal->ni;
353 /* NB serialised in PtlNIInit() */
355 if (ni->refcnt != 0) { /* already initialised */
360 lib_assert_wire_constants ();
363 * Allocate the portal table for this interface
364 * and all per-interface objects.
366 memset(&ni->counters, 0, sizeof(lib_counters_t));
368 rc = kportal_descriptor_setup (nal);
/* Lists of every live msg/MD/EQ, drained again in lib_fini(). */
372 INIT_LIST_HEAD (&ni->ni_active_msgs);
373 INIT_LIST_HEAD (&ni->ni_active_mds);
374 INIT_LIST_HEAD (&ni->ni_active_eqs);
376 INIT_LIST_HEAD (&ni->ni_test_peers);
378 ni->ni_interface_cookie = lib_create_interface_cookie (nal);
379 ni->ni_next_object_cookie = 0;
380 rc = lib_setup_handle_hash (nal);
387 ni->num_nodes = gsize;
388 ni->tbl.size = ptl_size;
/* Portal table: one match-list head per portal index. */
390 ni->tbl.tbl = nal->cb_malloc(nal, sizeof(struct list_head) * ptl_size);
391 if (ni->tbl.tbl == NULL) {
396 for (i = 0; i < ptl_size; i++)
397 INIT_LIST_HEAD(&(ni->tbl.tbl[i]));
399 ni->debug = PTL_DEBUG_NONE;
/* Error unwind: release everything set up above. */
405 lib_cleanup_handle_hash (nal);
406 kportal_descriptor_cleanup (nal);
/* Shut the interface down: unlink and free every descriptor that is
 * still live (MEs on each portal's match list, then active MDs, EQs
 * and msgs — each logged as an error since nothing should still be
 * active here), then free the portal table, the handle hash and the
 * descriptor pools. */
413 lib_fini(nal_cb_t * nal)
415 lib_ni_t *ni = &nal->ni;
423 /* NB no stat_lock() since this is the last reference. The NAL
424 * should have shut down already, so it should be safe to unlink
425 * and free all descriptors, even those that appear committed to a
426 * network op (eg MD with non-zero pending count)
/* Drain each portal's match-entry list. */
429 for (idx = 0; idx < ni->tbl.size; idx++)
430 while (!list_empty (&ni->tbl.tbl[idx])) {
431 lib_me_t *me = list_entry (ni->tbl.tbl[idx].next,
434 CERROR ("Active me %p on exit\n", me);
435 list_del (&me->me_list);
436 lib_me_free (nal, me);
439 while (!list_empty (&ni->ni_active_mds)) {
440 lib_md_t *md = list_entry (ni->ni_active_mds.next,
443 CERROR ("Active md %p on exit\n", md);
444 list_del (&md->md_list);
445 lib_md_free (nal, md);
448 while (!list_empty (&ni->ni_active_eqs)) {
449 lib_eq_t *eq = list_entry (ni->ni_active_eqs.next,
452 CERROR ("Active eq %p on exit\n", eq);
453 list_del (&eq->eq_list);
454 lib_eq_free (nal, eq);
457 while (!list_empty (&ni->ni_active_msgs)) {
458 lib_msg_t *msg = list_entry (ni->ni_active_msgs.next,
459 lib_msg_t, msg_list);
461 CERROR ("Active msg %p on exit\n", msg);
462 list_del (&msg->msg_list);
463 lib_msg_free (nal, msg);
466 nal->cb_free(nal, ni->tbl.tbl, sizeof(struct list_head) * ni->tbl.size);
469 lib_cleanup_handle_hash (nal);
470 kportal_descriptor_cleanup (nal);