/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * Start up the internal library and clear all structures.
 * Called by the NAL when it initializes.  Safe to call multiple times.
 *
 * Copyright (c) 2001-2003 Cluster File Systems, Inc.
 * Copyright (c) 2001-2002 Sandia National Laboratories
 *
 *   This file is part of Lustre, http://www.sf.net/projects/lustre/
 *
 *   Lustre is free software; you can redistribute it and/or
 *   modify it under the terms of version 2 of the GNU General Public
 *   License as published by the Free Software Foundation.
 *
 *   Lustre is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with Lustre; if not, write to the Free Software
 *   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
# define DEBUG_SUBSYSTEM S_PORTALS
#include <portals/lib-p30.h>

#ifdef __KERNEL__
# include <linux/string.h>      /* for memset() */
# include <linux/kp30.h>
# ifdef KERNEL_ADDR_CACHE
#  include <compute/OS/addrCache/cache.h>
# endif
#else
# include <string.h>
# include <sys/time.h>
#endif
#ifdef PTL_USE_SLAB_CACHE
static int ptl_slab_users;

kmem_cache_t *ptl_md_slab;
kmem_cache_t *ptl_msg_slab;
kmem_cache_t *ptl_me_slab;
kmem_cache_t *ptl_eq_slab;

atomic_t md_in_use_count;
atomic_t msg_in_use_count;
atomic_t me_in_use_count;
atomic_t eq_in_use_count;

/* NB zeroing in the ctor and on freeing ensures that items which pass
 * kmem_cache_validate(), but haven't been initialised as an MD/ME/EQ,
 * can't have valid handles. */

static void
ptl_md_slab_ctor (void *obj, kmem_cache_t *slab, unsigned long flags)
{
        memset (obj, 0, sizeof (lib_md_t));
}

static void
ptl_me_slab_ctor (void *obj, kmem_cache_t *slab, unsigned long flags)
{
        memset (obj, 0, sizeof (lib_me_t));
}

static void
ptl_eq_slab_ctor (void *obj, kmem_cache_t *slab, unsigned long flags)
{
        memset (obj, 0, sizeof (lib_eq_t));
}
int
kportal_descriptor_setup (nal_cb_t *nal)
{
        /* NB on failure caller must still call kportal_descriptor_cleanup */

        /* We'll have 1 set of slabs for ALL the nals :) */
        if (ptl_slab_users++ != 0)              /* slabs already created */
                RETURN (PTL_OK);

        ptl_md_slab = kmem_cache_create("portals_MD", sizeof(lib_md_t),
                                        0, SLAB_HWCACHE_ALIGN,
                                        ptl_md_slab_ctor, NULL);
        if (ptl_md_slab == NULL) {
                CERROR("couldn't allocate ptl_md_t slab");
                RETURN (PTL_NOSPACE);
        }

        /* NB no ctor for msgs; they don't need handle verification */
        ptl_msg_slab = kmem_cache_create("portals_MSG", sizeof(lib_msg_t),
                                         0, SLAB_HWCACHE_ALIGN, NULL, NULL);
        if (ptl_msg_slab == NULL) {
                CERROR("couldn't allocate ptl_msg_t slab");
                RETURN (PTL_NOSPACE);
        }

        ptl_me_slab = kmem_cache_create("portals_ME", sizeof(lib_me_t),
                                        0, SLAB_HWCACHE_ALIGN,
                                        ptl_me_slab_ctor, NULL);
        if (ptl_me_slab == NULL) {
                CERROR("couldn't allocate ptl_me_t slab");
                RETURN (PTL_NOSPACE);
        }

        ptl_eq_slab = kmem_cache_create("portals_EQ", sizeof(lib_eq_t),
                                        0, SLAB_HWCACHE_ALIGN,
                                        ptl_eq_slab_ctor, NULL);
        if (ptl_eq_slab == NULL) {
                CERROR("couldn't allocate ptl_eq_t slab");
                RETURN (PTL_NOSPACE);
        }

        RETURN (PTL_OK);
}
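
/*
 * Illustrative sketch only (not called from this file): with
 * PTL_USE_SLAB_CACHE defined, descriptor allocation elsewhere in the
 * library is presumed to pair kmem_cache_alloc/kmem_cache_free with the
 * in-use counters above, roughly as below.  The GFP flag and the helper
 * names are assumptions for illustration, not taken from this file.
 */
#if 0
static lib_md_t *
example_md_alloc (void)
{
        lib_md_t *md = kmem_cache_alloc (ptl_md_slab, GFP_NOFS);

        if (md == NULL)
                return (NULL);

        atomic_inc (&md_in_use_count);          /* checked at cleanup time */
        return (md);
}

static void
example_md_free (lib_md_t *md)
{
        atomic_dec (&md_in_use_count);
        memset (md, 0, sizeof (*md));           /* keep stale handles invalid */
        kmem_cache_free (ptl_md_slab, md);
}
#endif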
void
kportal_descriptor_cleanup (nal_cb_t *nal)
{
        int rc;

        if (--ptl_slab_users != 0)              /* other users remain */
                return;

        /* everything allocated from the slabs must have been returned */
        LASSERT (atomic_read (&md_in_use_count) == 0);
        LASSERT (atomic_read (&me_in_use_count) == 0);
        LASSERT (atomic_read (&eq_in_use_count) == 0);
        LASSERT (atomic_read (&msg_in_use_count) == 0);

        if (ptl_md_slab != NULL) {
                rc = kmem_cache_destroy(ptl_md_slab);
                if (rc != 0)
                        CERROR("unable to free MD slab\n");
        }
        if (ptl_msg_slab != NULL) {
                rc = kmem_cache_destroy(ptl_msg_slab);
                if (rc != 0)
                        CERROR("unable to free MSG slab\n");
        }
        if (ptl_me_slab != NULL) {
                rc = kmem_cache_destroy(ptl_me_slab);
                if (rc != 0)
                        CERROR("unable to free ME slab\n");
        }
        if (ptl_eq_slab != NULL) {
                rc = kmem_cache_destroy(ptl_eq_slab);
                if (rc != 0)
                        CERROR("unable to free EQ slab\n");
        }
}

#else /* !PTL_USE_SLAB_CACHE: use simple pre-allocated freelists instead */
int
lib_freelist_init (nal_cb_t *nal, lib_freelist_t *fl, int n, int size)
{
        char *space;

        LASSERT (n > 0);

        /* each object is prefixed by its freelist linkage */
        size += offsetof (lib_freeobj_t, fo_contents);

        space = nal->cb_malloc (nal, n * size);
        if (space == NULL)
                return (PTL_NOSPACE);

        INIT_LIST_HEAD (&fl->fl_list);
        fl->fl_objs = space;
        fl->fl_nobjs = n;
        fl->fl_objsize = size;

        do {                    /* chain every object onto the freelist */
                memset (space, 0, size);
                list_add ((struct list_head *)space, &fl->fl_list);
                space += size;
        } while (--n != 0);

        return (PTL_OK);
}
void
lib_freelist_fini (nal_cb_t *nal, lib_freelist_t *fl)
{
        struct list_head *el;
        int               count;

        if (fl->fl_nobjs == 0)
                return;

        count = 0;
        for (el = fl->fl_list.next; el != &fl->fl_list; el = el->next)
                count++;

        /* everything must have been returned to the freelist by now */
        LASSERT (count == fl->fl_nobjs);

        nal->cb_free (nal, fl->fl_objs, fl->fl_nobjs * fl->fl_objsize);
        memset (fl, 0, sizeof (*fl));   /* NB sizeof(*fl), not sizeof(fl) */
}
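
/*
 * Illustrative sketch only: without PTL_USE_SLAB_CACHE, allocating a
 * descriptor is presumed to reduce to popping an entry off fl_list and
 * handing out the fo_contents payload, with the reverse on free.  The
 * helper below is a hypothetical example, not the library's real
 * allocator, and assumes the freelist is only touched under the state lock.
 */
#if 0
static void *
example_freelist_alloc (lib_freelist_t *fl)
{
        struct list_head *el;

        if (list_empty (&fl->fl_list))          /* freelist exhausted */
                return (NULL);

        el = fl->fl_list.next;
        list_del (el);
        /* payload starts at fo_contents, just past the freelist linkage */
        return ((char *)el + offsetof (lib_freeobj_t, fo_contents));
}
#endif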
int
kportal_descriptor_setup (nal_cb_t *nal)
{
        /* NB on failure caller must still call kportal_descriptor_cleanup */
        int rc;

        memset (&nal->ni.ni_free_mes,  0, sizeof (nal->ni.ni_free_mes));
        memset (&nal->ni.ni_free_msgs, 0, sizeof (nal->ni.ni_free_msgs));
        memset (&nal->ni.ni_free_mds,  0, sizeof (nal->ni.ni_free_mds));
        memset (&nal->ni.ni_free_eqs,  0, sizeof (nal->ni.ni_free_eqs));

        rc = lib_freelist_init (nal, &nal->ni.ni_free_mes,
                                MAX_MES, sizeof (lib_me_t));
        if (rc != PTL_OK)
                return (rc);

        rc = lib_freelist_init (nal, &nal->ni.ni_free_msgs,
                                MAX_MSGS, sizeof (lib_msg_t));
        if (rc != PTL_OK)
                return (rc);

        rc = lib_freelist_init (nal, &nal->ni.ni_free_mds,
                                MAX_MDS, sizeof (lib_md_t));
        if (rc != PTL_OK)
                return (rc);

        return (lib_freelist_init (nal, &nal->ni.ni_free_eqs,
                                   MAX_EQS, sizeof (lib_eq_t)));
}
void
kportal_descriptor_cleanup (nal_cb_t *nal)
{
        lib_freelist_fini (nal, &nal->ni.ni_free_mes);
        lib_freelist_fini (nal, &nal->ni.ni_free_msgs);
        lib_freelist_fini (nal, &nal->ni.ni_free_mds);
        lib_freelist_fini (nal, &nal->ni.ni_free_eqs);
}

#endif /* PTL_USE_SLAB_CACHE */
__u64
lib_create_interface_cookie (nal_cb_t *nal)
{
        /* NB the interface cookie in wire handles guards against delayed
         * replies and ACKs appearing valid in a new instance of the same
         * interface.  Initialisation time, even if it's only implemented
         * to millisecond resolution, is probably easily good enough. */
        struct timeval tv;
        __u64          cookie;
#ifndef __KERNEL__
        int            rc = gettimeofday (&tv, NULL);
        LASSERT (rc == 0);
#else
        do_gettimeofday(&tv);
#endif
        cookie  = tv.tv_sec;
        cookie *= 1000000;
        cookie += tv.tv_usec;
        return (cookie);
}
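
/*
 * Illustrative sketch only: per the comment above, the cookie is presumed
 * to be compared against the interface cookie carried in incoming wire
 * handles, so traffic addressed to a previous incarnation of this
 * interface can be dropped.  The helper name and parameter below are
 * assumptions for illustration, not taken from this file.
 */
#if 0
static int
example_wire_handle_is_stale (lib_ni_t *ni, __u64 wire_interface_cookie)
{
        /* mismatch => handle was minted by an earlier interface instance */
        return (wire_interface_cookie != ni->ni_interface_cookie);
}
#endif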
int
lib_setup_handle_hash (nal_cb_t *nal)
{
        lib_ni_t *ni = &nal->ni;
        int       i;

        /* Arbitrary choice of hash table size */
#ifdef __KERNEL__
        ni->ni_lh_hash_size = PAGE_SIZE / sizeof (struct list_head);
#else
        ni->ni_lh_hash_size = (MAX_MES + MAX_MDS + MAX_EQS)/4;
#endif
        ni->ni_lh_hash_table =
                (struct list_head *)nal->cb_malloc (nal, ni->ni_lh_hash_size
                                                    * sizeof (struct list_head));
        if (ni->ni_lh_hash_table == NULL)
                return (PTL_NOSPACE);

        for (i = 0; i < ni->ni_lh_hash_size; i++)
                INIT_LIST_HEAD (&ni->ni_lh_hash_table[i]);

        /* start object cookies above the reserved type values */
        ni->ni_next_object_cookie = PTL_COOKIE_TYPES;

        return (PTL_OK);
}
void
lib_cleanup_handle_hash (nal_cb_t *nal)
{
        lib_ni_t *ni = &nal->ni;

        if (ni->ni_lh_hash_table == NULL)
                return;

        nal->cb_free (nal, ni->ni_lh_hash_table,
                      ni->ni_lh_hash_size * sizeof (struct list_head));
}
lib_handle_t *
lib_lookup_cookie (nal_cb_t *nal, __u64 cookie, int type)
{
        /* ALWAYS called with statelock held */
        lib_ni_t         *ni = &nal->ni;
        struct list_head *list;
        struct list_head *el;
        unsigned int      hash;

        /* the low-order cookie bits encode the object type */
        if ((cookie & (PTL_COOKIE_TYPES - 1)) != type)
                return (NULL);

        hash = ((unsigned int)cookie) % ni->ni_lh_hash_size;
        list = &ni->ni_lh_hash_table[hash];

        list_for_each (el, list) {
                lib_handle_t *lh = list_entry (el, lib_handle_t, lh_hash_chain);

                if (lh->lh_cookie == cookie)
                        return (lh);
        }

        return (NULL);
}
void
lib_initialise_handle (nal_cb_t *nal, lib_handle_t *lh, int type)
{
        /* ALWAYS called with statelock held */
        lib_ni_t     *ni = &nal->ni;
        unsigned int  hash;

        LASSERT (type >= 0 && type < PTL_COOKIE_TYPES);
        lh->lh_cookie = ni->ni_next_object_cookie | type;
        ni->ni_next_object_cookie += PTL_COOKIE_TYPES;

        hash = ((unsigned int)lh->lh_cookie) % ni->ni_lh_hash_size;
        list_add (&lh->lh_hash_chain, &ni->ni_lh_hash_table[hash]);
}
void
lib_invalidate_handle (nal_cb_t *nal, lib_handle_t *lh)
{
        list_del (&lh->lh_hash_chain);
}
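
/*
 * Illustrative sketch only: a typical handle lifetime pairs the three
 * helpers above as below.  This hypothetical example assumes the caller
 * holds the state lock (as the comments above require) and passes a valid
 * cookie type; it is not part of the library's real call paths.
 */
#if 0
static void
example_handle_round_trip (nal_cb_t *nal, lib_handle_t *lh, int type)
{
        lib_initialise_handle (nal, lh, type);

        /* the freshly minted cookie resolves back to the same handle... */
        LASSERT (lib_lookup_cookie (nal, lh->lh_cookie, type) == lh);

        /* ...until the handle is invalidated */
        lib_invalidate_handle (nal, lh);
}
#endif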
int
lib_init(nal_cb_t * nal, ptl_nid_t nid, ptl_pid_t pid, int gsize,
         ptl_pt_index_t ptl_size, ptl_ac_index_t acl_size)
{
        int       rc = PTL_OK;
        lib_ni_t *ni = &nal->ni;
        int       i;

        /* NB serialised in PtlNIInit() */

        if (ni->refcnt != 0) {                  /* already initialised */
                ni->refcnt++;
                goto out;
        }

        lib_assert_wire_constants ();

        /*
         * Allocate the portal table for this interface
         * and all per-interface objects.
         */
        memset(&ni->counters, 0, sizeof(lib_counters_t));

        rc = kportal_descriptor_setup (nal);
        if (rc != PTL_OK)
                goto out;

        INIT_LIST_HEAD (&ni->ni_active_msgs);
        INIT_LIST_HEAD (&ni->ni_active_mds);
        INIT_LIST_HEAD (&ni->ni_active_eqs);

        INIT_LIST_HEAD (&ni->ni_test_peers);

        ni->ni_interface_cookie = lib_create_interface_cookie (nal);
        ni->ni_next_object_cookie = 0;
        rc = lib_setup_handle_hash (nal);
        if (rc != PTL_OK)
                goto out;

        ni->num_nodes = gsize;
        ni->tbl.size = ptl_size;

        ni->tbl.tbl = nal->cb_malloc(nal, sizeof(struct list_head) * ptl_size);
        if (ni->tbl.tbl == NULL) {
                rc = PTL_NOSPACE;
                goto out;
        }

        for (i = 0; i < ptl_size; i++)
                INIT_LIST_HEAD(&(ni->tbl.tbl[i]));

        ni->debug = PTL_DEBUG_NONE;
        ni->refcnt++;

 out:
        if (rc != PTL_OK) {
                lib_cleanup_handle_hash (nal);
                kportal_descriptor_cleanup (nal);
        }

        RETURN (rc);
}
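
/*
 * Illustrative sketch only: a NAL is presumed to call lib_init() once per
 * interface from its startup path (serialised by PtlNIInit(), per the note
 * above) and lib_fini() from teardown.  The argument values below are
 * placeholders for illustration, not defaults defined anywhere.
 */
#if 0
static int
example_nal_startup (nal_cb_t *nal, ptl_nid_t nid, ptl_pid_t pid)
{
        /* 1 node, 64 portal table entries, no ACL entries (placeholders) */
        return (lib_init (nal, nid, pid, 1, 64, 0));
}
#endif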
int
lib_fini(nal_cb_t * nal)
{
        lib_ni_t *ni = &nal->ni;
        int       idx;

        ni->refcnt--;
        if (ni->refcnt != 0)                    /* not the last reference */
                goto out;

        /* NB no stat_lock() since this is the last reference.  The NAL
         * should have shut down already, so it should be safe to unlink
         * and free all descriptors, even those that appear committed to a
         * network op (eg MD with non-zero pending count). */

        for (idx = 0; idx < ni->tbl.size; idx++)
                while (!list_empty (&ni->tbl.tbl[idx])) {
                        lib_me_t *me = list_entry (ni->tbl.tbl[idx].next,
                                                   lib_me_t, me_list);

                        CERROR ("Active me %p on exit\n", me);
                        list_del (&me->me_list);
                        lib_me_free (nal, me);
                }

        while (!list_empty (&ni->ni_active_mds)) {
                lib_md_t *md = list_entry (ni->ni_active_mds.next,
                                           lib_md_t, md_list);

                CERROR ("Active md %p on exit\n", md);
                list_del (&md->md_list);
                lib_md_free (nal, md);
        }

        while (!list_empty (&ni->ni_active_eqs)) {
                lib_eq_t *eq = list_entry (ni->ni_active_eqs.next,
                                           lib_eq_t, eq_list);

                CERROR ("Active eq %p on exit\n", eq);
                list_del (&eq->eq_list);
                lib_eq_free (nal, eq);
        }

        while (!list_empty (&ni->ni_active_msgs)) {
                lib_msg_t *msg = list_entry (ni->ni_active_msgs.next,
                                             lib_msg_t, msg_list);

                CERROR ("Active msg %p on exit\n", msg);
                list_del (&msg->msg_list);
                lib_msg_free (nal, msg);
        }

        nal->cb_free(nal, ni->tbl.tbl, sizeof(struct list_head) * ni->tbl.size);

        lib_cleanup_handle_hash (nal);
        kportal_descriptor_cleanup (nal);

 out:
        return (PTL_OK);
}