/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * Copyright (C) 2005 Cluster File Systems, Inc. All rights reserved.
 * Author: PJ Kirner <pjkirner@clusterfs.com>
 *
 * This file is part of the Lustre file system, http://www.lustre.org
 * Lustre is a trademark of Cluster File Systems, Inc.
 *
 * This file is confidential source code owned by Cluster File Systems.
 * No viewing, modification, compilation, redistribution, or any other
 * form of use is permitted except through a signed license agreement.
 *
 * If you have not signed such an agreement, then you have no rights to
 * this file. Please destroy it immediately and contact CFS.
 */

#include "ptllnd.h"

lnd_t kptllnd_lnd = {
        .lnd_type       = PTLLND,
        .lnd_startup    = kptllnd_startup,
        .lnd_shutdown   = kptllnd_shutdown,
        .lnd_ctl        = kptllnd_ctl,
        .lnd_send       = kptllnd_send,
        .lnd_recv       = kptllnd_recv,
        .lnd_eager_recv = kptllnd_eager_recv,
};

kptl_data_t kptllnd_data;

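/* Render a Portals process id as a printable string (FMT_PTLID with the pid
 * and nid).  The result comes from a small ring of static buffers protected
 * by kptl_ptlid2str_lock, so callers may pass it straight to CDEBUG/CERROR
 * but must not keep the pointer: the buffer is reused once the ring wraps. */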
char *
kptllnd_ptlid2str(ptl_process_id_t id)
{
        static char strs[64][32];
        static int  idx = 0;

        unsigned long  flags;
        char          *str;

        spin_lock_irqsave(&kptllnd_data.kptl_ptlid2str_lock, flags);
        str = strs[idx++];
        if (idx >= sizeof(strs)/sizeof(strs[0]))
                idx = 0;
        spin_unlock_irqrestore(&kptllnd_data.kptl_ptlid2str_lock, flags);

        snprintf(str, sizeof(strs[0]), FMT_PTLID, id.pid, id.nid);
        return str;
}

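/* Compile-time (CLASSERT) checks that the on-the-wire layout of kptl_msg_t
 * and its message bodies matches what 'wirecheck' recorded: protocol
 * constants, structure sizes and field offsets.  An incompatible change to
 * the wire structures fails the build instead of silently breaking the
 * protocol. */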
void
kptllnd_assert_wire_constants (void)
{
        /* Wire protocol assertions generated by 'wirecheck'
         * running on Linux fedora 2.6.11-co-0.6.4 #1 Mon Jun 19 05:36:13 UTC 2006 i686 i686 i386 GNU
         * with gcc version 4.1.1 20060525 (Red Hat 4.1.1-1) */

        CLASSERT (PTL_RESERVED_MATCHBITS == 0x100);
        CLASSERT (LNET_MSG_MATCHBITS == 0);
        CLASSERT (PTLLND_MSG_MAGIC == 0x50746C4E);
        CLASSERT (PTLLND_MSG_VERSION == 0x04);
        CLASSERT (PTLLND_RDMA_OK == 0x00);
        CLASSERT (PTLLND_RDMA_FAIL == 0x01);
        CLASSERT (PTLLND_MSG_TYPE_INVALID == 0x00);
        CLASSERT (PTLLND_MSG_TYPE_PUT == 0x01);
        CLASSERT (PTLLND_MSG_TYPE_GET == 0x02);
        CLASSERT (PTLLND_MSG_TYPE_IMMEDIATE == 0x03);
        CLASSERT (PTLLND_MSG_TYPE_NOOP == 0x04);
        CLASSERT (PTLLND_MSG_TYPE_HELLO == 0x05);
        CLASSERT (PTLLND_MSG_TYPE_NAK == 0x06);

        /* Checks for struct kptl_msg_t */
        CLASSERT ((int)sizeof(kptl_msg_t) == 136);
        CLASSERT ((int)offsetof(kptl_msg_t, ptlm_magic) == 0);
        CLASSERT ((int)sizeof(((kptl_msg_t *)0)->ptlm_magic) == 4);
        CLASSERT ((int)offsetof(kptl_msg_t, ptlm_version) == 4);
        CLASSERT ((int)sizeof(((kptl_msg_t *)0)->ptlm_version) == 2);
        CLASSERT ((int)offsetof(kptl_msg_t, ptlm_type) == 6);
        CLASSERT ((int)sizeof(((kptl_msg_t *)0)->ptlm_type) == 1);
        CLASSERT ((int)offsetof(kptl_msg_t, ptlm_credits) == 7);
        CLASSERT ((int)sizeof(((kptl_msg_t *)0)->ptlm_credits) == 1);
        CLASSERT ((int)offsetof(kptl_msg_t, ptlm_nob) == 8);
        CLASSERT ((int)sizeof(((kptl_msg_t *)0)->ptlm_nob) == 4);
        CLASSERT ((int)offsetof(kptl_msg_t, ptlm_cksum) == 12);
        CLASSERT ((int)sizeof(((kptl_msg_t *)0)->ptlm_cksum) == 4);
        CLASSERT ((int)offsetof(kptl_msg_t, ptlm_srcnid) == 16);
        CLASSERT ((int)sizeof(((kptl_msg_t *)0)->ptlm_srcnid) == 8);
        CLASSERT ((int)offsetof(kptl_msg_t, ptlm_srcstamp) == 24);
        CLASSERT ((int)sizeof(((kptl_msg_t *)0)->ptlm_srcstamp) == 8);
        CLASSERT ((int)offsetof(kptl_msg_t, ptlm_dstnid) == 32);
        CLASSERT ((int)sizeof(((kptl_msg_t *)0)->ptlm_dstnid) == 8);
        CLASSERT ((int)offsetof(kptl_msg_t, ptlm_dststamp) == 40);
        CLASSERT ((int)sizeof(((kptl_msg_t *)0)->ptlm_dststamp) == 8);
        CLASSERT ((int)offsetof(kptl_msg_t, ptlm_srcpid) == 48);
        CLASSERT ((int)sizeof(((kptl_msg_t *)0)->ptlm_srcpid) == 4);
        CLASSERT ((int)offsetof(kptl_msg_t, ptlm_dstpid) == 52);
        CLASSERT ((int)sizeof(((kptl_msg_t *)0)->ptlm_dstpid) == 4);
        CLASSERT ((int)offsetof(kptl_msg_t, ptlm_u.immediate) == 56);
        CLASSERT ((int)sizeof(((kptl_msg_t *)0)->ptlm_u.immediate) == 72);
        CLASSERT ((int)offsetof(kptl_msg_t, ptlm_u.rdma) == 56);
        CLASSERT ((int)sizeof(((kptl_msg_t *)0)->ptlm_u.rdma) == 80);
        CLASSERT ((int)offsetof(kptl_msg_t, ptlm_u.hello) == 56);
        CLASSERT ((int)sizeof(((kptl_msg_t *)0)->ptlm_u.hello) == 12);

        /* Checks for struct kptl_immediate_msg_t */
        CLASSERT ((int)sizeof(kptl_immediate_msg_t) == 72);
        CLASSERT ((int)offsetof(kptl_immediate_msg_t, kptlim_hdr) == 0);
        CLASSERT ((int)sizeof(((kptl_immediate_msg_t *)0)->kptlim_hdr) == 72);
        CLASSERT ((int)offsetof(kptl_immediate_msg_t, kptlim_payload[13]) == 85);
        CLASSERT ((int)sizeof(((kptl_immediate_msg_t *)0)->kptlim_payload[13]) == 1);

        /* Checks for struct kptl_rdma_msg_t */
        CLASSERT ((int)sizeof(kptl_rdma_msg_t) == 80);
        CLASSERT ((int)offsetof(kptl_rdma_msg_t, kptlrm_hdr) == 0);
        CLASSERT ((int)sizeof(((kptl_rdma_msg_t *)0)->kptlrm_hdr) == 72);
        CLASSERT ((int)offsetof(kptl_rdma_msg_t, kptlrm_matchbits) == 72);
        CLASSERT ((int)sizeof(((kptl_rdma_msg_t *)0)->kptlrm_matchbits) == 8);

        /* Checks for struct kptl_hello_msg_t */
        CLASSERT ((int)sizeof(kptl_hello_msg_t) == 12);
        CLASSERT ((int)offsetof(kptl_hello_msg_t, kptlhm_matchbits) == 0);
        CLASSERT ((int)sizeof(((kptl_hello_msg_t *)0)->kptlhm_matchbits) == 8);
        CLASSERT ((int)offsetof(kptl_hello_msg_t, kptlhm_max_msg_size) == 8);
        CLASSERT ((int)sizeof(((kptl_hello_msg_t *)0)->kptlhm_max_msg_size) == 4);
}

const char *kptllnd_evtype2str(int type)
{
#define DO_TYPE(x) case x: return #x;
        switch(type)
        {
                DO_TYPE(PTL_EVENT_GET_START);
                DO_TYPE(PTL_EVENT_GET_END);
                DO_TYPE(PTL_EVENT_PUT_START);
                DO_TYPE(PTL_EVENT_PUT_END);
                DO_TYPE(PTL_EVENT_REPLY_START);
                DO_TYPE(PTL_EVENT_REPLY_END);
                DO_TYPE(PTL_EVENT_ACK);
                DO_TYPE(PTL_EVENT_SEND_START);
                DO_TYPE(PTL_EVENT_SEND_END);
                DO_TYPE(PTL_EVENT_UNLINK);
        default:
                return "<unknown event type>";
        }
#undef DO_TYPE
}

const char *kptllnd_msgtype2str(int type)
{
#define DO_TYPE(x) case x: return #x;
        switch(type)
        {
                DO_TYPE(PTLLND_MSG_TYPE_INVALID);
                DO_TYPE(PTLLND_MSG_TYPE_PUT);
                DO_TYPE(PTLLND_MSG_TYPE_GET);
                DO_TYPE(PTLLND_MSG_TYPE_IMMEDIATE);
                DO_TYPE(PTLLND_MSG_TYPE_HELLO);
                DO_TYPE(PTLLND_MSG_TYPE_NOOP);
                DO_TYPE(PTLLND_MSG_TYPE_NAK);
        default:
                return "<unknown msg type>";
        }
#undef DO_TYPE
}

const char *kptllnd_errtype2str(int type)
{
#define DO_TYPE(x) case x: return #x;
        switch(type)
        {
                DO_TYPE(PTL_NO_SPACE);
                DO_TYPE(PTL_ME_IN_USE);
                DO_TYPE(PTL_NAL_FAILED);
                DO_TYPE(PTL_NO_INIT);
                DO_TYPE(PTL_IFACE_DUP);
                DO_TYPE(PTL_IFACE_INVALID);
                DO_TYPE(PTL_HANDLE_INVALID);
                DO_TYPE(PTL_MD_INVALID);
                DO_TYPE(PTL_ME_INVALID);
                DO_TYPE(PTL_PROCESS_INVALID);
                DO_TYPE(PTL_PT_INDEX_INVALID);
                DO_TYPE(PTL_SR_INDEX_INVALID);
                DO_TYPE(PTL_EQ_INVALID);
                DO_TYPE(PTL_EQ_DROPPED);
                DO_TYPE(PTL_EQ_EMPTY);
                DO_TYPE(PTL_MD_NO_UPDATE);
                DO_TYPE(PTL_AC_INDEX_INVALID);
                DO_TYPE(PTL_MD_ILLEGAL);
                DO_TYPE(PTL_ME_LIST_TOO_LONG);
                DO_TYPE(PTL_MD_IN_USE);
                DO_TYPE(PTL_NI_INVALID);
                DO_TYPE(PTL_PID_INVALID);
                DO_TYPE(PTL_PT_FULL);
                DO_TYPE(PTL_VAL_FAILED);
                DO_TYPE(PTL_NOT_IMPLEMENTED);
                DO_TYPE(PTL_EQ_IN_USE);
                DO_TYPE(PTL_PID_IN_USE);
                DO_TYPE(PTL_INV_EQ_SIZE);
        default:
                return "<unknown error type>";
        }
#undef DO_TYPE
}

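/* Simple 32-bit checksum over a byte range: rotate the running sum left by
 * one bit ((sum << 1) | (sum >> 31)) and add each byte in turn.  A result
 * of 0 is reserved to mean "no checksum", so 0 is mapped to 1. */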
__u32
kptllnd_cksum (void *ptr, int nob)
{
        char  *c  = ptr;
        __u32  sum = 0;

        while (nob-- > 0)
                sum = ((sum << 1) | (sum >> 31)) + *c++;

        /* ensure I don't return 0 (== no checksum) */
        return (sum == 0) ? 1 : sum;
}

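/* Fill in the type and total length of a message.  ptlm_nob is the fixed
 * header (offsetof(kptl_msg_t, ptlm_u), 56 bytes by the wire checks above)
 * plus the body, rounded up to a multiple of 8: e.g. a 12-byte HELLO body
 * gives (56 + 12 + 7) & ~7 == 72 bytes on the wire. */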
void
kptllnd_init_msg(kptl_msg_t *msg, int type, int body_nob)
{
        msg->ptlm_type = type;
        msg->ptlm_nob  = (offsetof(kptl_msg_t, ptlm_u) + body_nob + 7) & ~7;

        LASSERT(msg->ptlm_nob <= *kptllnd_tunables.kptl_max_msg_size);
}

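/* Complete a message header immediately before it is sent to 'peer': stamp
 * in the protocol magic/version, the credits being returned, the source and
 * destination NIDs/PIDs and incarnation stamps, and (only when the
 * 'checksum' tunable is set) a checksum over the fixed header. */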
void
kptllnd_msg_pack(kptl_msg_t *msg, kptl_peer_t *peer)
{
        msg->ptlm_magic    = PTLLND_MSG_MAGIC;
        msg->ptlm_version  = PTLLND_MSG_VERSION;
        /* msg->ptlm_type    Filled in kptllnd_init_msg() */
        msg->ptlm_credits  = peer->peer_outstanding_credits;
        /* msg->ptlm_nob     Filled in kptllnd_init_msg() */
        msg->ptlm_cksum    = 0;
        msg->ptlm_srcnid   = kptllnd_data.kptl_ni->ni_nid;
        msg->ptlm_srcstamp = peer->peer_myincarnation;
        msg->ptlm_dstnid   = peer->peer_id.nid;
        msg->ptlm_dststamp = peer->peer_incarnation;
        msg->ptlm_srcpid   = the_lnet.ln_pid;
        msg->ptlm_dstpid   = peer->peer_id.pid;

        if (*kptllnd_tunables.kptl_checksum) {
                /* NB ptlm_cksum zero while computing cksum */
                msg->ptlm_cksum = kptllnd_cksum(msg,
                                                offsetof(kptl_msg_t, ptlm_u));
        }
}

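/* Validate (and, for peers of opposite endianness, byte-swap) a message
 * received off the wire.  The checks run in a fixed order: enough bytes for
 * magic + version, recognisable magic (possibly byte-swapped), supported
 * version, full header present, checksum (computed with ptlm_cksum zeroed),
 * then per-type body length and field checks.  Returns 0 on success and a
 * negative errno for a malformed message. */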
int
kptllnd_msg_unpack(kptl_msg_t *msg, int nob)
{
        const int hdr_size = offsetof(kptl_msg_t, ptlm_u);
        __u32     msg_cksum;
        __u16     msg_version;
        int       flip;

        /* 6 bytes are enough to have received magic + version */
        if (nob < 6) {
                CERROR("Very short message: %d\n", nob);
                return -EPROTO;
        }

        /*
         * Determine if we need to flip
         */
        if (msg->ptlm_magic == PTLLND_MSG_MAGIC) {
                flip = 0;
        } else if (msg->ptlm_magic == __swab32(PTLLND_MSG_MAGIC)) {
                flip = 1;
        } else {
                CERROR("Bad magic: %08x\n", msg->ptlm_magic);
                return -EPROTO;
        }

        msg_version = flip ? __swab16(msg->ptlm_version) : msg->ptlm_version;

        if (msg_version != PTLLND_MSG_VERSION) {
                CERROR("Bad version: got %04x expected %04x\n",
                       (__u32)msg_version, PTLLND_MSG_VERSION);
                return -EPROTO;
        }

        if (nob < hdr_size) {
                CERROR("Short message: got %d, wanted at least %d\n",
                       nob, hdr_size);
                return -EPROTO;
        }

        /* checksum must be computed with
         * 1) ptlm_cksum zero and
         * 2) BEFORE anything gets modified/flipped
         */
        msg_cksum = flip ? __swab32(msg->ptlm_cksum) : msg->ptlm_cksum;
        msg->ptlm_cksum = 0;
        if (msg_cksum != 0 &&
            msg_cksum != kptllnd_cksum(msg, hdr_size)) {
                CERROR("Bad checksum\n");
                return -EPROTO;
        }

        msg->ptlm_version = msg_version;
        msg->ptlm_cksum = msg_cksum;

        if (flip) {
                /* These two are 1 byte long so we don't swap them,
                 * but check this assumption */
                CLASSERT (sizeof(msg->ptlm_type) == 1);
                CLASSERT (sizeof(msg->ptlm_credits) == 1);
                /* src & dst stamps are opaque cookies */
                __swab32s(&msg->ptlm_nob);
                __swab64s(&msg->ptlm_srcnid);
                __swab64s(&msg->ptlm_dstnid);
                __swab32s(&msg->ptlm_srcpid);
                __swab32s(&msg->ptlm_dstpid);
        }

        if (msg->ptlm_nob != nob) {
                CERROR("msg_nob corrupt: got 0x%08x, wanted 0x%08x\n",
                       msg->ptlm_nob, nob);
                return -EPROTO;
        }

        switch(msg->ptlm_type)
        {
        case PTLLND_MSG_TYPE_PUT:
        case PTLLND_MSG_TYPE_GET:
                if (nob < hdr_size + sizeof(kptl_rdma_msg_t)) {
                        CERROR("Short rdma request: got %d, want %d\n",
                               nob, hdr_size + (int)sizeof(kptl_rdma_msg_t));
                        return -EPROTO;
                }

                if (flip)
                        __swab64s(&msg->ptlm_u.rdma.kptlrm_matchbits);

                if (msg->ptlm_u.rdma.kptlrm_matchbits < PTL_RESERVED_MATCHBITS) {
                        CERROR("Bad matchbits "LPX64"\n",
                               msg->ptlm_u.rdma.kptlrm_matchbits);
                        return -EPROTO;
                }
                break;

        case PTLLND_MSG_TYPE_IMMEDIATE:
                if (nob < offsetof(kptl_msg_t,
                                   ptlm_u.immediate.kptlim_payload)) {
                        CERROR("Short immediate: got %d, want %d\n", nob,
                               (int)offsetof(kptl_msg_t,
                                             ptlm_u.immediate.kptlim_payload));
                        return -EPROTO;
                }
                break;

        case PTLLND_MSG_TYPE_NOOP:
        case PTLLND_MSG_TYPE_NAK:
                break;

        case PTLLND_MSG_TYPE_HELLO:
                if (nob < hdr_size + sizeof(kptl_hello_msg_t)) {
                        CERROR("Short hello: got %d want %d\n",
                               nob, hdr_size + (int)sizeof(kptl_hello_msg_t));
                        return -EPROTO;
                }

                if (flip) {
                        __swab64s(&msg->ptlm_u.hello.kptlhm_matchbits);
                        __swab32s(&msg->ptlm_u.hello.kptlhm_max_msg_size);
                }
                break;

        default:
                CERROR("Bad message type: 0x%02x\n", (__u32)msg->ptlm_type);
                return -EPROTO;
        }

        return 0;
}

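/* ioctl entry point (via libcfs): IOC_LIBCFS_DEL_PEER deletes the peer named
 * by ioc_nid/ioc_u32[1], and IOC_LIBCFS_GET_PEER returns the state of the
 * peer at index ioc_count.  Note how each 64-bit matchbits value is split
 * across two 32-bit slots (low word in ioc_u32[0]/[2], high word in
 * ioc_u32[1]/[3]) and how two counters are packed per slot in ioc_u32[5]
 * and ioc_u32[6]. */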
int
kptllnd_ctl(lnet_ni_t *ni, unsigned int cmd, void *arg)
{
        struct libcfs_ioctl_data *data = arg;
        int rc;

        CDEBUG(D_NET, ">>> kptllnd_ctl cmd=%u arg=%p\n", cmd, arg);

        /*
         * Validate that the context block is actually
         * pointing to this interface
         */
        LASSERT (ni == kptllnd_data.kptl_ni);

        switch (cmd) {
        case IOC_LIBCFS_DEL_PEER: {
                lnet_process_id_t id;

                id.nid = data->ioc_nid;
                id.pid = data->ioc_u32[1];

                rc = kptllnd_peer_del(id);
                break;
        }

        case IOC_LIBCFS_GET_PEER: {
                lnet_process_id_t id = {.nid = LNET_NID_ANY,
                                        .pid = LNET_PID_ANY};
                __u64 incarnation = 0;
                __u64 next_matchbits = 0;
                __u64 last_matchbits_seen = 0;
                int   state = 0;
                int   sent_hello = 0;
                int   refcount = 0;
                int   nsendq = 0;
                int   nactiveq = 0;
                int   credits = 0;
                int   outstanding_credits = 0;

                rc = kptllnd_get_peer_info(data->ioc_count, &id,
                                           &state, &sent_hello,
                                           &refcount, &incarnation,
                                           &next_matchbits, &last_matchbits_seen,
                                           &nsendq, &nactiveq,
                                           &credits, &outstanding_credits);

                data->ioc_nid    = id.nid;
                data->ioc_net    = state;
                data->ioc_flags  = sent_hello;
                data->ioc_count  = refcount;
                data->ioc_u64[0] = incarnation;
                data->ioc_u32[0] = (__u32)next_matchbits;
                data->ioc_u32[1] = (__u32)(next_matchbits >> 32);
                data->ioc_u32[2] = (__u32)last_matchbits_seen;
                data->ioc_u32[3] = (__u32)(last_matchbits_seen >> 32);
                data->ioc_u32[4] = id.pid;
                data->ioc_u32[5] = (nsendq << 16) | nactiveq;
                data->ioc_u32[6] = (credits << 16) | outstanding_credits;
                break;
        }

        default:
                rc = -EINVAL;
                break;
        }

        CDEBUG(D_NET, "<<< kptllnd_ctl rc=%d\n", rc);
        return rc;
}

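/* Bring the LND up on 'ni': validate the tunables, reset kptllnd_data,
 * initialise the Portals interface and event queue, derive the LNET NID from
 * the Portals id, set up the scheduler/TX/peer structures, pre-allocate TX
 * descriptors, start the scheduler and watchdog threads and post the RX
 * buffer pool.  Any failure after the Portals NI exists goes through the
 * 'failed:' path, which calls kptllnd_shutdown() to undo whatever was done. */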
int
kptllnd_startup (lnet_ni_t *ni)
{
        int             rc;
        int             i;
        int             spares;
        struct timeval  tv;
        ptl_err_t       ptl_rc;

        LASSERT (ni->ni_lnd == &kptllnd_lnd);

        if (kptllnd_data.kptl_init != PTLLND_INIT_NOTHING) {
                CERROR("Only 1 instance supported\n");
                return -EPERM;
        }

        if (*kptllnd_tunables.kptl_max_procs_per_node < 1) {
                CERROR("max_procs_per_node must be >= 1\n");
                return -EINVAL;
        }

        *kptllnd_tunables.kptl_max_msg_size &= ~7;
        if (*kptllnd_tunables.kptl_max_msg_size < PTLLND_MIN_BUFFER_SIZE)
                *kptllnd_tunables.kptl_max_msg_size = PTLLND_MIN_BUFFER_SIZE;

        CLASSERT ((PTLLND_MIN_BUFFER_SIZE & 7) == 0);
        CLASSERT (sizeof(kptl_msg_t) <= PTLLND_MIN_BUFFER_SIZE);

        /*
         * zero pointers, flags etc
         * put everything into a known state.
         */
        memset (&kptllnd_data, 0, sizeof (kptllnd_data));
        kptllnd_data.kptl_eqh = PTL_INVALID_HANDLE;
        kptllnd_data.kptl_nih = PTL_INVALID_HANDLE;

        /*
         * Uptick the module reference count
         */
        PORTAL_MODULE_USE;

        /*
         * Setup pointers between the ni and context data block
         */
        kptllnd_data.kptl_ni = ni;
        ni->ni_data = &kptllnd_data;

        ni->ni_maxtxcredits  = *kptllnd_tunables.kptl_credits;
        ni->ni_peertxcredits = *kptllnd_tunables.kptl_peercredits;

        kptllnd_data.kptl_expected_peers =
                *kptllnd_tunables.kptl_max_nodes *
                *kptllnd_tunables.kptl_max_procs_per_node;

        /*
         * Initialize the Network interface instance
         * We use the default because we don't have any
         * way to choose a better interface.
         * Requested and actual limits are ignored.
         */
        ptl_rc = PtlNIInit(
#ifdef _USING_LUSTRE_PORTALS_
                           PTL_IFACE_DEFAULT,
#endif
                           *kptllnd_tunables.kptl_pid, NULL, NULL,
                           &kptllnd_data.kptl_nih);

        /*
         * Note: PTL_IFACE_DUP simply means that the requested
         * interface was already inited and that we're sharing it.
         */
        if (ptl_rc != PTL_OK && ptl_rc != PTL_IFACE_DUP) {
                CERROR ("PtlNIInit: error %s(%d)\n",
                        kptllnd_errtype2str(ptl_rc), ptl_rc);
                rc = -ENODEV;
                goto failed;
        }

        /* NB eq size irrelevant if using a callback */
        ptl_rc = PtlEQAlloc(kptllnd_data.kptl_nih,
                            8,                       /* eq size */
                            kptllnd_eq_callback,     /* handler callback */
                            &kptllnd_data.kptl_eqh); /* output handle */
        if (ptl_rc != PTL_OK) {
                CERROR("PtlEQAlloc failed %s(%d)\n",
                       kptllnd_errtype2str(ptl_rc), ptl_rc);
                rc = -ENOMEM;
                goto failed;
        }

        /*
         * Fetch the lower NID
         */
        ptl_rc = PtlGetId(kptllnd_data.kptl_nih,
                          &kptllnd_data.kptl_portals_id);
        if (ptl_rc != PTL_OK) {
                CERROR ("PtlGetID: error %s(%d)\n",
                        kptllnd_errtype2str(ptl_rc), ptl_rc);
                rc = -EINVAL;
                goto failed;
        }

        if (kptllnd_data.kptl_portals_id.pid != *kptllnd_tunables.kptl_pid) {
                /* The kernel ptllnd must have the expected PID */
                CERROR("Unexpected PID: %u (%u expected)\n",
                       kptllnd_data.kptl_portals_id.pid,
                       *kptllnd_tunables.kptl_pid);
                rc = -EINVAL;
                goto failed;
        }

        ni->ni_nid = kptllnd_ptl2lnetnid(kptllnd_data.kptl_portals_id.nid);

        CDEBUG(D_NET, "ptl id=%s, lnet id=%s\n",
               kptllnd_ptlid2str(kptllnd_data.kptl_portals_id),
               libcfs_nid2str(ni->ni_nid));

        /* Initialize the incarnation - it must be for-all-time unique, even
         * accounting for the fact that we increment it when we disconnect a
         * peer that's using it */
        do_gettimeofday(&tv);
        kptllnd_data.kptl_incarnation = (((__u64)tv.tv_sec) * 1000000) +
                                        tv.tv_usec;
        CDEBUG(D_NET, "Incarnation="LPX64"\n", kptllnd_data.kptl_incarnation);

        /*
         * Setup the sched locks/lists/waitq
         */
        spin_lock_init(&kptllnd_data.kptl_sched_lock);
        init_waitqueue_head(&kptllnd_data.kptl_sched_waitq);
        INIT_LIST_HEAD(&kptllnd_data.kptl_sched_txq);
        INIT_LIST_HEAD(&kptllnd_data.kptl_sched_rxq);
        INIT_LIST_HEAD(&kptllnd_data.kptl_sched_rxbq);

        /*
         * Setup the tx locks/lists
         */
        spin_lock_init(&kptllnd_data.kptl_tx_lock);
        INIT_LIST_HEAD(&kptllnd_data.kptl_idle_txs);
        atomic_set(&kptllnd_data.kptl_ntx, 0);

        /*
         * Allocate and setup the peer hash table
         */
        rwlock_init(&kptllnd_data.kptl_peer_rw_lock);
        init_waitqueue_head(&kptllnd_data.kptl_watchdog_waitq);
        INIT_LIST_HEAD(&kptllnd_data.kptl_closing_peers);
        INIT_LIST_HEAD(&kptllnd_data.kptl_zombie_peers);

        spin_lock_init(&kptllnd_data.kptl_ptlid2str_lock);

        kptllnd_data.kptl_peer_hash_size =
                *kptllnd_tunables.kptl_peer_hash_table_size;
        LIBCFS_ALLOC(kptllnd_data.kptl_peers,
                     (kptllnd_data.kptl_peer_hash_size *
                      sizeof(struct list_head)));
        if (kptllnd_data.kptl_peers == NULL) {
                CERROR("Failed to allocate space for peer hash table size=%d\n",
                       kptllnd_data.kptl_peer_hash_size);
                rc = -ENOMEM;
                goto failed;
        }
        for (i = 0; i < kptllnd_data.kptl_peer_hash_size; i++)
                INIT_LIST_HEAD(&kptllnd_data.kptl_peers[i]);

        LIBCFS_ALLOC(kptllnd_data.kptl_nak_msg, offsetof(kptl_msg_t, ptlm_u));
        if (kptllnd_data.kptl_nak_msg == NULL) {
                CERROR("Can't allocate NAK msg\n");
                rc = -ENOMEM;
                goto failed;
        }

        memset(kptllnd_data.kptl_nak_msg, 0, offsetof(kptl_msg_t, ptlm_u));
        kptllnd_init_msg(kptllnd_data.kptl_nak_msg, PTLLND_MSG_TYPE_NAK, 0);
        kptllnd_data.kptl_nak_msg->ptlm_magic    = PTLLND_MSG_MAGIC;
        kptllnd_data.kptl_nak_msg->ptlm_version  = PTLLND_MSG_VERSION;
        kptllnd_data.kptl_nak_msg->ptlm_srcpid   = the_lnet.ln_pid;
        kptllnd_data.kptl_nak_msg->ptlm_srcnid   = ni->ni_nid;
        kptllnd_data.kptl_nak_msg->ptlm_srcstamp = kptllnd_data.kptl_incarnation;
        kptllnd_data.kptl_nak_msg->ptlm_dstpid   = LNET_PID_ANY;
        kptllnd_data.kptl_nak_msg->ptlm_dstnid   = LNET_NID_ANY;

        kptllnd_rx_buffer_pool_init(&kptllnd_data.kptl_rx_buffer_pool);

        kptllnd_data.kptl_rx_cache =
                cfs_mem_cache_create("ptllnd_rx",
                                     sizeof(kptl_rx_t) +
                                     *kptllnd_tunables.kptl_max_msg_size,
                                     0,     /* offset */
                                     0);    /* flags */
        if (kptllnd_data.kptl_rx_cache == NULL) {
                CERROR("Can't create slab for RX descriptors\n");
                rc = -ENOMEM;
                goto failed;
        }

        /* lists/ptrs/locks initialised */
        kptllnd_data.kptl_init = PTLLND_INIT_DATA;

        /*****************************************************/

        rc = kptllnd_setup_tx_descs();
        if (rc != 0) {
                CERROR("Can't pre-allocate %d TX descriptors: %d\n",
                       *kptllnd_tunables.kptl_ntx, rc);
                goto failed;
        }

        /* Start the scheduler threads for handling incoming requests. No need
         * to advance the state because this will be automatically cleaned up
         * now that PTLLND_INIT_DATA state has been entered */
        CDEBUG(D_NET, "starting %d scheduler threads\n", PTLLND_N_SCHED);
        for (i = 0; i < PTLLND_N_SCHED; i++) {
                rc = kptllnd_thread_start(kptllnd_scheduler, (void *)((long)i));
                if (rc != 0) {
                        CERROR("Can't spawn scheduler[%d]: %d\n", i, rc);
                        goto failed;
                }
        }

        rc = kptllnd_thread_start(kptllnd_watchdog, NULL);
        if (rc != 0) {
                CERROR("Can't spawn watchdog: %d\n", rc);
                goto failed;
        }

        /* Ensure that 'rxb_nspare' buffers can be off the net (being emptied)
         * and we will still have enough buffers posted for all our peers */
        spares = *kptllnd_tunables.kptl_rxb_nspare *
                 ((*kptllnd_tunables.kptl_rxb_npages * PAGE_SIZE)/
                  *kptllnd_tunables.kptl_max_msg_size);

        /* reserve and post the buffers */
        rc = kptllnd_rx_buffer_pool_reserve(&kptllnd_data.kptl_rx_buffer_pool,
                                            kptllnd_data.kptl_expected_peers +
                                            spares);
        if (rc != 0) {
                CERROR("Can't reserve RX Buffer pool: %d\n", rc);
                goto failed;
        }

        /* flag everything initialised */
        kptllnd_data.kptl_init = PTLLND_INIT_ALL;

        /*****************************************************/

        if (*kptllnd_tunables.kptl_checksum)
                CWARN("Checksumming enabled\n");

        CDEBUG(D_NET, "<<< kptllnd_startup SUCCESS\n");
        return 0;

 failed:
        CDEBUG(D_NET, "kptllnd_startup failed rc=%d\n", rc);
        kptllnd_shutdown(ni);
        return rc;
}

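/* Tear down in two phases, driven by kptl_init so it is safe to call from a
 * partly-completed kptllnd_startup(): phase 1 (kptl_shutdown = 1) deletes all
 * peers and waits for them to drain, phase 2 (kptl_shutdown = 2) wakes the
 * scheduler and watchdog threads and waits for them to exit.  Only then are
 * the Portals EQ/NI freed and the remaining memory released. */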
void
kptllnd_shutdown (lnet_ni_t *ni)
{
        int               i;
        ptl_err_t         prc;
        lnet_process_id_t process_id;
        unsigned long     flags;

        CDEBUG(D_MALLOC, "before LND cleanup: kmem %d\n",
               atomic_read (&libcfs_kmemory));

        LASSERT (ni == kptllnd_data.kptl_ni);

        switch (kptllnd_data.kptl_init) {
        case PTLLND_INIT_ALL:
        case PTLLND_INIT_DATA:
                kptllnd_rx_buffer_pool_fini(&kptllnd_data.kptl_rx_buffer_pool);
                LASSERT (list_empty(&kptllnd_data.kptl_sched_rxq));
                LASSERT (list_empty(&kptllnd_data.kptl_sched_rxbq));

                /* Hold peertable lock to interleave cleanly with peer birth/death */
                write_lock_irqsave(&kptllnd_data.kptl_peer_rw_lock, flags);

                LASSERT (kptllnd_data.kptl_shutdown == 0);
                kptllnd_data.kptl_shutdown = 1; /* phase 1 == destroy peers */

                /* no new peers possible now */
                write_unlock_irqrestore(&kptllnd_data.kptl_peer_rw_lock,
                                        flags);

                /* nuke all existing peers */
                process_id.nid = LNET_NID_ANY;
                process_id.pid = LNET_PID_ANY;
                kptllnd_peer_del(process_id);

                read_lock_irqsave(&kptllnd_data.kptl_peer_rw_lock, flags);

                LASSERT (kptllnd_data.kptl_n_active_peers == 0);

                i = 2;
                while (kptllnd_data.kptl_npeers != 0) {
                        i++;
                        CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* power of 2? */
                               "Waiting for %d peers to terminate\n",
                               kptllnd_data.kptl_npeers);

                        read_unlock_irqrestore(&kptllnd_data.kptl_peer_rw_lock,
                                               flags);

                        cfs_pause(cfs_time_seconds(1));

                        read_lock_irqsave(&kptllnd_data.kptl_peer_rw_lock,
                                          flags);
                }

                LASSERT(list_empty(&kptllnd_data.kptl_closing_peers));
                LASSERT(list_empty(&kptllnd_data.kptl_zombie_peers));
                LASSERT (kptllnd_data.kptl_peers != NULL);
                for (i = 0; i < kptllnd_data.kptl_peer_hash_size; i++)
                        LASSERT (list_empty (&kptllnd_data.kptl_peers[i]));

                read_unlock_irqrestore(&kptllnd_data.kptl_peer_rw_lock, flags);
                CDEBUG(D_NET, "All peers deleted\n");

                /* Shutdown phase 2: kill the daemons... */
                kptllnd_data.kptl_shutdown = 2;
                mb();

                i = 2;
                while (atomic_read (&kptllnd_data.kptl_nthreads) != 0) {
                        /* Wake up all threads */
                        wake_up_all(&kptllnd_data.kptl_sched_waitq);
                        wake_up_all(&kptllnd_data.kptl_watchdog_waitq);

                        i++;
                        CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* power of 2? */
                               "Waiting for %d threads to terminate\n",
                               atomic_read(&kptllnd_data.kptl_nthreads));
                        cfs_pause(cfs_time_seconds(1));
                }

                CDEBUG(D_NET, "All threads stopped\n");
                LASSERT(list_empty(&kptllnd_data.kptl_sched_txq));

                kptllnd_cleanup_tx_descs();

                /* Nothing here now, but libcfs might soon require us to
                 * explicitly destroy wait queues and semaphores; if so,
                 * that would be done here */

                /* fall through */

        case PTLLND_INIT_NOTHING:
                CDEBUG(D_NET, "PTLLND_INIT_NOTHING\n");
                break;
        }

        if (!PtlHandleIsEqual(kptllnd_data.kptl_eqh, PTL_INVALID_HANDLE)) {
                prc = PtlEQFree(kptllnd_data.kptl_eqh);
                if (prc != PTL_OK)
                        CERROR("Error %s(%d) freeing portals EQ\n",
                               kptllnd_errtype2str(prc), prc);
        }

        if (!PtlHandleIsEqual(kptllnd_data.kptl_nih, PTL_INVALID_HANDLE)) {
                prc = PtlNIFini(kptllnd_data.kptl_nih);
                if (prc != PTL_OK)
                        CERROR("Error %s(%d) finalizing portals NI\n",
                               kptllnd_errtype2str(prc), prc);
        }

        LASSERT (atomic_read(&kptllnd_data.kptl_ntx) == 0);
        LASSERT (list_empty(&kptllnd_data.kptl_idle_txs));

        if (kptllnd_data.kptl_rx_cache != NULL)
                cfs_mem_cache_destroy(kptllnd_data.kptl_rx_cache);

        if (kptllnd_data.kptl_peers != NULL)
                LIBCFS_FREE (kptllnd_data.kptl_peers,
                             sizeof (struct list_head) *
                             kptllnd_data.kptl_peer_hash_size);

        if (kptllnd_data.kptl_nak_msg != NULL)
                LIBCFS_FREE (kptllnd_data.kptl_nak_msg,
                             offsetof(kptl_msg_t, ptlm_u));

        memset(&kptllnd_data, 0, sizeof(kptllnd_data));

        CDEBUG(D_MALLOC, "after LND cleanup: kmem %d\n",
               atomic_read (&libcfs_kmemory));

        PORTAL_MODULE_UNUSE;
}

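/* Module entry point: check the wire-protocol asserts, initialise the
 * tunables (failing the load if that fails) and register the LND with LNET;
 * the matching module exit unregisters it and releases the tunables. */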
int
kptllnd_module_init (void)
{
        int rc;

        kptllnd_assert_wire_constants();

        rc = kptllnd_tunables_init();
        if (rc != 0)
                return rc;

        kptllnd_init_ptltrace();

        lnet_register_lnd(&kptllnd_lnd);

        return 0;
}

void
kptllnd_module_fini (void)
{
        lnet_unregister_lnd(&kptllnd_lnd);
        kptllnd_tunables_fini();
}

MODULE_AUTHOR("Cluster File Systems, Inc. <info@clusterfs.com>");
MODULE_DESCRIPTION("Kernel Portals LND v1.00");
MODULE_LICENSE("GPL");

module_init(kptllnd_module_init);
module_exit(kptllnd_module_fini);