/*
 * Copyright (C) 2012 Cray, Inc.
 *
 * Copyright (c) 2014, Intel Corporation.
 *
 * Author: Nic Henke <nic@cray.com>
 * Author: James Shimek <jshimek@cray.com>
 *
 * This file is part of Lustre, http://www.lustre.org.
 *
 * Lustre is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * Lustre is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with Lustre; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include "gnilnd.h"
void
kgnilnd_setup_smsg_attr(gni_smsg_attr_t *smsg_attr)
{
	smsg_attr->mbox_maxcredit = *kgnilnd_tunables.kgn_mbox_credits;
	smsg_attr->msg_maxsize = GNILND_MAX_MSG_SIZE;
	smsg_attr->msg_type = GNI_SMSG_TYPE_MBOX_AUTO_RETRANSMIT;
}
kgnilnd_map_fmablk(kgn_device_t *device, kgn_fma_memblock_t *fma_blk)

	__u32	flags = GNI_MEM_READWRITE;
	static unsigned long reg_to;
	int	rfto = *kgnilnd_tunables.kgn_reg_fail_timeout;

	if (fma_blk->gnm_state == GNILND_FMABLK_PHYS) {
		flags |= GNI_MEM_PHYS_CONT;
	}

	fma_blk->gnm_hold_timeout = 0;

	/* make sure we are mapping a clean block */
	LASSERTF(fma_blk->gnm_hndl.qword1 == 0UL, "fma_blk %p dirty\n", fma_blk);

	rrc = kgnilnd_mem_register(device->gnd_handle, (__u64)fma_blk->gnm_block,
				   fma_blk->gnm_blk_size, device->gnd_rcv_fma_cqh,
				   flags, &fma_blk->gnm_hndl);
	if (rrc != GNI_RC_SUCCESS) {
		if (rfto != GNILND_REGFAILTO_DISABLE) {
			if (reg_to == 0) {
				reg_to = jiffies + cfs_time_seconds(rfto);
			} else if (time_after(jiffies, reg_to)) {
				CERROR("FATAL: fmablk registration has failed for %ld seconds\n",
				       cfs_duration_sec(jiffies - reg_to) + rfto);
			}
		}

		CNETERR("register fmablk failed 0x%p mbox_size %d flags %u\n",
			fma_blk, fma_blk->gnm_mbox_size, flags);
		RETURN(-ENOMEM);
	}

	/* PHYS_CONT memory isn't really mapped, at least not in GART -
	 * but all mappings chew up an MDD */
	if (fma_blk->gnm_state != GNILND_FMABLK_PHYS) {
		atomic64_add(fma_blk->gnm_blk_size, &device->gnd_nbytes_map);
	}

	atomic_inc(&device->gnd_n_mdd);
	/* nfmablk is live (mapped) blocks */
	atomic_inc(&device->gnd_nfmablk);
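/*
 * A minimal standalone sketch (not driver code) of the registration-failure
 * deadline pattern used above: the first failure arms a deadline of
 * "fail_timeout" ticks, later failures only become fatal once that deadline
 * has passed, and a success disarms it. All names here are hypothetical.
 */
static unsigned long sketch_reg_deadline;	/* 0 == disarmed */

/* returns 1 if failures have persisted past the deadline (fatal) */
static int
sketch_reg_failed(unsigned long now, unsigned long fail_timeout)
{
	if (sketch_reg_deadline == 0) {
		/* first failure - arm the deadline, not fatal yet */
		sketch_reg_deadline = now + fail_timeout;
		return 0;
	}
	return now > sketch_reg_deadline;
}

static void
sketch_reg_succeeded(void)
{
	/* in this sketch, a successful registration disarms the deadline */
	sketch_reg_deadline = 0;
}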
kgnilnd_alloc_fmablk(kgn_device_t *device, int use_phys)

	int			rc = 0;
	int			num_mbox;
	kgn_fma_memblock_t	*fma_blk;
	gni_smsg_attr_t		smsg_attr;
	unsigned long		fmablk_vers;

#if defined(CONFIG_CRAY_XT) && !defined(CONFIG_CRAY_COMPUTE)
	/* We allocate large blocks of memory here, potentially leading
	 * to memory exhaustion during massive reconnects after a network
	 * outage. Limit the number of fma blocks in use by always keeping
	 * a percentage of pages free, initially set to 25% of total memory. */
	if (global_page_state(NR_FREE_PAGES) < kgnilnd_data.free_pages_limit) {
		LCONSOLE_INFO("Exceeding free page limit of %ld. "
			      "Free pages available %ld\n",
			      kgnilnd_data.free_pages_limit,
			      global_page_state(NR_FREE_PAGES));
		RETURN(-ENOMEM);
	}
#endif

	/* we'll use fmablk_vers and the gnd_fmablk_mutex to gate access
	 * to this allocation code. Everyone will sample the version
	 * before and after getting the mutex. If it has changed,
	 * we'll bail out to check the lists again - this indicates that
	 * some sort of change was made to the lists and it is possible
	 * that there is a mailbox for us to find now. This should prevent
	 * a ton of spinning in the case where there are lots of threads
	 * that need a yet-to-be-allocated mailbox for a connection. */

	fmablk_vers = atomic_read(&device->gnd_fmablk_vers);
	mutex_lock(&device->gnd_fmablk_mutex);

	if (fmablk_vers != atomic_read(&device->gnd_fmablk_vers)) {
		/* version changed while we were waiting for the mutex -
		 * we'll recheck the lists assuming something nice happened */
		mutex_unlock(&device->gnd_fmablk_mutex);
		RETURN(0);
	}
	LIBCFS_ALLOC(fma_blk, sizeof(kgn_fma_memblock_t));
	if (fma_blk == NULL) {
		CNETERR("could not allocate fma block descriptor\n");

	INIT_LIST_HEAD(&fma_blk->gnm_bufflist);

	kgnilnd_setup_smsg_attr(&smsg_attr);

	gni_smsg_buff_size_needed(&smsg_attr, &fma_blk->gnm_mbox_size);

	LASSERTF(fma_blk->gnm_mbox_size, "mbox size %d\n", fma_blk->gnm_mbox_size);

	/* gni_smsg_buff_size_needed calculates the base mailbox size, and since
	 * we want to hold kgn_peer_credits worth of messages in both directions,
	 * we add PAYLOAD to grow the mailbox size */

	fma_blk->gnm_mbox_size += GNILND_MBOX_PAYLOAD;
	/* we'll only use physical during preallocate at startup -- this keeps it nice and
	 * clean for runtime decisions. We'll keep the PHYS ones around until shutdown
	 * as reallocating them is tough if there is memory fragmentation */

	if (use_phys) {
		fma_blk->gnm_block = kmem_cache_alloc(kgnilnd_data.kgn_mbox_cache, GFP_ATOMIC);
		if (fma_blk->gnm_block == NULL) {
			CNETERR("could not allocate physical SMSG mailbox memory\n");

		fma_blk->gnm_blk_size = GNILND_MBOX_SIZE;
		num_mbox = fma_blk->gnm_blk_size / fma_blk->gnm_mbox_size;

		LASSERTF(num_mbox >= 1,
			 "num_mbox %d blk_size %u mbox_size %d\n",
			 num_mbox, fma_blk->gnm_blk_size, fma_blk->gnm_mbox_size);

		fma_blk->gnm_state = GNILND_FMABLK_PHYS;
	} else {
		num_mbox = *kgnilnd_tunables.kgn_mbox_per_block;
		fma_blk->gnm_blk_size = num_mbox * fma_blk->gnm_mbox_size;

		LASSERTF(num_mbox >= 1 && num_mbox >= *kgnilnd_tunables.kgn_mbox_per_block,
			 "num_mbox %d blk_size %u mbox_size %d tunable %d\n",
			 num_mbox, fma_blk->gnm_blk_size, fma_blk->gnm_mbox_size,
			 *kgnilnd_tunables.kgn_mbox_per_block);

		fma_blk->gnm_block = kgnilnd_vzalloc(fma_blk->gnm_blk_size);
		if (fma_blk->gnm_block == NULL) {
			CNETERR("could not allocate virtual SMSG mailbox memory, %d bytes\n",
				fma_blk->gnm_blk_size);

		fma_blk->gnm_state = GNILND_FMABLK_VIRT;
	}
	/* allocate just enough space for the bits to track the mailboxes */
	CFS_ALLOC_PTR_ARRAY(fma_blk->gnm_bit_array, BITS_TO_LONGS(num_mbox));
	if (fma_blk->gnm_bit_array == NULL) {
		CNETERR("could not allocate mailbox bitmask, %lu bytes for %d mbox\n",
			sizeof(unsigned long) * BITS_TO_LONGS(num_mbox), num_mbox);

	bitmap_zero(fma_blk->gnm_bit_array, num_mbox);

	/* now that num_mbox is set based on the allocation type, set up the
	 * per-mailbox debug info */
	CFS_ALLOC_PTR_ARRAY(fma_blk->gnm_mbox_info, num_mbox);
	if (fma_blk->gnm_mbox_info == NULL) {
		CNETERR("could not allocate mailbox debug, %lu bytes for %d mbox\n",
			sizeof(kgn_mbox_info_t) * num_mbox, num_mbox);
	rc = kgnilnd_map_fmablk(device, fma_blk);

	fma_blk->gnm_next_avail_mbox = 0;
	fma_blk->gnm_avail_mboxs = fma_blk->gnm_num_mboxs = num_mbox;

	CDEBUG(D_MALLOC, "alloc fmablk 0x%p num %d msg_maxsize %d credits %d "
	       "mbox_size %d MDD %#llx.%#llx\n",
	       fma_blk, num_mbox, smsg_attr.msg_maxsize, smsg_attr.mbox_maxcredit,
	       fma_blk->gnm_mbox_size, fma_blk->gnm_hndl.qword1,
	       fma_blk->gnm_hndl.qword2);

	/* the lock is protecting data structures, not the mutex */
	spin_lock(&device->gnd_fmablk_lock);
	list_add_tail(&fma_blk->gnm_bufflist, &device->gnd_fma_buffs);

	/* toggle under the lock so once they change, the list is also
	 * ready for others to traverse */
	atomic_inc(&device->gnd_fmablk_vers);

	spin_unlock(&device->gnd_fmablk_lock);

	mutex_unlock(&device->gnd_fmablk_mutex);

	RETURN(0);
	/* error path: unwind whatever was allocated above */
	CFS_FREE_PTR_ARRAY(fma_blk->gnm_mbox_info, num_mbox);

	CFS_FREE_PTR_ARRAY(fma_blk->gnm_bit_array, BITS_TO_LONGS(num_mbox));

	if (fma_blk->gnm_state == GNILND_FMABLK_VIRT) {
		kgnilnd_vfree(fma_blk->gnm_block, fma_blk->gnm_blk_size);
	} else {
		kmem_cache_free(kgnilnd_data.kgn_mbox_cache, fma_blk->gnm_block);
	}

	LIBCFS_FREE(fma_blk, sizeof(kgn_fma_memblock_t));

	mutex_unlock(&device->gnd_fmablk_mutex);

	RETURN(rc);
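/*
 * Standalone sketch (hypothetical names, not driver code) of the version
 * gating used in the allocator above: sample the version, take the mutex,
 * and bail out if the version moved while we waited - someone else changed
 * the lists and there may already be a mailbox for us to find.
 */
struct sketch_pool {
	int	version;	/* bumped under the lock on every list change */
	int	nblocks;	/* stands in for the expensive allocation */
};

/* caller sampled pool->version, then took the pool mutex, then calls this.
 * returns 1 if we allocated, 0 if the caller should recheck the lists */
static int
sketch_alloc_gated(struct sketch_pool *pool, int sampled_vers)
{
	if (sampled_vers != pool->version) {
		/* lists changed while we waited for the mutex */
		return 0;
	}
	pool->nblocks++;	/* the expensive allocation */
	pool->version++;	/* tell other samplers the lists changed */
	return 1;
}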
kgnilnd_unmap_fmablk(kgn_device_t *dev, kgn_fma_memblock_t *fma_blk)

	/* if any are held, set hold_timeout from the conn timeouts used in this block,
	 * but not during shutdown - then just nuke and pave.
	 * During a stack reset, we need to deregister with a hold timeout
	 * set so we don't use the same MDD after reset is complete */
	if ((fma_blk->gnm_held_mboxs && !kgnilnd_data.kgn_shutdown) ||
	    kgnilnd_data.kgn_in_reset) {
		fma_blk->gnm_hold_timeout = GNILND_TIMEOUT2DEADMAN;
	}

	/* we are changing the state of a block, tickle the version to tell
	 * the proc code the list is stale now */
	atomic_inc(&dev->gnd_fmablk_vers);

	rrc = kgnilnd_mem_deregister(dev->gnd_handle, &fma_blk->gnm_hndl, fma_blk->gnm_hold_timeout);

	CDEBUG(rrc == GNI_RC_SUCCESS ? D_MALLOC : D_CONSOLE|D_NETERROR,
	       "unmap fmablk 0x%p@%s sz %u total %d avail %d held %d mbox_size %d "
	       "hold_timeout %d\n",
	       fma_blk, kgnilnd_fmablk_state2str(fma_blk->gnm_state),
	       fma_blk->gnm_blk_size, fma_blk->gnm_num_mboxs,
	       fma_blk->gnm_avail_mboxs, fma_blk->gnm_held_mboxs,
	       fma_blk->gnm_mbox_size, fma_blk->gnm_hold_timeout);

	LASSERTF(rrc == GNI_RC_SUCCESS,
		 "tried to double unmap or something bad, fma_blk %p (rrc %d)\n",
		 fma_blk, rrc);

	if (fma_blk->gnm_hold_timeout &&
	    !(kgnilnd_data.kgn_in_reset &&
	      fma_blk->gnm_state == GNILND_FMABLK_PHYS)) {
		atomic_inc(&dev->gnd_n_mdd_held);
	} else {
		atomic_dec(&dev->gnd_n_mdd);
	}

	/* PHYS blocks don't get mapped */
	if (fma_blk->gnm_state != GNILND_FMABLK_PHYS) {
		atomic64_sub(fma_blk->gnm_blk_size, &dev->gnd_nbytes_map);
		fma_blk->gnm_state = GNILND_FMABLK_IDLE;
	} else if (kgnilnd_data.kgn_in_reset) {
		/* in stack reset, clear the MDD handle for PHYS blocks, as we'll
		 * re-use the fma_blk after reset so we don't have to drop/allocate
		 * all of those physical blocks */
		fma_blk->gnm_hndl.qword1 = fma_blk->gnm_hndl.qword2 = 0UL;
	}

	/* Decrement here as this is the # of mapped blocks */
	atomic_dec(&dev->gnd_nfmablk);
/* needs lock on gnd_fmablk_lock to cover gnd_fma_buffs */
kgnilnd_free_fmablk_locked(kgn_device_t *dev, kgn_fma_memblock_t *fma_blk)

	LASSERTF(fma_blk->gnm_avail_mboxs == fma_blk->gnm_num_mboxs,
		 "fma_blk %p@%d free in bad state (%d): blk total %d avail %d held %d\n",
		 fma_blk, fma_blk->gnm_state, fma_blk->gnm_hold_timeout, fma_blk->gnm_num_mboxs,
		 fma_blk->gnm_avail_mboxs, fma_blk->gnm_held_mboxs);

	atomic_inc(&dev->gnd_fmablk_vers);

	if (fma_blk->gnm_hold_timeout) {
		CDEBUG(D_MALLOC, "mdd release fmablk 0x%p sz %u avail %d held %d "
		       "mbox_size %d\n",
		       fma_blk, fma_blk->gnm_blk_size, fma_blk->gnm_avail_mboxs,
		       fma_blk->gnm_held_mboxs, fma_blk->gnm_mbox_size);

		/* We leave the MDD dangling over a stack reset */
		if (!kgnilnd_data.kgn_in_reset) {
			kgnilnd_mem_mdd_release(dev->gnd_handle, &fma_blk->gnm_hndl);
		}
		/* ignoring the return code - if kgni/ghal can't find it,
		 * it must be released already */
		atomic_dec(&dev->gnd_n_mdd_held);
		atomic_dec(&dev->gnd_n_mdd);
	}

	/* we can't free the gnm_block until all the conns have released their
	 * purgatory holds. While we have purgatory holds, we might check the conn
	 * RX mailbox during the CLOSING process. It is possible that kgni might
	 * try to look into the RX side for credits when sending the CLOSE msg too */
	CDEBUG(D_MALLOC, "fmablk %p free buffer %p mbox_size %d\n",
	       fma_blk, fma_blk->gnm_block, fma_blk->gnm_mbox_size);

	if (fma_blk->gnm_state == GNILND_FMABLK_PHYS) {
		kmem_cache_free(kgnilnd_data.kgn_mbox_cache, fma_blk->gnm_block);
	} else {
		kgnilnd_vfree(fma_blk->gnm_block, fma_blk->gnm_blk_size);
	}

	fma_blk->gnm_state = GNILND_FMABLK_FREED;

	list_del(&fma_blk->gnm_bufflist);

	CFS_FREE_PTR_ARRAY(fma_blk->gnm_mbox_info, fma_blk->gnm_num_mboxs);
	CFS_FREE_PTR_ARRAY(fma_blk->gnm_bit_array,
			   BITS_TO_LONGS(fma_blk->gnm_num_mboxs));
	LIBCFS_FREE(fma_blk, sizeof(kgn_fma_memblock_t));
kgnilnd_find_free_mbox(kgn_conn_t *conn)

	kgn_device_t		*dev = conn->gnc_device;
	gni_smsg_attr_t		*smsg_attr = &conn->gnpr_smsg_attr;
	kgn_fma_memblock_t	*fma_blk;
	kgn_mbox_info_t		*mbox = NULL;
	int			 id;

	spin_lock(&dev->gnd_fmablk_lock);

	list_for_each_entry(fma_blk, &conn->gnc_device->gnd_fma_buffs,
			    gnm_bufflist) {
		if (fma_blk->gnm_avail_mboxs <= 0 ||
		    fma_blk->gnm_state <= GNILND_FMABLK_IDLE) {
			continue;
		}

		/* look in the bitarray for an available mailbox */
		id = find_next_zero_bit(
			fma_blk->gnm_bit_array,
			fma_blk->gnm_num_mboxs,
			fma_blk->gnm_next_avail_mbox);
		if (id == fma_blk->gnm_num_mboxs &&
		    fma_blk->gnm_next_avail_mbox != 0) {
			/* nothing in the tail of the bitmap, restart from 0 */
			fma_blk->gnm_next_avail_mbox = 0;
			id = find_next_zero_bit(fma_blk->gnm_bit_array,
						fma_blk->gnm_num_mboxs, 0);
		}

		LASSERTF(id < fma_blk->gnm_num_mboxs, "id %d max %d\n",
			 id, fma_blk->gnm_num_mboxs);
		set_bit(id, (volatile unsigned long *)fma_blk->gnm_bit_array);
		conn->gnc_mbox_id = id;

		fma_blk->gnm_next_avail_mbox =
			(id == (fma_blk->gnm_num_mboxs - 1)) ? 0 : (id + 1);
		fma_blk->gnm_avail_mboxs--;
		conn->gnc_fma_blk = fma_blk;

		kgnilnd_setup_smsg_attr(smsg_attr);

		smsg_attr->msg_buffer = fma_blk->gnm_block;
		smsg_attr->mbox_offset = fma_blk->gnm_mbox_size * id;
		smsg_attr->mem_hndl = fma_blk->gnm_hndl;
		smsg_attr->buff_size = fma_blk->gnm_mbox_size;

		/* We'll set the hndl to zero for PHYS blocks unmapped during stack
		 * reset and re-use the same fma_blk after stack reset. This ensures we've
		 * properly mapped it before we use it */
		LASSERTF(fma_blk->gnm_hndl.qword1 != 0UL, "unmapped fma_blk %p, state %d\n",
			 fma_blk, fma_blk->gnm_state);

		CDEBUG(D_NET, "conn %p smsg %p fmablk %p "
		       "allocating SMSG mbox %d buf %p "
		       "offset %u hndl %#llx.%#llx\n",
		       conn, smsg_attr, fma_blk, id,
		       smsg_attr->msg_buffer, smsg_attr->mbox_offset,
		       fma_blk->gnm_hndl.qword1,
		       fma_blk->gnm_hndl.qword2);

		mbox = &fma_blk->gnm_mbox_info[id];
		mbox->mbx_create_conn_memset = jiffies;
		mbox->mbx_nallocs_total++;

		/* zero the mbox to remove any old data from our last use.
		 * this had better be safe - if not, our purgatory timers
		 * are too short or a peer really is misbehaving */
		memset(smsg_attr->msg_buffer + smsg_attr->mbox_offset,
		       0, smsg_attr->buff_size);
		break;
	}

	spin_unlock(&dev->gnd_fmablk_lock);
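/*
 * Standalone sketch of the mailbox search above: scan a bitmap from the
 * next-available hint, wrap to the start once if the tail is full, then
 * claim the slot and advance the hint. Hypothetical names; a fixed-size
 * 64-slot bitmap stands in for gnm_bit_array.
 */
#define SKETCH_NSLOTS	64

static unsigned long long sketch_bits;	/* bit N set == slot N in use */
static int sketch_next_hint;

/* returns the claimed slot id, or -1 if all slots are busy */
static int
sketch_claim_slot(void)
{
	int id, scanned;

	for (scanned = 0; scanned < SKETCH_NSLOTS; scanned++) {
		id = (sketch_next_hint + scanned) % SKETCH_NSLOTS;
		if (!(sketch_bits & (1ULL << id))) {
			sketch_bits |= 1ULL << id;	/* claim it */
			sketch_next_hint = (id + 1) % SKETCH_NSLOTS;
			return id;
		}
	}
	return -1;
}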
kgnilnd_setup_mbox(kgn_conn_t *conn)

	gni_smsg_attr_t		*smsg_attr = &conn->gnpr_smsg_attr;
	int			 err = 0;

	do {
		smsg_attr->msg_buffer = NULL;
		/* look for an available mbox */
		kgnilnd_find_free_mbox(conn);

		/* nothing in the existing buffers, make a new one */
		if (smsg_attr->msg_buffer == NULL) {
			/* for runtime allocations, we only want vmalloc */
			err = kgnilnd_alloc_fmablk(conn->gnc_device, 0);
	} while (smsg_attr->msg_buffer == NULL);

	if (err)
		CNETERR("couldn't allocate SMSG mbox for conn %p Error: %d\n",
			conn, err);
kgnilnd_release_mbox(kgn_conn_t *conn, int purgatory_hold)

	kgn_device_t		*dev = conn->gnc_device;
	gni_smsg_attr_t		*smsg_attr = &conn->gnpr_smsg_attr;
	kgn_fma_memblock_t	*fma_blk = NULL;
	kgn_mbox_info_t		*mbox = NULL;
	int			 found = 0;
	int			 id;

	/* if we failed to set up the mbox and are now destroying the conn */
	if (smsg_attr->msg_buffer == NULL) {
		return;
	}

	id = conn->gnc_mbox_id;

	spin_lock(&dev->gnd_fmablk_lock);
	/* make sure our conn points at a valid fma_blk.
	 * We use this instead of a mem block search out of smsg_attr
	 * because we could have freed a block for fma_blk #1 but the fma_blk
	 * is still in the list for a purgatory hold. This would induce a false
	 * match if that same block gets reallocated to fma_blk #2 */
	list_for_each_entry(fma_blk, &dev->gnd_fma_buffs, gnm_bufflist) {
		if (fma_blk == conn->gnc_fma_blk) {
			found = 1;
			break;
		}
	}

	LASSERTF(found, "unable to find conn 0x%p with gnc_fma_blk %p "
		 "anywhere in the world\n", conn, conn->gnc_fma_blk);

	LASSERTF(id < fma_blk->gnm_num_mboxs,
		 "bad id %d max %d\n",
		 id, fma_blk->gnm_num_mboxs);

	/* < 0 - was held, now free it
	 * == 0 - just free it
	 * > 0 - hold it for now */
	if (purgatory_hold == 0) {
		CDEBUG(D_NET, "conn %p smsg %p fmablk %p freeing SMSG mbox %d "
		       "hndl %#llx.%#llx\n",
		       conn, smsg_attr, fma_blk, id,
		       fma_blk->gnm_hndl.qword1, fma_blk->gnm_hndl.qword2);
		fma_blk->gnm_avail_mboxs++;
	} else if (purgatory_hold > 0) {
		CDEBUG(D_NET, "conn %p smsg %p fmablk %p holding SMSG mbox %d "
		       "hndl %#llx.%#llx\n",
		       conn, smsg_attr, fma_blk, id,
		       fma_blk->gnm_hndl.qword1, fma_blk->gnm_hndl.qword2);

		fma_blk->gnm_held_mboxs++;
		fma_blk->gnm_max_timeout = max_t(long, fma_blk->gnm_max_timeout,
						 conn->gnc_timeout);
	} else {
		CDEBUG(D_NET, "conn %p smsg %p fmablk %p release SMSG mbox %d "
		       "hndl %#llx.%#llx\n",
		       conn, smsg_attr, fma_blk, id,
		       fma_blk->gnm_hndl.qword1, fma_blk->gnm_hndl.qword2);

		fma_blk->gnm_held_mboxs--;
		fma_blk->gnm_avail_mboxs++;
	}

	if (purgatory_hold <= 0) {
		/* if kgni is retransmitting, freeing the smsg block before the EP
		 * is destroyed gets messy. Bug 768295. */
		LASSERTF(conn->gnc_ephandle == NULL,
			 "can't release mbox before EP is nuked. conn 0x%p\n", conn);

		mbox = &fma_blk->gnm_mbox_info[id];
		mbox->mbx_release_from_purgatory = jiffies;

		/* clear conn gnc_fmablk if it is gone - this allows us to
		 * not worry about state so much in kgnilnd_destroy_conn
		 * and makes the guaranteed cleanup of the resources easier */
		LASSERTF(test_and_clear_bit(id, fma_blk->gnm_bit_array),
			 "conn %p bit %d already cleared in fma_blk %p\n",
			 conn, id, fma_blk);
		conn->gnc_fma_blk = NULL;
	}

	if (CFS_FAIL_CHECK(CFS_FAIL_GNI_FMABLK_AVAIL)) {
		CERROR("LBUGs in your future: forcibly marking fma_blk %p "
		       "as mapped\n", fma_blk);
		fma_blk->gnm_state = GNILND_FMABLK_VIRT;
	}

	/* we don't release or unmap PHYS blocks as part of the normal cycle --
	 * those are controlled manually from startup/shutdown */
	if (fma_blk->gnm_state != GNILND_FMABLK_PHYS) {
		/* we can unmap once all are unused (held or avail)
		 * but check hold_timeout to make sure we are not trying to double
		 * unmap this buffer. If there was no hold_timeout set due to
		 * held_mboxs, we'll free the mbox here shortly and won't have to
		 * worry about catching a double free for a 'clean' fma_blk */
		if (((fma_blk->gnm_avail_mboxs + fma_blk->gnm_held_mboxs) == fma_blk->gnm_num_mboxs) &&
		    (!fma_blk->gnm_hold_timeout)) {
			kgnilnd_unmap_fmablk(dev, fma_blk);
		}

		/* But we can only free once they are all avail */
		if (fma_blk->gnm_avail_mboxs == fma_blk->gnm_num_mboxs &&
		    fma_blk->gnm_held_mboxs == 0) {
			/* all mailboxes are released, free fma_blk */
			kgnilnd_free_fmablk_locked(dev, fma_blk);
		}
	}

	spin_unlock(&dev->gnd_fmablk_lock);
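/*
 * Standalone sketch of the purgatory_hold contract above: > 0 moves a
 * mailbox into the held count, == 0 frees a never-held mailbox, < 0
 * returns a previously held one. Hypothetical counters stand in for
 * gnm_avail_mboxs/gnm_held_mboxs.
 */
struct sketch_blk {
	int avail;
	int held;
};

static void
sketch_release(struct sketch_blk *blk, int purgatory_hold)
{
	if (purgatory_hold > 0) {
		blk->held++;		/* hold it for now */
	} else if (purgatory_hold == 0) {
		blk->avail++;		/* just free it */
	} else {
		blk->held--;		/* was held, now free it */
		blk->avail++;
	}
}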
kgnilnd_count_phys_mbox(kgn_device_t *device)

	int			 i = 0;
	kgn_fma_memblock_t	*fma_blk;

	spin_lock(&device->gnd_fmablk_lock);

	list_for_each_entry(fma_blk, &device->gnd_fma_buffs, gnm_bufflist) {
		if (fma_blk->gnm_state == GNILND_FMABLK_PHYS)
			i += fma_blk->gnm_num_mboxs;
	}
	spin_unlock(&device->gnd_fmablk_lock);

	RETURN(i);
kgnilnd_allocate_phys_fmablk(kgn_device_t *device)

	while (kgnilnd_count_phys_mbox(device) < *kgnilnd_tunables.kgn_nphys_mbox) {

		rc = kgnilnd_alloc_fmablk(device, 1);
		if (rc) {
			CERROR("failed phys mbox allocation, stopping at %d, rc %d\n",
			       kgnilnd_count_phys_mbox(device), rc);
kgnilnd_map_phys_fmablk(kgn_device_t *device)

	kgn_fma_memblock_t	*fma_blk;

	/* use the mutex to gate access to a single thread, just in case */
	mutex_lock(&device->gnd_fmablk_mutex);

	spin_lock(&device->gnd_fmablk_lock);

	list_for_each_entry(fma_blk, &device->gnd_fma_buffs, gnm_bufflist) {
		if (fma_blk->gnm_state == GNILND_FMABLK_PHYS) {
			rc = kgnilnd_map_fmablk(device, fma_blk);
			if (rc)
				break;
		}
	}
	spin_unlock(&device->gnd_fmablk_lock);

	mutex_unlock(&device->gnd_fmablk_mutex);
kgnilnd_unmap_fma_blocks(kgn_device_t *device)

	kgn_fma_memblock_t	*fma_blk;

	/* use the mutex to gate access to a single thread, just in case */
	mutex_lock(&device->gnd_fmablk_mutex);

	spin_lock(&device->gnd_fmablk_lock);

	list_for_each_entry(fma_blk, &device->gnd_fma_buffs, gnm_bufflist) {
		kgnilnd_unmap_fmablk(device, fma_blk);
	}
	spin_unlock(&device->gnd_fmablk_lock);

	mutex_unlock(&device->gnd_fmablk_mutex);
kgnilnd_free_phys_fmablk(kgn_device_t *device)

	kgn_fma_memblock_t	*fma_blk, *fma_blkN;

	/* use the mutex to gate access to a single thread, just in case */
	mutex_lock(&device->gnd_fmablk_mutex);

	spin_lock(&device->gnd_fmablk_lock);

	list_for_each_entry_safe(fma_blk, fma_blkN, &device->gnd_fma_buffs, gnm_bufflist) {
		if (fma_blk->gnm_state == GNILND_FMABLK_PHYS)
			kgnilnd_free_fmablk_locked(device, fma_blk);
	}
	spin_unlock(&device->gnd_fmablk_lock);

	mutex_unlock(&device->gnd_fmablk_mutex);
/* kgnilnd dgram nid->struct management */

static inline struct list_head *
kgnilnd_nid2dgramlist(kgn_device_t *dev, lnet_nid_t nid)
{
	unsigned int hash = ((unsigned int)nid) % *kgnilnd_tunables.kgn_peer_hash_size;

	RETURN(&dev->gnd_dgrams[hash]);
}
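/*
 * Standalone sketch of the bucketing above: truncate the NID to its low
 * 32 bits and take it modulo the table size. The table size here is a
 * hypothetical parameter; the real code uses the kgn_peer_hash_size
 * tunable. table_size must be nonzero.
 */
static unsigned int
sketch_nid_bucket(unsigned long long nid, unsigned int table_size)
{
	return (unsigned int)nid % table_size;
}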
/* needs dev->gnd_dgram_lock held */
kgnilnd_find_dgram_locked(kgn_device_t *dev, lnet_nid_t dst_nid)

	struct list_head	*dgram_list = kgnilnd_nid2dgramlist(dev, dst_nid);
	kgn_dgram_t		*dgram;

	list_for_each_entry(dgram, dgram_list, gndg_list) {

		/* if state > POSTED, we are already handling cancel/completion */
		if ((dgram->gndg_conn_out.gncr_dstnid != dst_nid) ||
		    dgram->gndg_state > GNILND_DGRAM_POSTED)
			continue;

		CDEBUG(D_NET, "got dgram [%p] -> %s\n",
		       dgram, libcfs_nid2str(dst_nid));
		return dgram;
	}
	return NULL;
kgnilnd_find_and_cancel_dgram(kgn_device_t *dev, lnet_nid_t dst_nid)

	kgn_dgram_t	*dgram;

	spin_lock(&dev->gnd_dgram_lock);
	dgram = kgnilnd_find_dgram_locked(dev, dst_nid);

	if (dgram) {
		kgnilnd_cancel_dgram_locked(dgram);
	}
	spin_unlock(&dev->gnd_dgram_lock);

	RETURN(!!(dgram == NULL));
kgnilnd_pack_connreq(kgn_connreq_t *connreq, kgn_conn_t *conn,
		     lnet_nid_t srcnid, lnet_nid_t dstnid,
		     kgn_connreq_type_t type)

	int err = 0;

	/* ensure we haven't violated max datagram size */
	BUILD_BUG_ON(sizeof(kgn_connreq_t) > GNI_DATAGRAM_MAXSIZE);

	/* no need to zero out, we do that when allocating dgram */
	connreq->gncr_magic = GNILND_MSG_MAGIC;

	if (CFS_FAIL_CHECK(CFS_FAIL_GNI_PACK_SRCNID)) {
		srcnid = 0xABADBABE;
	} else if (CFS_FAIL_CHECK(CFS_FAIL_GNI_PACK_DSTNID)) {
		dstnid = 0xDEFEC8ED;
	}

	connreq->gncr_srcnid = srcnid;
	connreq->gncr_dstnid = dstnid;

	if (CFS_FAIL_CHECK(CFS_FAIL_GNI_CONNREQ_PROTO)) {
		connreq->gncr_version = 99;
	} else {
		connreq->gncr_version = GNILND_CONNREQ_VERSION;
	}

	if (CFS_FAIL_CHECK(CFS_FAIL_GNI_CONNREQ_PROTO)) {
		connreq->gncr_type = 99;
	} else {
		connreq->gncr_type = type;
	}

	if (CFS_FAIL_CHECK(CFS_FAIL_GNI_CONNREQ_PROTO)) {
		connreq->gncr_peerstamp = 0;
	} else {
		connreq->gncr_peerstamp = kgnilnd_data.kgn_peerstamp;
	}

	if (CFS_FAIL_CHECK(CFS_FAIL_GNI_CONNREQ_PROTO)) {
		connreq->gncr_connstamp = 0;
	} else {
		connreq->gncr_connstamp = conn->gnc_my_connstamp;
	}

	if (CFS_FAIL_CHECK(CFS_FAIL_GNI_CONNREQ_PROTO)) {
		connreq->gncr_timeout = 0;
	} else {
		connreq->gncr_timeout = conn->gnc_timeout;
	}

	/* the rest pack the data into the payload in other places */
	if (type == GNILND_CONNREQ_REQ) {
		kgn_gniparams_t *req_params = &connreq->gncr_gnparams;

		req_params->gnpr_host_id = conn->gnc_device->gnd_host_id;
		req_params->gnpr_cqid = conn->gnc_cqid;

		/* allocate the mailbox for this connection */
		err = kgnilnd_setup_mbox(conn);
		if (err < 0) {
			CERROR("Failed to setup FMA mailbox (%d)\n", err);
			return err;
		}
		req_params->gnpr_smsg_attr = conn->gnpr_smsg_attr;
	}

	/* XXX Nic: TBD - checksum computation */

	return 0;
kgnilnd_unpack_connreq(kgn_dgram_t *dgram)

	kgn_connreq_t	*connreq = &dgram->gndg_conn_in;
	kgn_net_t	*net;
	int		 rc;
	int		 swab;

	/* the following fields must be handled in a backwards compatible
	 * manner to ensure we can always send and interpret NAKs */

	if (connreq->gncr_magic != GNILND_MSG_MAGIC &&
	    connreq->gncr_magic != __swab32(GNILND_MSG_MAGIC)) {
		/* Unexpected magic! */
		CERROR("Unexpected magic %08x\n",
		       connreq->gncr_magic);
		return -EBADF;
	}

	swab = (connreq->gncr_magic == __swab32(GNILND_MSG_MAGIC));
	if (swab) {
		__swab32s(&connreq->gncr_magic);
		__swab32s(&connreq->gncr_cksum);
		__swab16s(&connreq->gncr_type);
		__swab16s(&connreq->gncr_version);
		__swab32s(&connreq->gncr_timeout);
		__swab64s(&connreq->gncr_srcnid);
		__swab64s(&connreq->gncr_dstnid);
		__swab64s(&connreq->gncr_peerstamp);
		__swab64s(&connreq->gncr_connstamp);
	}

	/* Do NOT return anything but -EBADF before we munge
	 * connreq->gncr_srcnid - we need that to send the nak */

	if (dgram->gndg_conn_out.gncr_dstnid != LNET_NID_ANY) {
		lnet_nid_t incoming = connreq->gncr_srcnid;

		/* even if the incoming packet is hosed, we know who we sent
		 * the original to and can set the srcnid so that we can properly
		 * look up our peer to close the loop on this connreq. We still use
		 * -EBADF to prevent a NAK - just in case there are issues with
		 * the payload coming from a random spot, etc. */
		connreq->gncr_srcnid = dgram->gndg_conn_out.gncr_dstnid;

		if (LNET_NIDADDR(dgram->gndg_conn_out.gncr_dstnid) !=
		    LNET_NIDADDR(incoming)) {
			/* we got a datagram match for the wrong nid... */
			CERROR("matched datagram 0x%p with srcnid %s "
			       "(%x), expecting %s (%x)\n",
			       dgram,
			       libcfs_nid2str(incoming),
			       LNET_NIDADDR(incoming),
			       libcfs_nid2str(dgram->gndg_conn_out.gncr_dstnid),
			       LNET_NIDADDR(dgram->gndg_conn_out.gncr_dstnid));
			return -EBADF;
		}
	} else {
		/* if we have a wildcard datagram it should match an
		 * incoming "active" datagram that should have a fully formed
		 * srcnid and dstnid. If we couldn't unpack it, we drop it as a
		 * corrupted packet; otherwise we'll just verify that the dstnid
		 * matches the NID for the NET that the dgram was posted on */

		/* make sure their wildcard didn't match ours, that is unpossible */
		LASSERTF(connreq->gncr_dstnid != LNET_NID_ANY,
			 "dgram 0x%p from %s, connreq 0x%p; "
			 "wildcard matched wildcard\n", dgram,
			 libcfs_nid2str(connreq->gncr_srcnid), connreq);

		rc = kgnilnd_find_net(connreq->gncr_dstnid, &net);

		if (rc == -ESHUTDOWN) {
			CERROR("Looking up network: device is in shutdown\n");
			return rc;
		} else if (rc == -ENONET) {
			CERROR("Connection data from %s: she sent "
			       "dst_nid %s, but net lookup failed on "
			       "dgram 0x%p@%s\n",
			       libcfs_nid2str(connreq->gncr_srcnid),
			       libcfs_nid2str(connreq->gncr_dstnid),
			       dgram, kgnilnd_dgram_type2str(dgram));
			return rc;
		}

		if (net->gnn_ni->ni_nid != connreq->gncr_dstnid) {
			CERROR("Bad connection data from %s: she sent "
			       "dst_nid %s, but I am %s with dgram 0x%p@%s\n",
			       libcfs_nid2str(connreq->gncr_srcnid),
			       libcfs_nid2str(connreq->gncr_dstnid),
			       libcfs_nid2str(net->gnn_ni->ni_nid),
			       dgram, kgnilnd_dgram_type2str(dgram));
			kgnilnd_net_decref(net);
			return -EBADF;
		}

		/* kgnilnd_find_net takes a ref on the net it finds; drop it now
		 * that we are done with the lookup */
		kgnilnd_net_decref(net);
	}

	if (connreq->gncr_version != GNILND_CONNREQ_VERSION) {
		CERROR("Unexpected version %d\n", connreq->gncr_version);
		return -EPROTO;
	}

	/* XXX Nic: TBD - checksum validation */
	if (CFS_FAIL_CHECK(CFS_FAIL_GNI_CONNREQ_DROP)) {
		return -EBADF;
	}

	if (swab && connreq->gncr_type == GNILND_CONNREQ_REQ) {
		__u64 msg_addr = (__u64) connreq->gncr_gnparams.gnpr_smsg_attr.msg_buffer;

		__swab32s(&connreq->gncr_gnparams.gnpr_host_id);
		__swab32s(&connreq->gncr_gnparams.gnpr_cqid);
		__swab32s(&connreq->gncr_gnparams.gnpr_smsg_attr.buff_size);
		__swab16s(&connreq->gncr_gnparams.gnpr_smsg_attr.mbox_maxcredit);
		__swab32s(&connreq->gncr_gnparams.gnpr_smsg_attr.mbox_offset);
		__swab64s(&connreq->gncr_gnparams.gnpr_smsg_attr.mem_hndl.qword1);
		__swab64s(&connreq->gncr_gnparams.gnpr_smsg_attr.mem_hndl.qword2);
		__swab64s(&msg_addr);
		__swab32s(&connreq->gncr_gnparams.gnpr_smsg_attr.msg_maxsize);
		__swab32s(&connreq->gncr_gnparams.gnpr_smsg_attr.msg_type);
	} else if (swab && connreq->gncr_type == GNILND_CONNREQ_NAK) {
		__swab32s(&connreq->gncr_nakdata.gnnd_errno);
	}

	/* since we use a unique instance ID for each network, the driver
	 * will take care of dropping datagrams if we don't have that network. */

	/* a few more idiot software or configuration checks */

	switch (connreq->gncr_type) {
	case GNILND_CONNREQ_REQ:
		/* wire up EP and SMSG block - this will check the incoming data
		 * and barf a NAK back if needed */
		rc = kgnilnd_set_conn_params(dgram);
		if (rc)
			return rc;
		break;

	case GNILND_CONNREQ_NAK:
	case GNILND_CONNREQ_CLOSE:
		break;
	default:
		CERROR("unknown connreq packet type %d\n", connreq->gncr_type);
		return -EPROTO;
	}

	if (connreq->gncr_peerstamp == 0 || connreq->gncr_connstamp == 0) {
		CERROR("Received bad timestamps peer %llu conn %llu\n",
		       connreq->gncr_peerstamp, connreq->gncr_connstamp);
		return -EPROTO;
	}

	if (connreq->gncr_timeout < GNILND_MIN_TIMEOUT) {
		CERROR("Received timeout %d < MIN %d\n",
		       connreq->gncr_timeout, GNILND_MIN_TIMEOUT);
		return -EPROTO;
	}

	RETURN(0);
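/*
 * Standalone sketch of the magic-based byte-order handling above: if the
 * magic arrives byte-swapped, the peer has the opposite endianness and
 * every multi-byte field must be swabbed before use. The magic value and
 * the single 32-bit field here are hypothetical; the real connreq also
 * swabs 16- and 64-bit fields.
 */
#define SKETCH_MAGIC	0x0be91b92u	/* arbitrary sketch value */

static unsigned int
sketch_swab32(unsigned int v)
{
	return (v >> 24) | ((v >> 8) & 0xff00) |
	       ((v << 8) & 0xff0000) | (v << 24);
}

/* returns 0 on success, -1 on unrecognizable magic */
static int
sketch_unpack(unsigned int *magic, unsigned int *field)
{
	if (*magic == SKETCH_MAGIC)
		return 0;			/* same byte order */
	if (*magic != sketch_swab32(SKETCH_MAGIC))
		return -1;			/* corrupt, or not ours */
	*magic = sketch_swab32(*magic);		/* peer is swapped - fix */
	*field = sketch_swab32(*field);		/* ...and fix every field */
	return 0;
}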
kgnilnd_alloc_dgram(kgn_dgram_t **dgramp, kgn_device_t *dev, kgn_dgram_type_t type)

	kgn_dgram_t	*dgram;

	dgram = kmem_cache_zalloc(kgnilnd_data.kgn_dgram_cache, GFP_ATOMIC);
	if (dgram == NULL)
		return -ENOMEM;

	INIT_LIST_HEAD(&dgram->gndg_list);
	dgram->gndg_state = GNILND_DGRAM_USED;
	dgram->gndg_type = type;
	dgram->gndg_magic = GNILND_DGRAM_MAGIC;

	atomic_inc(&dev->gnd_ndgrams);

	CDEBUG(D_MALLOC|D_NETTRACE, "slab-alloced 'dgram': %lu at %p %s ndgrams"
	       " %d\n",
	       sizeof(*dgram), dgram, kgnilnd_dgram_type2str(dgram),
	       atomic_read(&dev->gnd_ndgrams));

	*dgramp = dgram;
	return 0;
/* call this on a dgram that came back from kgnilnd_ep_postdata_test_by_id.
 * returns < 0 on a dgram to be cleaned up
 *         > 0 on a dgram that isn't done yet
 *        == 0 on a dgram that is ok and needs connreq processing */
kgnilnd_process_dgram(kgn_dgram_t *dgram, gni_post_state_t post_state)

	int rc = 0;

	switch (post_state) {
	case GNI_POST_COMPLETED:
		/* normal state for dgrams that need actual processing */
		/* GOTO to avoid processing dgram as canceled/done */
		GOTO(process_out, rc);

	case GNI_POST_PENDING:
		/* we should only see this if we are testing a WC dgram after a
		 * cancel - it means that it needs a full cycle of waiting
		 * for kgni_sm_task to finish moving it to TERMINATED */
		LASSERTF((dgram->gndg_type == GNILND_DGRAM_WC_REQ) &&
			 (dgram->gndg_state == GNILND_DGRAM_CANCELED),
			 "POST_PENDING dgram 0x%p with bad type %d(%s) or state %d(%s)\n",
			 dgram, dgram->gndg_type, kgnilnd_dgram_type2str(dgram),
			 dgram->gndg_state, kgnilnd_dgram_state2str(dgram));

		/* positive RC as this dgram isn't done yet */
		rc = EINPROGRESS;

		/* GOTO as this isn't done yet */
		GOTO(process_out, rc);

	case GNI_POST_TERMINATED:
		/* we've called cancel and it is done, or the remote guy called
		 * cancel and we've received it on a WC dgram */

		/* we are seeing weird terminations on non-WC dgrams when we have
		 * not canceled them */
		LASSERTF(dgram->gndg_state == GNILND_DGRAM_CANCELED ||
			 dgram->gndg_conn_out.gncr_dstnid == LNET_NID_ANY,
			 "dgram 0x%p with bad state %d(%s) or dst nid %s\n",
			 dgram, dgram->gndg_state, kgnilnd_dgram_state2str(dgram),
			 libcfs_nid2str(dgram->gndg_conn_out.gncr_dstnid));

		CDEBUG(D_NETTRACE, "dgram 0x%p saw %s, cleaning it up\n", dgram,
		       dgram->gndg_state == GNILND_DGRAM_CANCELED ? "canceled" : "terminated");

		rc = -ECANCELED;
		break;

	case GNI_POST_TIMEOUT:
		/* we could have a timeout on a wildcard dgram too - if
		 * we got the incoming request but the remote node beefed
		 * before kgni could send the match data back. We'll just error
		 * on the active case and bail out gracefully */
		if (dgram->gndg_conn_out.gncr_dstnid != LNET_NID_ANY) {
			CNETERR("hardware timeout for connect to "
				"%s after %lu seconds. Is node dead?\n",
				libcfs_nid2str(dgram->gndg_conn_out.gncr_dstnid),
				cfs_duration_sec(jiffies - dgram->gndg_post_time));
		}

		rc = -ETIMEDOUT;
		break;

	default:
		CERROR("dgram 0x%p with bad post_state %d\n", dgram, post_state);
		LBUG();
	}

	/* now finish cleaning up a dgram that is canceled/terminated and needs
	 * to go away */

	/* If this was actively canceled, drop the count now that we are processing */
	if (dgram->gndg_state == GNILND_DGRAM_CANCELED) {
		atomic_dec(&dgram->gndg_conn->gnc_device->gnd_canceled_dgrams);
		/* caller responsible for gndg_list removal */
	}

process_out:
	RETURN(rc);
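/*
 * Standalone sketch of how a caller consumes the tri-state result above:
 * negative means clean the dgram up, positive means leave it simmering,
 * zero means the payload is ready for connreq processing. The helper
 * names are hypothetical stand-ins for the real cleanup/processing paths.
 */
static int sketch_cleanup(void)		{ return 0; }
static int sketch_process_connreq(void)	{ return 0; }

static int
sketch_handle_dgram(int rc)
{
	if (rc < 0)
		return sketch_cleanup();	/* canceled/terminated */
	if (rc > 0)
		return 0;			/* not done yet - retry later */
	return sketch_process_connreq();	/* COMPLETED - use the payload */
}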
/* needs dev->gnd_dgram_lock held */
kgnilnd_cancel_dgram_locked(kgn_dgram_t *dgram)

	if (dgram->gndg_state != GNILND_DGRAM_POSTED) {
		return;
	}

	LASSERTF(dgram->gndg_conn != NULL,
		 "dgram 0x%p with NULL conn\n", dgram);

	/* C.E - WC dgrams could be canceled immediately, but
	 * if there was some match pending, we need to call
	 * test_by_id to clear it out. If that test returns
	 * POST_PENDING, it is half done and needs to go along
	 * with the rest of the dgrams and go through a kgni_sm_task cycle
	 * and deliver a GNI_POST_TERMINATED event before they
	 * are actually canceled */

	dgram->gndg_state = GNILND_DGRAM_CANCELED;

	if (dgram->gndg_conn->gnc_state >= GNILND_CONN_ESTABLISHED) {
		/* we don't need to cancel_by_id if the datagram was good */
		return;
	}

	/* let folks know there are outstanding cancels */
	atomic_inc(&dgram->gndg_conn->gnc_device->gnd_canceled_dgrams);
	/* leave on the nid list until the cancel is done, for debugging fun */
	grc = kgnilnd_ep_postdata_cancel_by_id(dgram->gndg_conn->gnc_ephandle, (__u64) dgram);

	/* if we don't get success here, we have hosed up the dgram tracking
	 * code and need to bail out */
	LASSERTF(grc == GNI_RC_SUCCESS,
		 "postdata_cancel returned %d for conn 0x%p to %s\n",
		 grc, dgram->gndg_conn,
		 dgram->gndg_conn->gnc_peer ?
		 libcfs_nid2str(dgram->gndg_conn->gnc_peer->gnp_nid)
		 : "<?>");

	CDEBUG(D_NETTRACE,
	       "canceled dgram 0x%p conn 0x%p ephandle 0x%p\n",
	       dgram, dgram->gndg_conn,
	       dgram->gndg_conn->gnc_ephandle);

	if (dgram->gndg_type == GNILND_DGRAM_WC_REQ) {
		gni_post_state_t post_state;
		__u32 remote_addr = 0, remote_id = 0;

		grc = kgnilnd_ep_postdata_test_by_id(dgram->gndg_conn->gnc_ephandle,
						     (__u64)dgram, &post_state,
						     &remote_addr, &remote_id);

		LASSERTF(grc == GNI_RC_NO_MATCH || grc == GNI_RC_SUCCESS,
			 "bad grc %d from test_by_id on dgram 0x%p\n",
			 grc, dgram);

		/* if the WC was canceled immediately, we get NO_MATCH; if it needs to
		 * go through the full cycle, we get SUCCESS and need to parse post_state */

		CDEBUG(D_NET, "grc %d dgram 0x%p type %s post_state %d "
		       "remote_addr %u remote_id %u\n", grc, dgram,
		       kgnilnd_dgram_type2str(dgram),
		       post_state, remote_addr, remote_id);

		if (grc == GNI_RC_NO_MATCH) {
			/* she's gone, reduce the count and move along */
			dgram->gndg_state = GNILND_DGRAM_DONE;
			atomic_dec(&dgram->gndg_conn->gnc_device->gnd_canceled_dgrams);
			return;
		}

		rc = kgnilnd_process_dgram(dgram, post_state);

		if (rc <= 0) {
			/* if for some weird reason we get a valid dgram back, just mark
			 * it as done so we can drop it and move along.
			 * C.E - if it was completed, we'll just release the conn/mbox
			 * back into the pool and it'll get reused. That said, we should
			 * only be canceling a WC dgram on stack reset or shutdown, so
			 * that is moot */
			dgram->gndg_state = GNILND_DGRAM_DONE;
			atomic_dec(&dgram->gndg_conn->gnc_device->gnd_canceled_dgrams);

			/* caller context responsible for calling kgnilnd_release_dgram() */
		} else {
			/* still pending, let it simmer until golden brown and delicious */
		}
	}

	/* for non-WC dgrams, they are still on the nid list but marked canceled,
	 * waiting for kgni to return their ID to us via probe - that is when we'll
	 * complete their cancel processing */
kgnilnd_cleanup_dgram(kgn_dgram_t *dgram)

	/* release the dgram ref on the conn */
	if (dgram->gndg_conn) {
		kgnilnd_conn_decref(dgram->gndg_conn);
		dgram->gndg_conn = NULL;
	}
kgnilnd_free_dgram(kgn_device_t *dev, kgn_dgram_t *dgram)

	LASSERTF(dgram->gndg_state == GNILND_DGRAM_USED ||
		 dgram->gndg_state == GNILND_DGRAM_DONE,
		 "dgram 0x%p with bad state %s\n",
		 dgram, kgnilnd_dgram_state2str(dgram));

	/* bit of poisoning to help detect bad driver data */
	dgram->gndg_magic = 0x6f5a6b5f;
	atomic_dec(&dev->gnd_ndgrams);

	/* log before freeing so we don't touch freed memory */
	CDEBUG(D_MALLOC|D_NETTRACE, "slab-freed 'dgram': %lu at %p %s"
	       " ndgrams %d\n",
	       sizeof(*dgram), dgram, kgnilnd_dgram_type2str(dgram),
	       atomic_read(&dev->gnd_ndgrams));
	kmem_cache_free(kgnilnd_data.kgn_dgram_cache, dgram);
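/*
 * Standalone sketch of the poison-on-free pattern above: overwrite the
 * magic before freeing so a use-after-free trips a magic assertion
 * instead of silently reading stale data. The LIVE value is hypothetical;
 * the DEAD value reuses the poison constant from the code above.
 */
#define SKETCH_LIVE_MAGIC	0x64676d61u	/* hypothetical */
#define SKETCH_DEAD_MAGIC	0x6f5a6b5fu	/* same poison as above */

struct sketch_obj {
	unsigned int magic;
};

static void
sketch_free_obj(struct sketch_obj *obj)
{
	/* anyone touching obj after this sees DEAD, not LIVE */
	obj->magic = SKETCH_DEAD_MAGIC;
	/* the real free (e.g. kmem_cache_free()) would follow here */
}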
kgnilnd_post_dgram(kgn_device_t *dev, lnet_nid_t dstnid, kgn_connreq_type_t type,
		   int data_rc)

	int		 rc = 0;
	gni_return_t	 grc;
	kgn_dgram_t	*dgram = NULL;
	kgn_dgram_t	*tmpdgram;
	kgn_dgram_type_t dgtype;
	lnet_nid_t	 srcnid;
	__u32		 host_id = 0;

	switch (type) {
	case GNILND_CONNREQ_REQ:
		if (dstnid == LNET_NID_ANY)
			dgtype = GNILND_DGRAM_WC_REQ;
		else
			dgtype = GNILND_DGRAM_REQ;
		break;
	case GNILND_CONNREQ_NAK:
		LASSERTF(dstnid != LNET_NID_ANY, "can't NAK to LNET_NID_ANY\n");
		dgtype = GNILND_DGRAM_NAK;
		break;
	default:
		CERROR("unknown connreq type %d\n", type);
		LBUG();
	}

	rc = kgnilnd_alloc_dgram(&dgram, dev, dgtype);
	if (rc < 0) {
		GOTO(post_failed, rc);
	}

	rc = kgnilnd_create_conn(&dgram->gndg_conn, dev);
	if (rc) {
		GOTO(post_failed, rc);
	}

	if (dgram->gndg_type == GNILND_DGRAM_WC_REQ) {
		/* clear buffer for sanity on reuse of wildcard */
		memset(&dgram->gndg_conn_in, 0, sizeof(kgn_connreq_t));
	}

	if (dstnid == LNET_NID_ANY) {
		/* set here to reset any dgram re-use */
		dgram->gndg_conn->gnc_state = GNILND_CONN_LISTEN;
	} else {
		rc = kgnilnd_nid_to_nicaddrs(LNET_NIDADDR(dstnid), 1, &host_id);
		if (rc <= 0) {
			GOTO(post_failed, rc);
		}

		dgram->gndg_conn->gnc_state = GNILND_CONN_CONNECTING;

		/* don't need to serialize, there are no CQs for the dgram
		 * EP on the kgn_net_t */
		grc = kgnilnd_ep_bind(dgram->gndg_conn->gnc_ephandle, host_id, dev->gnd_id);

		if (grc != GNI_RC_SUCCESS) {
			GOTO(post_failed, rc);
		}
	}

	/* If we are posting wildcards, post using a net of 0; otherwise we'll use
	 * the net of the destination node. */

	if (dstnid == LNET_NID_ANY) {
		srcnid = LNET_MKNID(LNET_MKNET(GNILND, 0), dev->gnd_nid);
	} else {
		srcnid = LNET_MKNID(LNET_NIDNET(dstnid), dev->gnd_nid);
	}

	rc = kgnilnd_pack_connreq(&dgram->gndg_conn_out, dgram->gndg_conn,
				  srcnid, dstnid, type);
	if (rc < 0) {
		GOTO(post_failed, rc);
	}

	if (type == GNILND_CONNREQ_NAK)
		dgram->gndg_conn_out.gncr_nakdata.gnnd_errno = data_rc;

	dgram->gndg_post_time = jiffies;

	/* XXX Nic: here is where we'd add in logical network multiplexing */

	CDEBUG(D_NETTRACE, "dgram 0x%p type %s %s->%s cdm %d\n",
	       dgram, kgnilnd_dgram_type2str(dgram),
	       libcfs_nid2str(srcnid),
	       libcfs_nid2str(dstnid), dev->gnd_id);

	/* this allocates memory, can't hold locks across it */
	grc = kgnilnd_ep_postdata_w_id(dgram->gndg_conn->gnc_ephandle,
				       &dgram->gndg_conn_out, sizeof(kgn_connreq_t),
				       &dgram->gndg_conn_in, sizeof(kgn_connreq_t),
				       (__u64)dgram);

	if (grc != GNI_RC_SUCCESS) {
		CNETERR("dropping failed dgram post id 0x%p type %s"
			" reqtype %s to %s: rc %d\n",
			dgram, kgnilnd_dgram_type2str(dgram),
			kgnilnd_connreq_type2str(&dgram->gndg_conn_out),
			libcfs_nid2str(dstnid), grc);
		rc = (grc == GNI_RC_ERROR_NOMEM) ? -ENOMEM : -EBADR;
		GOTO(post_failed, rc);
	}

	/* we don't need to add earlier - if someone does del_peer during the post,
	 * that peer will get marked as unlinked and the callers will take care of it.
	 * The dgram code is largely kgn_peer_t ignorant, so at worst, we'll just drop
	 * the completed dgram later when we can't find a peer to stuff it into */

	spin_lock(&dev->gnd_dgram_lock);

	/* make sure we are not double posting targeted dgrams
	 * - we can multi-post WC dgrams to help with processing speed */
	if (dstnid != LNET_NID_ANY) {
		tmpdgram = kgnilnd_find_dgram_locked(dev, dstnid);

		LASSERTF(tmpdgram == NULL,
			 "dgram 0x%p->%s already posted\n",
			 dgram, libcfs_nid2str(dstnid));
	}

	/* unmunge dstnid to help the processing code cope... */
	if (CFS_FAIL_CHECK(CFS_FAIL_GNI_PACK_DSTNID)) {
		dgram->gndg_conn_out.gncr_dstnid = dstnid;
	}

	list_add_tail(&dgram->gndg_list, kgnilnd_nid2dgramlist(dev, dstnid));
	dgram->gndg_state = GNILND_DGRAM_POSTED;
	spin_unlock(&dev->gnd_dgram_lock);

post_failed:
	if (rc < 0 && dgram != NULL) {
		kgnilnd_cleanup_dgram(dgram);
		kgnilnd_free_dgram(dev, dgram);
	}

	RETURN(rc);
/* The shutdown flag is set from the shutdown and stack reset threads. */
kgnilnd_release_dgram(kgn_device_t *dev, kgn_dgram_t *dgram, int shutdown)

	/* The conns of canceled active dgrams need to be put in purgatory so
	 * we don't reuse the mailbox */
	if (unlikely(dgram->gndg_state == GNILND_DGRAM_CANCELED)) {
		kgn_peer_t *peer;
		kgn_conn_t *conn = dgram->gndg_conn;
		lnet_nid_t nid = dgram->gndg_conn_out.gncr_dstnid;

		dgram->gndg_state = GNILND_DGRAM_DONE;

		/* During shutdown we've already removed the peer, so we don't
		 * need to add one. During stack reset we don't care about
		 * MDDs since they are all released. */
		if (!shutdown) {
			write_lock(&kgnilnd_data.kgn_peer_conn_lock);
			peer = kgnilnd_find_peer_locked(nid);

			if (peer != NULL) {
				CDEBUG(D_NET, "adding peer's conn with nid %s "
				       "to purgatory\n", libcfs_nid2str(nid));
				kgnilnd_conn_addref(conn);
				conn->gnc_peer = peer;
				kgnilnd_peer_addref(peer);
				kgnilnd_admin_addref(conn->gnc_peer->gnp_dirty_eps);
				conn->gnc_state = GNILND_CONN_CLOSED;
				list_add_tail(&conn->gnc_list,
					      &peer->gnp_conns);
				kgnilnd_add_purgatory_locked(conn,
							     conn->gnc_peer);
				kgnilnd_schedule_conn(conn);
			}
			write_unlock(&kgnilnd_data.kgn_peer_conn_lock);
		}
	}

	spin_lock(&dev->gnd_dgram_lock);
	kgnilnd_cancel_dgram_locked(dgram);
	spin_unlock(&dev->gnd_dgram_lock);

	kgnilnd_cleanup_dgram(dgram);

	/* if the dgram is 'canceled', it needs to wait until the event
	 * comes up from kgni that tells us it is safe to release */
	if (dgram->gndg_state != GNILND_DGRAM_CANCELED) {
		dgram->gndg_state = GNILND_DGRAM_DONE;

		LASSERTF(list_empty(&dgram->gndg_list), "dgram 0x%p on list\n", dgram);

		/* if it is a wildcard and we are in an appropriate state, repost it */
		if ((dgram->gndg_type == GNILND_DGRAM_WC_REQ) &&
		    (!kgnilnd_data.kgn_wc_kill && !kgnilnd_data.kgn_in_reset)) {
			int rerc;

			rerc = kgnilnd_post_dgram(dev, LNET_NID_ANY, GNILND_CONNREQ_REQ, 0);
			if (rerc) {
				/* We failed to repost the WC dgram for some reason;
				 * mark it so the repost system attempts to repost */
				kgnilnd_admin_addref(dev->gnd_nwcdgrams);
			}
		}

		/* always free the old dgram */
		kgnilnd_free_dgram(dev, dgram);
	}
kgnilnd_probe_for_dgram(kgn_device_t *dev, kgn_dgram_t **dgramp)

	kgn_dgram_t		*dgram = NULL;
	gni_post_state_t	 post_state;
	gni_return_t		 grc;
	int			 rc = 0;
	__u64			 readyid;
	__u32			 remote_addr = 0, remote_id = 0;

	/* Probe with the lock held. That way if we get a dgram, we don't have it
	 * canceled between finding the ready dgram and grabbing the lock to remove
	 * it from the list. Otherwise we could be left in an inconsistent state.
	 * We own the dgram once it's off the list, so we don't need to worry about
	 * others changing it after that point. */
	spin_lock(&dev->gnd_dgram_lock);
	grc = kgnilnd_postdata_probe_by_id(dev->gnd_handle, &readyid);
	if (grc != GNI_RC_SUCCESS) {
		spin_unlock(&dev->gnd_dgram_lock);
		/* return 0 to indicate nothing happened */
		RETURN(0);
	}

	CDEBUG(D_NET, "ready %#llx on device 0x%p\n",
	       readyid, dev);

	dgram = (kgn_dgram_t *)readyid;

	LASSERTF(dgram->gndg_magic == GNILND_DGRAM_MAGIC,
		 "dgram 0x%p from id %#llx with bad magic %x\n",
		 dgram, readyid, dgram->gndg_magic);

	LASSERTF(dgram->gndg_state == GNILND_DGRAM_POSTED ||
		 dgram->gndg_state == GNILND_DGRAM_CANCELED,
		 "dgram 0x%p with bad state %s\n",
		 dgram, kgnilnd_dgram_state2str(dgram));

	LASSERTF(!list_empty(&dgram->gndg_list),
		 "dgram 0x%p with bad list state %s type %s\n",
		 dgram, kgnilnd_dgram_state2str(dgram),
		 kgnilnd_dgram_type2str(dgram));

	/* now we know that the datagram structure is ok, so pull it off the list */
	list_del_init(&dgram->gndg_list);

	/* while we have the gnd_dgram_lock and BEFORE we call test_by_id,
	 * change the state from POSTED to PROCESSING to ensure that
	 * nobody cancels it after we've pulled it from the wire */
	if (dgram->gndg_state == GNILND_DGRAM_POSTED) {
		dgram->gndg_state = GNILND_DGRAM_PROCESSING;
	}

	LASSERTF(dgram->gndg_conn != NULL,
		 "dgram 0x%p with NULL conn\n", dgram);

	grc = kgnilnd_ep_postdata_test_by_id(dgram->gndg_conn->gnc_ephandle,
					     (__u64)dgram, &post_state,
					     &remote_addr, &remote_id);

	/* we now "own" this datagram */
	spin_unlock(&dev->gnd_dgram_lock);

	LASSERTF(grc != GNI_RC_NO_MATCH, "kgni lied! probe_by_id told us that"
		 " id %llu was ready\n", readyid);

	CDEBUG(D_NET, "grc %d dgram 0x%p type %s post_state %d "
	       "remote_addr %u remote_id %u\n", grc, dgram,
	       kgnilnd_dgram_type2str(dgram),
	       post_state, remote_addr, remote_id);

	if (unlikely(grc != GNI_RC_SUCCESS)) {
		CNETERR("getting data for dgram 0x%p->%s failed rc %d. Dropping it\n",
			dgram, libcfs_nid2str(dgram->gndg_conn_out.gncr_dstnid),
			grc);
		GOTO(probe_for_out, rc);
	}

	rc = kgnilnd_process_dgram(dgram, post_state);

	/* we should never get probe finding a dgram for us and then it
	 * being a WC dgram that is still in the middle of processing */
	LASSERTF(rc <= 0, "bad rc %d from process_dgram 0x%p state %d\n",
		 rc, dgram, post_state);

	if (rc == 0) {
		/* dgram is good enough for the data to be used */
		dgram->gndg_state = GNILND_DGRAM_PROCESSING;
		/* fake rc to mark that we've done something */
		rc = 1;
	} else {
		/* let kgnilnd_release_dgram take care of canceled dgrams */
		if (dgram->gndg_state != GNILND_DGRAM_CANCELED) {
			dgram->gndg_state = GNILND_DGRAM_DONE;
		}
	}

	*dgramp = dgram;
	RETURN(rc);

probe_for_out:
	kgnilnd_release_dgram(dev, dgram, 0);
	RETURN(rc);
kgnilnd_setup_wildcard_dgram(kgn_device_t *dev)

	/* if kgn_nwildcard is zero, return error */
	int	rc = -ENOENT, i;

	for (i = 0; i < *kgnilnd_tunables.kgn_nwildcard; i++) {
		rc = kgnilnd_post_dgram(dev, LNET_NID_ANY, GNILND_CONNREQ_REQ, 0);

		if (rc < 0) {
			CERROR("error %d: could not post wildcard datagram # %d\n",
			       rc, i);
			break;
		}
	}

	RETURN(rc);
kgnilnd_cancel_net_dgrams(kgn_net_t *net)

	kgn_dgram_t	*dg, *dgN;
	int		 i;

	/* we want to cancel any outstanding dgrams - we don't want to rely
	 * on del_peer_or_conn catching all of them. This helps protect us in cases
	 * where we don't quite keep the peer->dgram mapping in sync due to some
	 * race conditions */

	LASSERTF(net->gnn_shutdown || kgnilnd_data.kgn_in_reset,
		 "called with LND invalid state: net shutdown %d "
		 "in reset %d\n", net->gnn_shutdown,
		 kgnilnd_data.kgn_in_reset);

	spin_lock(&net->gnn_dev->gnd_dgram_lock);

	for (i = 0; i < *kgnilnd_tunables.kgn_peer_hash_size; i++) {
		list_for_each_entry_safe(dg, dgN, &net->gnn_dev->gnd_dgrams[i], gndg_list) {

			/* skip wildcards and nids not on our net */
			if (dg->gndg_type == GNILND_DGRAM_WC_REQ ||
			    net->gnn_netnum != LNET_NETNUM(LNET_NIDNET(dg->gndg_conn_out.gncr_dstnid)))
				continue;

			kgnilnd_cancel_dgram_locked(dg);
		}
	}

	spin_unlock(&net->gnn_dev->gnd_dgram_lock);
kgnilnd_cancel_wc_dgrams(kgn_device_t *dev)

	kgn_dgram_t	*dg, *dgN;
	LIST_HEAD	(zombies);

	/* Time to kill the outstanding WC's.
	 * WC's exist on net 0 only, but match on any net... */

	LASSERTF(kgnilnd_data.kgn_in_reset || kgnilnd_data.kgn_wc_kill,
		 "called with LND invalid state: WC shutdown %d "
		 "in reset %d\n", kgnilnd_data.kgn_wc_kill,
		 kgnilnd_data.kgn_in_reset);

	spin_lock(&dev->gnd_dgram_lock);

	do {
		dg = kgnilnd_find_dgram_locked(dev, LNET_NID_ANY);
		if (dg != NULL) {
			LASSERTF(dg->gndg_type == GNILND_DGRAM_WC_REQ,
				 "dgram 0x%p->%s with bad type %d (%s)\n",
				 dg, libcfs_nid2str(dg->gndg_conn_out.gncr_dstnid),
				 dg->gndg_type, kgnilnd_dgram_type2str(dg));

			kgnilnd_cancel_dgram_locked(dg);

			/* the WC could be DONE already - check, and if so, add it to
			 * the list to be released */
			if (dg->gndg_state == GNILND_DGRAM_DONE)
				list_move_tail(&dg->gndg_list, &zombies);
		}
	} while (dg != NULL);

	spin_unlock(&dev->gnd_dgram_lock);

	list_for_each_entry_safe(dg, dgN, &zombies, gndg_list) {
		list_del_init(&dg->gndg_list);
		kgnilnd_release_dgram(dev, dg, 1);
	}
kgnilnd_cancel_dgrams(kgn_device_t *dev)

	kgn_dgram_t	*dg, *dgN;
	int		 i;

	/* Cancel any outstanding non-wildcard datagrams regardless
	 * of which net they are on, as we are in base shutdown and
	 * don't care about connecting anymore. */

	LASSERTF(kgnilnd_data.kgn_wc_kill == 1,
		 "we didn't get called from base shutdown\n");

	spin_lock(&dev->gnd_dgram_lock);

	for (i = 0; i < *kgnilnd_tunables.kgn_peer_hash_size; i++) {
		list_for_each_entry_safe(dg, dgN, &dev->gnd_dgrams[i], gndg_list) {
			if (dg->gndg_type != GNILND_DGRAM_WC_REQ)
				kgnilnd_cancel_dgram_locked(dg);
		}
	}

	spin_unlock(&dev->gnd_dgram_lock);
kgnilnd_wait_for_canceled_dgrams(kgn_device_t *dev)

	int		 i = 0;
	int		 rc;
	gni_return_t	 grc;
	__u64		 readyid;
	kgn_dgram_t	*dgram;

	/* use do/while to get at least one check run, to allow the regression
	 * test for 762072 to hit the bug if it is there */

	/* This function races with the dgram mover during shutdown, so it is
	 * possible for a dgram to be seen in kgnilnd_postdata_probe_wait_by_id
	 * but be handled in the dgram mover thread instead of inside this
	 * function. */

	/* This should only be called from within shutdown, base shutdown, or
	 * stack reset. There are no assertions here to verify that, since
	 * base_shutdown has nothing in it we can check - the net is gone by
	 * then. */

	do {
		i++;
		CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET,
		       "Waiting for %d canceled datagrams to clear on device %d\n",
		       atomic_read(&dev->gnd_canceled_dgrams), dev->gnd_id);

		/* check once a second */
		grc = kgnilnd_postdata_probe_wait_by_id(dev->gnd_handle,
							1000, &readyid);

		if (grc != GNI_RC_SUCCESS)
			continue;

		CDEBUG(D_NET, "ready %#llx on device %d->0x%p\n",
		       readyid, dev->gnd_id, dev);

		rc = kgnilnd_probe_for_dgram(dev, &dgram);
		if (rc != 0) {
			/* if we got a valid dgram or one that is now done, clean up */
			kgnilnd_release_dgram(dev, dgram, 1);
		}
	} while (atomic_read(&dev->gnd_canceled_dgrams));
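/*
 * Standalone sketch of the log throttling above: (i & -i) == i is true
 * only when i is a power of two (or zero), so a once-a-second poll warns
 * on iterations 1, 2, 4, 8, ... instead of flooding the console.
 */
static int
sketch_should_warn(unsigned int i)
{
	/* i & -i isolates the lowest set bit; it equals i only for
	 * powers of two (and for i == 0) */
	return (i & -i) == i;
}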
kgnilnd_start_connect(kgn_peer_t *peer)

	int	rc = 0;

	/* sync point for kgnilnd_del_peer_locked - do an early check to
	 * catch the most common hits where del_peer is done by the
	 * time we get here */
	if (CFS_FAIL_CHECK(CFS_FAIL_GNI_GNP_CONNECTING1)) {
		while (CFS_FAIL_TIMEOUT(CFS_FAIL_GNI_GNP_CONNECTING1, 1)) {};
	}

	write_lock(&kgnilnd_data.kgn_peer_conn_lock);
	if (!kgnilnd_peer_active(peer) || peer->gnp_connecting != GNILND_PEER_CONNECT) {
		/* raced with peer getting unlinked */
		write_unlock(&kgnilnd_data.kgn_peer_conn_lock);
		return;
	}

	peer->gnp_connecting = GNILND_PEER_POSTING;
	write_unlock(&kgnilnd_data.kgn_peer_conn_lock);

	set_mb(peer->gnp_last_dgram_time, jiffies);
	if (CFS_FAIL_CHECK(CFS_FAIL_GNI_GNP_CONNECTING2)) {
		while (CFS_FAIL_TIMEOUT(CFS_FAIL_GNI_GNP_CONNECTING2, 1)) {};
	}

	if (CFS_FAIL_CHECK(CFS_FAIL_GNI_GNP_CONNECTING3)) {
		while (CFS_FAIL_TIMEOUT(CFS_FAIL_GNI_GNP_CONNECTING3, 1)) {};
		rc = cfs_fail_val ? cfs_fail_val : -ENOMEM;
	} else {
		rc = kgnilnd_post_dgram(peer->gnp_net->gnn_dev,
					peer->gnp_nid, GNILND_CONNREQ_REQ, 0);
	}

	if (rc < 0) {
		set_mb(peer->gnp_last_dgram_errno, rc);
		GOTO(failed, rc);
	}

	/* while we're posting, someone could have decided this peer/dgram needed to
	 * die a quick death, so we check for state change and process accordingly */

	write_lock(&kgnilnd_data.kgn_peer_conn_lock);
	if (!kgnilnd_peer_active(peer) || peer->gnp_connecting == GNILND_PEER_NEEDS_DEATH) {
		if (peer->gnp_connecting == GNILND_PEER_NEEDS_DEATH) {
			peer->gnp_connecting = GNILND_PEER_KILL;
		}
		write_unlock(&kgnilnd_data.kgn_peer_conn_lock);
		/* positive RC to avoid dgram cleanup - we'll have to
		 * wait for the kgni GNI_POST_TERMINATED event to
		 * finish cleaning up */

		kgnilnd_find_and_cancel_dgram(peer->gnp_net->gnn_dev, peer->gnp_nid);
		return;
	}

	peer->gnp_connecting = GNILND_PEER_POSTED;
	write_unlock(&kgnilnd_data.kgn_peer_conn_lock);
	/* the reaper thread will take care of any timeouts */
	CDEBUG(D_NET, "waiting for connect to finish to %s rc %d\n",
	       libcfs_nid2str(peer->gnp_nid), rc);
	return;

failed:
	CDEBUG(D_NET, "connect to %s failed: rc %d\n",
	       libcfs_nid2str(peer->gnp_nid), rc);
1774 kgnilnd_finish_connect(kgn_dgram_t *dgram)
1776 kgn_conn_t *conn = dgram->gndg_conn;
1777 lnet_nid_t her_nid = dgram->gndg_conn_in.gncr_srcnid;
1778 kgn_peer_t *new_peer, *peer = NULL;
1781 kgn_mbox_info_t *mbox;
1785 /* try to find a peer that matches the nid we got in the connreq
1786 * kgnilnd_unpack_connreq makes sure that conn_in.gncr_srcnid is
1787 * HER and conn_out.gncr_srcnid is ME for both active and WC dgrams */
1789 /* assume this is a new peer - it makes locking cleaner when it isn't */
1790 /* no holding kgn_net_rw_sem - already are at the kgnilnd_dgram_mover level */
1792 rc = kgnilnd_create_peer_safe(&new_peer, her_nid, NULL, GNILND_PEER_UP);
1794 CERROR("Can't create peer for %s\n", libcfs_nid2str(her_nid));
1798 write_lock(&kgnilnd_data.kgn_peer_conn_lock);
1800 /* this transfers ref from create_peer to the kgn_peer table */
1801 kgnilnd_add_peer_locked(her_nid, new_peer, &peer);
1803 /* if we found an existing peer, is it really ready for a new conn ? */
1804 if (peer != new_peer) {
1805 /* if this was an active connect attempt but we can't find a peer waiting for it
1806 * we will dump in the trash */
1808 if (peer->gnp_connecting == GNILND_PEER_IDLE && dgram->gndg_conn_out.gncr_dstnid != LNET_NID_ANY) {
1809 CDEBUG(D_NET, "dropping completed connreq for %s peer 0x%p->%s\n",
1810 libcfs_nid2str(her_nid), peer, libcfs_nid2str(peer->gnp_nid));
1811 write_unlock(&kgnilnd_data.kgn_peer_conn_lock);
1816 /* check to see if we can catch a connecting peer before it is
1817 * removed from the connd_peers list - if not, we need to
1818 * let the connreqs race and be handled by kgnilnd_conn_isdup_locked() */
1819 if (peer->gnp_connecting != GNILND_PEER_IDLE) {
1820 spin_lock(&peer->gnp_net->gnn_dev->gnd_connd_lock);
1821 if (!list_empty(&peer->gnp_connd_list)) {
1822 list_del_init(&peer->gnp_connd_list);
1823 /* drop connd ref */
1824 kgnilnd_peer_decref(peer);
1826 spin_unlock(&peer->gnp_net->gnn_dev->gnd_connd_lock);
1827 /* clear rc to make sure we don't have fake error */
1831 /* no matter what, we are no longer waiting to connect this peer now */
1832 peer->gnp_connecting = GNILND_PEER_IDLE;
1834 /* Refuse to duplicate an existing connection (both sides might try to
1835 * connect at once). NB we return success! We _are_ connected so we
1836 * _don't_ have any blocked txs to complete with failure. */
1837 rc = kgnilnd_conn_isdup_locked(peer, conn);
1839 write_unlock(&kgnilnd_data.kgn_peer_conn_lock);
1840 CDEBUG(D_NET, "Not creating duplicate connection to %s: %d\n",
1841 libcfs_nid2str(her_nid), rc);
1847 if (peer->gnp_state == GNILND_PEER_DOWN) {
1848 CNETERR("Received connection request from down nid %s\n",
1849 libcfs_nid2str(her_nid));
1852 peer->gnp_state = GNILND_PEER_UP;
1853 nstale = kgnilnd_close_stale_conns_locked(peer, conn);
1855 /* either way with peer (new or existing), we are ok with ref counts here as the
1856 * kgnilnd_add_peer_locked will use our ref on new_peer (from create_peer_safe) as the
1857 * ref for the peer table. */
1859 /* at this point, the connection request is a winner */
1861 /* mark 'DONE' to avoid cancel being called from release */
1862 dgram->gndg_state = GNILND_DGRAM_DONE;
1864 /* initialise timestamps before reaper looks at them */
1865 conn->gnc_last_rx = conn->gnc_last_rx_cq = jiffies;
1867 /* last_tx is initialized to jiffies - (keepalive*2) so that if the NOOP fails it will
1868 * immediatly send a NOOP in the reaper thread during the call to
1869 * kgnilnd_check_conn_timeouts_locked
1871 conn->gnc_last_tx = jiffies - (cfs_time_seconds(GNILND_TO2KA(conn->gnc_timeout)) * 2);
1872 conn->gnc_state = GNILND_CONN_ESTABLISHED;
1874 /* save the dgram type used to establish this connection */
1875 conn->gnc_dgram_type = dgram->gndg_type;
1877 /* refs are not transferred from dgram to tables, so increment to
1879 kgnilnd_conn_addref(conn);
1880 kgnilnd_peer_addref(peer);
1881 conn->gnc_peer = peer;
1882 list_add_tail(&conn->gnc_list, &peer->gnp_conns);
1884 kgnilnd_conn_addref(conn); /* +1 ref for conn table */
1885 list_add_tail(&conn->gnc_hashlist,
1886 kgnilnd_cqid2connlist(conn->gnc_cqid));
1887 kgnilnd_data.kgn_conn_version++;
1889 /* Dont send NOOP if fail_loc is set
1891 if (!CFS_FAIL_CHECK(CFS_FAIL_GNI_ONLY_NOOP)) {
1892 tx = kgnilnd_new_tx_msg(GNILND_MSG_NOOP, peer->gnp_net->gnn_ni->ni_nid);
1894 CNETERR("can't get TX to initiate NOOP to %s\n",
1895 libcfs_nid2str(peer->gnp_nid));
1897 kgnilnd_queue_tx(conn, tx);
1901 /* Schedule all packets blocking for a connection */
1902 list_for_each_entry_safe(tx, txn, &peer->gnp_tx_queue, tx_list) {
1903 /* lock held here is the peer_conn lock */
1904 kgnilnd_tx_del_state_locked(tx, peer, NULL, GNILND_TX_ALLOCD);
1905 kgnilnd_queue_tx(conn, tx);
1908 /* If this is an active connection lets mark its timestamp on the MBoX */
1909 if (dgram->gndg_conn_out.gncr_dstnid != LNET_NID_ANY) {
1910 mbox = &conn->gnc_fma_blk->gnm_mbox_info[conn->gnc_mbox_id];
1911 /* conn->gnc_last_rx is jiffies it better exist as it was just set */
1912 mbox->mbx_release_purg_active_dgram = conn->gnc_last_rx;
        /* Bug 765042: wake up scheduler for a race with finish_connect and
         * complete_conn_closed with a conn in purgatory.
         * Since we can't use CFS_RACE due to mutex_holds in kgnilnd_process_conns,
         * we just check for set and then clear */
        if (CFS_FAIL_CHECK(CFS_FAIL_GNI_FINISH_PURG)) {
                /* get scheduler thread moving again */
                kgnilnd_schedule_device(conn->gnc_device);
        }
        CDEBUG(D_NET, "New conn 0x%p->%s dev %d\n",
               conn, libcfs_nid2str(her_nid), conn->gnc_device->gnd_id);

        /* make sure we reset peer reconnect interval now that we have a good conn */
        kgnilnd_peer_alive(peer);
        peer->gnp_reconnect_interval = 0;

        /* clear the unlink attribute; if we don't clear it,
         * kgnilnd_del_conn_or_peer will wait on the atomic forever */
        if (peer->gnp_pending_unlink) {
                peer->gnp_pending_unlink = 0;
                kgnilnd_admin_decref(kgnilnd_data.kgn_npending_unlink);
                CDEBUG(D_NET, "Clearing peer unlink %p\n", peer);
        }

        /* add ref to make it hang around until after we drop the lock */
        kgnilnd_conn_addref(conn);

        /* Once the peer_conn lock is dropped, the conn could actually move into
         * CLOSING->CLOSED->DONE in the scheduler thread, so hold the
         * lock until we are really done */
        write_unlock(&kgnilnd_data.kgn_peer_conn_lock);

        /* Notify LNET that we now have a working connection to this peer.
         * This is a Cray extension to the "standard" LND behavior. */
        lnet_notify(peer->gnp_net->gnn_ni, peer->gnp_nid, true, true,
                    ktime_get_seconds());

        /* drop our 'hold' ref */
        kgnilnd_conn_decref(conn);

        RETURN(rc);
}
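
/*
 * The back-dating of gnc_last_tx above is worth calling out: by seeding the
 * "last transmit" stamp two keepalive intervals in the past, the very first
 * reaper pass sees the connection as overdue for a keepalive and queues a
 * NOOP at once, validating the new mailbox immediately instead of after a
 * full keepalive interval.  A minimal userspace sketch of the same trick
 * follows; all demo_* names are illustrative, not part of gnilnd.
 */
#if 0
#include <stdio.h>
#include <time.h>

struct demo_conn {
        time_t last_tx;         /* seconds, like a coarse jiffies */
};

#define DEMO_KEEPALIVE  30      /* seconds between keepalives */

static int demo_needs_keepalive(struct demo_conn *c, time_t now)
{
        return (now - c->last_tx) >= DEMO_KEEPALIVE;
}

int main(void)
{
        struct demo_conn c;
        time_t now = time(NULL);

        /* back-date so the first reaper pass fires a keepalive immediately */
        c.last_tx = now - 2 * DEMO_KEEPALIVE;

        printf("needs keepalive right away: %d\n",
               demo_needs_keepalive(&c, now));  /* prints 1 */
        return 0;
}
#endif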
void
kgnilnd_send_nak(kgn_device_t *dev, lnet_nid_t dst_nid, int error)
{
        int     rc;

        LASSERTF(dst_nid != LNET_NID_ANY, "bad dst_nid %s\n", libcfs_nid2str(dst_nid));

        CDEBUG(D_NET, "NAK to %s errno %d\n", libcfs_nid2str(dst_nid), error);

        rc = kgnilnd_post_dgram(dev, dst_nid, GNILND_CONNREQ_NAK, error);
        if (rc < 0)
                CDEBUG(D_NET, "NAK to %s failed: rc %d\n", libcfs_nid2str(dst_nid), rc);
}
int
kgnilnd_process_nak(kgn_dgram_t *dgram)
{
        kgn_connreq_t   *connreq = &dgram->gndg_conn_in;
        lnet_nid_t       src_nid = connreq->gncr_srcnid;
        int              errno = connreq->gncr_nakdata.gnnd_errno;
        kgn_peer_t      *peer;
        kgn_conn_t       conn;
        int              rc = 0;

        write_lock(&kgnilnd_data.kgn_peer_conn_lock);

        peer = kgnilnd_find_peer_locked(src_nid);
        if (peer == NULL) {
                /* we likely dropped him from bad data when we processed
                 * the original REQ */
                write_unlock(&kgnilnd_data.kgn_peer_conn_lock);
                return -EBADF;
        }
        /* need to check peerstamp/connstamp against the ones we find
         * to make sure we don't close new (and good?) conns that we
         * formed after this connreq failed */
        if (peer->gnp_connecting == GNILND_PEER_IDLE) {
                if (list_empty(&peer->gnp_conns)) {
                        /* assume we already processed the datagram and it barfed up
                         * on this side too */
                        CDEBUG(D_NET, "dropping NAK from %s; "
                               "peer %s is already not connected\n",
                               libcfs_nid2str(connreq->gncr_srcnid),
                               libcfs_nid2str(connreq->gncr_dstnid));
                        write_unlock(&kgnilnd_data.kgn_peer_conn_lock);
                        return 0;
                }

                /* stub up a connection with the connreq XXX_stamps to allow
                 * us to use close_stale_conns_locked */
                conn.gnc_peerstamp = connreq->gncr_peerstamp;
                conn.gnc_my_connstamp = connreq->gncr_connstamp;
                conn.gnc_peer_connstamp = connreq->gncr_connstamp;
                conn.gnc_device = peer->gnp_net->gnn_dev;

                rc = kgnilnd_close_stale_conns_locked(peer, &conn);

                LCONSOLE_INFO("Received NAK from %s for %s errno %d; "
                              "closed %d connections\n",
                              libcfs_nid2str(connreq->gncr_srcnid),
                              libcfs_nid2str(connreq->gncr_dstnid), errno, rc);
        } else {
                spin_lock(&dgram->gndg_conn->gnc_device->gnd_connd_lock);

                if (list_empty(&peer->gnp_connd_list)) {
                        /* if peer isn't on waiting list, try to find one to nuke */
                        rc = kgnilnd_find_and_cancel_dgram(peer->gnp_net->gnn_dev,
                                                           peer->gnp_nid);
                        if (rc) {
                                LCONSOLE_INFO("Received NAK from %s for %s errno %d; "
                                              "canceled pending connect request\n",
                                              libcfs_nid2str(connreq->gncr_srcnid),
                                              libcfs_nid2str(connreq->gncr_dstnid), errno);
                        }

                        /* if we can't find a waiting dgram, we just drop the NAK - the
                         * connect must have failed (we didn't find a conn above and
                         * cleared connecting), so there is nothing to do besides drop */
                } else {
                        /* peer is on list, meaning it is a new connect attempt from the one
                         * we started that generated the NAK - so just drop NAK */

                        /* use negative to prevent error message */
                        rc = -EAGAIN;
                }
                spin_unlock(&dgram->gndg_conn->gnc_device->gnd_connd_lock);
        }

        /* success! we found a peer and at least marked pending_nak */
        write_unlock(&kgnilnd_data.kgn_peer_conn_lock);

        return rc;
}
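
/*
 * Note the technique used above: rather than growing a second "close by
 * stamps" helper, kgnilnd_process_nak stubs up a throwaway kgn_conn_t on the
 * stack, fills in only the fields the comparison in
 * kgnilnd_close_stale_conns_locked actually reads, and reuses that one
 * matcher.  A hedged sketch of the same pattern with made-up names:
 */
#if 0
#include <stdio.h>
#include <string.h>

struct demo_conn {
        unsigned long long peerstamp;   /* only field the matcher reads */
        /* ... many other fields a real connection would carry ... */
};

/* returns nonzero when 'old' predates 'key' and should be torn down */
static int demo_is_stale(const struct demo_conn *old,
                         const struct demo_conn *key)
{
        return old->peerstamp < key->peerstamp;
}

int main(void)
{
        struct demo_conn existing = { .peerstamp = 100 };
        struct demo_conn key;

        /* stub conn: zero it, then set only what demo_is_stale consults */
        memset(&key, 0, sizeof(key));
        key.peerstamp = 200;

        printf("existing conn stale vs NAK stamps: %d\n",
               demo_is_stale(&existing, &key)); /* prints 1 */
        return 0;
}
#endif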
int
kgnilnd_process_connreq(kgn_dgram_t *dgram, int *needs_nak)
{
        int     rc;

        rc = kgnilnd_unpack_connreq(dgram);
        if (rc < 0) {
                /* only NAK if we have a good srcnid to use */
                if (dgram->gndg_conn_in.gncr_srcnid != LNET_NID_ANY) {
                        *needs_nak = 1;
                }
                RETURN(rc);
        }

        switch (dgram->gndg_conn_in.gncr_type) {
        case GNILND_CONNREQ_REQ:
                /* wire up peer & conn, send queued TX */
                rc = kgnilnd_finish_connect(dgram);

                /* don't nak when the nid is hosed */
                if (rc < 0) {
                        *needs_nak = 1;
                }
                break;
        case GNILND_CONNREQ_NAK:
                rc = kgnilnd_process_nak(dgram);
                /* return early to prevent reconnect bump */
                RETURN(rc);
        default:
                CERROR("unexpected connreq type %s (%d) from %s\n",
                       kgnilnd_connreq_type2str(&dgram->gndg_conn_in),
                       dgram->gndg_conn_in.gncr_type,
                       libcfs_nid2str(dgram->gndg_conn_in.gncr_srcnid));
                rc = -EINVAL;
                break;
        }

        RETURN(rc);
}
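
/*
 * The needs_nak out-parameter above exists so the NAK itself can be sent by
 * the caller after the datagram is released and locks are dropped; sending
 * it inline here would post a new dgram while per-dgram state is still live.
 * A small sketch of the shape of that contract (illustrative names only):
 */
#if 0
#include <stdio.h>

static int demo_process(int input, int *needs_nak)
{
        if (input < 0) {
                *needs_nak = 1; /* tell caller to reject; don't do it here */
                return input;
        }
        return 0;
}

int main(void)
{
        int needs_nak = 0;
        int rc = demo_process(-22, &needs_nak);

        /* caller performs the rejection once it is back in a safe context */
        if (needs_nak)
                printf("deferred NAK, rc %d\n", rc);
        return 0;
}
#endif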
int
kgnilnd_probe_and_process_dgram(kgn_device_t *dev)
{
        int              rc = 0;
        int              needs_nak = 0;
        lnet_nid_t       nak_dstnid = LNET_NID_ANY;
        lnet_nid_t       orig_dstnid;
        kgn_dgram_t     *dgram = NULL;
        kgn_peer_t      *peer;

        if (CFS_FAIL_CHECK(CFS_FAIL_GNI_PAUSE_DGRAM_COMP)) {
                /* hold off on dgram completions while the fail_loc is set */
                RETURN(0);
        }

        rc = kgnilnd_probe_for_dgram(dev, &dgram);
        if (rc == 0) {
                /* nothing to process */
                RETURN(0);
        } else if (rc < 0) {
                GOTO(inform_peer, rc);
        } else {
                /* rc > 1 means it did something, reset for this func */
                rc = 0;
        }
        switch (dgram->gndg_type) {
        case GNILND_DGRAM_WC_REQ:
        case GNILND_DGRAM_REQ:
                rc = kgnilnd_process_connreq(dgram, &needs_nak);
                break;
        case GNILND_DGRAM_NAK:
                CDEBUG(D_NETTRACE, "NAK to %s done\n",
                       libcfs_nid2str(dgram->gndg_conn_out.gncr_dstnid));
                break;
        default:
                CERROR("unknown datagram type %s (%d)\n",
                       kgnilnd_dgram_type2str(dgram), dgram->gndg_type);
                break;
        }

        /* stash data to use after releasing current datagram */
        /* don't stash net - we are operating on a net already,
         * so the lock on rw_net_lock is sufficient */
        nak_dstnid = dgram->gndg_conn_in.gncr_srcnid;
inform_peer:
        LASSERTF(dgram != NULL, "dgram 0x%p rc %d needs_nak %d\n", dgram, rc, needs_nak);

        orig_dstnid = dgram->gndg_conn_out.gncr_dstnid;

        kgnilnd_release_dgram(dev, dgram, 0);

        CDEBUG(D_NET, "cleaning up dgram to %s, rc %d\n",
               libcfs_nid2str(orig_dstnid), rc);

        /* if this was a WC_REQ that matched an existing peer, it'll get marked done
         * in kgnilnd_finish_connect - if errors are from before we get to there,
         * we just drop as it is a WC_REQ - the peer CAN'T be waiting for it */
        if ((orig_dstnid != LNET_NID_ANY) && (rc < 0)) {
                /* if we have a negative rc, we want to find a peer to inform about
                 * the bad connection attempt. Sorry buddy, better luck next time! */
                write_lock(&kgnilnd_data.kgn_peer_conn_lock);
                peer = kgnilnd_find_peer_locked(orig_dstnid);

                if (peer != NULL) {
                        /* add ref to make sure he stays around past the possible unlink
                         * so we can tell LNet about him */
                        kgnilnd_peer_addref(peer);

                        /* if he still cares about the outstanding connect */
                        if (peer->gnp_connecting >= GNILND_PEER_CONNECT) {
                                /* check if he is on the connd list and remove.. */
                                spin_lock(&peer->gnp_net->gnn_dev->gnd_connd_lock);
                                if (!list_empty(&peer->gnp_connd_list)) {
                                        list_del_init(&peer->gnp_connd_list);
                                        /* drop connd ref */
                                        kgnilnd_peer_decref(peer);
                                }
                                spin_unlock(&peer->gnp_net->gnn_dev->gnd_connd_lock);

                                /* clear gnp_connecting so we don't have a non-connecting peer
                                 * on gnd_connd_list */
                                peer->gnp_connecting = GNILND_PEER_IDLE;

                                set_mb(peer->gnp_last_dgram_errno, rc);

                                kgnilnd_peer_increase_reconnect_locked(peer);
                        }
                        write_unlock(&kgnilnd_data.kgn_peer_conn_lock);

                        /* now that we are outside the lock, tell Mommy */
                        kgnilnd_peer_notify(peer, rc, 0);
                        kgnilnd_peer_decref(peer);
                } else {
                        write_unlock(&kgnilnd_data.kgn_peer_conn_lock);
                }
        }

        if (needs_nak) {
                kgnilnd_send_nak(dev, nak_dstnid, rc);
        }

        RETURN(1);
}
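
/*
 * The peer handling above follows a common kernel discipline: take a
 * reference while the lock is held, drop the lock, and only then call out
 * to code (kgnilnd_peer_notify) that may sleep or take other locks.  A
 * compressed userspace-flavoured sketch of that discipline; every demo_*
 * name is invented for illustration:
 */
#if 0
#include <pthread.h>
#include <stdio.h>

struct demo_obj {
        pthread_mutex_t lock;
        int             refs;
        int             data;
};

static void demo_notify(struct demo_obj *o)     /* may block; no locks held */
{
        printf("notifying, data %d\n", o->data);
}

static void demo_use(struct demo_obj *o)
{
        pthread_mutex_lock(&o->lock);
        o->refs++;                      /* pin object past the unlock */
        pthread_mutex_unlock(&o->lock);

        demo_notify(o);                 /* safe: unlocked, object pinned */

        pthread_mutex_lock(&o->lock);
        o->refs--;                      /* drop pin; real code frees at 0 */
        pthread_mutex_unlock(&o->lock);
}

int main(void)
{
        struct demo_obj o = { PTHREAD_MUTEX_INITIALIZER, 1, 42 };

        demo_use(&o);
        return 0;
}
#endif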
void
kgnilnd_reaper_dgram_check(kgn_device_t *dev)
{
        kgn_dgram_t     *dgram, *tmp;
        int              i;

        spin_lock(&dev->gnd_dgram_lock);

        for (i = 0; i < (*kgnilnd_tunables.kgn_peer_hash_size - 1); i++) {
                list_for_each_entry_safe(dgram, tmp, &dev->gnd_dgrams[i], gndg_list) {
                        unsigned long now = jiffies;
                        unsigned long timeout;

                        /* don't timeout stuff if the network is mucked or shutting down */
                        if (kgnilnd_check_hw_quiesce()) {
                                break;
                        }

                        if ((dgram->gndg_state != GNILND_DGRAM_POSTED) ||
                            (dgram->gndg_type == GNILND_DGRAM_WC_REQ)) {
                                continue;
                        }

                        CDEBUG(D_NETTRACE, "checking dgram 0x%p type %s "
                               "state %s conn 0x%p to %s age %lus\n",
                               dgram, kgnilnd_dgram_type2str(dgram),
                               kgnilnd_dgram_state2str(dgram), dgram->gndg_conn,
                               libcfs_nid2str(dgram->gndg_conn_out.gncr_dstnid),
                               cfs_duration_sec(now - dgram->gndg_post_time));

                        timeout = cfs_time_seconds(*kgnilnd_tunables.kgn_timeout);

                        if (time_before(now, (dgram->gndg_post_time + timeout)))
                                continue;

                        CNETERR("%s datagram to %s timed out @ %lus dgram "
                                "0x%p state %s conn 0x%p\n",
                                kgnilnd_dgram_type2str(dgram),
                                libcfs_nid2str(dgram->gndg_conn_out.gncr_dstnid),
                                cfs_duration_sec(now - dgram->gndg_post_time),
                                dgram, kgnilnd_dgram_state2str(dgram),
                                dgram->gndg_conn);

                        kgnilnd_cancel_dgram_locked(dgram);
                }
        }

        spin_unlock(&dev->gnd_dgram_lock);
}
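
/*
 * The age checks above lean on time_before()/time_after() rather than naked
 * comparisons so that jiffies wrap-around is handled: the macros compare via
 * signed subtraction, which stays correct as long as the two stamps are
 * within half the counter range.  A standalone sketch of that arithmetic
 * (demo_time_before mirrors the kernel macro; it is not gnilnd code):
 */
#if 0
#include <stdio.h>

/* same idea as the kernel's time_before(): signed-difference comparison */
#define demo_time_before(a, b)  ((long)((a) - (b)) < 0)

int main(void)
{
        unsigned long post_time = (unsigned long)-10;   /* just before wrap */
        unsigned long now = 5;                          /* just after wrap */

        /* naive 'now < post_time' claims the dgram is from the future */
        printf("naive: %d  wrap-safe: %d\n",
               now < post_time, demo_time_before(now, post_time));
        /* prints "naive: 1  wrap-safe: 0" - the dgram is correctly old */
        return 0;
}
#endif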
/* use a thread for the possibly long-blocking wait_by_id to prevent
 * stalling the global workqueues */
int
kgnilnd_dgram_waitq(void *arg)
{
        kgn_device_t    *dev = (kgn_device_t *) arg;
        char             name[16];
        gni_return_t     grc;
        __u64            readyid;
        DEFINE_WAIT(mover_done);

        snprintf(name, sizeof(name), "kgnilnd_dgn_%02d", dev->gnd_id);

        /* all gnilnd threads need to run fairly urgently */
        set_user_nice(current, *kgnilnd_tunables.kgn_nice);

        /* we don't shut down until the device shuts down ... */
        while (!kgnilnd_data.kgn_shutdown) {
                /* to quiesce or to not quiesce, that is the question */
                if (unlikely(kgnilnd_data.kgn_quiesce_trigger)) {
                        KGNILND_SPIN_QUIESCE;
                }

                while (CFS_FAIL_TIMEOUT(CFS_FAIL_GNI_PAUSE_DGRAM_COMP, 1)) {}

                /* check once a second */
                grc = kgnilnd_postdata_probe_wait_by_id(dev->gnd_handle,
                                                        1000, &readyid);

                if (grc == GNI_RC_SUCCESS) {
                        CDEBUG(D_INFO, "waking up dgram mover thread\n");
                        kgnilnd_schedule_dgram(dev);

                        /* wait for dgram thread to ping us before spinning again */
                        prepare_to_wait(&dev->gnd_dgping_waitq, &mover_done,
                                        TASK_INTERRUPTIBLE);

                        /* don't sleep if we need to quiesce */
                        if (likely(!kgnilnd_data.kgn_quiesce_trigger)) {
                                schedule();
                        }
                        finish_wait(&dev->gnd_dgping_waitq, &mover_done);
                }
        }

        kgnilnd_thread_fini();
        return 0;
}
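
/*
 * The prepare_to_wait()/finish_wait() dance above is the standard defence
 * against lost wakeups: the thread puts itself on the waitqueue *before*
 * re-checking the condition (here, the quiesce trigger), so a wakeup that
 * arrives between the check and the sleep is not missed.  A sketch of the
 * equivalent condition-variable discipline in userspace (names invented):
 */
#if 0
#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t demo_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  demo_cond = PTHREAD_COND_INITIALIZER;
static bool demo_ready;

static void demo_wait(void)
{
        pthread_mutex_lock(&demo_lock);
        /* condition re-checked under the lock: no wakeup can slip through */
        while (!demo_ready)
                pthread_cond_wait(&demo_cond, &demo_lock);
        demo_ready = false;
        pthread_mutex_unlock(&demo_lock);
}

static void demo_poke(void)
{
        pthread_mutex_lock(&demo_lock);
        demo_ready = true;
        pthread_cond_signal(&demo_cond);
        pthread_mutex_unlock(&demo_lock);
}

static void *demo_poker(void *arg)
{
        (void)arg;
        demo_poke();
        return NULL;
}

int main(void)
{
        pthread_t thr;

        pthread_create(&thr, NULL, demo_poker, NULL);
        demo_wait();                    /* returns once poked, never hangs */
        pthread_join(thr, NULL);
        return 0;
}
#endif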
int
kgnilnd_start_outbound_dgrams(kgn_device_t *dev, unsigned long deadline)
{
        int              did_something = 0, rc;
        kgn_peer_t      *peer = NULL;

        spin_lock(&dev->gnd_connd_lock);

        /* Active connect - we added this in kgnilnd_launch_tx */
        while (!list_empty(&dev->gnd_connd_peers) && time_before(jiffies, deadline)) {
                peer = list_first_entry(&dev->gnd_connd_peers,
                                        kgn_peer_t, gnp_connd_list);

                /* ref for connd removed in if/else below */
                list_del_init(&peer->gnp_connd_list);

                /* gnp_connecting and membership on gnd_connd_peers should be
                 * done coherently to avoid double adding, etc */
                /* don't need kgnilnd_data.kgn_peer_conn_lock here as that is only needed
                 * to get the peer to gnp_connecting in the first place. We just need to
                 * rely on gnd_connd_lock to serialize someone pulling him from the list
                 * BEFORE clearing gnp_connecting */
                LASSERTF(peer->gnp_connecting != GNILND_PEER_IDLE, "peer 0x%p->%s not connecting\n",
                         peer, libcfs_nid2str(peer->gnp_nid));

                spin_unlock(&dev->gnd_connd_lock);

                CDEBUG(D_NET, "processing connect to %s\n",
                       libcfs_nid2str(peer->gnp_nid));

                did_something += 1;
                rc = kgnilnd_start_connect(peer);

                if (likely(rc >= 0)) {
                        /* 0 on success, positive on 'just drop peer' errors */
                        kgnilnd_peer_decref(peer);
                } else if (rc == -ENOMEM) {
                        /* if we are out of wildcards, add back to
                         * connd_list - then break out and we'll try later;
                         * if other errors, we'll bail & cancel pending tx */
                        write_lock(&kgnilnd_data.kgn_peer_conn_lock);
                        if (peer->gnp_connecting == GNILND_PEER_POSTING) {
                                peer->gnp_connecting = GNILND_PEER_CONNECT;
                                spin_lock(&dev->gnd_connd_lock);
                                list_add_tail(&peer->gnp_connd_list,
                                              &dev->gnd_connd_peers);
                        } else {
                                /* connecting changed while we were posting */
                                LASSERTF(peer->gnp_connecting == GNILND_PEER_NEEDS_DEATH,
                                         "Peer is in invalid state 0x%p->%s, connecting %d\n",
                                         peer, libcfs_nid2str(peer->gnp_nid), peer->gnp_connecting);
                                peer->gnp_connecting = GNILND_PEER_KILL;
                                spin_lock(&dev->gnd_connd_lock);
                                /* remove the peer ref from the connd list */
                                kgnilnd_peer_decref(peer);
                                /* let the system handle itself */
                        }
                        write_unlock(&kgnilnd_data.kgn_peer_conn_lock);
                        /* the datagrams are a global pool,
                         * so break out of trying and hope some free
                         * up soon */
                        break;
                } else {
                        /* something bad happened, you lose */
                        CNETERR("could not start connecting to %s "
                                "rc %d: Will retry until TX timeout\n",
                                libcfs_nid2str(peer->gnp_nid), rc);
                        /* It didn't post, so just set connecting back to zero now.
                         * The reaper will reattempt the connection if it needs to.
                         * If the peer needs death, set it so the reaper will clean up. */
                        write_lock(&kgnilnd_data.kgn_peer_conn_lock);
                        if (peer->gnp_connecting == GNILND_PEER_POSTING) {
                                peer->gnp_connecting = GNILND_PEER_IDLE;
                                kgnilnd_peer_increase_reconnect_locked(peer);
                        } else {
                                LASSERTF(peer->gnp_connecting == GNILND_PEER_NEEDS_DEATH,
                                         "Peer is in invalid state 0x%p->%s, connecting %d\n",
                                         peer, libcfs_nid2str(peer->gnp_nid), peer->gnp_connecting);
                                peer->gnp_connecting = GNILND_PEER_KILL;
                        }
                        write_unlock(&kgnilnd_data.kgn_peer_conn_lock);

                        /* hold onto ref until we are really done - if it was
                         * unlinked this could result in a destroy */
                        kgnilnd_peer_decref(peer);
                }

                spin_lock(&dev->gnd_connd_lock);
        }

        spin_unlock(&dev->gnd_connd_lock);
        RETURN(did_something);
}
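
/*
 * kgnilnd_start_outbound_dgrams() above shows the usual pattern for walking
 * a work list whose handler must run unlocked: detach the head entry with
 * list_del_init() while holding the lock, drop the lock for the (possibly
 * blocking) kgnilnd_start_connect(), then retake the lock before looking at
 * the list again.  Hedged sketch of the loop shape; all demo_* names are
 * invented:
 */
#if 0
#include <pthread.h>
#include <stdio.h>

struct demo_node {
        struct demo_node *next;
        int               id;
};

static pthread_mutex_t   demo_lock = PTHREAD_MUTEX_INITIALIZER;
static struct demo_node *demo_head;

static void demo_handle(struct demo_node *n)    /* may block; runs unlocked */
{
        printf("handling %d\n", n->id);
}

static void demo_drain(void)
{
        pthread_mutex_lock(&demo_lock);
        while (demo_head != NULL) {
                struct demo_node *n = demo_head;

                demo_head = n->next;            /* detach under the lock */
                pthread_mutex_unlock(&demo_lock);

                demo_handle(n);                 /* no lock across the call */

                pthread_mutex_lock(&demo_lock); /* retake before next peek */
        }
        pthread_mutex_unlock(&demo_lock);
}

int main(void)
{
        struct demo_node b = { NULL, 2 }, a = { &b, 1 };

        demo_head = &a;
        demo_drain();
        return 0;
}
#endif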
int
kgnilnd_repost_wc_dgrams(kgn_device_t *dev)
{
        int     did_something = 0, to_repost, i;
        to_repost = atomic_read(&dev->gnd_nwcdgrams);

        for (i = 0; i < to_repost; ++i) {
                int     rerc;

                rerc = kgnilnd_post_dgram(dev, LNET_NID_ANY, GNILND_CONNREQ_REQ, 0);
                if (rerc == 0) {
                        kgnilnd_admin_decref(dev->gnd_nwcdgrams);
                        did_something += 1;
                } else {
                        CDEBUG(D_NETERROR, "error %d: dev %d could not post wildcard datagram\n",
                               rerc, dev->gnd_id);
                        break;
                }
        }

        RETURN(did_something);
}
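
/*
 * The wildcard repost accounting above is a simple credit scheme: completed
 * wildcard dgrams bump gnd_nwcdgrams, and this function converts each credit
 * back into a posted dgram, giving up early if a post fails.  Minimal sketch
 * of credit-driven reposting (illustrative names, userspace C11 atomics):
 */
#if 0
#include <stdatomic.h>
#include <stdio.h>

static atomic_int demo_credits = 3;     /* completions awaiting repost */

static int demo_post(int i)             /* 0 on success */
{
        return i < 2 ? 0 : -1;          /* pretend the pool runs dry at 2 */
}

int main(void)
{
        int i, reposted = 0, to_repost = atomic_load(&demo_credits);

        for (i = 0; i < to_repost; i++) {
                if (demo_post(i) != 0)
                        break;          /* out of resources - retry later */
                atomic_fetch_sub(&demo_credits, 1);
                reposted++;
        }
        printf("reposted %d, credits left %d\n",
               reposted, atomic_load(&demo_credits));
        return 0;
}
#endif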
static void
kgnilnd_dgram_poke_with_stick(unsigned long arg)
{
        int              dev_id = arg;
        kgn_device_t    *dev = &kgnilnd_data.kgn_devices[dev_id];

        wake_up(&dev->gnd_dgram_waitq);
}
/* use a single thread for dgrams - should be sufficient for performance */
int
kgnilnd_dgram_mover(void *arg)
{
        kgn_device_t    *dev = (kgn_device_t *)arg;
        char             name[16];
        int              rc, did_something;
        unsigned long    next_purge_check = jiffies - 1;
        long             timeout;
        struct timer_list timer;
        unsigned long    deadline = 0;
        DEFINE_WAIT(wait);

        snprintf(name, sizeof(name), "kgnilnd_dg_%02d", dev->gnd_id);

        /* all gnilnd threads need to run fairly urgently */
        set_user_nice(current, *kgnilnd_tunables.kgn_nice);

        /* we are ok not locking for these variables as the dgram waitq threads
         * will block both due to tying up net (kgn_shutdown) and the completion
         * event for the dgram_waitq (kgn_quiesce_trigger) */
        deadline = jiffies + cfs_time_seconds(*kgnilnd_tunables.kgn_dgram_timeout);
        while (!kgnilnd_data.kgn_shutdown) {
                /* Safe: kgn_shutdown only set when quiescent */

                /* race with stack reset - we want to hold off seeing any new incoming dgrams
                 * so we can force a dirty WC dgram for Bug 762072 - put right before
                 * quiesce check so that it'll go right into that and not do any
                 * dgram mucking */
                CFS_RACE(CFS_FAIL_GNI_WC_DGRAM_FREE);

                /* to quiesce or to not quiesce, that is the question */
                if (unlikely(kgnilnd_data.kgn_quiesce_trigger)) {
                        KGNILND_SPIN_QUIESCE;
                }

                did_something = 0;

                CFS_RACE(CFS_FAIL_GNI_QUIESCE_RACE);

                /* process any newly completed dgrams */
                down_read(&kgnilnd_data.kgn_net_rw_sem);

                rc = kgnilnd_probe_and_process_dgram(dev);
                if (rc > 0) {
                        did_something += rc;
                }

                up_read(&kgnilnd_data.kgn_net_rw_sem);

                CFS_FAIL_TIMEOUT(CFS_FAIL_GNI_DGRAM_DEADLINE,
                                 (*kgnilnd_tunables.kgn_dgram_timeout + 1));
                /* start new outbound dgrams */
                did_something += kgnilnd_start_outbound_dgrams(dev, deadline);

                /* find dead dgrams */
                if (time_after_eq(jiffies, next_purge_check)) {
                        /* these don't need to be checked that often */
                        kgnilnd_reaper_dgram_check(dev);

                        next_purge_check = (long) jiffies +
                                cfs_time_seconds(kgnilnd_data.kgn_new_min_timeout / 4);
                }

                did_something += kgnilnd_repost_wc_dgrams(dev);

                /* careful with the jiffy wrap... */
                timeout = (long)(next_purge_check - jiffies);

                CDEBUG(D_INFO, "did %d timeout %ld next %lu jiffies %lu\n",
                       did_something, timeout, next_purge_check, jiffies);

                if ((did_something || timeout <= 0) && time_before(jiffies, deadline)) {
                        continue;
                }

                prepare_to_wait(&dev->gnd_dgram_waitq, &wait, TASK_INTERRUPTIBLE);

                setup_timer(&timer, kgnilnd_dgram_poke_with_stick, dev->gnd_id);
                mod_timer(&timer, (long) jiffies + timeout);

                /* last second chance for others to poke us */
                did_something += xchg(&dev->gnd_dgram_ready, GNILND_DGRAM_IDLE);

                /* check flag variables before committing even if we
                 * did something; if we are after the deadline, call
                 * schedule */
                if ((!did_something || time_after(jiffies, deadline)) &&
                    !kgnilnd_data.kgn_shutdown &&
                    !kgnilnd_data.kgn_quiesce_trigger) {
                        CDEBUG(D_INFO, "schedule timeout %ld (%lu sec)\n",
                               timeout, cfs_duration_sec(timeout));
                        wake_up_all(&dev->gnd_dgping_waitq);
                        schedule();
                        CDEBUG(D_INFO, "awake after schedule\n");
                        deadline = jiffies + cfs_time_seconds(*kgnilnd_tunables.kgn_dgram_timeout);
                }

                del_singleshot_timer_sync(&timer);
                finish_wait(&dev->gnd_dgram_waitq, &wait);
        }

        kgnilnd_thread_fini();
        return 0;
}
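
/*
 * The timer in the mover loop above is a belt-and-braces wakeup: the thread
 * arms a one-shot timer for the next purge deadline before it commits to
 * sleeping, so even if no completion or peer ever pokes the waitqueue, the
 * poke_with_stick callback bounds the sleep.  Userspace sketch of the same
 * idea using an absolute-timeout condvar wait instead (names invented):
 */
#if 0
#include <pthread.h>
#include <stdio.h>
#include <time.h>

static pthread_mutex_t demo_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  demo_cond = PTHREAD_COND_INITIALIZER;

/* sleep until poked or until 'sec' seconds pass - whichever comes first */
static void demo_bounded_sleep(int sec)
{
        struct timespec deadline;

        clock_gettime(CLOCK_REALTIME, &deadline);
        deadline.tv_sec += sec;

        pthread_mutex_lock(&demo_lock);
        pthread_cond_timedwait(&demo_cond, &demo_lock, &deadline);
        pthread_mutex_unlock(&demo_lock);
}

int main(void)
{
        printf("sleeping at most 1s...\n");
        demo_bounded_sleep(1);          /* returns by timeout if never poked */
        printf("awake\n");
        return 0;
}
#endif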