/*
 * Copyright (C) 2012 Cray, Inc.
 *
 *   Author: Nic Henke <nic@cray.com>
 *   Author: James Shimek <jshimek@cray.com>
 *
 *   This file is part of Lustre, http://www.lustre.org.
 *
 *   Lustre is free software; you can redistribute it and/or
 *   modify it under the terms of version 2 of the GNU General Public
 *   License as published by the Free Software Foundation.
 *
 *   Lustre is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with Lustre; if not, write to the Free Software
 *   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 */

#include "gnilnd.h"

void
kgnilnd_setup_smsg_attr(gni_smsg_attr_t *smsg_attr)
{
        smsg_attr->mbox_maxcredit = *kgnilnd_tunables.kgn_mbox_credits;
        smsg_attr->msg_maxsize = GNILND_MAX_MSG_SIZE;
        smsg_attr->msg_type = GNI_SMSG_TYPE_MBOX_AUTO_RETRANSMIT;
}

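/* kgnilnd_map_fmablk: register an FMA memory block with the NIC.
 * PHYS blocks are registered GNI_MEM_PHYS_CONT; only non-PHYS (GART mapped)
 * blocks count against gnd_nbytes_map, but every registration consumes an
 * MDD, so gnd_n_mdd and gnd_nfmablk are bumped either way. */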
int
kgnilnd_map_fmablk(kgn_device_t *device, kgn_fma_memblock_t *fma_blk)
{
        gni_return_t            rrc;
        __u32                   flags = GNI_MEM_READWRITE;

        if (fma_blk->gnm_state == GNILND_FMABLK_PHYS) {
                flags |= GNI_MEM_PHYS_CONT;
        }

        /* make sure we are mapping a clean block */
        LASSERTF(fma_blk->gnm_hndl.qword1 == 0UL, "fma_blk %p dirty\n", fma_blk);

        rrc = kgnilnd_mem_register(device->gnd_handle, (__u64)fma_blk->gnm_block,
                                   fma_blk->gnm_blk_size, device->gnd_rcv_fma_cqh,
                                   flags, &fma_blk->gnm_hndl);
        if (rrc != GNI_RC_SUCCESS) {
                /* XXX Nic: need a way to silence this for runtime stuff that is ok to fail
                 * -- like when under MDD or GART pressure on big systems
                 */
                CNETERR("register fmablk failed 0x%p mbox_size %d flags %u\n",
                        fma_blk, fma_blk->gnm_mbox_size, flags);
                RETURN(-ENOMEM);
        }

        /* PHYS_CONT memory isn't really mapped, at least not in GART -
         *  but all mappings chew up a MDD
         */
        if (fma_blk->gnm_state != GNILND_FMABLK_PHYS) {
                atomic64_add(fma_blk->gnm_blk_size, &device->gnd_nbytes_map);
        }

        atomic_inc(&device->gnd_n_mdd);
        /* nfmablk is live (mapped) blocks */
        atomic_inc(&device->gnd_nfmablk);

        RETURN(0);
}

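/* kgnilnd_alloc_fmablk: carve out a new block of SMSG mailboxes.
 * use_phys selects a physically contiguous block from the mbox slab cache
 * (startup preallocation only); otherwise the block is virtually allocated.
 * Returns 0 either on success or when another thread raced us and changed
 * gnd_fmablk_vers - callers recheck the lists in both cases. */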
int
kgnilnd_alloc_fmablk(kgn_device_t *device, int use_phys)
{
        int                     rc = 0;
        int                     num_mbox;
        kgn_fma_memblock_t     *fma_blk;
        gni_smsg_attr_t         smsg_attr;
        unsigned long           fmablk_vers;

        /* we'll use fmablk_vers and the gnd_fmablk_sem to gate access
         * to this allocation code. Everyone will sample the version
         * before and after getting the semaphore. If it has changed,
         * we'll bail out to check the lists again - this indicates that
         * some sort of change was made to the lists and it is possible
         * that there is a mailbox for us to find now. This should prevent
         * a ton of spinning in the case where there are lots of threads
         * that need a yet-to-be-allocated mailbox for a connection. */

        fmablk_vers = atomic_read(&device->gnd_fmablk_vers);
        down(&device->gnd_fmablk_sem);

        if (fmablk_vers != atomic_read(&device->gnd_fmablk_vers)) {
                /* version changed while we were waiting for semaphore,
                 * we'll recheck the lists assuming something nice happened */
                up(&device->gnd_fmablk_sem);
                return 0;
        }

        LIBCFS_ALLOC(fma_blk, sizeof(kgn_fma_memblock_t));
        if (fma_blk == NULL) {
                CNETERR("could not allocate fma block descriptor\n");
                rc = -ENOMEM;
                GOTO(out, rc);
        }

        INIT_LIST_HEAD(&fma_blk->gnm_bufflist);

        kgnilnd_setup_smsg_attr(&smsg_attr);

        gni_smsg_buff_size_needed(&smsg_attr, &fma_blk->gnm_mbox_size);

        LASSERTF(fma_blk->gnm_mbox_size, "mbox size %d\n", fma_blk->gnm_mbox_size);

        /* gni_smsg_buff_size_needed calculates the base mailbox size and since
         * we want to hold kgn_peer_credits worth of messages in both directions,
         * we add PAYLOAD to grow the mailbox size
         */

        fma_blk->gnm_mbox_size += GNILND_MBOX_PAYLOAD;
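        /* illustrative arithmetic only (the real values come from kgni and
         * the tunables): if gni_smsg_buff_size_needed() reported a 2KiB base
         * and GNILND_MBOX_PAYLOAD were another 2KiB, each mailbox would
         * consume 4KiB of the block carved up below */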

        /* we'll only use physical during preallocate at startup -- this keeps it nice and
         * clean for runtime decisions. We'll keep the PHYS ones around until shutdown
         * as reallocating them is tough if there is memory fragmentation */

        if (use_phys) {
                fma_blk->gnm_block = kmem_cache_alloc(kgnilnd_data.kgn_mbox_cache, GFP_ATOMIC);
                if (fma_blk->gnm_block == NULL) {
                        CNETERR("could not allocate physical SMSG mailbox memory\n");
                        rc = -ENOMEM;
                        GOTO(free_desc, rc);
                }
                fma_blk->gnm_blk_size = KMALLOC_MAX_SIZE;
                num_mbox = fma_blk->gnm_blk_size / fma_blk->gnm_mbox_size;

                LASSERTF(num_mbox >= 1,
                         "num_mbox %d blk_size %u mbox_size %d\n",
                          num_mbox, fma_blk->gnm_blk_size, fma_blk->gnm_mbox_size);

                fma_blk->gnm_state = GNILND_FMABLK_PHYS;

        } else {
                num_mbox = *kgnilnd_tunables.kgn_mbox_per_block;
                fma_blk->gnm_blk_size = num_mbox * fma_blk->gnm_mbox_size;

                LASSERTF(num_mbox >= 1 && num_mbox >= *kgnilnd_tunables.kgn_mbox_per_block,
                         "num_mbox %d blk_size %u mbox_size %d tunable %d\n",
                         num_mbox, fma_blk->gnm_blk_size, fma_blk->gnm_mbox_size,
                         *kgnilnd_tunables.kgn_mbox_per_block);

                LIBCFS_ALLOC(fma_blk->gnm_block, fma_blk->gnm_blk_size);
                if (fma_blk->gnm_block == NULL) {
                        CNETERR("could not allocate virtual SMSG mailbox memory, %d bytes\n", fma_blk->gnm_blk_size);
                        rc = -ENOMEM;
                        GOTO(free_desc, rc);
                }

                fma_blk->gnm_state = GNILND_FMABLK_VIRT;
        }

        /* allocate just enough space for the bits to track the mailboxes */
        LIBCFS_ALLOC(fma_blk->gnm_bit_array, BITS_TO_LONGS(num_mbox) * sizeof(unsigned long));
        if (fma_blk->gnm_bit_array == NULL) {
                CNETERR("could not allocate mailbox bitmask, %lu bytes for %d mbox\n",
                       sizeof(unsigned long) * BITS_TO_LONGS(num_mbox), num_mbox);
                rc = -ENOMEM;
                GOTO(free_blk, rc);
        }
        bitmap_zero(fma_blk->gnm_bit_array, num_mbox);

        /* now that the num_mbox is set based on allocation type, get debug info setup */
        LIBCFS_ALLOC(fma_blk->gnm_mbox_info, sizeof(kgn_mbox_info_t) * num_mbox);
        if (fma_blk->gnm_mbox_info == NULL) {
                CNETERR("could not allocate mailbox debug, %lu bytes for %d mbox\n",
                       sizeof(kgn_mbox_info_t) * num_mbox, num_mbox);
                rc = -ENOMEM;
                GOTO(free_bit, rc);
        }

        rc = kgnilnd_map_fmablk(device, fma_blk);
        if (rc) {
                GOTO(free_info, rc);
        }

        fma_blk->gnm_next_avail_mbox = 0;
        fma_blk->gnm_avail_mboxs = fma_blk->gnm_num_mboxs = num_mbox;

        CDEBUG(D_MALLOC, "alloc fmablk 0x%p num %d msg_maxsize %d credits %d "
                "mbox_size %d MDD "LPX64"."LPX64"\n",
                fma_blk, num_mbox, smsg_attr.msg_maxsize, smsg_attr.mbox_maxcredit,
                fma_blk->gnm_mbox_size, fma_blk->gnm_hndl.qword1,
                fma_blk->gnm_hndl.qword2);

        /* the lock is protecting the data structures, not the semaphore */

        spin_lock(&device->gnd_fmablk_lock);
        list_add_tail(&fma_blk->gnm_bufflist, &device->gnd_fma_buffs);

        /* bump the version under the lock so once it changes the list is also
         * ready for others to traverse */
        atomic_inc(&device->gnd_fmablk_vers);

        spin_unlock(&device->gnd_fmablk_lock);

        up(&device->gnd_fmablk_sem);

        return 0;

free_info:
        LIBCFS_FREE(fma_blk->gnm_mbox_info, sizeof(kgn_mbox_info_t)*num_mbox);
free_bit:
        LIBCFS_FREE(fma_blk->gnm_bit_array, BITS_TO_LONGS(num_mbox) * sizeof (unsigned long));
free_blk:
        if (fma_blk->gnm_state == GNILND_FMABLK_VIRT) {
                LIBCFS_FREE(fma_blk->gnm_block, fma_blk->gnm_blk_size);
        } else {
                kmem_cache_free(kgnilnd_data.kgn_mbox_cache, fma_blk->gnm_block);
        }
free_desc:
        LIBCFS_FREE(fma_blk, sizeof(kgn_fma_memblock_t));
out:
        up(&device->gnd_fmablk_sem);
        return rc;
}

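/* kgnilnd_unmap_fmablk: deregister a block from the NIC.
 * If conns still hold mailboxes in purgatory, a hold_timeout is passed so
 * kgni keeps the MDD alive until those holds can expire; such blocks move
 * to gnd_n_mdd_held accounting instead of decrementing gnd_n_mdd. */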
void
kgnilnd_unmap_fmablk(kgn_device_t *dev, kgn_fma_memblock_t *fma_blk)
{
        gni_return_t            rrc;

        /* if any mboxs are held, set hold_timeout from the conn timeouts used
         * in this block - but not during shutdown, when we just nuke and pave */
        if (fma_blk->gnm_held_mboxs && (!kgnilnd_data.kgn_shutdown)) {
                fma_blk->gnm_hold_timeout = GNILND_TIMEOUT2DEADMAN;
        }

        /* we are changing the state of a block, tickle the version to tell
         * the proc code the list is stale now */
        atomic_inc(&dev->gnd_fmablk_vers);

        rrc = kgnilnd_mem_deregister(dev->gnd_handle, &fma_blk->gnm_hndl, fma_blk->gnm_hold_timeout);

        CDEBUG(rrc == GNI_RC_SUCCESS ? D_MALLOC : D_CONSOLE|D_NETERROR,
               "unmap fmablk 0x%p@%s sz %u total %d avail %d held %d mbox_size %d "
                "hold_timeout %d\n",
               fma_blk, kgnilnd_fmablk_state2str(fma_blk->gnm_state),
               fma_blk->gnm_blk_size, fma_blk->gnm_num_mboxs,
               fma_blk->gnm_avail_mboxs, fma_blk->gnm_held_mboxs,
               fma_blk->gnm_mbox_size, fma_blk->gnm_hold_timeout);

        LASSERTF(rrc == GNI_RC_SUCCESS,
                "tried to double unmap or something bad, fma_blk %p (rrc %d)\n",
                fma_blk, rrc);

        if (fma_blk->gnm_hold_timeout) {
                atomic_inc(&dev->gnd_n_mdd_held);
        } else {
                atomic_dec(&dev->gnd_n_mdd);
        }

        /* PHYS blocks don't get mapped */
        if (fma_blk->gnm_state != GNILND_FMABLK_PHYS) {
                atomic64_sub(fma_blk->gnm_blk_size, &dev->gnd_nbytes_map);
                fma_blk->gnm_state = GNILND_FMABLK_IDLE;
        } else if (kgnilnd_data.kgn_in_reset) {
                /* in stack reset, clear MDD handle for PHYS blocks, as we'll
                 * re-use the fma_blk after reset so we don't have to drop/allocate
                 * all of those physical blocks */
                fma_blk->gnm_hndl.qword1 = fma_blk->gnm_hndl.qword2 = 0UL;
        }

        /* Decrement here as this is the # of mapped blocks */
        atomic_dec(&dev->gnd_nfmablk);
}


/* needs lock on gnd_fmablk_lock to cover gnd_fma_buffs */
void
kgnilnd_free_fmablk_locked(kgn_device_t *dev, kgn_fma_memblock_t *fma_blk)
{
        LASSERTF(fma_blk->gnm_avail_mboxs == fma_blk->gnm_num_mboxs,
                 "fma_blk %p@%d free in bad state (%d): blk total %d avail %d held %d\n",
                 fma_blk, fma_blk->gnm_state, fma_blk->gnm_hold_timeout, fma_blk->gnm_num_mboxs,
                fma_blk->gnm_avail_mboxs, fma_blk->gnm_held_mboxs);

        atomic_inc(&dev->gnd_fmablk_vers);

        if (fma_blk->gnm_hold_timeout) {
                CDEBUG(D_MALLOC, "mdd release fmablk 0x%p sz %u avail %d held %d "
                        "mbox_size %d\n",
                        fma_blk, fma_blk->gnm_blk_size, fma_blk->gnm_avail_mboxs,
                        fma_blk->gnm_held_mboxs, fma_blk->gnm_mbox_size);

                /* We leave MDD dangling over stack reset */
                if (!kgnilnd_data.kgn_in_reset) {
                        kgnilnd_mem_mdd_release(dev->gnd_handle, &fma_blk->gnm_hndl);
                }
                /* ignoring the return code - if kgni/ghal can't find it
                 * it must be released already */
                atomic_dec(&dev->gnd_n_mdd_held);
                atomic_dec(&dev->gnd_n_mdd);
        }

        /* we can't free the gnm_block until all the conns have released their
         * purgatory holds. While we have purgatory holds, we might check the conn
         * RX mailbox during the CLOSING process. It is possible that kgni might
         * try to look into the RX side for credits when sending the CLOSE msg too */
        CDEBUG(D_MALLOC, "fmablk %p free buffer %p mbox_size %d\n",
                fma_blk, fma_blk->gnm_block, fma_blk->gnm_mbox_size);

        if (fma_blk->gnm_state == GNILND_FMABLK_PHYS) {
                kmem_cache_free(kgnilnd_data.kgn_mbox_cache, fma_blk->gnm_block);
        } else {
                LIBCFS_FREE(fma_blk->gnm_block, fma_blk->gnm_blk_size);
        }
        fma_blk->gnm_state = GNILND_FMABLK_FREED;

        list_del(&fma_blk->gnm_bufflist);

        LIBCFS_FREE(fma_blk->gnm_mbox_info, sizeof(kgn_mbox_info_t)*fma_blk->gnm_num_mboxs);
        LIBCFS_FREE(fma_blk->gnm_bit_array, BITS_TO_LONGS(fma_blk->gnm_num_mboxs) * sizeof (unsigned long));
        LIBCFS_FREE(fma_blk, sizeof(kgn_fma_memblock_t));
}

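/* kgnilnd_find_free_mbox: scan the mapped blocks on gnd_fma_buffs for a
 * free mailbox, claim it in the block's bitmap (wrapping the search around
 * gnm_next_avail_mbox) and point the conn's SMSG attributes at it. On
 * success conn->gnpr_smsg_attr.msg_buffer is non-NULL; callers use that to
 * tell whether a mailbox was found. */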
void
kgnilnd_find_free_mbox(kgn_conn_t *conn)
{
        kgn_device_t            *dev = conn->gnc_device;
        gni_smsg_attr_t         *smsg_attr = &conn->gnpr_smsg_attr;
        kgn_fma_memblock_t      *fma_blk;
        kgn_mbox_info_t         *mbox = NULL;
        int                     id;

        spin_lock(&dev->gnd_fmablk_lock);

        list_for_each_entry(fma_blk, &conn->gnc_device->gnd_fma_buffs,
                            gnm_bufflist) {
                if (fma_blk->gnm_avail_mboxs <= 0 ||
                    fma_blk->gnm_state <= GNILND_FMABLK_IDLE) {
                        continue;
                }
                /* look in bitarray for available mailbox */
                do {
                        id = find_next_zero_bit(
                                fma_blk->gnm_bit_array,
                                fma_blk->gnm_num_mboxs,
                                fma_blk->gnm_next_avail_mbox);
                        if (id == fma_blk->gnm_num_mboxs &&
                            fma_blk->gnm_next_avail_mbox != 0) {
                                /* wrap around */
                                fma_blk->gnm_next_avail_mbox = 0;
                        } else {
                                break;
                        }
                } while (1);

                LASSERTF(id < fma_blk->gnm_num_mboxs, "id %d max %d\n",
                         id, fma_blk->gnm_num_mboxs);
                set_bit(id, (volatile unsigned long *)fma_blk->gnm_bit_array);
                conn->gnc_mbox_id = id;

                fma_blk->gnm_next_avail_mbox =
                        (id == (fma_blk->gnm_num_mboxs - 1)) ? 0 : (id + 1);
                fma_blk->gnm_avail_mboxs--;
                conn->gnc_fma_blk = fma_blk;

                kgnilnd_setup_smsg_attr(smsg_attr);

                smsg_attr->msg_buffer = fma_blk->gnm_block;
                smsg_attr->mbox_offset = fma_blk->gnm_mbox_size * id;
                smsg_attr->mem_hndl = fma_blk->gnm_hndl;
                smsg_attr->buff_size = fma_blk->gnm_mbox_size;

                /* We'll set the hndl to zero for PHYS blocks unmapped during stack
                 * reset and re-use the same fma_blk after stack reset. This ensures we've
                 * properly mapped it before we use it */
                LASSERTF(fma_blk->gnm_hndl.qword1 != 0UL, "unmapped fma_blk %p, state %d\n",
                         fma_blk, fma_blk->gnm_state);

                CDEBUG(D_NET, "conn %p smsg %p fmablk %p "
                        "allocating SMSG mbox %d buf %p "
                        "offset %u hndl "LPX64"."LPX64"\n",
                        conn, smsg_attr, fma_blk, id,
                        smsg_attr->msg_buffer, smsg_attr->mbox_offset,
                        fma_blk->gnm_hndl.qword1,
                        fma_blk->gnm_hndl.qword2);

                mbox = &fma_blk->gnm_mbox_info[id];
                mbox->mbx_create_conn_memset = jiffies;
                mbox->mbx_nallocs++;
                mbox->mbx_nallocs_total++;

                /* zero mbox to remove any old data from our last use.
                 * this better be safe, if not our purgatory timers
                 * are too short or a peer really is misbehaving */
                memset(smsg_attr->msg_buffer + smsg_attr->mbox_offset,
                       0, smsg_attr->buff_size);
                break;
        }

        spin_unlock(&dev->gnd_fmablk_lock);
}

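/* kgnilnd_setup_mbox: find-or-allocate loop for a conn's SMSG mailbox.
 * Keeps trying kgnilnd_find_free_mbox(), allocating a fresh (virtual)
 * fmablk whenever the existing blocks come up empty, until a mailbox is
 * found or an allocation fails. Called from kgnilnd_pack_connreq() when
 * building a REQ. */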
int
kgnilnd_setup_mbox(kgn_conn_t *conn)
{
        gni_smsg_attr_t         *smsg_attr = &conn->gnpr_smsg_attr;
        int                      err = 0;

        smsg_attr->msg_buffer = NULL;
        /* Look for available mbox */
        do {
                kgnilnd_find_free_mbox(conn);

                /* nothing in the existing buffers, make a new one */
                if (smsg_attr->msg_buffer == NULL) {
                        /* for runtime allocations, we only want vmalloc */
                        err = kgnilnd_alloc_fmablk(conn->gnc_device, 0);
                        if (err) {
                                break;
                        }
                }
        } while (smsg_attr->msg_buffer == NULL);

        if (err)
                CNETERR("couldn't allocate SMSG mbox for conn %p, error %d\n",
                        conn, err);
        return err;
}

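/* kgnilnd_release_mbox: return a conn's mailbox to its fmablk.
 * purgatory_hold > 0 parks the mailbox in purgatory, == 0 frees it outright
 * and < 0 frees a mailbox previously held - e.g. a conn typically calls
 * release_mbox(conn, 1) when entering purgatory and release_mbox(conn, -1)
 * once the hold expires (call pattern is a sketch of the semantics below,
 * not a specific caller). Once every mailbox in a non-PHYS block is back
 * (avail or held) the block is unmapped, and freed when all are avail. */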
void
kgnilnd_release_mbox(kgn_conn_t *conn, int purgatory_hold)
{
        kgn_device_t           *dev = conn->gnc_device;
        gni_smsg_attr_t        *smsg_attr = &conn->gnpr_smsg_attr;
        kgn_fma_memblock_t     *fma_blk = NULL;
        kgn_mbox_info_t        *mbox = NULL;
        int                     found = 0;
        int                     id;

        /* if we failed to setup the mbox and are now destroying the conn */
        if (smsg_attr->msg_buffer == NULL) {
                return;
        }

        id = conn->gnc_mbox_id;

        spin_lock(&dev->gnd_fmablk_lock);
        /* make sure our conn points at a valid fma_blk
         * We use this instead of a mem block search out of smsg_attr
         * because we could have freed a block for fma_blk #1 but the fma_blk
         * is still in the list for a purgatory hold. This would induce a false
         * match if that same block gets reallocated to fma_blk #2 */
        list_for_each_entry(fma_blk, &dev->gnd_fma_buffs, gnm_bufflist) {
                if (fma_blk == conn->gnc_fma_blk) {
                        found = 1;
                        break;
                }
        }
        LASSERTF(found, "unable to find conn 0x%p with gnc_fma_blk %p "
                 "anywhere in the world\n", conn, conn->gnc_fma_blk);

        LASSERTF(id < fma_blk->gnm_num_mboxs,
                "bad id %d max %d\n",
                id, fma_blk->gnm_num_mboxs);

        /* < 0 - was held, now free it
         * == 0 - just free it
         * > 0 - hold it for now */
        if (purgatory_hold == 0) {
                CDEBUG(D_NET, "conn %p smsg %p fmablk %p freeing SMSG mbox %d "
                        "hndl "LPX64"."LPX64"\n",
                        conn, smsg_attr, fma_blk, id,
                        fma_blk->gnm_hndl.qword1, fma_blk->gnm_hndl.qword2);
                fma_blk->gnm_avail_mboxs++;

        } else if (purgatory_hold > 0) {
                CDEBUG(D_NET, "conn %p smsg %p fmablk %p holding SMSG mbox %d "
                        "hndl "LPX64"."LPX64"\n",
                        conn, smsg_attr, fma_blk, id,
                        fma_blk->gnm_hndl.qword1, fma_blk->gnm_hndl.qword2);

                fma_blk->gnm_held_mboxs++;
                fma_blk->gnm_max_timeout = MAX(fma_blk->gnm_max_timeout,
                                                conn->gnc_timeout);
        } else {
                CDEBUG(D_NET, "conn %p smsg %p fmablk %p release SMSG mbox %d "
                        "hndl "LPX64"."LPX64"\n",
                        conn, smsg_attr, fma_blk, id,
                        fma_blk->gnm_hndl.qword1, fma_blk->gnm_hndl.qword2);

                fma_blk->gnm_held_mboxs--;
                fma_blk->gnm_avail_mboxs++;
        }

        if (purgatory_hold <= 0) {
                /* if kgni is retransmitting, freeing the smsg block before the EP
                 * is destroyed gets messy. Bug 768295. */
                LASSERTF(conn->gnc_ephandle == NULL,
                         "can't release mbox before EP is nuked. conn 0x%p\n", conn);

                mbox = &fma_blk->gnm_mbox_info[id];
                mbox->mbx_release_from_purgatory = jiffies;

                /* clear conn gnc_fmablk if it is gone - this allows us to
                 * not worry about state so much in kgnilnd_destroy_conn
                 * and makes the guaranteed cleanup of the resources easier */
                LASSERTF(test_and_clear_bit(id, fma_blk->gnm_bit_array),
                        "conn %p bit %d already cleared in fma_blk %p\n",
                         conn, id, fma_blk);
                conn->gnc_fma_blk = NULL;
                mbox->mbx_nallocs--;
        }

        if (CFS_FAIL_CHECK(CFS_FAIL_GNI_FMABLK_AVAIL)) {
                CERROR("LBUGs in your future: forcibly marking fma_blk %p "
                       "as mapped\n", fma_blk);
                fma_blk->gnm_state = GNILND_FMABLK_VIRT;
        }

        /* we don't release or unmap PHYS blocks as part of the normal cycle --
         * those are controlled manually from startup/shutdown */
        if (fma_blk->gnm_state != GNILND_FMABLK_PHYS) {
                /* we can unmap once all are unused (held or avail)
                 * but check hold_timeout to make sure we are not trying to double
                 * unmap this buffer. If there was no hold_timeout set due to
                 * held_mboxs, we'll free the mbox here shortly and won't have to
                 * worry about catching a double free for a 'clean' fma_blk */
                if (((fma_blk->gnm_avail_mboxs + fma_blk->gnm_held_mboxs) == fma_blk->gnm_num_mboxs) &&
                    (!fma_blk->gnm_hold_timeout)) {
                        kgnilnd_unmap_fmablk(dev, fma_blk);
                }

                /* But we can only free once they are all avail */
                if (fma_blk->gnm_avail_mboxs == fma_blk->gnm_num_mboxs &&
                    fma_blk->gnm_held_mboxs == 0) {
                        /* all mailboxes are released, free fma_blk */
                        kgnilnd_free_fmablk_locked(dev, fma_blk);
                }
        }

        spin_unlock(&dev->gnd_fmablk_lock);
}

int
kgnilnd_count_phys_mbox(kgn_device_t *device)
{
        int                     i = 0;
        kgn_fma_memblock_t     *fma_blk;

        spin_lock(&device->gnd_fmablk_lock);

        list_for_each_entry(fma_blk, &device->gnd_fma_buffs, gnm_bufflist) {
                if (fma_blk->gnm_state == GNILND_FMABLK_PHYS)
                        i += fma_blk->gnm_num_mboxs;
        }
        spin_unlock(&device->gnd_fmablk_lock);

        RETURN(i);
}

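/* The helpers below manage the PHYS mailbox blocks preallocated at startup:
 * kgnilnd_allocate_phys_fmablk() keeps allocating until kgn_nphys_mbox
 * mailboxes exist, while the map/unmap/free variants walk gnd_fma_buffs and
 * apply the matching single-block operation to each PHYS block (e.g. on
 * either side of a stack reset, or at shutdown). */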
int
kgnilnd_allocate_phys_fmablk(kgn_device_t *device)
{
        int     rc;

        while (kgnilnd_count_phys_mbox(device) < *kgnilnd_tunables.kgn_nphys_mbox) {

                rc = kgnilnd_alloc_fmablk(device, 1);
                if (rc) {
                        CERROR("failed phys mbox allocation, stopping at %d, rc %d\n",
                                kgnilnd_count_phys_mbox(device), rc);
                        RETURN(rc);
                }
        }
        RETURN(0);
}

int
kgnilnd_map_phys_fmablk(kgn_device_t *device)
{
        int                     rc = 0;
        kgn_fma_memblock_t     *fma_blk;

        /* use sem to gate access to single thread, just in case */
        down(&device->gnd_fmablk_sem);

        spin_lock(&device->gnd_fmablk_lock);

        list_for_each_entry(fma_blk, &device->gnd_fma_buffs, gnm_bufflist) {
                if (fma_blk->gnm_state == GNILND_FMABLK_PHYS) {
                        rc = kgnilnd_map_fmablk(device, fma_blk);
                        if (rc)
                                break;
                }
        }
        spin_unlock(&device->gnd_fmablk_lock);

        up(&device->gnd_fmablk_sem);

        RETURN(rc);
}

void
kgnilnd_unmap_phys_fmablk(kgn_device_t *device)
{
        kgn_fma_memblock_t      *fma_blk;

        /* use sem to gate access to single thread, just in case */
        down(&device->gnd_fmablk_sem);

        spin_lock(&device->gnd_fmablk_lock);

        list_for_each_entry(fma_blk, &device->gnd_fma_buffs, gnm_bufflist) {
                if (fma_blk->gnm_state == GNILND_FMABLK_PHYS)
                        kgnilnd_unmap_fmablk(device, fma_blk);
        }
        spin_unlock(&device->gnd_fmablk_lock);

        up(&device->gnd_fmablk_sem);
}

void
kgnilnd_free_phys_fmablk(kgn_device_t *device)
{
        kgn_fma_memblock_t      *fma_blk, *fma_blkN;

        /* use sem to gate access to single thread, just in case */
        down(&device->gnd_fmablk_sem);

        spin_lock(&device->gnd_fmablk_lock);

        list_for_each_entry_safe(fma_blk, fma_blkN, &device->gnd_fma_buffs, gnm_bufflist) {
                if (fma_blk->gnm_state == GNILND_FMABLK_PHYS)
                        kgnilnd_free_fmablk_locked(device, fma_blk);
        }
        spin_unlock(&device->gnd_fmablk_lock);

        up(&device->gnd_fmablk_sem);
}

/* kgnilnd dgram nid->struct management */

static inline struct list_head *
kgnilnd_nid2dgramlist(kgn_device_t *dev, lnet_nid_t nid)
{
        unsigned int hash = ((unsigned int)nid) % *kgnilnd_tunables.kgn_peer_hash_size;

        RETURN(&dev->gnd_dgrams[hash]);
}


/* needs dev->gnd_dgram_lock held */
kgn_dgram_t *
kgnilnd_find_dgram_locked(kgn_device_t *dev, lnet_nid_t dst_nid)
{
        struct list_head *dgram_list = kgnilnd_nid2dgramlist(dev, dst_nid);
        kgn_dgram_t      *dgram;

        list_for_each_entry(dgram, dgram_list, gndg_list) {

                /* if state > POSTED, we are already handling cancel/completion */
                if ((dgram->gndg_conn_out.gncr_dstnid != dst_nid) ||
                     dgram->gndg_state > GNILND_DGRAM_POSTED)
                        continue;

                CDEBUG(D_NET, "got dgram [%p] -> %s\n",
                       dgram, libcfs_nid2str(dst_nid));
                return dgram;
        }
        return NULL;
}

int
kgnilnd_find_and_cancel_dgram(kgn_device_t *dev, lnet_nid_t dst_nid)
{
        kgn_dgram_t     *dgram;

        spin_lock(&dev->gnd_dgram_lock);
        dgram = kgnilnd_find_dgram_locked(dev, dst_nid);

        if (dgram) {
                kgnilnd_cancel_dgram_locked(dgram);
        }
        spin_unlock(&dev->gnd_dgram_lock);

        RETURN(!!(dgram == NULL));
}

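/* kgnilnd_pack_connreq: fill a wire connreq for the given type.
 * The CFS_FAIL_CHECK branches below are fault-injection hooks that
 * deliberately corrupt individual fields so the unpack/validation path can
 * be exercised in testing; in normal operation each field comes straight
 * from the conn and kgnilnd_data. REQ types also allocate the SMSG mailbox
 * and pack its parameters into the payload. */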
int
kgnilnd_pack_connreq(kgn_connreq_t *connreq, kgn_conn_t *conn,
                     lnet_nid_t srcnid, lnet_nid_t dstnid,
                     kgn_connreq_type_t type)
{
        int err = 0;

        /* ensure we haven't violated max datagram size */
        CLASSERT(sizeof(kgn_connreq_t) <= GNI_DATAGRAM_MAXSIZE);

        /* no need to zero out, we do that when allocating dgram */
        connreq->gncr_magic     = GNILND_MSG_MAGIC;

        if (CFS_FAIL_CHECK(CFS_FAIL_GNI_PACK_SRCNID)) {
                srcnid = 0xABADBABE;
        } else if (CFS_FAIL_CHECK(CFS_FAIL_GNI_PACK_DSTNID)) {
                dstnid = 0xDEFEC8ED;
        }

        connreq->gncr_srcnid    = srcnid;
        connreq->gncr_dstnid    = dstnid;

        if (CFS_FAIL_CHECK(CFS_FAIL_GNI_CONNREQ_PROTO)) {
                connreq->gncr_version = 99;
        } else {
                connreq->gncr_version   = GNILND_CONNREQ_VERSION;
        }
        if (CFS_FAIL_CHECK(CFS_FAIL_GNI_CONNREQ_PROTO)) {
                connreq->gncr_type = 99;
        } else {
                connreq->gncr_type      = type;
        }
        if (CFS_FAIL_CHECK(CFS_FAIL_GNI_CONNREQ_PROTO)) {
                connreq->gncr_peerstamp = 0;
        } else {
                connreq->gncr_peerstamp = kgnilnd_data.kgn_peerstamp;
        }
        if (CFS_FAIL_CHECK(CFS_FAIL_GNI_CONNREQ_PROTO)) {
                connreq->gncr_connstamp = 0;
        } else {
                connreq->gncr_connstamp = conn->gnc_my_connstamp;
        }
        if (CFS_FAIL_CHECK(CFS_FAIL_GNI_CONNREQ_PROTO)) {
                connreq->gncr_timeout = 0;
        } else {
                connreq->gncr_timeout   = conn->gnc_timeout;
        }

        /* the other types pack their data into the payload in other places */
        if (type == GNILND_CONNREQ_REQ) {
                kgn_gniparams_t       *req_params = &connreq->gncr_gnparams;
                req_params->gnpr_host_id = conn->gnc_device->gnd_host_id;
                req_params->gnpr_cqid = conn->gnc_cqid;

                /* allocate mailbox for this connection */
                err = kgnilnd_setup_mbox(conn);
                if (err != 0) {
                        CERROR("Failed to setup FMA mailbox (%d)\n", err);
                }
                req_params->gnpr_smsg_attr = conn->gnpr_smsg_attr;
        }

        /* XXX Nic: TBD - checksum computation */

        return err;
}

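/* kgnilnd_unpack_connreq: validate and byte-swap an incoming connreq.
 * Nothing but -EBADF may be returned before gncr_srcnid has been fixed up,
 * since the caller uses that field to route a NAK; -EBADF is the one error
 * code that suppresses the NAK entirely. */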
int
kgnilnd_unpack_connreq(kgn_dgram_t *dgram)
{
        kgn_connreq_t           *connreq = &dgram->gndg_conn_in;
        int                      swab, rc = 0;
        kgn_net_t               *net;

        /* the following fields must be handled in a backwards compatible
         * manner to ensure we can always send and interpret NAKs */

        if (connreq->gncr_magic != GNILND_MSG_MAGIC &&
            connreq->gncr_magic != __swab32(GNILND_MSG_MAGIC)) {
                /* Unexpected magic! */
                CERROR("Unexpected magic %08x\n",
                       connreq->gncr_magic);
                return -EBADF;
        }

        swab = (connreq->gncr_magic == __swab32(GNILND_MSG_MAGIC));
        if (swab) {
                __swab32s(&connreq->gncr_magic);
                __swab32s(&connreq->gncr_cksum);
                __swab16s(&connreq->gncr_type);
                __swab16s(&connreq->gncr_version);
                __swab32s(&connreq->gncr_timeout);
                __swab64s(&connreq->gncr_srcnid);
                __swab64s(&connreq->gncr_dstnid);
                __swab64s(&connreq->gncr_peerstamp);
                __swab64s(&connreq->gncr_connstamp);
        }

        /* Do NOT return anything but -EBADF before we munge
         * connreq->gncr_srcnid - we need that to send the nak */

        if (dgram->gndg_conn_out.gncr_dstnid != LNET_NID_ANY) {
                lnet_nid_t      incoming = connreq->gncr_srcnid;

                /* even if the incoming packet is hosed, we know who we sent
                 * the original to and can set the srcnid so that we can properly
                 * look up our peer to close the loop on this connreq. We still use
                 * -EBADF to prevent a NAK - just in case there are issues with
                 * the payload coming from a random spot, etc. */
                connreq->gncr_srcnid = dgram->gndg_conn_out.gncr_dstnid;

                if (LNET_NIDADDR(dgram->gndg_conn_out.gncr_dstnid) !=
                                LNET_NIDADDR(incoming)) {
                        /* we got a datagram match for the wrong nid... */
                        CERROR("matched datagram 0x%p with srcnid %s "
                                "(%x), expecting %s (%x)\n",
                                dgram,
                                libcfs_nid2str(incoming),
                                LNET_NIDADDR(incoming),
                                libcfs_nid2str(dgram->gndg_conn_out.gncr_dstnid),
                                LNET_NIDADDR(dgram->gndg_conn_out.gncr_dstnid));
                        return -EBADF;
                }
        } else {
                /* if we have a wildcard datagram it should match an
                 * incoming "active" datagram that should have a fully formed
                 * srcnid and dstnid. If we couldn't unpack it, we drop it as a
                 * corrupted packet, otherwise we'll just verify that the dstnid
                 * matches the NID for the NET that the dgram was posted on */

                /* make sure their wildcard didn't match ours, that is impossible */
                LASSERTF(connreq->gncr_dstnid != LNET_NID_ANY,
                         "dgram 0x%p from %s, connreq 0x%p; "
                         "wildcard matched wildcard\n", dgram,
                         libcfs_nid2str(connreq->gncr_srcnid), connreq);

                rc = kgnilnd_find_net(connreq->gncr_dstnid, &net);

                if (rc == -ESHUTDOWN) {
                        CERROR("Looking up network: device is in shutdown\n");
                        return rc;
                } else if (rc == -ENONET) {
                        CERROR("Connection data from %s: she sent "
                        "dst_nid %s, but net lookup failed on "
                        "dgram 0x%p@%s\n",
                        libcfs_nid2str(connreq->gncr_srcnid),
                        libcfs_nid2str(connreq->gncr_dstnid),
                        dgram, kgnilnd_dgram_type2str(dgram));
                        return rc;
                }

                if (net->gnn_ni->ni_nid != connreq->gncr_dstnid) {
                        CERROR("Bad connection data from %s: she sent "
                               "dst_nid %s, but I am %s with dgram 0x%p@%s\n",
                               libcfs_nid2str(connreq->gncr_srcnid),
                               libcfs_nid2str(connreq->gncr_dstnid),
                               libcfs_nid2str(net->gnn_ni->ni_nid),
                               dgram, kgnilnd_dgram_type2str(dgram));
                        kgnilnd_net_decref(net);
                        return -EBADSLT;
                }

                /* kgnilnd_find_net takes a ref on the net it finds; you need
                 * to decref it when no longer needed. */
                kgnilnd_net_decref(net);
        }

        if (connreq->gncr_version != GNILND_CONNREQ_VERSION) {
                CERROR("Unexpected version %d\n", connreq->gncr_version);
                return -EPROTO;
        }

        /* XXX Nic: TBD - checksum validation */
        if (CFS_FAIL_CHECK(CFS_FAIL_GNI_CONNREQ_DROP)) {
                return -EBADF;
        }

        if (swab && connreq->gncr_type == GNILND_CONNREQ_REQ) {
                __u64 msg_addr = (__u64) connreq->gncr_gnparams.gnpr_smsg_attr.msg_buffer;

                __swab32s(&connreq->gncr_gnparams.gnpr_host_id);
                __swab32s(&connreq->gncr_gnparams.gnpr_cqid);
                __swab32s(&connreq->gncr_gnparams.gnpr_smsg_attr.buff_size);
                __swab16s(&connreq->gncr_gnparams.gnpr_smsg_attr.mbox_maxcredit);
                __swab32s(&connreq->gncr_gnparams.gnpr_smsg_attr.mbox_offset);
                __swab64s(&connreq->gncr_gnparams.gnpr_smsg_attr.mem_hndl.qword1);
                __swab64s(&connreq->gncr_gnparams.gnpr_smsg_attr.mem_hndl.qword2);
                __swab64s(&msg_addr);
                __swab32s(&connreq->gncr_gnparams.gnpr_smsg_attr.msg_maxsize);
                __swab32s(&connreq->gncr_gnparams.gnpr_smsg_attr.msg_type);
        } else if (swab && connreq->gncr_type == GNILND_CONNREQ_NAK) {
                __swab32s(&connreq->gncr_nakdata.gnnd_errno);
        }

        /* since we use a unique instance ID for each network, the driver
         * will take care of dropping datagrams if we don't have that network.
         */

        /* a few more idiot software or configuration checks */

        switch (connreq->gncr_type) {
        case GNILND_CONNREQ_REQ:
                /* wire up EP and SMSG block - this will check the incoming data
                 * and barf a NAK back if need to */
                rc = kgnilnd_set_conn_params(dgram);
                if (rc)
                        return rc;
                break;
        case GNILND_CONNREQ_NAK:
        case GNILND_CONNREQ_CLOSE:
                break;
        default:
                CERROR("unknown connreq packet type %d\n", connreq->gncr_type);
                return -EPROTO;
        }

        if (connreq->gncr_peerstamp == 0 || connreq->gncr_connstamp == 0) {
                CERROR("Received bad timestamps peer "LPU64" conn "LPU64"\n",
                connreq->gncr_peerstamp, connreq->gncr_connstamp);
                return -EPROTO;
        }

        if (connreq->gncr_timeout < GNILND_MIN_TIMEOUT) {
                CERROR("Received timeout %d < MIN %d\n",
                       connreq->gncr_timeout, GNILND_MIN_TIMEOUT);
                return -EPROTO;
        }

        return 0;
}

int
kgnilnd_alloc_dgram(kgn_dgram_t **dgramp, kgn_device_t *dev, kgn_dgram_type_t type)
{
        kgn_dgram_t         *dgram;

        dgram = kmem_cache_alloc(kgnilnd_data.kgn_dgram_cache, GFP_ATOMIC);
        if (dgram == NULL)
                return -ENOMEM;

        /* cache alloc'd memory is not zeroed */
        memset((void *)dgram, 0, sizeof(*dgram));

        INIT_LIST_HEAD(&dgram->gndg_list);
        dgram->gndg_state = GNILND_DGRAM_USED;
        dgram->gndg_type = type;
        dgram->gndg_magic = GNILND_DGRAM_MAGIC;

        atomic_inc(&dev->gnd_ndgrams);

        CDEBUG(D_MALLOC|D_NETTRACE, "slab-alloced 'dgram': %lu at %p.\n",
               sizeof(*dgram), dgram);

        *dgramp = dgram;
        return 0;
}

/* call this on a dgram that came back from kgnilnd_ep_postdata_test_by_id
 * returns < 0 on dgram to be cleaned up
 * > 0 on dgram that isn't done yet
 * == 0 on dgram that is ok and needs connreq processing */
int
kgnilnd_process_dgram(kgn_dgram_t *dgram, gni_post_state_t post_state)
{
        int rc = 0;

        switch (post_state) {
        case GNI_POST_COMPLETED:
                /* normal state for dgrams that need actual processing */
                /* GOTO to avoid processing dgram as canceled/done */
                GOTO(process_out, rc);

        case GNI_POST_PENDING:
                /* we should only see this if we are testing a WC dgram after a
                 * cancel - it means that it needs a full cycle of waiting
                 * for kgni_sm_task to finish moving it to TERMINATED */
                LASSERTF((dgram->gndg_type == GNILND_DGRAM_WC_REQ) &&
                          (dgram->gndg_state == GNILND_DGRAM_CANCELED),
                         "POST_PENDING dgram 0x%p with bad type %d(%s) or state %d(%s)\n",
                         dgram, dgram->gndg_type, kgnilnd_dgram_type2str(dgram),
                         dgram->gndg_state, kgnilnd_dgram_state2str(dgram));

                /* positive RC as this dgram isn't done yet */
                rc = EINPROGRESS;

                /* GOTO as this isn't done yet */
                GOTO(process_out, rc);
                break;

        case GNI_POST_TERMINATED:
                /* we've called cancel and it is done or remote guy called cancel and
                 * we've received it on a WC dgram */
#if 0
                /* we are seeing weird terminations on non WC dgrams when we have not
                 * canceled them */

                LASSERTF(dgram->gndg_state == GNILND_DGRAM_CANCELED ||
                         dgram->gndg_conn_out.gncr_dstnid == LNET_NID_ANY,
                        "dgram 0x%p with bad state %d(%s) or dst nid %s\n",
                        dgram, dgram->gndg_state, kgnilnd_dgram_state2str(dgram),
                        libcfs_nid2str(dgram->gndg_conn_out.gncr_dstnid));
#endif

                CDEBUG(D_NETTRACE, "dgram 0x%p saw %s, cleaning it up\n", dgram,
                       dgram->gndg_state == GNILND_DGRAM_CANCELED ? "canceled" : "terminated");

                rc = -ECANCELED;
                break;

        case GNI_POST_TIMEOUT:
                /* we could have a timeout on a wildcard dgram too - if
                 * we got the incoming request but the remote node beefed
                 * before kgni could send the match data back. We'll just error
                 * on the active case and bail out gracefully */
                if (dgram->gndg_conn_out.gncr_dstnid != LNET_NID_ANY) {
                        CNETERR("hardware timeout for connect to "
                               "%s after %lu seconds. Is node dead?\n",
                               libcfs_nid2str(dgram->gndg_conn_out.gncr_dstnid),
                               cfs_duration_sec(jiffies - dgram->gndg_post_time));
                }

                rc = -ETIMEDOUT;
                break;

        default:
                CERROR("dgram 0x%p with bad post_state %d\n", dgram, post_state);
                LBUG();
        }

        /* now finish cleaning up a dgram that is canceled/terminated and needs to
         * go away */

        /* If this was actively canceled, drop the count now that we are processing */
        if (dgram->gndg_state == GNILND_DGRAM_CANCELED) {
                atomic_dec(&dgram->gndg_conn->gnc_device->gnd_canceled_dgrams);
                /* caller responsible for gndg_list removal */
        }

process_out:

        RETURN(rc);
}

/* needs dev->gnd_dgram_lock held */
void
kgnilnd_cancel_dgram_locked(kgn_dgram_t *dgram)
{
        gni_return_t            grc;

        if (dgram->gndg_state != GNILND_DGRAM_POSTED) {
                return;
        }

        LASSERTF(dgram->gndg_conn != NULL,
                 "dgram 0x%p with NULL conn\n", dgram);

        /* C.E - WC dgrams could be canceled immediately but
         * if there was some match pending, we need to call
         * test_by_id to clear it out. If that test returns
         * POST_PENDING, it is half done and needs to go along
         * with the rest of dgrams and go through a kgni_sm_task cycle
         * and deliver a GNI_POST_TERMINATED event before they
         * are actually canceled */

        dgram->gndg_state = GNILND_DGRAM_CANCELED;

        if (dgram->gndg_conn->gnc_state >= GNILND_CONN_ESTABLISHED) {
                /* we don't need to cancel_by_id if the datagram was good */
                return;
        }

        /* let folks know there are outstanding cancels */
        atomic_inc(&dgram->gndg_conn->gnc_device->gnd_canceled_dgrams);
        /* leave on nid list until cancel is done for debugging fun */
        grc = kgnilnd_ep_postdata_cancel_by_id(dgram->gndg_conn->gnc_ephandle, (__u64) dgram);

        /* if we don't get success here, we have hosed up the dgram tracking
         * code and need to bail out */
        LASSERTF(grc == GNI_RC_SUCCESS,
                 "postdata_cancel returned %d for conn 0x%p to %s\n",
                 grc, dgram->gndg_conn,
                 dgram->gndg_conn->gnc_peer ?
                  libcfs_nid2str(dgram->gndg_conn->gnc_peer->gnp_nid)
                  : "<?>");

        CDEBUG(D_NETTRACE,
                "canceled dgram 0x%p conn 0x%p ephandle 0x%p\n",
                dgram, dgram->gndg_conn,
                dgram->gndg_conn->gnc_ephandle);

        if (dgram->gndg_type == GNILND_DGRAM_WC_REQ) {
                gni_post_state_t         post_state;
                int                      rc = 0;
                __u32                    remote_addr = 0, remote_id = 0;

                grc = kgnilnd_ep_postdata_test_by_id(dgram->gndg_conn->gnc_ephandle,
                                                     (__u64)dgram, &post_state,
                                                     &remote_addr, &remote_id);

                LASSERTF(grc == GNI_RC_NO_MATCH || grc == GNI_RC_SUCCESS,
                         "bad grc %d from test_by_id on dgram 0x%p\n",
                        grc, dgram);

                /* if WC was canceled immediately, we get NO_MATCH, if needs to go
                 * through full cycle, we get SUCCESS and need to parse post_state */

                CDEBUG(D_NET, "grc %d dgram 0x%p type %s post_state %d "
                        "remote_addr %u remote_id %u\n", grc, dgram,
                        kgnilnd_dgram_type2str(dgram),
                        post_state, remote_addr, remote_id);

                if (grc == GNI_RC_NO_MATCH) {
                        /* she's gone, reduce count and move along */
                        dgram->gndg_state = GNILND_DGRAM_DONE;
                        atomic_dec(&dgram->gndg_conn->gnc_device->gnd_canceled_dgrams);
                        RETURN_EXIT;
                }

                rc = kgnilnd_process_dgram(dgram, post_state);

                if (rc <= 0) {
                        /* if for some weird reason we get a valid dgram back, just mark as done
                         * so we can drop it and move along.
                         * C.E - if it was completed, we'll just release the conn/mbox
                         * back into the pool and it'll get reused. That said, we should only
                         * be canceling a WC dgram on stack reset or shutdown, so that is moot */
                        dgram->gndg_state = GNILND_DGRAM_DONE;
                        atomic_dec(&dgram->gndg_conn->gnc_device->gnd_canceled_dgrams);

                        /* caller context responsible for calling kgnilnd_release_dgram() */
                } else {
                        /* still pending, let it simmer until golden brown and delicious */
                }
        }

        /* for non WC dgrams, they are still on the nid list but marked canceled waiting
         * for kgni to return their ID to us via probe - that is when we'll complete their
         * cancel processing */
}

void
kgnilnd_cleanup_dgram(kgn_dgram_t *dgram)
{
        /* release the dgram ref on conn */
        if (dgram->gndg_conn) {
                kgnilnd_conn_decref(dgram->gndg_conn);
                dgram->gndg_conn = NULL;
        }
}

void
kgnilnd_free_dgram(kgn_device_t *dev, kgn_dgram_t *dgram)
{
        LASSERTF(dgram->gndg_state == GNILND_DGRAM_USED ||
                 dgram->gndg_state == GNILND_DGRAM_DONE,
                 "dgram 0x%p with bad state %s\n",
                 dgram, kgnilnd_dgram_state2str(dgram));

        /* bit of poisoning to help detect bad driver data */
        dgram->gndg_magic = 0x6f5a6b5f;
        atomic_dec(&dev->gnd_ndgrams);

        kmem_cache_free(kgnilnd_data.kgn_dgram_cache, dgram);
        CDEBUG(D_MALLOC|D_NETTRACE, "slab-freed 'dgram': %lu at %p.\n",
               sizeof(*dgram), dgram);
}

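/* kgnilnd_post_dgram: allocate and post a datagram of the given connreq
 * type. dstnid == LNET_NID_ANY posts a wildcard (WC) dgram that can match
 * any incoming peer; otherwise the EP is bound to the target NIC address
 * first. The dgram pointer itself is used as the post id, which is how
 * kgnilnd_probe_for_dgram() recovers it later. */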
1159 int
1160 kgnilnd_post_dgram(kgn_device_t *dev, lnet_nid_t dstnid, kgn_connreq_type_t type,
1161                    int data_rc)
1162 {
1163         int              rc = 0;
1164         kgn_dgram_t     *dgram = NULL;
1165         kgn_dgram_t     *tmpdgram;
1166         kgn_dgram_type_t dgtype;
1167         gni_return_t     grc;
1168         __u64            srcnid;
1169         ENTRY;
1170
1171         switch (type) {
1172         case GNILND_CONNREQ_REQ:
1173                 if (dstnid == LNET_NID_ANY)
1174                         dgtype = GNILND_DGRAM_WC_REQ;
1175                 else
1176                         dgtype = GNILND_DGRAM_REQ;
1177                 break;
1178         case GNILND_CONNREQ_NAK:
1179                 LASSERTF(dstnid != LNET_NID_ANY, "can't NAK to LNET_NID_ANY\n");
1180                 dgtype = GNILND_DGRAM_NAK;
1181                 break;
1182         default:
1183                 CERROR("unknown connreq type %d\n", type);
1184                 LBUG();
1185         }
1186
1187         rc = kgnilnd_alloc_dgram(&dgram, dev, dgtype);
1188         if (rc < 0) {
1189                 rc = -ENOMEM;
1190                 GOTO(post_failed, rc);
1191         }
1192
1193         rc = kgnilnd_create_conn(&dgram->gndg_conn, dev);
1194         if (rc) {
1195                 GOTO(post_failed, rc);
1196         }
1197
1198         if (dgram->gndg_type == GNILND_DGRAM_WC_REQ) {
1199                 /* clear buffer for sanity on reuse of wildcard */
1200                 memset(&dgram->gndg_conn_in, 0, sizeof(kgn_connreq_t));
1201         }
1202
1203         if (dstnid == LNET_NID_ANY) {
1204                 /* set here to reset any dgram re-use */
1205                 dgram->gndg_conn->gnc_state = GNILND_CONN_LISTEN;
1206         } else {
1207                 __u32            host_id;
1208
1209                 rc = kgnilnd_nid_to_nicaddrs(LNET_NIDADDR(dstnid), 1, &host_id);
1210                 if (rc <= 0) {
1211                         rc = -ESRCH;
1212                         GOTO(post_failed, rc);
1213                 }
1214
1215                 dgram->gndg_conn->gnc_state = GNILND_CONN_CONNECTING;
1216
1217                 /* don't need to serialize, there are no CQs for the dgram
1218                  * EP on the kgn_net_t */
1219                 grc = kgnilnd_ep_bind(dgram->gndg_conn->gnc_ephandle, host_id, dev->gnd_id);
1220
1221                 if (grc != GNI_RC_SUCCESS) {
1222                         rc = -ECONNABORTED;
1223                         GOTO(post_failed, rc);
1224                 }
1225
1226         }
1227
1228         /* If we are posting wildcards post using a net of 0, otherwise we'll use the
1229          * net of the destination node.
1230          */
1231
1232         if (dstnid == LNET_NID_ANY) {
1233                 srcnid = LNET_MKNID(LNET_MKNET(GNILND, 0), dev->gnd_nid);
1234         } else {
1235                 srcnid = LNET_MKNID(LNET_NIDNET(dstnid), dev->gnd_nid);
1236         }
1237
1238         rc = kgnilnd_pack_connreq(&dgram->gndg_conn_out, dgram->gndg_conn,
1239                                   srcnid, dstnid, type);
1240         if (rc) {
1241                 GOTO(post_failed, rc);
1242         }
1243
1244         if (type == GNILND_CONNREQ_NAK)
1245                 dgram->gndg_conn_out.gncr_nakdata.gnnd_errno = data_rc;
1246
1247         dgram->gndg_post_time = jiffies;
1248
1249         /* XXX Nic: here is where we'd add in logical network multiplexing */
1250
1251         CDEBUG(D_NETTRACE, "dgram 0x%p type %s %s->%s cdm %d\n",
1252                dgram, kgnilnd_dgram_type2str(dgram),
1253                libcfs_nid2str(srcnid),
1254                libcfs_nid2str(dstnid), dev->gnd_id);
1255
1256         /* this allocates memory, can't hold locks across */
1257         grc = kgnilnd_ep_postdata_w_id(dgram->gndg_conn->gnc_ephandle,
1258                                    &dgram->gndg_conn_out, sizeof(kgn_connreq_t),
1259                                    &dgram->gndg_conn_in, sizeof(kgn_connreq_t),
1260                                    (__u64)dgram);
1261
1262         if (grc != GNI_RC_SUCCESS) {
1263                 CNETERR("dropping failed dgram post id 0x%p type %s"
1264                         " reqtype %s to %s: rc %d\n",
1265                         dgram, kgnilnd_dgram_type2str(dgram),
1266                         kgnilnd_connreq_type2str(&dgram->gndg_conn_out),
1267                         libcfs_nid2str(dstnid), grc);
1268                 rc = (grc == GNI_RC_ERROR_NOMEM) ? -ENOMEM : -EBADR;
1269                 GOTO(post_failed, rc);
1270         }
1271
1272         /* we don't need to add earlier - if someone does del_peer during post,
1273          * that peer will get marked as unlinked and the callers wil take care of it.
1274          * The dgram code is largely kgn_peer_t ignorant, so at worst, we'll just drop
1275          * the completed dgram later when we cant find a peer to stuff it into */
1276
1277         spin_lock(&dev->gnd_dgram_lock);
1278
1279         /* make sure we are not double posting targeted dgrams
1280          * - we can multiple post WC dgrams to help with processing speed */
1281         if (dstnid != LNET_NID_ANY) {
1282                 tmpdgram = kgnilnd_find_dgram_locked(dev, dstnid);
1283
1284                 LASSERTF(tmpdgram == NULL,
1285                         "dgram 0x%p->%s already posted\n",
1286                          dgram, libcfs_nid2str(dstnid));
1287         }
1288
1289         /* unmunge dstnid to help processing code cope... */
1290         if (CFS_FAIL_CHECK(CFS_FAIL_GNI_PACK_DSTNID)) {
1291                 dgram->gndg_conn_out.gncr_dstnid = dstnid;
1292         }
1293
1294         list_add_tail(&dgram->gndg_list, kgnilnd_nid2dgramlist(dev, dstnid));
1295         dgram->gndg_state = GNILND_DGRAM_POSTED;
1296         spin_unlock(&dev->gnd_dgram_lock);
1297
1298 post_failed:
1299         if (rc < 0 && dgram != NULL) {
1300                 kgnilnd_cleanup_dgram(dgram);
1301                 kgnilnd_free_dgram(dev, dgram);
1302         }
1303
1304         RETURN(rc);
1305 }
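
/* Usage sketch (illustrative only): wildcard posts pass LNET_NID_ANY so the
 * srcnid is built against net 0, while targeted posts reuse the destination's
 * net, e.g.:
 *
 *     rc = kgnilnd_post_dgram(dev, LNET_NID_ANY, GNILND_CONNREQ_REQ, 0);
 *     rc = kgnilnd_post_dgram(dev, peer->gnp_nid, GNILND_CONNREQ_REQ, 0);
 *
 * on a negative return the dgram has already been cleaned up and freed in
 * the post_failed path above, so callers must not touch it */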
1306
1307 void
1308 kgnilnd_release_dgram(kgn_device_t *dev, kgn_dgram_t *dgram)
1309 {
1310         spin_lock(&dev->gnd_dgram_lock);
1311         kgnilnd_cancel_dgram_locked(dgram);
1312         spin_unlock(&dev->gnd_dgram_lock);
1313
1314         kgnilnd_cleanup_dgram(dgram);
1315
1316         /* if the dgram is 'canceled' we need to wait until the event
1317          * comes up from kgni telling us it is safe to release */
1318         if (dgram->gndg_state != GNILND_DGRAM_CANCELED) {
1319                 dgram->gndg_state = GNILND_DGRAM_DONE;
1320
1321                 LASSERTF(list_empty(&dgram->gndg_list), "dgram 0x%p on list\n", dgram);
1322
1323                 /* if it is a wildcard and we are in an appropriate state, repost
1324                  * the wildcard */
1325
1326                 if ((dgram->gndg_type == GNILND_DGRAM_WC_REQ) &&
1327                     (!kgnilnd_data.kgn_wc_kill && !kgnilnd_data.kgn_in_reset)) {
1328                         int     rerc;
1329
1330                         rerc = kgnilnd_post_dgram(dev, LNET_NID_ANY, GNILND_CONNREQ_REQ, 0);
1331                         if (rerc != 0) {
1332                                 /* We failed to repost the WC dgram for some reason
1333                                  * mark it so the repost system attempts to repost */
1334                                 kgnilnd_admin_addref(dev->gnd_nwcdgrams);
1335                         }
1336                 }
1337
1338                 /* always free the old dgram */
1339                 kgnilnd_free_dgram(dev, dgram);
1340         }
1341 }
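
/* Datagram state flow as used in this file (informal sketch):
 *
 *     POSTED -> PROCESSING -> DONE -> freed
 *        \-> CANCELED -> (kgni GNI_POST_TERMINATED event) -> freed
 *
 * release only frees a dgram that is not CANCELED; canceled dgrams must wait
 * for kgni to tell us the post has terminated */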
1342
1343
1344 int
1345 kgnilnd_probe_for_dgram(kgn_device_t *dev, kgn_dgram_t **dgramp)
1346 {
1347         kgn_dgram_t             *dgram = NULL;
1348         gni_post_state_t         post_state;
1349         gni_return_t             grc;
1350         int                      rc = 0;
1351         __u64                    readyid;
1352         __u32                    remote_addr = 0, remote_id = 0;
1353         ENTRY;
1354
1355         /* Probe with the lock held. That way if we get a dgram we don't have it canceled
1356          * between finding the ready dgram and grabbing the lock to remove it from the
1357          * list. Otherwise we could be left in an inconsistent state. We own the dgram
1358          * once it's off the list so we don't need to worry about others changing it at
1359          * that point. */
1360         spin_lock(&dev->gnd_dgram_lock);
1361         grc = kgnilnd_postdata_probe_by_id(dev->gnd_handle, &readyid);
1362         if (grc != GNI_RC_SUCCESS) {
1363                 spin_unlock(&dev->gnd_dgram_lock);
1364                 /* return 0 to indicate nothing happened */
1365                 RETURN(0);
1366         }
1367
1368         CDEBUG(D_NET, "ready "LPX64" on device 0x%p\n",
1369                 readyid, dev);
1370
1371         dgram = (kgn_dgram_t *)readyid;
1372
1373         LASSERTF(dgram->gndg_magic == GNILND_DGRAM_MAGIC,
1374                  "dgram 0x%p from id "LPX64" with bad magic %x\n",
1375                  dgram, readyid, dgram->gndg_magic);
1376
1377         LASSERTF(dgram->gndg_state == GNILND_DGRAM_POSTED ||
1378                  dgram->gndg_state == GNILND_DGRAM_CANCELED,
1379                  "dgram 0x%p with bad state %s\n",
1380                  dgram, kgnilnd_dgram_state2str(dgram));
1381
1382         LASSERTF(!list_empty(&dgram->gndg_list),
1383                  "dgram 0x%p with bad list state %s\n",
1384                  dgram, kgnilnd_dgram_state2str(dgram));
1385
1386         /* now we know that the datagram structure is ok, so pull off list */
1387         list_del_init(&dgram->gndg_list);
1388
1389         /* while we have the gnd_dgram_lock and BEFORE we call test_by_id
1390          * change the state from POSTED to PROCESSING to ensure that
1391          * nobody cancels it after we've pulled it from the wire */
1392         if (dgram->gndg_state == GNILND_DGRAM_POSTED) {
1393                 dgram->gndg_state = GNILND_DGRAM_PROCESSING;
1394         }
1395
1396         spin_unlock(&dev->gnd_dgram_lock);
1397
1398         /* we now "own" this datagram */
1399
1400         LASSERTF(dgram->gndg_conn != NULL,
1401                 "dgram 0x%p with NULL conn\n", dgram);
1402
1403         grc = kgnilnd_ep_postdata_test_by_id(dgram->gndg_conn->gnc_ephandle,
1404                                              (__u64)dgram, &post_state,
1405                                              &remote_addr, &remote_id);
1406
1407         LASSERTF(grc != GNI_RC_NO_MATCH, "kgni lied! probe_by_id told us that"
1408                  " id "LPU64" was ready\n", readyid);
1409
1410         CDEBUG(D_NET, "grc %d dgram 0x%p type %s post_state %d "
1411                 "remote_addr %u remote_id %u\n", grc, dgram,
1412                 kgnilnd_dgram_type2str(dgram),
1413                 post_state, remote_addr, remote_id);
1414
1415         if (unlikely(grc != GNI_RC_SUCCESS)) {
1416                 CNETERR("getting data for dgram 0x%p->%s failed rc %d. Dropping it\n",
1417                         dgram, libcfs_nid2str(dgram->gndg_conn_out.gncr_dstnid),
1418                         grc);
1419                 rc = -EINVAL;
1420                 GOTO(probe_for_out, rc);
1421         }
1422
1423         rc = kgnilnd_process_dgram(dgram, post_state);
1424
1425         /* probe should never hand us a WC dgram that is still in the
1426          * middle of processing */
1427         LASSERTF(rc <= 0, "bad rc %d from process_dgram 0x%p state %d\n",
1428                  rc, dgram, post_state);
1429
1430         if (rc == 0) {
1431                 /* dgram is good enough for the data to be used */
1432                 dgram->gndg_state = GNILND_DGRAM_PROCESSING;
1433                 /* fake rc to mark that we've done something */
1434                 rc = 1;
1435         } else {
1436                 /* bring out your dead! */
1437                 dgram->gndg_state = GNILND_DGRAM_DONE;
1438         }
1439
1440         *dgramp = dgram;
1441         RETURN(rc);
1442
1443 probe_for_out:
1444
1445         *dgramp = dgram; /* hand back even on error so callers can release it */
1446         RETURN(rc);
1447 }
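
/* Return contract for kgnilnd_probe_for_dgram (summarizing the above):
 *   0  - nothing was ready, *dgramp is untouched
 *   1  - a dgram completed and its data is usable, *dgramp is set
 *  <0  - the dgram is dead, *dgramp is set and the caller must release it */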
1448
1449 int
1450 kgnilnd_setup_wildcard_dgram(kgn_device_t *dev)
1451 {
1452         /* if kgn_nwildcard is zero the loop never runs and we return -ENOENT */
1453         int     rc = -ENOENT, i;
1454         ENTRY;
1455
1456         for (i = 0; i < *kgnilnd_tunables.kgn_nwildcard; i++) {
1457                 rc = kgnilnd_post_dgram(dev, LNET_NID_ANY, GNILND_CONNREQ_REQ, 0);
1458                 if (rc < 0) {
1459                         CERROR("error %d: could not post wildcard datagram # %d\n",
1460                                 rc, i);
1461                         rc = -EINVAL;
1462                         GOTO(failed, rc);
1463                 }
1464         }
1465
1466 failed:
1467         RETURN(rc);
1468 }
1469
1470 int
1471 kgnilnd_cancel_net_dgrams(kgn_net_t *net)
1472 {
1473         kgn_dgram_t            *dg, *dgN;
1474         struct list_head        zombies;
1475         int                     i;
1476         ENTRY;
1477
1478         /* we want to cancel any outstanding dgrams - we don't want to rely
1479          * on del_peer_or_conn catching all of them. This helps protect us in cases
1480          * where we don't quite keep the peer->dgram mapping in sync due to some
1481          * race conditions */
1482
1483         LASSERTF(net->gnn_shutdown || kgnilnd_data.kgn_in_reset,
1484                  "called with LND invalid state: net shutdown %d "
1485                  "in reset %d\n", net->gnn_shutdown,
1486                  kgnilnd_data.kgn_in_reset);
1487
1488         INIT_LIST_HEAD(&zombies);
1489
1490         spin_lock(&net->gnn_dev->gnd_dgram_lock);
1491
1492         for (i = 0; i < *kgnilnd_tunables.kgn_peer_hash_size; i++) {
1493                 list_for_each_entry_safe(dg, dgN, &net->gnn_dev->gnd_dgrams[i], gndg_list) {
1494
1495                         /* skip wildcards and nids not on our net */
1496                         if (dg->gndg_type == GNILND_DGRAM_WC_REQ ||
1497                             net->gnn_netnum != LNET_NETNUM(LNET_NIDNET(dg->gndg_conn_out.gncr_dstnid)))
1498                                 continue;
1501
1502                         kgnilnd_cancel_dgram_locked(dg);
1503                 }
1504         }
1505
1506         spin_unlock(&net->gnn_dev->gnd_dgram_lock);
1507
1508         RETURN(0);
1509 }
1510
1511 int
1512 kgnilnd_cancel_wc_dgrams(kgn_device_t *dev)
1513 {
1514         kgn_dgram_t *dg, *dgN;
1515         struct list_head zombies;
1516         ENTRY;
1517
1518         /* Time to kill the outstanding WCs.
1519          * WCs exist on net 0 only but match on any net...
1520          */
1521
1522         LASSERTF(kgnilnd_data.kgn_in_reset || kgnilnd_data.kgn_wc_kill,
1523                 "called with LND invalid state: WC shutdown %d "
1524                 "in reset %d\n", kgnilnd_data.kgn_wc_kill,
1525                 kgnilnd_data.kgn_in_reset);
1526
1527         INIT_LIST_HEAD(&zombies);
1528         spin_lock(&dev->gnd_dgram_lock);
1529
1530         do {
1531                 dg = kgnilnd_find_dgram_locked(dev, LNET_NID_ANY);
1532                 if (dg != NULL) {
1533                         LASSERTF(dg->gndg_type == GNILND_DGRAM_WC_REQ,
1534                                  "dgram 0x%p->%s with bad type %d (%s)\n",
1535                                 dg, libcfs_nid2str(dg->gndg_conn_out.gncr_dstnid),
1536                                 dg->gndg_type, kgnilnd_dgram_type2str(dg));
1537
1538                         kgnilnd_cancel_dgram_locked(dg);
1539
1540                         /* WC could be DONE already, check and if so add to list to be released */
1541                         if (dg->gndg_state == GNILND_DGRAM_DONE) {
1542                                 list_move_tail(&dg->gndg_list, &zombies);
1543                         }
1545                 }
1546         } while (dg != NULL);
1547
1548         spin_unlock(&dev->gnd_dgram_lock);
1549
1550         list_for_each_entry_safe(dg, dgN, &zombies, gndg_list) {
1551                 list_del_init(&dg->gndg_list);
1552                 kgnilnd_release_dgram(dev, dg);
1553         }
1554         RETURN(0);
1556 }
1557
1558 void
1559 kgnilnd_wait_for_canceled_dgrams(kgn_device_t *dev)
1560 {
1561         int             i = 4;
1562         int             rc;
1563         gni_return_t    grc;
1564         __u64           readyid;
1565         kgn_dgram_t    *dgram;
1566
1567         /* use a do/while so at least one check runs, allowing the regression
1568          * test for bug 762072 to hit the bug if it is present */
1569
1570         /* This function races with the dgram mover during shutdown so it is possible for
1571          * a dgram to be seen in kgnilnd_postdata_probe_wait_by_id but be handled in the
1572          * dgram mover thread instead of inside of this function.
1573          */
1574
1575         /* This should only be called from within shutdown, base shutdown, or
1576          * stack reset. There are no assertions to verify that, since by the time
1577          * base_shutdown runs the net is gone and there is nothing left we can
1578          * check. */
1579
1580         do {
1581                 i++;
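                /* escalate to a console warning on power-of-two passes only */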
1582                 CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET,
1583                         "Waiting for %d canceled datagrams to clear on device %d\n",
1584                         atomic_read(&dev->gnd_canceled_dgrams), dev->gnd_id);
1585
1586                 /* check four times a second */
1587                 grc = kgnilnd_postdata_probe_wait_by_id(dev->gnd_handle,
1588                        250, &readyid);
1589
1590                 if (grc != GNI_RC_SUCCESS)
1591                         continue;
1592
1593                 CDEBUG(D_NET, "ready "LPX64" on device %d->0x%p\n",
1594                         readyid, dev->gnd_id, dev);
1595
1596                 rc = kgnilnd_probe_for_dgram(dev, &dgram);
1597                 if (rc != 0) {
1598                         /* if we got a valid dgram or one that is now done, clean up */
1599                         kgnilnd_release_dgram(dev, dgram);
1600                 }
1601         } while (atomic_read(&dev->gnd_canceled_dgrams));
1602 }
1603
1604 int
1605 kgnilnd_start_connect(kgn_peer_t *peer)
1606 {
1607         int              rc = 0;
1608         /* sync point for kgnilnd_del_peer_locked - do an early check to
1609          * catch the most common hits where del_peer is done by the
1610          * time we get here */
1611         if (CFS_FAIL_CHECK(CFS_FAIL_GNI_GNP_CONNECTING1)) {
1612                 while (CFS_FAIL_TIMEOUT(CFS_FAIL_GNI_GNP_CONNECTING1, 1)) {};
1613         }
1614
1615         write_lock(&kgnilnd_data.kgn_peer_conn_lock);
1616         if (!kgnilnd_peer_active(peer) || peer->gnp_connecting != GNILND_PEER_CONNECT) {
1617                 /* raced with peer getting unlinked */
1618                 write_unlock(&kgnilnd_data.kgn_peer_conn_lock);
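                /* positive rc means 'just drop peer' to our caller */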
1619                 rc = ESTALE;
1620                 GOTO(out, rc);
1621         }
1622         peer->gnp_connecting = GNILND_PEER_POSTING;
1623         write_unlock(&kgnilnd_data.kgn_peer_conn_lock);
1624
1625         set_mb(peer->gnp_last_dgram_time, jiffies);
1626         if (CFS_FAIL_CHECK(CFS_FAIL_GNI_GNP_CONNECTING2)) {
1627                 while (CFS_FAIL_TIMEOUT(CFS_FAIL_GNI_GNP_CONNECTING2, 1)) {};
1628         }
1629
1630         if (CFS_FAIL_CHECK(CFS_FAIL_GNI_GNP_CONNECTING3)) {
1631                 while (CFS_FAIL_TIMEOUT(CFS_FAIL_GNI_GNP_CONNECTING3, 1)) {};
1632                 rc = cfs_fail_val ? cfs_fail_val : -ENOMEM;
1633         } else {
1634                 rc = kgnilnd_post_dgram(peer->gnp_net->gnn_dev,
1635                                         peer->gnp_nid, GNILND_CONNREQ_REQ, 0);
1636         }
1637         if (rc < 0) {
1638                 set_mb(peer->gnp_last_dgram_errno, rc);
1639                 GOTO(failed, rc);
1640         }
1641
1642         /* while we're posting someone could have decided this peer/dgram needed to
1643          * die a quick death, so we check for state change and process accordingly */
1644
1645         write_lock(&kgnilnd_data.kgn_peer_conn_lock);
1646         if (!kgnilnd_peer_active(peer) || peer->gnp_connecting == GNILND_PEER_NEEDS_DEATH) {
1647                 if (peer->gnp_connecting == GNILND_PEER_NEEDS_DEATH) {
1648                         peer->gnp_connecting = GNILND_PEER_KILL;
1649                 }
1650                 write_unlock(&kgnilnd_data.kgn_peer_conn_lock);
1651                 /* positive RC to avoid dgram cleanup - we'll have to
1652                  * wait for the kgni GNI_POST_TERMINATED event to
1653                  * finish cleaning up */
1654                 rc = ESTALE;
1655                 kgnilnd_find_and_cancel_dgram(peer->gnp_net->gnn_dev, peer->gnp_nid);
1656                 GOTO(out, rc);
1657         }
1658         peer->gnp_connecting = GNILND_PEER_POSTED;
1659         write_unlock(&kgnilnd_data.kgn_peer_conn_lock);
1660         /* reaper thread will take care of any timeouts */
1661         CDEBUG(D_NET, "waiting for connect to finish to %s rc %d\n",
1662                libcfs_nid2str(peer->gnp_nid), rc);
1663
1664         RETURN(rc);
1665
1666 failed:
1667         CDEBUG(D_NET, "connect to %s failed: rc %d \n",
1668                libcfs_nid2str(peer->gnp_nid), rc);
1669 out:
1670         RETURN(rc);
1671 }
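
/* peer->gnp_connecting transitions driven above (informal sketch):
 *
 *     CONNECT -> POSTING -> POSTED        (normal path)
 *     NEEDS_DEATH -> KILL                 (peer died while we were posting)
 *
 * anything else racing in (e.g. del_peer) shows up as the peer going
 * inactive, and we bail out with a positive ESTALE */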
1672
1673 int
1674 kgnilnd_finish_connect(kgn_dgram_t *dgram)
1675 {
1676         kgn_conn_t        *conn = dgram->gndg_conn;
1677         lnet_nid_t         her_nid = dgram->gndg_conn_in.gncr_srcnid;
1678         kgn_peer_t        *new_peer, *peer = NULL;
1679         kgn_tx_t          *tx;
1680         kgn_tx_t          *txn;
1681         kgn_mbox_info_t   *mbox;
1682         int                rc;
1683         int                nstale;
1684
1685         /* try to find a peer that matches the nid we got in the connreq.
1686          * kgnilnd_unpack_connreq makes sure that conn_in.gncr_srcnid is
1687          * HER and conn_out.gncr_srcnid is ME for both active and WC dgrams */
1688
1689         /* assume this is a new peer - it keeps the locking cleaner even when it isn't */
1690         /* not holding kgn_net_rw_sem - we are already at the kgnilnd_dgram_mover level */
1691
1692         rc = kgnilnd_create_peer_safe(&new_peer, her_nid, NULL);
1693         if (rc != 0) {
1694                 CERROR("Can't create peer for %s\n", libcfs_nid2str(her_nid));
1695                 return rc;
1696         }
1697
1698         write_lock(&kgnilnd_data.kgn_peer_conn_lock);
1699
1700         /* this transfers ref from create_peer to the kgn_peer table */
1701         kgnilnd_add_peer_locked(her_nid, new_peer, &peer);
1702
1703         /* if we found an existing peer, is it really ready for a new conn? */
1704         if (peer != new_peer) {
1705                 /* if this was an active connect attempt but we can't find a peer waiting for it
1706                  * we will dump in the trash */
1707
1708                 if (peer->gnp_connecting == GNILND_PEER_IDLE && dgram->gndg_conn_out.gncr_dstnid != LNET_NID_ANY) {
1709                         CDEBUG(D_NET, "dropping completed connreq for %s peer 0x%p->%s\n",
1710                                libcfs_nid2str(her_nid), peer, libcfs_nid2str(peer->gnp_nid));
1711                         write_unlock(&kgnilnd_data.kgn_peer_conn_lock);
1712                         rc = ECANCELED;
1713                         GOTO(out, rc);
1714                 }
1715
1716                 /* check to see if we can catch a connecting peer before it is
1717                  * removed from the connd_peers list - if not, we need to
1718                  * let the connreqs race and be handled by kgnilnd_conn_isdup_locked() */
1719                 if (peer->gnp_connecting != GNILND_PEER_IDLE) {
1720                         spin_lock(&peer->gnp_net->gnn_dev->gnd_connd_lock);
1721                         if (!list_empty(&peer->gnp_connd_list)) {
1722                                 list_del_init(&peer->gnp_connd_list);
1723                                 /* drop connd ref */
1724                                 kgnilnd_peer_decref(peer);
1725                         }
1726                         spin_unlock(&peer->gnp_net->gnn_dev->gnd_connd_lock);
1727                         /* clear rc to make sure we don't have fake error */
1728                         rc = 0;
1729                 }
1730
1731                 /* no matter what, we are no longer waiting to connect this peer now */
1732                 peer->gnp_connecting = GNILND_PEER_IDLE;
1733
1734                 /* Refuse to duplicate an existing connection (both sides might try to
1735                  * connect at once).  NB we return success!  We _are_ connected so we
1736                  * _don't_ have any blocked txs to complete with failure. */
1737                 rc = kgnilnd_conn_isdup_locked(peer, conn);
1738                 if (rc != 0) {
1739                         write_unlock(&kgnilnd_data.kgn_peer_conn_lock);
1740                         CDEBUG(D_NET, "Not creating duplicate connection to %s: %d\n",
1741                               libcfs_nid2str(her_nid), rc);
1742                         rc = EALREADY;
1743                         GOTO(out, rc);
1744                 }
1745         }
1746
1747         if (peer->gnp_down == GNILND_RCA_NODE_DOWN) {
1748                 CNETERR("Received connection request from %s that RCA thinks is"
1749                         " down.\n", libcfs_nid2str(her_nid));
1750                 peer->gnp_down = GNILND_RCA_NODE_UP;
1751         }
1752
1753         nstale = kgnilnd_close_stale_conns_locked(peer, conn);
1754
1755         /* either way with peer (new or existing), we are ok with ref counts here as the
1756          * kgnilnd_add_peer_locked will use our ref on new_peer (from create_peer_safe) as the
1757          * ref for the peer table. */
1758
1759         /* at this point, the connection request is a winner */
1760
1761         /* mark 'DONE' to avoid cancel being called from release */
1762         dgram->gndg_state = GNILND_DGRAM_DONE;
1763
1764         /* initialise timestamps before reaper looks at them */
1765         conn->gnc_last_rx = conn->gnc_last_rx_cq = jiffies;
1766
1767         /* last_tx is initialized to jiffies - (keepalive*2) so that the reaper
1768          * thread will immediately send a NOOP during its call to
1769          * kgnilnd_check_conn_timeouts_locked
1770          */
1771         conn->gnc_last_tx = jiffies - (cfs_time_seconds(GNILND_TO2KA(conn->gnc_timeout)) * 2);
1772         conn->gnc_state = GNILND_CONN_ESTABLISHED;
1773
1774         /* save the dgram type used to establish this connection */
1775         conn->gnc_dgram_type = dgram->gndg_type;
1776
1777         /* refs are not transferred from dgram to tables, so increment to
1778          * take ownership */
1779         kgnilnd_conn_addref(conn);
1780         kgnilnd_peer_addref(peer);
1781         conn->gnc_peer = peer;
1782         list_add_tail(&conn->gnc_list, &peer->gnp_conns);
1783
1784         kgnilnd_conn_addref(conn);               /* +1 ref for conn table */
1785         list_add_tail(&conn->gnc_hashlist,
1786                       kgnilnd_cqid2connlist(conn->gnc_cqid));
1787         kgnilnd_data.kgn_conn_version++;
1788
1789         /* Don't send NOOP if fail_loc is set */
1791         if (!CFS_FAIL_CHECK(CFS_FAIL_GNI_ONLY_NOOP)) {
1792                 tx = kgnilnd_new_tx_msg(GNILND_MSG_NOOP, peer->gnp_net->gnn_ni->ni_nid);
1793                 if (tx == NULL) {
1794                         CNETERR("can't get TX to initiate NOOP to %s\n",
1795                                 libcfs_nid2str(peer->gnp_nid));
1796                 } else {
1797                         kgnilnd_queue_tx(conn, tx);
1798                 }
1799         }
1800
1801         /* Schedule all packets blocking for a connection */
1802         list_for_each_entry_safe(tx, txn, &peer->gnp_tx_queue, tx_list) {
1803                 /* lock held here is the peer_conn lock */
1804                 kgnilnd_tx_del_state_locked(tx, peer, NULL, GNILND_TX_ALLOCD);
1805                 kgnilnd_queue_tx(conn, tx);
1806         }
1807
1808         /* If this is an active connection lets mark its timestamp on the MBoX */
1809         if (dgram->gndg_conn_out.gncr_dstnid != LNET_NID_ANY) {
1810                 mbox = &conn->gnc_fma_blk->gnm_mbox_info[conn->gnc_mbox_id];
1811                 /* conn->gnc_last_rx was just set to jiffies, so it is valid here */
1812                 mbox->mbx_release_purg_active_dgram = conn->gnc_last_rx;
1813         }
1814
1815         /* Bug 765042: wake up scheduler for a race with finish_connect and
1816          * complete_conn_closed with a conn in purgatory.
1817          * Since we can't use CFS_RACE due to mutexes held in kgnilnd_process_conns,
1818          * we just check for the flag being set and then clear it */
1819         if (CFS_FAIL_CHECK(CFS_FAIL_GNI_FINISH_PURG)) {
1820                 cfs_fail_loc = 0x0;
1821                 /* get scheduler thread moving again */
1822                 kgnilnd_schedule_device(conn->gnc_device);
1823         }
1824
1825         CDEBUG(D_NET, "New conn 0x%p->%s dev %d\n",
1826                conn, libcfs_nid2str(her_nid), conn->gnc_device->gnd_id);
1827
1828         /* make sure we reset peer reconnect interval now that we have a good conn */
1829         kgnilnd_peer_alive(peer);
1830         peer->gnp_reconnect_interval = 0;
1831
1832         /* clear the unlink attribute - if we don't, kgnilnd_del_conn_or_peer
1833          * will wait on the atomic forever
1834          */
1835         if (peer->gnp_pending_unlink) {
1836                 peer->gnp_pending_unlink = 0;
1837                 kgnilnd_admin_decref(kgnilnd_data.kgn_npending_unlink);
1838                 CDEBUG(D_NET, "Clearing peer unlink %p\n",peer);
1839         }
1840
1841         /* add ref to make it hang around until after we drop the lock */
1842         kgnilnd_conn_addref(conn);
1843
1844         /* Once the peer_conn lock is dropped, the conn could actually move into
1845          * CLOSING->CLOSED->DONE in the scheduler thread, so hold the
1846          * lock until we are really done */
1847         write_unlock(&kgnilnd_data.kgn_peer_conn_lock);
1848
1849         /* Notify LNET that we now have a working connection to this peer.
1850          * This is a Cray extension to the "standard" LND behavior. */
1851         lnet_notify(peer->gnp_net->gnn_ni, peer->gnp_nid,
1852                     1, cfs_time_current());
1853
1854         /* drop our 'hold' ref */
1855         kgnilnd_conn_decref(conn);
1856
1857 out:
1858         RETURN(rc);
1859 }
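
/* Ref accounting sketch for the success path above: the conn picks up one
 * ref for the peer's conn list, one for the conn hash table, and a temporary
 * 'hold' ref dropped once the peer_conn lock is released; the peer gains one
 * ref for conn->gnc_peer */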
1860
1861 void
1862 kgnilnd_send_nak(kgn_device_t *dev, lnet_nid_t dst_nid, int error)
1863 {
1864         int              rc = 0;
1865         ENTRY;
1866
1867         LASSERTF(dst_nid != LNET_NID_ANY, "bad dst_nid %s\n", libcfs_nid2str(dst_nid));
1868
1869         CDEBUG(D_NET, "NAK to %s errno %d\n", libcfs_nid2str(dst_nid), error);
1870
1871         rc = kgnilnd_post_dgram(dev, dst_nid, GNILND_CONNREQ_NAK, error);
1872
1873         if (rc < 0) {
1874                 CDEBUG(D_NET, "NAK to %s failed: rc %d \n", libcfs_nid2str(dst_nid), rc);
1875         }
1876         EXIT;
1877 }
1878
1879 int
1880 kgnilnd_process_nak(kgn_dgram_t *dgram)
1881 {
1882         kgn_connreq_t     *connreq = &dgram->gndg_conn_in;
1883         lnet_nid_t         src_nid = connreq->gncr_srcnid;
1884         int                errno = connreq->gncr_nakdata.gnnd_errno;
1885         kgn_peer_t        *peer;
1886         int                rc = 0;
1887
1888         write_lock(&kgnilnd_data.kgn_peer_conn_lock);
1889
1890         peer = kgnilnd_find_peer_locked(src_nid);
1891         if (peer == NULL) {
1892                 /* we likely dropped him from bad data when we processed
1893                  * the original REQ */
1894                 write_unlock(&kgnilnd_data.kgn_peer_conn_lock);
1895                 return -EBADSLT;
1896         }
1897
1898         /* need to check peerstamp/connstamp against the ones we find
1899          * to make sure we don't close new (and good?) conns that we
1900          * formed after this connreq failed */
1901         if (peer->gnp_connecting == GNILND_PEER_IDLE) {
1902                 kgn_conn_t        conn;
1903
1904                 if (list_empty(&peer->gnp_conns)) {
1905                         /* assume we already processed the datagram and it
1906                          * barfed up on this side too */
1907                         CDEBUG(D_NET, "dropping NAK from %s; "
1908                                "peer %s is already not connected\n",
1909                                 libcfs_nid2str(connreq->gncr_srcnid),
1910                                 libcfs_nid2str(connreq->gncr_dstnid));
1911                         write_unlock(&kgnilnd_data.kgn_peer_conn_lock);
1912                         return 0;
1913                 }
1914
1915                 /* stub up a connection with the connreq XXX_stamps to allow
1916                  * us to use close_stale_conns_locked */
1917                 conn.gnc_peerstamp = connreq->gncr_peerstamp;
1918                 conn.gnc_my_connstamp = connreq->gncr_connstamp;
1919                 conn.gnc_peer_connstamp = connreq->gncr_connstamp;
1920                 conn.gnc_device = peer->gnp_net->gnn_dev;
1921
1922                 rc = kgnilnd_close_stale_conns_locked(peer, &conn);
1923
1924                 LCONSOLE_INFO("Received NAK from %s for %s errno %d; "
1925                         "closed %d connections\n",
1926                         libcfs_nid2str(connreq->gncr_srcnid),
1927                         libcfs_nid2str(connreq->gncr_dstnid), errno, rc);
1928         } else {
1929                 rc = 0;
1930                 spin_lock(&dgram->gndg_conn->gnc_device->gnd_connd_lock);
1931
1932                 if (list_empty(&peer->gnp_connd_list)) {
1933                         /* if peer isn't on waiting list, try to find one to nuke */
1934                         rc = kgnilnd_find_and_cancel_dgram(peer->gnp_net->gnn_dev,
1935                                                            peer->gnp_nid);
1936
1937                         if (rc) {
1938                                 LCONSOLE_INFO("Received NAK from %s for %s errno %d; "
1939                                         "canceled pending connect request\n",
1940                                         libcfs_nid2str(connreq->gncr_srcnid),
1941                                         libcfs_nid2str(connreq->gncr_dstnid), errno);
1942                         }
1943
1944                         /* if we can't find a waiting dgram, we just drop the NAK - the
1945                          * connect must have failed (we didn't find a conn above and
1946                          * connecting was cleared), so nothing to do besides drop */
1947                 } else {
1948                         /* peer is on list, meaning it is a new connect attempt from the one
1949                          * we started that generated the NAK - so just drop NAK */
1950
1951                         /* use negative to prevent error message */
1952                         rc = -EAGAIN;
1953                 }
1954                 spin_unlock(&dgram->gndg_conn->gnc_device->gnd_connd_lock);
1955         }
1956
1957         /* success! we found a peer and handled the NAK one way or another */
1958         write_unlock(&kgnilnd_data.kgn_peer_conn_lock);
1959
1960         return 0;
1961 }
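
/* To summarize the NAK handling above: an idle peer gets its stale conns
 * closed using stamps stubbed from the connreq, while a connecting peer
 * either has its pending dgram canceled or, if it is already on the connd
 * list for a fresh attempt, the NAK is simply dropped */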
1962
1963 int
1964 kgnilnd_process_connreq(kgn_dgram_t *dgram, int *needs_nak)
1965 {
1966         int                      rc;
1967
1968         rc = kgnilnd_unpack_connreq(dgram);
1969         if (rc < 0) {
1970                 if (rc != -EBADF) {
1971                         /* only NAK if we have good srcnid to use */
1972                         *needs_nak = 1;
1973                 }
1974                 goto connreq_out;
1975         }
1976
1977         switch (dgram->gndg_conn_in.gncr_type) {
1978         case GNILND_CONNREQ_REQ:
1979                 /* wire up peer & conn, send queued TX */
1980                 rc = kgnilnd_finish_connect(dgram);
1981
1982                 /* don't nak when the nid is hosed */
1983                 if (rc < 0) {
1984                         *needs_nak = 1;
1985                 }
1986
1987                 break;
1988         case GNILND_CONNREQ_NAK:
1989                 rc = kgnilnd_process_nak(dgram);
1990                 /* return early to prevent reconnect bump */
1991                 return rc;
1992         default:
1993                 CERROR("unexpected connreq type %s (%d) from %s\n",
1994                         kgnilnd_connreq_type2str(&dgram->gndg_conn_in),
1995                         dgram->gndg_conn_in.gncr_type,
1996                         libcfs_nid2str(dgram->gndg_conn_in.gncr_srcnid));
1997                 rc = -EINVAL;
1998                 *needs_nak = 1;
1999                 break;
2000         }
2001
2002 connreq_out:
2003         RETURN(rc);
2004 }
2005
2006 int
2007 kgnilnd_probe_and_process_dgram(kgn_device_t *dev)
2008 {
2009         int                      rc;
2010         int                      needs_nak = 0;
2011         lnet_nid_t               nak_dstnid = LNET_NID_ANY;
2012         lnet_nid_t               orig_dstnid;
2013         kgn_dgram_t             *dgram = NULL;
2014         kgn_peer_t              *peer;
2015         ENTRY;
2016
2017         if (CFS_FAIL_CHECK(CFS_FAIL_GNI_PAUSE_DGRAM_COMP)) {
2018                 rc = 0;
2019         } else {
2020                 rc = kgnilnd_probe_for_dgram(dev, &dgram);
2021         }
2022
2023         if (rc == 0) {
2024                 RETURN(0);
2025         } else if (rc < 0) {
2026                 GOTO(inform_peer, rc);
2027         } else {
2028                 /* rc > 0 means it did something, reset for this func */
2029                 rc = 0;
2030         }
2031
2032         switch (dgram->gndg_type) {
2033         case GNILND_DGRAM_WC_REQ:
2034         case GNILND_DGRAM_REQ:
2035                 rc = kgnilnd_process_connreq(dgram, &needs_nak);
2036                 break;
2037         case GNILND_DGRAM_NAK:
2038                 CDEBUG(D_NETTRACE, "NAK to %s done\n",
2039                         libcfs_nid2str(dgram->gndg_conn_out.gncr_dstnid));
2040                 break;
2041         default:
2042                 CERROR("unknown datagram type %s (%d)\n",
2043                        kgnilnd_dgram_type2str(dgram), dgram->gndg_type);
2044                 break;
2045         }
2046
2047         /* stash data to use after releasing current datagram */
2048         /* don't stash net - we are operating on a net already,
2049          * so holding kgn_net_rw_sem is sufficient */
2050
2051         nak_dstnid = dgram->gndg_conn_in.gncr_srcnid;
2052
2053 inform_peer:
2054         LASSERTF(dgram != NULL, "dgram 0x%p rc %d needs_nak %d\n", dgram, rc, needs_nak);
2055
2056         orig_dstnid = dgram->gndg_conn_out.gncr_dstnid;
2057
2058         kgnilnd_release_dgram(dev, dgram);
2059
2060         CDEBUG(D_NET, "cleaning up dgram to %s, rc %d\n",
2061                libcfs_nid2str(orig_dstnid), rc);
2062
2063         /* if this was a WC_REQ that matched an existing peer, it'll get marked done
2064          * in kgnilnd_finish_connect - if errors are from before we get to there,
2065          * we just drop as it is a WC_REQ - the peer CAN'T be waiting for it */
2066         if ((orig_dstnid != LNET_NID_ANY) && (rc < 0)) {
2067                 /* if we have a negative rc, we want to find a peer to inform about
2068                  * the bad connection attempt. Sorry buddy, better luck next time! */
2069
2070                 write_lock(&kgnilnd_data.kgn_peer_conn_lock);
2071                 peer = kgnilnd_find_peer_locked(orig_dstnid);
2072
2073                 if (peer != NULL) {
2074                         /* add ref to make sure he stays around past the possible unlink
2075                          * so we can tell LNet about him */
2076                         kgnilnd_peer_addref(peer);
2077
2078                         /* if he still cares about the outstanding connect */
2079                         if (peer->gnp_connecting >= GNILND_PEER_CONNECT) {
2080                                 /* check if he is on the connd list and remove.. */
2081                                 spin_lock(&peer->gnp_net->gnn_dev->gnd_connd_lock);
2082                                 if (!list_empty(&peer->gnp_connd_list)) {
2083                                         list_del_init(&peer->gnp_connd_list);
2084                                         /* drop connd ref */
2085                                         kgnilnd_peer_decref(peer);
2086                                 }
2087                                 spin_unlock(&peer->gnp_net->gnn_dev->gnd_connd_lock);
2088
2089                                 /* clear gnp_connecting so we don't have a non-connecting peer
2090                                  * on gnd_connd_list */
2091                                 peer->gnp_connecting = GNILND_PEER_IDLE;
2092
2093                                 set_mb(peer->gnp_last_dgram_errno, rc);
2094
2095                                 kgnilnd_peer_increase_reconnect_locked(peer);
2096                         }
2097                 }
2098                 write_unlock(&kgnilnd_data.kgn_peer_conn_lock);
2099
2100                 /* now that we are outside the lock, tell Mommy */
2101                 if (peer != NULL) {
2102                         kgnilnd_peer_notify(peer, rc);
2103                         kgnilnd_peer_decref(peer);
2104                 }
2105         }
2106
2107         if (needs_nak) {
2108                 kgnilnd_send_nak(dev, nak_dstnid, rc);
2109         }
2110
2111         RETURN(1);
2112 }
2113
2114 void
2115 kgnilnd_reaper_dgram_check(kgn_device_t *dev)
2116 {
2117         kgn_dgram_t    *dgram, *tmp;
2118         int             i;
2119
2120         spin_lock(&dev->gnd_dgram_lock);
2121
2122         for (i = 0; i < (*kgnilnd_tunables.kgn_peer_hash_size - 1); i++) {
2123                 list_for_each_entry_safe(dgram, tmp, &dev->gnd_dgrams[i], gndg_list) {
2124                         unsigned long            now = jiffies;
2125                         unsigned long            timeout;
2126
2127                         /* don't timeout stuff if the network is mucked or shutting down */
2128                         if (kgnilnd_check_hw_quiesce()) {
2129                                 break;
2130                         }
2131
2132                         if ((dgram->gndg_state != GNILND_DGRAM_POSTED) ||
2133                             (dgram->gndg_type == GNILND_DGRAM_WC_REQ)) {
2134                                 continue;
2135                         }
2136                         CDEBUG(D_NETTRACE, "checking dgram 0x%p type %s "
2137                                 "state %s conn 0x%p to %s age %lus\n",
2138                                 dgram, kgnilnd_dgram_type2str(dgram),
2139                                 kgnilnd_dgram_state2str(dgram), dgram->gndg_conn,
2140                                 libcfs_nid2str(dgram->gndg_conn_out.gncr_dstnid),
2141                                 cfs_duration_sec(now - dgram->gndg_post_time));
2142
2143                         timeout = cfs_time_seconds(*kgnilnd_tunables.kgn_timeout);
2144
2145                         if (time_before(now, (dgram->gndg_post_time + timeout)))
2146                                 continue;
2147
2148                         CNETERR("%s datagram to %s timed out @ %lus dgram "
2149                                 "0x%p state %s conn 0x%p\n",
2150                                 kgnilnd_dgram_type2str(dgram),
2151                                 libcfs_nid2str(dgram->gndg_conn_out.gncr_dstnid),
2152                                 cfs_duration_sec(now - dgram->gndg_post_time),
2153                                 dgram, kgnilnd_dgram_state2str(dgram),
2154                                 dgram->gndg_conn);
2155
2156                         kgnilnd_cancel_dgram_locked(dgram);
2157                 }
2158         }
2159         spin_unlock(&dev->gnd_dgram_lock);
2160 }
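
/* In short: a non-WC dgram still in POSTED state after kgn_timeout seconds
 * is canceled; WC dgrams and dgrams already PROCESSING or DONE are left to
 * the normal completion paths above */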
2161
2162
2163 /* use a thread for the possibly long-blocking wait_by_id to prevent
2164  * stalling the global workqueues */
2165 int
2166 kgnilnd_dgram_waitq(void *arg)
2167 {
2168         kgn_device_t     *dev = (kgn_device_t *) arg;
2169         gni_return_t      grc;
2170         __u64             readyid;
2171         DEFINE_WAIT(mover_done);
2172
2173         cfs_block_allsigs();
2174
2175         /* all gnilnd threads need to run fairly urgently */
2176         set_user_nice(current, *kgnilnd_tunables.kgn_nice);
2177
2178         /* we don't shut down until the device shuts down ... */
2179         while (!kgnilnd_data.kgn_shutdown) {
2180                 /* to quiesce or to not quiesce, that is the question */
2181                 if (unlikely(kgnilnd_data.kgn_quiesce_trigger)) {
2182                         KGNILND_SPIN_QUIESCE;
2183                 }
2184
2185                 while (CFS_FAIL_TIMEOUT(CFS_FAIL_GNI_PAUSE_DGRAM_COMP, 1)) {}
2186
2187                 /* check once a second */
2188                 grc = kgnilnd_postdata_probe_wait_by_id(dev->gnd_handle,
2189                                                        1000, &readyid);
2190
2191                 if (grc == GNI_RC_SUCCESS) {
2192                         CDEBUG(D_INFO, "waking up dgram mover thread\n");
2193                         kgnilnd_schedule_dgram(dev);
2194
2195                         /* wait for dgram thread to ping us before spinning again */
2196                         prepare_to_wait(&dev->gnd_dgping_waitq, &mover_done,
2197                                         TASK_INTERRUPTIBLE);
2198
2199                         /* don't sleep if we need to quiesce */
2200                         if (likely(!kgnilnd_data.kgn_quiesce_trigger)) {
2201                                 schedule();
2202                         }
2203                         finish_wait(&dev->gnd_dgping_waitq, &mover_done);
2204                 }
2205         }
2206
2207         kgnilnd_thread_fini();
2208         return 0;
2209 }
2210
2211 int
2212 kgnilnd_start_outbound_dgrams(kgn_device_t *dev, unsigned long deadline)
2213 {
2214         int                      did_something = 0, rc;
2215         kgn_peer_t              *peer = NULL;
2216
2217         spin_lock(&dev->gnd_connd_lock);
2218
2219         /* Active connect - we added this in kgnilnd_launch_tx */
2220         while (!list_empty(&dev->gnd_connd_peers) && time_before(jiffies, deadline)) {
2221                 peer = list_first_entry(&dev->gnd_connd_peers,
2222                                         kgn_peer_t, gnp_connd_list);
2223
2224                 /* ref for connd removed in if/else below */
2225                 list_del_init(&peer->gnp_connd_list);
2226
2227                 /* gnp_connecting and membership on gnd_connd_peers should be
2228                  * done coherently to avoid double adding, etc */
2229                 /* don't need kgnilnd_data.kgn_peer_conn_lock here as that is only needed
2230                  * to get the peer to gnp_connecting in the first place. We just need to
2231                  * rely on gnd_connd_lock to serialize someone pulling him from the list
2232                  * BEFORE clearing gnp_connecting */
2233                 LASSERTF(peer->gnp_connecting != GNILND_PEER_IDLE, "peer 0x%p->%s not connecting\n",
2234                          peer, libcfs_nid2str(peer->gnp_nid));
2235
2236                 spin_unlock(&dev->gnd_connd_lock);
2237
2238                 CDEBUG(D_NET, "processing connect to %s\n",
2239                        libcfs_nid2str(peer->gnp_nid));
2240
2241                 did_something += 1;
2242                 rc = kgnilnd_start_connect(peer);
2243
2244                 if (likely(rc >= 0)) {
2245                         /* 0 on success, positive on 'just drop peer' errors */
2246                         kgnilnd_peer_decref(peer);
2247                 } else if (rc == -ENOMEM) {
2248                         /* if we are out of wildcards, add back to
2249                          * connd_list - then break out and we'll try later
2250                          * if other errors, we'll bail & cancel pending tx */
2251                         write_lock(&kgnilnd_data.kgn_peer_conn_lock);
2252                         if (peer->gnp_connecting == GNILND_PEER_POSTING) {
2253                                 peer->gnp_connecting = GNILND_PEER_CONNECT;
2254                                 spin_lock(&dev->gnd_connd_lock);
2255                                 list_add_tail(&peer->gnp_connd_list,
2256                                               &dev->gnd_connd_peers);
2257                         } else {
2258                                 /* connecting changed while we were posting */
2259
2260                                 LASSERTF(peer->gnp_connecting == GNILND_PEER_NEEDS_DEATH, "Peer is in invalid"
2261                                         " state 0x%p->%s, connecting %d\n",
2262                                         peer, libcfs_nid2str(peer->gnp_nid), peer->gnp_connecting);
2263                                 peer->gnp_connecting = GNILND_PEER_KILL;
2264                                 spin_lock(&dev->gnd_connd_lock);
2265                                 /* drop the peer ref from the connd list */
2266                                 kgnilnd_peer_decref(peer);
2267                                 /* let the system handle itself */
2268                         }
2269                         write_unlock(&kgnilnd_data.kgn_peer_conn_lock);
2270                         /* the datagrams are a global pool,
2271                          * so break out of trying and hope some free
2272                          * up soon */
2273                         did_something -= 1;
2274                         break;
2275                 } else {
2276                         /* something bad happened, you lose */
2277                         CNETERR("could not start connecting to %s "
2278                                 "rc %d: Will retry until TX timeout\n",
2279                                libcfs_nid2str(peer->gnp_nid), rc);
2280                         /* It didn't post, so just set connecting back to IDLE now.
2281                          * The reaper will reattempt the connection if it needs to.
2282                          * If the peer needs death, set it so the reaper will clean up.
2283                          */
2284                         write_lock(&kgnilnd_data.kgn_peer_conn_lock);
2285                         if (peer->gnp_connecting == GNILND_PEER_POSTING) {
2286                                 peer->gnp_connecting = GNILND_PEER_IDLE;
2287                                 kgnilnd_peer_increase_reconnect_locked(peer);
2288                         } else {
2289                                 LASSERTF(peer->gnp_connecting == GNILND_PEER_NEEDS_DEATH, "Peer is in invalid"
2290                                         " state 0x%p->%s, connecting %d\n",
2291                                         peer, libcfs_nid2str(peer->gnp_nid), peer->gnp_connecting);
2292                                 peer->gnp_connecting = GNILND_PEER_KILL;
2293                         }
2294                         write_unlock(&kgnilnd_data.kgn_peer_conn_lock);
2295
2296                         /* hold onto ref until we are really done - if it was
2297                          * unlinked this could result in a destroy */
2298                         kgnilnd_peer_decref(peer);
2299                 }
2300                 spin_lock(&dev->gnd_connd_lock);
2301         }
2302
2303         spin_unlock(&dev->gnd_connd_lock);
2304         RETURN(did_something);
2305 }
2306
2307 int
2308 kgnilnd_repost_wc_dgrams(kgn_device_t *dev)
2309 {
2310         int did_something = 0, to_repost, i;
2311         ENTRY;
2312         to_repost = atomic_read(&dev->gnd_nwcdgrams);
2313
2314         for (i = 0; i < to_repost; ++i) {
2315                 int     rerc;
2316                 rerc = kgnilnd_post_dgram(dev, LNET_NID_ANY, GNILND_CONNREQ_REQ, 0);
2317                 if (rerc == 0) {
2318                         kgnilnd_admin_decref(dev->gnd_nwcdgrams);
2319                         did_something += 1;
2320                 } else {
2321                         CDEBUG(D_NETERROR, "error %d: dev %d could not post wildcard datagram\n",
2322                                 rerc, dev->gnd_id);
2323                         break;
2324                 }
2325         }
2326
2327         RETURN(did_something);
2328 }
2329
2330 static void
2331 kgnilnd_dgram_poke_with_stick(unsigned long arg)
2332 {
2333         int             dev_id = (int)arg;
2334         kgn_device_t    *dev = &kgnilnd_data.kgn_devices[dev_id];
2335
2336         wake_up(&dev->gnd_dgram_waitq);
2337 }
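
/* kgnilnd_dgram_mover below arms a single-shot timer with this poke function
 * before it sleeps, so that even with no dgram traffic it wakes in time for
 * the next purge check (see the setup_timer/mod_timer pair and the matching
 * del_singleshot_timer_sync on the way out of the sleep) */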
2338
2339 /* use single thread for dgrams - should be sufficient for performance */
2340 int
2341 kgnilnd_dgram_mover(void *arg)
2342 {
2343         kgn_device_t            *dev = (kgn_device_t *)arg;
2344         int                      rc, did_something;
2345         unsigned long            next_purge_check = jiffies - 1;
2346         long                     timeout;
2347         struct timer_list        timer;
2348         unsigned long            deadline = 0;
2349         DEFINE_WAIT(wait);
2350
2351         cfs_block_allsigs();
2352         /* all gnilnd threads need to run fairly urgently */
2353         set_user_nice(current, *kgnilnd_tunables.kgn_nice);
2354
2355         /* we are ok not locking for these variables as the dgram waitq threads
2356          * will block both due to tying up net (kgn_shutdown) and the completion
2357          * event for the dgram_waitq (kgn_quiesce_trigger) */
2358         deadline = jiffies + cfs_time_seconds(*kgnilnd_tunables.kgn_dgram_timeout);
2359         while (!kgnilnd_data.kgn_shutdown) {
2360                 /* Safe: kgn_shutdown only set when quiescent */
2361
2362                 /* race with stack reset - we want to hold off seeing any new incoming dgrams
2363                  * so we can force a dirty WC dgram for Bug 762072 - put right before
2364                  * quiesce check so that it'll go right into that and not do any
2365                  * dgram mucking */
2366                 CFS_RACE(CFS_FAIL_GNI_WC_DGRAM_FREE);
2367
2368                 /* to quiesce or to not quiesce, that is the question */
2369                 if (unlikely(kgnilnd_data.kgn_quiesce_trigger)) {
2370                         KGNILND_SPIN_QUIESCE;
2371                 }
2372                 did_something = 0;
2373
2374                 CFS_RACE(CFS_FAIL_GNI_QUIESCE_RACE);
2375
2376                 /* process any newly completed dgrams */
2377                 down_read(&kgnilnd_data.kgn_net_rw_sem);
2378
2379                 rc = kgnilnd_probe_and_process_dgram(dev);
2380                 if (rc > 0) {
2381                         did_something += rc;
2382                 }
2383
2384                 up_read(&kgnilnd_data.kgn_net_rw_sem);
2385
2386                 CFS_FAIL_TIMEOUT(CFS_FAIL_GNI_DGRAM_DEADLINE,
2387                         (*kgnilnd_tunables.kgn_dgram_timeout + 1));
2388                 /* start new outbound dgrams */
2389                 did_something += kgnilnd_start_outbound_dgrams(dev, deadline);
2390
2391                 /* find dead dgrams */
2392                 if (time_after_eq(jiffies, next_purge_check)) {
2393                         /* these don't need to be checked that often */
2394                         kgnilnd_reaper_dgram_check(dev);
2395
2396                         next_purge_check = (long) jiffies +
2397                                       cfs_time_seconds(kgnilnd_data.kgn_new_min_timeout / 4);
2398                 }
2399
2400                 did_something += kgnilnd_repost_wc_dgrams(dev);
2401
2402                 /* careful with the jiffy wrap... */
2403                 timeout = (long)(next_purge_check - jiffies);
2404
2405                 CDEBUG(D_INFO, "did %d timeout %lu next %lu jiffies %lu\n",
2406                        did_something, timeout, next_purge_check, jiffies);
2407
2408                 if ((did_something || timeout <= 0) && time_before(jiffies, deadline)) {
2409                         did_something = 0;
2410                         continue;
2411                 }
2412
2413                 prepare_to_wait(&dev->gnd_dgram_waitq, &wait, TASK_INTERRUPTIBLE);
2414
2415                 setup_timer(&timer, kgnilnd_dgram_poke_with_stick, dev->gnd_id);
2416                 mod_timer(&timer, (long) jiffies + timeout);
2417
2418                 /* last second chance for others to poke us */
2419                 did_something += xchg(&dev->gnd_dgram_ready, GNILND_DGRAM_IDLE);
2420
2421                 /* check flag variables before committing to sleep, even if we
2422                  * did something; if we are past the deadline, call schedule */
2423                 if ((!did_something || time_after(jiffies, deadline)) &&
2424                     !kgnilnd_data.kgn_shutdown &&
2425                     !kgnilnd_data.kgn_quiesce_trigger) {
2426                         CDEBUG(D_INFO, "schedule timeout %ld (%lu sec)\n",
2427                                timeout, cfs_duration_sec(timeout));
2428                         wake_up_all(&dev->gnd_dgping_waitq);
2429                         schedule();
2430                         CDEBUG(D_INFO, "awake after schedule\n");
2431                         deadline = jiffies + cfs_time_seconds(*kgnilnd_tunables.kgn_dgram_timeout);
2432                 }
2433
2434                 del_singleshot_timer_sync(&timer);
2435                 finish_wait(&dev->gnd_dgram_waitq, &wait);
2436         }
2437
2438         kgnilnd_thread_fini();
2439         return 0;
2440 }
2441