a0d3530e66fab08bc63dee9c6692926cd09f99ff
[fs/lustre-release.git] / lnet / klnds / gmlnd / gmlnd_comm.c
1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2  * vim:expandtab:shiftwidth=8:tabstop=8:
3  *
4  *  Copyright (c) 2003 Los Alamos National Laboratory (LANL)
5  *
6  *   This file is part of Lustre, http://www.lustre.org/
7  *
8  *   Lustre is free software; you can redistribute it and/or
9  *   modify it under the terms of version 2 of the GNU General Public
10  *   License as published by the Free Software Foundation.
11  *
12  *   Lustre is distributed in the hope that it will be useful,
13  *   but WITHOUT ANY WARRANTY; without even the implied warranty of
14  *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
15  *   GNU General Public License for more details.
16  *
17  *   You should have received a copy of the GNU General Public License
18  *   along with Lustre; if not, write to the Free Software
19  *   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
20  */
21
22 /*
23  *      This file contains all gmnal send and receive functions
24  */
25
26 #include "gmnal.h"
27
28 /*
29  *      The caretaker thread.
30  *      This is the main thread of execution for the NAL side.
31  *      It waits in gm_blocking_receive and gets woken up when the
32  *      Myrinet adaptor gets an interrupt.
33  *      It hands receive operations off to the receive thread and
34  *      looks after gm_callbacks etc. inline.
35  */
36 int
37 gmnal_ct_thread(void *arg)
38 {
39         gmnal_data_t            *nal_data;
40         gm_recv_event_t         *rxevent = NULL;
41         gm_recv_t               *recv = NULL;
42
43         if (!arg) {
44                 CDEBUG(D_TRACE, "NO nal_data. Exiting\n");
45                 return(-1);
46         }
47
48         nal_data = (gmnal_data_t*)arg;
49         CDEBUG(D_TRACE, "nal_data is [%p]\n", arg);
50
51         daemonize();
52
53         nal_data->ctthread_flag = GMNAL_CTTHREAD_STARTED;
54
55         GMNAL_GM_LOCK(nal_data);
56         while(nal_data->ctthread_flag == GMNAL_CTTHREAD_STARTED) {
57                 CDEBUG(D_NET, "waiting\n");
58                 rxevent = gm_blocking_receive_no_spin(nal_data->gm_port);
59                 if (nal_data->ctthread_flag == GMNAL_THREAD_STOP) {
60                         CDEBUG(D_INFO, "time to exit\n");
61                         break;
62                 }
63                 CDEBUG(D_INFO, "got [%s]\n", gmnal_rxevent(rxevent));
64                 switch (GM_RECV_EVENT_TYPE(rxevent)) {
65
66                         case(GM_RECV_EVENT):
67                                 CDEBUG(D_NET, "CTTHREAD:: GM_RECV_EVENT\n");
68                                 recv = (gm_recv_t*)&rxevent->recv;
69                                 GMNAL_GM_UNLOCK(nal_data);
70                                 gmnal_add_rxtwe(nal_data, recv);
71                                 GMNAL_GM_LOCK(nal_data);
72                                 CDEBUG(D_NET, "CTTHREAD:: Added event to Q\n");
73                         break;
74                         case(_GM_SLEEP_EVENT):
75                                 /*
76                                  *      Blocking receive above just returns
77                                  *      immediately with _GM_SLEEP_EVENT
78                                  *      Don't know what this is
79                                  */
80                                 CDEBUG(D_NET, "Sleeping in gm_unknown\n");
81                                 GMNAL_GM_UNLOCK(nal_data);
82                                 gm_unknown(nal_data->gm_port, rxevent);
83                                 GMNAL_GM_LOCK(nal_data);
84                                 CDEBUG(D_INFO, "Awake from gm_unknown\n");
85                                 break;
86                                 
87                         default:
88                                 /*
89                                  *      Don't know what this is
90                                  *      gm_unknown will make sense of it
91                                  *      Should be able to do something with
92                                  *      FAST_RECV_EVENTS here.
93                                  */
94                                 CDEBUG(D_NET, "Passing event to gm_unknown\n");
95                                 GMNAL_GM_UNLOCK(nal_data);
96                                 gm_unknown(nal_data->gm_port, rxevent);
97                                 GMNAL_GM_LOCK(nal_data);
98                                 CDEBUG(D_INFO, "Processed unknown event\n");
99                 }
100         }
101         GMNAL_GM_UNLOCK(nal_data);
102         nal_data->ctthread_flag = GMNAL_THREAD_RESET;
103         CDEBUG(D_INFO, "thread nal_data [%p] is exiting\n", nal_data);
104         return(GMNAL_STATUS_OK);
105 }
106
107
108 /*
109  *      process a receive event
110  */
111 int gmnal_rx_thread(void *arg)
112 {
113         gmnal_data_t            *nal_data;
114         void                    *buffer;
115         gmnal_rxtwe_t           *we = NULL;
116
117         if (!arg) {
118                 CDEBUG(D_TRACE, "NO nal_data. Exiting\n");
119                 return(-1);
120         }
121
122         nal_data = (gmnal_data_t*)arg;
123         CDEBUG(D_TRACE, "nal_data is [%p]\n", arg);
124
125         daemonize();
126         /*
127          *      set 1 bit for each thread started
128          *      doesn't matter which bit
129          */
130         spin_lock(&nal_data->rxthread_flag_lock);
131         if (nal_data->rxthread_flag)
132                 nal_data->rxthread_flag=nal_data->rxthread_flag*2 + 1;
133         else
134                 nal_data->rxthread_flag = 1;
135         CDEBUG(D_INFO, "rxthread flag is [%ld]\n", nal_data->rxthread_flag);
136         spin_unlock(&nal_data->rxthread_flag_lock);
137
138         while(nal_data->rxthread_stop_flag != GMNAL_THREAD_STOP) {
139                 CDEBUG(D_NET, "RXTHREAD:: Receive thread waiting\n");
140                 we = gmnal_get_rxtwe(nal_data);
141                 if (!we) {
142                         CDEBUG(D_INFO, "Receive thread time to exit\n");
143                         break;
144                 }
145
146                 buffer = we->buffer;
147                 switch(((gmnal_msghdr_t*)buffer)->type) {
148                 case(GMNAL_SMALL_MESSAGE):
149                         gmnal_pre_receive(nal_data, we, 
150                                            GMNAL_SMALL_MESSAGE);
151                 break;  
152                 case(GMNAL_LARGE_MESSAGE_INIT):
153                         gmnal_pre_receive(nal_data, we, 
154                                            GMNAL_LARGE_MESSAGE_INIT);
155                 break;  
156                 case(GMNAL_LARGE_MESSAGE_ACK):
157                         gmnal_pre_receive(nal_data, we, 
158                                            GMNAL_LARGE_MESSAGE_ACK);
159                 break;  
160                 default:
161                         CDEBUG(D_ERROR, "Unsupported message type\n");
162                         gmnal_rx_bad(nal_data, we, NULL);
163                 }
164                 PORTAL_FREE(we, sizeof(gmnal_rxtwe_t));
165         }
166
167         spin_lock(&nal_data->rxthread_flag_lock);
168         nal_data->rxthread_flag/=2;
169         CDEBUG(D_INFO, "rxthread flag is [%ld]\n", nal_data->rxthread_flag);
170         spin_unlock(&nal_data->rxthread_flag_lock);
171         CDEBUG(D_INFO, "thread nal_data [%p] is exiting\n", nal_data);
172         return(GMNAL_STATUS_OK);
173 }
174
175
176
177 /*
178  *      Start processing a small message receive
179  *      Get here from gmnal_rx_thread
180  *      Hand off to lib_parse, which calls cb_recv,
181  *      which hands back to gmnal_small_rx
182  *      Deal with all endian stuff here.
183  */
184 int
185 gmnal_pre_receive(gmnal_data_t *nal_data, gmnal_rxtwe_t *we, int gmnal_type)
186 {
187         gmnal_srxd_t    *srxd = NULL;
188         void            *buffer = NULL;
189         unsigned int snode, sport, type, length;
190         gmnal_msghdr_t  *gmnal_msghdr;
191         ptl_hdr_t       *portals_hdr;
192
193         CDEBUG(D_INFO, "nal_data [%p], we[%p] type [%d]\n", 
194                nal_data, we, gmnal_type);
195
196         buffer = we->buffer;
197         snode = we->snode;
198         sport = we->sport;
199         type = we->type;
201         length = we->length;
202
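        /*
         *      The gmnal header sits at the start of the receive buffer
         *      and is followed immediately by the portals header.
         */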
203         gmnal_msghdr = (gmnal_msghdr_t*)buffer;
204         portals_hdr = (ptl_hdr_t*)(buffer+GMNAL_MSGHDR_SIZE);
205
206         CDEBUG(D_INFO, "rx_event:: Sender node [%d], Sender Port [%d], "
207                "type [%d], length [%d], buffer [%p]\n",
208                snode, sport, type, length, buffer);
209         CDEBUG(D_INFO, "gmnal_msghdr:: Sender node [%u], magic [%d], "
210                "gmnal_type [%d]\n", gmnal_msghdr->sender_node_id,
211                gmnal_msghdr->magic, gmnal_msghdr->type);
212         CDEBUG(D_INFO, "portals_hdr:: Sender node ["LPD64"], "
213                "dest_node ["LPD64"]\n", portals_hdr->src_nid,
214                portals_hdr->dest_nid);
215
216         
217         /*
218          *      Get a receive descriptor for this message
219          */
220         srxd = gmnal_rxbuffer_to_srxd(nal_data, buffer);
221         CDEBUG(D_INFO, "Back from gmnal_rxbuffer_to_srxd\n");
222         if (!srxd) {
223                 CDEBUG(D_ERROR, "Failed to get receive descriptor\n");
224                 lib_parse(nal_data->nal_cb, portals_hdr, srxd);
225                 return(GMNAL_STATUS_FAIL);
226         }
227         srxd->nal_data = nal_data;
228
229         /*
230          *      no need to bother portals library with this
231          */
232         if (gmnal_type == GMNAL_LARGE_MESSAGE_ACK) {
233                 gmnal_large_tx_ack_received(nal_data, srxd);
234                 return(GMNAL_STATUS_OK);
235         }
236
237         srxd->type = gmnal_type;
238         srxd->nsiov = gmnal_msghdr->niov;
239         srxd->gm_source_node = gmnal_msghdr->sender_node_id;
240         
241         CDEBUG(D_PORTALS, "Calling lib_parse buffer is [%p]\n", 
242                buffer+GMNAL_MSGHDR_SIZE);
243         /*
244          *      control passes to lib, which calls cb_recv 
245          *      cb_recv is responsible for returning the buffer 
246          *      for future receive
247          */
248         lib_parse(nal_data->nal_cb, portals_hdr, srxd);
249
250         return(GMNAL_STATUS_OK);
251 }
252
253
254
255 /*
256  *      After a receive has been processed, 
257  *      hang out the receive buffer again.
258  *      This implicitly returns a receive token.
259  */
260 int
261 gmnal_rx_requeue_buffer(gmnal_data_t *nal_data, gmnal_srxd_t *srxd)
262 {
263         CDEBUG(D_TRACE, "gmnal_rx_requeue_buffer\n");
264
265         CDEBUG(D_NET, "requeueing srxd[%p] nal_data[%p]\n", srxd, nal_data);
266
267         GMNAL_GM_LOCK(nal_data);
268         gm_provide_receive_buffer_with_tag(nal_data->gm_port, srxd->buffer,
269                                         srxd->gmsize, GM_LOW_PRIORITY, 0 );
270         GMNAL_GM_UNLOCK(nal_data);
271
272         return(GMNAL_STATUS_OK);
273 }
274
275
276 /*
277  *      Handle a bad message
278  *      A bad message is one we don't expect or can't interpret
279  */
280 int
281 gmnal_rx_bad(gmnal_data_t *nal_data, gmnal_rxtwe_t *we, gmnal_srxd_t *srxd)
282 {
283         CDEBUG(D_TRACE, "Can't handle message\n");
284
285         if (!srxd)
286                 srxd = gmnal_rxbuffer_to_srxd(nal_data, 
287                                                we->buffer);
288         if (srxd) {
289                 gmnal_rx_requeue_buffer(nal_data, srxd);
290         } else {
291                 CDEBUG(D_ERROR, "Can't find a descriptor for this buffer\n");
292                 /*
293                  *      get rid of it ?
294                  */
295                 return(GMNAL_STATUS_FAIL);
296         }
297
298         return(GMNAL_STATUS_OK);
299 }
300
301
302
303 /*
304  *      Process a small message receive.
305  *      Get here from gmnal_receive_thread, gmnal_pre_receive
306  *      lib_parse, cb_recv
307  *      Put data from prewired receive buffer into user's buffer(s)
308  *      Hang out the receive buffer again for another receive
309  *      Call lib_finalize
310  */
311 int
312 gmnal_small_rx(nal_cb_t *nal_cb, void *private, lib_msg_t *cookie, 
313                 unsigned int niov, struct iovec *iov, size_t mlen, size_t rlen)
314 {
315         gmnal_srxd_t    *srxd = NULL;
316         void    *buffer = NULL;
317         gmnal_data_t    *nal_data = (gmnal_data_t*)nal_cb->nal_data;
318
319
320         CDEBUG(D_TRACE, "niov [%d] mlen["LPSZ"]\n", niov, mlen);
321
322         if (!private) {
323                 CDEBUG(D_ERROR, "gmnal_small_rx no context\n");
324                 return(PTL_FAIL);
325         }
326
327         srxd = (gmnal_srxd_t*)private;
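        /*
         *      Skip over the gmnal and portals headers to reach the payload
         */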
328         buffer = srxd->buffer;
329         buffer += sizeof(gmnal_msghdr_t);
330         buffer += sizeof(ptl_hdr_t);
331
332         while(niov--) {
333                 CDEBUG(D_INFO, "processing [%p] len ["LPSZ"]\n", iov, 
334                        iov->iov_len);
335                 gm_bcopy(buffer, iov->iov_base, iov->iov_len);                  
336                 buffer += iov->iov_len;
337                 iov++;
338         }
339
340
341         /*
342          *      let portals library know receive is complete
343          */
344         CDEBUG(D_PORTALS, "calling lib_finalize\n");
345         lib_finalize(nal_cb, private, cookie, PTL_OK);
346
347         /*
348          *      return buffer so it can be used again
349          */
350         CDEBUG(D_NET, "calling gm_provide_receive_buffer\n");
351         GMNAL_GM_LOCK(nal_data);
352         gm_provide_receive_buffer_with_tag(nal_data->gm_port, srxd->buffer, 
353                                            srxd->gmsize, GM_LOW_PRIORITY, 0);   
354         GMNAL_GM_UNLOCK(nal_data);
355
356         return(PTL_OK);
357 }
358
359
360 /*
361  *      Start a small transmit. 
362  *      Get a send token (and wired transmit buffer).
363  *      Copy data from the sender's buffer to the wired buffer and
364  *      initiate gm_send from the wired buffer.
365  *      The callback function informs when the send is complete.
366  */
367 int
368 gmnal_small_tx(nal_cb_t *nal_cb, void *private, lib_msg_t *cookie, 
369                 ptl_hdr_t *hdr, int type, ptl_nid_t global_nid, ptl_pid_t pid, 
370                 unsigned int niov, struct iovec *iov, int size)
371 {
372         gmnal_data_t    *nal_data = (gmnal_data_t*)nal_cb->nal_data;
373         gmnal_stxd_t    *stxd = NULL;
374         void            *buffer = NULL;
375         gmnal_msghdr_t  *msghdr = NULL;
376         int             tot_size = 0;
377         unsigned int    local_nid;
378         gm_status_t     gm_status = GM_SUCCESS;
379
380         CDEBUG(D_TRACE, "gmnal_small_tx nal_cb [%p] private [%p] cookie [%p] "
381                "hdr [%p] type [%d] global_nid ["LPU64"] pid [%d] niov [%d] "
382                "iov [%p] size [%d]\n", nal_cb, private, cookie, hdr, type,
383                global_nid, pid, niov, iov, size);
384
385         CDEBUG(D_INFO, "portals_hdr:: dest_nid ["LPU64"], src_nid ["LPU64"]\n",
386                hdr->dest_nid, hdr->src_nid);
387
388         if (!nal_data) {
389                 CDEBUG(D_ERROR, "no nal_data\n");
390                 return(GMNAL_STATUS_FAIL);
391         } else {
392                 CDEBUG(D_INFO, "nal_data [%p]\n", nal_data);
393         }
394
395         GMNAL_GM_LOCK(nal_data);
396         gm_status = gm_global_id_to_node_id(nal_data->gm_port, global_nid, 
397                                             &local_nid);
398         GMNAL_GM_UNLOCK(nal_data);
399         if (gm_status != GM_SUCCESS) {
400                 CDEBUG(D_ERROR, "Failed to obtain local id\n");
401                 return(GMNAL_STATUS_FAIL);
402         }
403         CDEBUG(D_INFO, "Local Node_id is [%u][%x]\n", local_nid, local_nid);
404
405         stxd = gmnal_get_stxd(nal_data, 1);
406         CDEBUG(D_INFO, "stxd [%p]\n", stxd);
407
408         stxd->type = GMNAL_SMALL_MESSAGE;
409         stxd->cookie = cookie;
410
411         /*
412          *      Copy gmnal_msg_hdr and portals header to the transmit buffer
413          *      Then copy the data in
414          */
415         buffer = stxd->buffer;
416         msghdr = (gmnal_msghdr_t*)buffer;
417
418         msghdr->magic = GMNAL_MAGIC;
419         msghdr->type = GMNAL_SMALL_MESSAGE;
420         msghdr->sender_node_id = nal_data->gm_global_nid;
421         CDEBUG(D_INFO, "processing msghdr at [%p]\n", buffer);
422
423         buffer += sizeof(gmnal_msghdr_t);
424
425         CDEBUG(D_INFO, "processing  portals hdr at [%p]\n", buffer);
426         gm_bcopy(hdr, buffer, sizeof(ptl_hdr_t));
427
428         buffer += sizeof(ptl_hdr_t);
429
430         while(niov--) {
431                 CDEBUG(D_INFO, "processing iov [%p] len ["LPSZ"] to [%p]\n", 
432                        iov, iov->iov_len, buffer);
433                 gm_bcopy(iov->iov_base, buffer, iov->iov_len);
434                 buffer+= iov->iov_len;
435                 iov++;
436         }
437
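        /*
         *      The total message size is the payload plus the portals
         *      header and the gmnal header copied in above
         */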
438         CDEBUG(D_INFO, "sending\n");
439         tot_size = size+sizeof(ptl_hdr_t)+sizeof(gmnal_msghdr_t);
440         stxd->msg_size = tot_size;
441
442
443         CDEBUG(D_NET, "Calling gm_send_to_peer port [%p] buffer [%p] "
444                "gmsize [%lu] msize [%d] global_nid ["LPU64"] local_nid[%d] "
445                "stxd [%p]\n", nal_data->gm_port, stxd->buffer, stxd->gm_size,
446                stxd->msg_size, global_nid, local_nid, stxd);
447
448         GMNAL_GM_LOCK(nal_data);
449         stxd->gm_priority = GM_LOW_PRIORITY;
450         stxd->gm_target_node = local_nid;
451         gm_send_to_peer_with_callback(nal_data->gm_port, stxd->buffer, 
452                                       stxd->gm_size, stxd->msg_size, 
453                                       GM_LOW_PRIORITY, local_nid, 
454                                       gmnal_small_tx_callback, (void*)stxd);
455         GMNAL_GM_UNLOCK(nal_data);
456         CDEBUG(D_INFO, "done\n");
457                 
458         return(PTL_OK);
459 }
460
461
462 /*
463  *      A callback to indicate the small transmit operation is complete.
464  *      Check for errors and try to deal with them.
465  *      Call lib_finalize to inform the client application that the send 
466  *      is complete and the memory can be reused.
467  *      Return the stxd when finished with it (returns a send token)
468  */
469 void 
470 gmnal_small_tx_callback(gm_port_t *gm_port, void *context, gm_status_t status)
471 {
472         gmnal_stxd_t    *stxd = (gmnal_stxd_t*)context;
473         lib_msg_t       *cookie;
474         gmnal_data_t    *nal_data;
475         nal_cb_t        *nal_cb;
476         if (!stxd) {
477                 CDEBUG(D_TRACE, "send completion event for unknown stxd\n");
478                 return;
479         }
480         cookie = stxd->cookie;
481         nal_data = (gmnal_data_t*)stxd->nal_data;
482         nal_cb = nal_data->nal_cb;
483         if (status != GM_SUCCESS)
484                 CDEBUG(D_ERROR, "Result of send stxd [%p] is [%s]\n",
485                        stxd, gmnal_gm_error(status));
486         switch(status) {
487                 case(GM_SUCCESS):
488                 break;
489
490
491
492                 case(GM_SEND_DROPPED):
493                 /*
494                  *      do a resend on the dropped ones
495                  */
496                         CDEBUG(D_ERROR, "send stxd [%p] was dropped, "
497                                "resending\n", context);
498                         GMNAL_GM_LOCK(nal_data);
499                         gm_send_to_peer_with_callback(nal_data->gm_port, 
500                                                       stxd->buffer, 
501                                                       stxd->gm_size, 
502                                                       stxd->msg_size, 
503                                                       stxd->gm_priority, 
504                                                       stxd->gm_target_node, 
505                                                       gmnal_small_tx_callback,
506                                                       context);
507                         GMNAL_GM_UNLOCK(nal_data);
508                 
509                 return;
510                 case(GM_TIMED_OUT):
511                 case(GM_SEND_TIMED_OUT):
512                 /*
513                  *      drop these ones
514                  */
515                         CDEBUG(D_INFO, "calling gm_drop_sends\n");
516                         GMNAL_GM_LOCK(nal_data);
517                         gm_drop_sends(nal_data->gm_port, stxd->gm_priority, 
518                                       stxd->gm_target_node, GMNAL_GM_PORT, 
519                                       gmnal_drop_sends_callback, context);
520                         GMNAL_GM_UNLOCK(nal_data);
521
522                 return;
523
524
525                 /*
526                  *      abort on these ?
527                  */
528                 case(GM_TRY_AGAIN):
529                 case(GM_INTERRUPTED):
530                 case(GM_FAILURE):
531                 case(GM_INPUT_BUFFER_TOO_SMALL):
532                 case(GM_OUTPUT_BUFFER_TOO_SMALL):
533                 case(GM_BUSY):
534                 case(GM_MEMORY_FAULT):
535                 case(GM_INVALID_PARAMETER):
536                 case(GM_OUT_OF_MEMORY):
537                 case(GM_INVALID_COMMAND):
538                 case(GM_PERMISSION_DENIED):
539                 case(GM_INTERNAL_ERROR):
540                 case(GM_UNATTACHED):
541                 case(GM_UNSUPPORTED_DEVICE):
542                 case(GM_SEND_REJECTED):
543                 case(GM_SEND_TARGET_PORT_CLOSED):
544                 case(GM_SEND_TARGET_NODE_UNREACHABLE):
545                 case(GM_SEND_PORT_CLOSED):
546                 case(GM_NODE_ID_NOT_YET_SET):
547                 case(GM_STILL_SHUTTING_DOWN):
548                 case(GM_CLONE_BUSY):
549                 case(GM_NO_SUCH_DEVICE):
550                 case(GM_ABORTED):
551                 case(GM_INCOMPATIBLE_LIB_AND_DRIVER):
552                 case(GM_UNTRANSLATED_SYSTEM_ERROR):
553                 case(GM_ACCESS_DENIED):
554                 case(GM_NO_DRIVER_SUPPORT):
555                 case(GM_PTE_REF_CNT_OVERFLOW):
556                 case(GM_NOT_SUPPORTED_IN_KERNEL):
557                 case(GM_NOT_SUPPORTED_ON_ARCH):
558                 case(GM_NO_MATCH):
559                 case(GM_USER_ERROR):
560                 case(GM_DATA_CORRUPTED):
561                 case(GM_HARDWARE_FAULT):
562                 case(GM_SEND_ORPHANED):
563                 case(GM_MINOR_OVERFLOW):
564                 case(GM_PAGE_TABLE_FULL):
565                 case(GM_UC_ERROR):
566                 case(GM_INVALID_PORT_NUMBER):
567                 case(GM_DEV_NOT_FOUND):
568                 case(GM_FIRMWARE_NOT_RUNNING):
569                 case(GM_YP_NO_MATCH):
570                 default:
571                         CDEBUG(D_ERROR, "Unknown send error\n");
572         }
573
574         /*
575          *      TO DO
576          *      If this is a large message init,
577          *      we're not finished with the data yet,
578          *      so can't call lib_finalise.
579          *      However, we're also holding on to a 
580          *      stxd here (to keep track of the source
581          *      iovec only). Should use another structure
582          *      to keep track of iovec and return stxd to 
583          *      free list earlier.
584          */
585         if (stxd->type == GMNAL_LARGE_MESSAGE_INIT) {
586                 CDEBUG(D_INFO, "large transmit done\n");
587                 return;
588         }
589         gmnal_return_stxd(nal_data, stxd);
590         lib_finalize(nal_cb, stxd, cookie, PTL_OK);
591
592         return;
593 }
594
595
596
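/*
 *      Callback for gm_drop_sends().
 *      If the drop completed successfully, resend the original message;
 *      otherwise just log the failure status.
 */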
597 void gmnal_drop_sends_callback(struct gm_port *gm_port, void *context, 
598                                 gm_status_t status)
599 {
600         gmnal_stxd_t    *stxd = (gmnal_stxd_t*)context;
601         gmnal_data_t    *nal_data = stxd->nal_data;
602
603         CDEBUG(D_TRACE, "status is [%d] context is [%p]\n", status, context);
604         if (status == GM_SUCCESS) {
605                 GMNAL_GM_LOCK(nal_data);
606                 gm_send_to_peer_with_callback(gm_port, stxd->buffer, 
607                                               stxd->gm_size, stxd->msg_size, 
608                                               stxd->gm_priority, 
609                                               stxd->gm_target_node, 
610                                               gmnal_small_tx_callback, 
611                                               context);
612                 GMNAL_GM_UNLOCK(nal_data);
613         } else {
614                 CDEBUG(D_ERROR, "send_to_peer status for stxd [%p] is "
615                        "[%d][%s]\n", stxd, status, gmnal_gm_error(status));
616         }
617
618
619         return;
620 }
621
622
623 /*
624  *      Begin a large transmit.
625  *      Do a gm_register of the memory pointed to by the iovec 
626  *      and send details to the receiver. The receiver does a gm_get
627  *      to pull the data and sends an ack when finished. Upon receipt of
628  *      this ack, deregister the memory. Only 1 send token is required here.
629  */
630 int
631 gmnal_large_tx(nal_cb_t *nal_cb, void *private, lib_msg_t *cookie, 
632                 ptl_hdr_t *hdr, int type, ptl_nid_t global_nid, ptl_pid_t pid, 
633                 unsigned int niov, struct iovec *iov, int size)
634 {
635
636         gmnal_data_t    *nal_data;
637         gmnal_stxd_t    *stxd = NULL;
638         void            *buffer = NULL;
639         gmnal_msghdr_t  *msghdr = NULL;
640         unsigned int    local_nid;
641         int             mlen = 0;       /* the size of the init message data */
642         struct iovec    *iov_dup = NULL;
643         gm_status_t     gm_status;
644         int             niov_dup;
645
646
647         CDEBUG(D_TRACE, "gmnal_large_tx nal_cb [%p] private [%p], cookie [%p] "
648                "hdr [%p], type [%d] global_nid ["LPU64"], pid [%d], niov [%d], "
649                "iov [%p], size [%d]\n", nal_cb, private, cookie, hdr, type,
650                global_nid, pid, niov, iov, size);
651
652         if (nal_cb)
653                 nal_data = (gmnal_data_t*)nal_cb->nal_data;
654         else  {
655                 CDEBUG(D_ERROR, "no nal_cb.\n");
656                 return(GMNAL_STATUS_FAIL);
657         }
658         
659
660         /*
661          *      Get stxd and buffer. Put local address of data in buffer, 
662          *      send local addresses to target, 
663          *      wait for the target node to pull the data over.
664          *      The stxd keeps track of the source iovec until the ack arrives.
665          */
666         stxd = gmnal_get_stxd(nal_data, 1);
667         CDEBUG(D_INFO, "stxd [%p]\n", stxd);
668
669         stxd->type = GMNAL_LARGE_MESSAGE_INIT;
670         stxd->cookie = cookie;
671
672         /*
673          *      Copy gmnal_msg_hdr and portals header to the transmit buffer
674          *      Then copy the iov in
675          */
676         buffer = stxd->buffer;
677         msghdr = (gmnal_msghdr_t*)buffer;
678
679         CDEBUG(D_INFO, "processing msghdr at [%p]\n", buffer);
680
681         msghdr->magic = GMNAL_MAGIC;
682         msghdr->type = GMNAL_LARGE_MESSAGE_INIT;
683         msghdr->sender_node_id = nal_data->gm_global_nid;
684         msghdr->stxd = stxd;
685         msghdr->niov = niov ;
686         buffer += sizeof(gmnal_msghdr_t);
687         mlen = sizeof(gmnal_msghdr_t);
688         CDEBUG(D_INFO, "mlen is [%d]\n", mlen);
689
690
691         CDEBUG(D_INFO, "processing  portals hdr at [%p]\n", buffer);
692
693         gm_bcopy(hdr, buffer, sizeof(ptl_hdr_t));
694         buffer += sizeof(ptl_hdr_t);
695         mlen += sizeof(ptl_hdr_t); 
696         CDEBUG(D_INFO, "mlen is [%d]\n", mlen);
697
698         /*
699          *      copy the iov to the buffer so target knows 
700          *      where to get the data from
701          */
702         CDEBUG(D_INFO, "processing iov to [%p]\n", buffer);
703         gm_bcopy(iov, buffer, niov*sizeof(struct iovec));
704         mlen += niov*(sizeof(struct iovec));
705         CDEBUG(D_INFO, "mlen is [%d]\n", mlen);
706
707
708         /*
709          *      Store the iovs in the stxd so we can get 
710          *      them later if we need them
711          */
712         CDEBUG(D_NET, "Copying iov [%p] to [%p]\n", iov, stxd->iov);
713         gm_bcopy(iov, stxd->iov, niov*sizeof(struct iovec));
714         stxd->niov = niov;
715         
716
717         /*
718          *      register the memory so the NIC can get hold of the data
719          *      This is a slow process. it'd be good to overlap it 
720          *      with something else.
721          */
722         iov_dup = iov;
723         niov_dup = niov;
724         while(niov--) {
725                 CDEBUG(D_INFO, "Registering memory [%p] len ["LPSZ"] \n", 
726                        iov->iov_base, iov->iov_len);
727                 GMNAL_GM_LOCK(nal_data);
728                 gm_status = gm_register_memory(nal_data->gm_port, 
729                                                iov->iov_base, iov->iov_len);
730                 if (gm_status != GM_SUCCESS) {
731                         GMNAL_GM_UNLOCK(nal_data);
732                         CDEBUG(D_ERROR, "gm_register_memory returns [%d][%s] "
733                                "for memory [%p] len ["LPSZ"]\n",
734                                gm_status, gmnal_gm_error(gm_status),
735                                iov->iov_base, iov->iov_len);
736                         GMNAL_GM_LOCK(nal_data);
737                         while (iov_dup != iov) {
738                                 gm_deregister_memory(nal_data->gm_port, 
739                                                      iov_dup->iov_base, 
740                                                      iov_dup->iov_len);
741                                 iov_dup++;
742                         }
743                         GMNAL_GM_UNLOCK(nal_data);
744                         gmnal_return_stxd(nal_data, stxd);
745                         return(PTL_FAIL);
746                 }
747
748                 GMNAL_GM_UNLOCK(nal_data);
749                 iov++;
750         }
751
752         /*
753          *      Send the init message to the target
754          */
755         CDEBUG(D_INFO, "sending mlen [%d]\n", mlen);
756         GMNAL_GM_LOCK(nal_data);
757         gm_status = gm_global_id_to_node_id(nal_data->gm_port, global_nid, 
758                                             &local_nid);
759         if (gm_status != GM_SUCCESS) {
760                 GMNAL_GM_UNLOCK(nal_data);
761                 CDEBUG(D_ERROR, "Failed to obtain local id\n");
762                 gmnal_return_stxd(nal_data, stxd);
763                 /* TO DO deregister memory on failure */
764                 return(GMNAL_STATUS_FAIL);
765         }
766         CDEBUG(D_INFO, "Local Node_id is [%d]\n", local_nid);
767         gm_send_to_peer_with_callback(nal_data->gm_port, stxd->buffer, 
768                                       stxd->gm_size, mlen, GM_LOW_PRIORITY, 
769                                       local_nid, gmnal_large_tx_callback, 
770                                       (void*)stxd);
771         GMNAL_GM_UNLOCK(nal_data);
772         
773         CDEBUG(D_INFO, "done\n");
774                 
775         return(PTL_OK);
776 }
777
778 /*
779  *      Callback function indicates that send of buffer with 
780  *      large message iovec has completed (or failed).
781  */
782 void 
783 gmnal_large_tx_callback(gm_port_t *gm_port, void *context, gm_status_t status)
784 {
785         gmnal_small_tx_callback(gm_port, context, status);
786
787 }
788
789
790
791 /*
792  *      Have received a buffer that contains an iovec of the sender. 
793  *      Do a gm_register_memory of the receiver's buffer and then do a
794  *      gm_get of the data from the sender.
795  */
796 int
797 gmnal_large_rx(nal_cb_t *nal_cb, void *private, lib_msg_t *cookie, 
798                 unsigned int nriov, struct iovec *riov, size_t mlen, 
799                 size_t rlen)
800 {
801         gmnal_data_t    *nal_data = nal_cb->nal_data;
802         gmnal_srxd_t    *srxd = (gmnal_srxd_t*)private;
803         void            *buffer = NULL;
804         struct  iovec   *riov_dup;
805         int             nriov_dup;
806         gmnal_msghdr_t  *msghdr = NULL;
807         gm_status_t     gm_status;
808
809         CDEBUG(D_TRACE, "gmnal_large_rx :: nal_cb[%p], private[%p], "
810                "cookie[%p], niov[%d], iov[%p], mlen["LPSZ"], rlen["LPSZ"]\n",
811                nal_cb, private, cookie, nriov, riov, mlen, rlen);
812
813         if (!srxd) {
814                 CDEBUG(D_ERROR, "gmnal_large_rx no context\n");
815                 return(PTL_FAIL);
816         }
817
818         buffer = srxd->buffer;
819         msghdr = (gmnal_msghdr_t*)buffer;
820         buffer += sizeof(gmnal_msghdr_t);
821         buffer += sizeof(ptl_hdr_t);
822
823         /*
824          *      Store the sender's stxd address in the srxd for this message.
825          *      gmnal_large_tx_ack needs it to notify the sender
826          *      that the pull of data is complete
827          */
828         srxd->source_stxd = msghdr->stxd;
829
830         /*
831          *      Register the receivers memory
832          *      get the data,
833          *      tell the sender that we got the data
834          *      then tell the receiver we got the data
835          *      TO DO
836          *      If the iovecs match, could interleave 
837          *      gm_registers and gm_gets for each element
838          */
839         nriov_dup = nriov;
840         riov_dup = riov;
841         while(nriov--) {
842                 CDEBUG(D_INFO, "Registering memory [%p] len ["LPSZ"] \n", 
843                        riov->iov_base, riov->iov_len);
844                 GMNAL_GM_LOCK(nal_data);
845                 gm_status = gm_register_memory(nal_data->gm_port, 
846                                                riov->iov_base, riov->iov_len);
847                 if (gm_status != GM_SUCCESS) {
848                         GMNAL_GM_UNLOCK(nal_data);
849                         CDEBUG(D_ERROR, "gm_register_memory returns [%d][%s] "
850                                "for memory [%p] len ["LPSZ"]\n",
851                                gm_status, gmnal_gm_error(gm_status),
852                                riov->iov_base, riov->iov_len);
853                         GMNAL_GM_LOCK(nal_data);
854                         while (riov_dup != riov) {
855                                 gm_deregister_memory(nal_data->gm_port, 
856                                                      riov_dup->iov_base, 
857                                                      riov_dup->iov_len);
858                                 riov_dup++;
859                         }
860                         GMNAL_GM_UNLOCK(nal_data);
861                         /*
862                          *      give back srxd and buffer. Send NACK to sender
863                          */
864                         return(PTL_FAIL);
865                 }
866                 GMNAL_GM_UNLOCK(nal_data);
867                 riov++;
868         }
869         /*
870          *      do this so the final gm_get callback can deregister the memory
871          */
872         PORTAL_ALLOC(srxd->riov, nriov_dup*(sizeof(struct iovec)));
873         gm_bcopy(riov_dup, srxd->riov, nriov_dup*(sizeof(struct iovec)));
874         srxd->nriov = nriov_dup;
875
876         /*
877          *      now do gm_get to get the data
878          */
879         srxd->cookie = cookie;
880         if (gmnal_remote_get(srxd, srxd->nsiov, (struct iovec*)buffer, 
881                               nriov_dup, riov_dup) != GMNAL_STATUS_OK) {
882                 CDEBUG(D_ERROR, "can't get the data\n");
883         }
884
885         CDEBUG(D_INFO, "gmnal_large_rx done\n");
886
887         return(PTL_OK);
888 }
889
890
891 /*
892  *      Perform a number of remote gets as part of receiving 
893  *      a large message.
894  *      The final one to complete (i.e. the last callback to get called)
895  *      tidies up.
896  *      gm_get requires a send token.
897  */
898 int
899 gmnal_remote_get(gmnal_srxd_t *srxd, int nsiov, struct iovec *siov, 
900                   int nriov, struct iovec *riov)
901 {
902
903         int     ncalls = 0;
904
905         CDEBUG(D_TRACE, "gmnal_remote_get srxd[%p], nriov[%d], riov[%p], "
906                "nsiov[%d], siov[%p]\n", srxd, nriov, riov, nsiov, siov);
907
908
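        /*
         *      First pass (do_copy == 0) only counts the gm_get calls
         *      that will be needed
         */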
909         ncalls = gmnal_copyiov(0, srxd, nsiov, siov, nriov, riov);
910         if (ncalls < 0) {
911                 CDEBUG(D_ERROR, "there's something wrong with the iovecs\n");
912                 return(GMNAL_STATUS_FAIL);
913         }
914         CDEBUG(D_INFO, "gmnal_remote_get ncalls [%d]\n", ncalls);
915         spin_lock_init(&srxd->callback_lock);
916         srxd->ncallbacks = ncalls;
917         srxd->callback_status = 0;
918
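        /*
         *      Second pass (do_copy == 1) actually issues the gm_get calls
         */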
919         ncalls = gmnal_copyiov(1, srxd, nsiov, siov, nriov, riov);
920         if (ncalls < 0) {
921                 CDEBUG(D_ERROR, "there's something wrong with the iovecs\n");
922                 return(GMNAL_STATUS_FAIL);
923         }
924
925         return(GMNAL_STATUS_OK);
926
927 }
928
929
930 /*
931  *      Pull data from the source node (source iovec) to a local iovec.
932  *      The iovecs may not match, which adds the complications below.
933  *      Count the number of gm_gets that will be required so the callbacks
934  *      can determine which one is the last.
935  */     
936 int
937 gmnal_copyiov(int do_copy, gmnal_srxd_t *srxd, int nsiov, 
938                struct iovec *siov, int nriov, struct iovec *riov)
939 {
940
941         int     ncalls = 0;
942         int     slen = siov->iov_len, rlen = riov->iov_len;
943         char    *sbuf = siov->iov_base, *rbuf = riov->iov_base; 
944         unsigned long   sbuf_long;
945         gm_remote_ptr_t remote_ptr = 0;
946         unsigned int    source_node;
947         gmnal_ltxd_t    *ltxd = NULL;
948         gmnal_data_t    *nal_data = srxd->nal_data;
949
950         CDEBUG(D_TRACE, "copy[%d] nal_data[%p]\n", do_copy, nal_data);
951         if (do_copy) {
952                 if (!nal_data) {
953                         CDEBUG(D_ERROR, "Bad args No nal_data\n");
954                         return(GMNAL_STATUS_FAIL);
955                 }
956                 GMNAL_GM_LOCK(nal_data);
957                 if (gm_global_id_to_node_id(nal_data->gm_port, 
958                                             srxd->gm_source_node, 
959                                             &source_node) != GM_SUCCESS) {
960
961                         CDEBUG(D_ERROR, "cannot resolve global_id [%u] "
962                                "to local node_id\n", srxd->gm_source_node);
963                         GMNAL_GM_UNLOCK(nal_data);
964                         return(GMNAL_STATUS_FAIL);
965                 }
966                 GMNAL_GM_UNLOCK(nal_data);
967                 /*
968                  *      We need a send token to use gm_get;
969                  *      getting an ltxd gets us a send token.
970                  *      The ltxd is used as the context to the
971                  *      callback function (so the ltxd can be returned).
972                  *      Set pointer in ltxd to srxd so the callback count in srxd
973                  *      can be decremented to find the last callback to complete
974                  */
975                 CDEBUG(D_INFO, "gmnal_copyiov source node is G[%u]L[%d]\n", 
976                        srxd->gm_source_node, source_node);
977         }
978
979         do {
980                 CDEBUG(D_INFO, "sbuf[%p] slen[%d] rbuf[%p], rlen[%d]\n",
981                                 sbuf, slen, rbuf, rlen);
982                 if (slen > rlen) {
983                         ncalls++;
984                         if (do_copy) {
985                                 CDEBUG(D_INFO, "slen>rlen\n");
986                                 ltxd = gmnal_get_ltxd(nal_data);
987                                 ltxd->srxd = srxd;
988                                 GMNAL_GM_LOCK(nal_data);
989                                 /* 
990                                  *      funny business to get rid 
991                                  *      of compiler warning 
992                                  */
993                                 sbuf_long = (unsigned long) sbuf;
994                                 remote_ptr = (gm_remote_ptr_t)sbuf_long;
995                                 gm_get(nal_data->gm_port, remote_ptr, rbuf, 
996                                        rlen, GM_LOW_PRIORITY, source_node, 
997                                        GMNAL_GM_PORT, 
998                                        gmnal_remote_get_callback, ltxd);
999                                 GMNAL_GM_UNLOCK(nal_data);
1000                         }
1001                         /*
1002                          *      at the end of 1 iov element
1003                          */
1004                         sbuf+=rlen;
1005                         slen-=rlen;
1006                         riov++;
1007                         nriov--;
1008                         rbuf = riov->iov_base;
1009                         rlen = riov->iov_len;
1010                 } else if (rlen > slen) {
1011                         ncalls++;
1012                         if (do_copy) {
1013                                 CDEBUG(D_INFO, "slen<rlen\n");
1014                                 ltxd = gmnal_get_ltxd(nal_data);
1015                                 ltxd->srxd = srxd;
1016                                 GMNAL_GM_LOCK(nal_data);
1017                                 sbuf_long = (unsigned long) sbuf;
1018                                 remote_ptr = (gm_remote_ptr_t)sbuf_long;
1019                                 gm_get(nal_data->gm_port, remote_ptr, rbuf, 
1020                                        slen, GM_LOW_PRIORITY, source_node, 
1021                                        GMNAL_GM_PORT, 
1022                                        gmnal_remote_get_callback, ltxd);
1023                                 GMNAL_GM_UNLOCK(nal_data);
1024                         }
1025                         /*
1026                          *      at end of siov element
1027                          */
1028                         rbuf+=slen;
1029                         rlen-=slen;
1030                         siov++;
1031                         sbuf = siov->iov_base;
1032                         slen = siov->iov_len;
1033                 } else {
1034                         ncalls++;
1035                         if (do_copy) {
1036                                 CDEBUG(D_INFO, "rlen=slen\n");
1037                                 ltxd = gmnal_get_ltxd(nal_data);
1038                                 ltxd->srxd = srxd;
1039                                 GMNAL_GM_LOCK(nal_data);
1040                                 sbuf_long = (unsigned long) sbuf;
1041                                 remote_ptr = (gm_remote_ptr_t)sbuf_long;
1042                                 gm_get(nal_data->gm_port, remote_ptr, rbuf, 
1043                                        rlen, GM_LOW_PRIORITY, source_node, 
1044                                        GMNAL_GM_PORT, 
1045                                        gmnal_remote_get_callback, ltxd);
1046                                 GMNAL_GM_UNLOCK(nal_data);
1047                         }
1048                         /*
1049                          *      at end of siov and riov element
1050                          */
1051                         siov++;
1052                         sbuf = siov->iov_base;
1053                         slen = siov->iov_len;
1054                         riov++;
1055                         nriov--;
1056                         rbuf = riov->iov_base;
1057                         rlen = riov->iov_len;
1058                 }
1059
1060         } while (nriov);
1061         return(ncalls);
1062 }
1063
1064
1065 /*
1066  *      The callback function that is invoked after each gm_get call completes.
1067  *      Multiple callbacks may be invoked for 1 transaction, only the final
1068  *      callback has work to do.
1069  */
1070 void
1071 gmnal_remote_get_callback(gm_port_t *gm_port, void *context, 
1072                            gm_status_t status)
1073 {
1074
1075         gmnal_ltxd_t    *ltxd = (gmnal_ltxd_t*)context;
1076         gmnal_srxd_t    *srxd = ltxd->srxd;
1077         nal_cb_t        *nal_cb = srxd->nal_data->nal_cb;
1078         int             lastone;
1079         struct  iovec   *riov;
1080         int             nriov;
1081         gmnal_data_t    *nal_data;
1082
1083         CDEBUG(D_TRACE, "called for context [%p]\n", context);
1084
1085         if (status != GM_SUCCESS) {
1086                 CDEBUG(D_ERROR, "reports error [%d][%s]\n", status, 
1087                        gmnal_gm_error(status));
1088         }
1089
1090         spin_lock(&srxd->callback_lock);
1091         srxd->ncallbacks--;
1092         srxd->callback_status |= status;
1093         lastone = srxd->ncallbacks?0:1;
1094         spin_unlock(&srxd->callback_lock);
1095         nal_data = srxd->nal_data;
1096
1097         /*
1098          *      everyone returns a send token
1099          */
1100         gmnal_return_ltxd(nal_data, ltxd);
1101
1102         if (!lastone) {
1103                 CDEBUG(D_ERROR, "NOT final callback context[%p]\n", srxd);
1104                 return;
1105         }
1106         
1107         /*
1108          *      Let our client application proceed
1109          */     
1110         CDEBUG(D_ERROR, "final callback context[%p]\n", srxd);
1111         lib_finalize(nal_cb, srxd, srxd->cookie, PTL_OK);
1112
1113         /*
1114          *      send an ack to the sender to let him know we got the data
1115          */
1116         gmnal_large_tx_ack(nal_data, srxd);
1117
1118         /*
1119          *      Unregister the memory that was used
1120          *      This is a very slow business (slower than registering)
1121          */
1122         nriov = srxd->nriov;
1123         riov = srxd->riov;
1124         GMNAL_GM_LOCK(nal_data);
1125         while (nriov--) {
1126                 CDEBUG(D_ERROR, "deregister memory [%p]\n", riov->iov_base);
1127                 if (gm_deregister_memory(srxd->nal_data->gm_port, 
1128                                          riov->iov_base, riov->iov_len)) {
1129                         CDEBUG(D_ERROR, "failed to deregister memory [%p]\n", 
1130                                riov->iov_base);
1131                 }
1132                 riov++;
1133         }
1134         GMNAL_GM_UNLOCK(nal_data);
1135         PORTAL_FREE(srxd->riov, sizeof(struct iovec)*srxd->nriov);
1136
1137         /*
1138          *      repost the receive buffer (return receive token)
1139          */
1140         GMNAL_GM_LOCK(nal_data);
1141         gm_provide_receive_buffer_with_tag(nal_data->gm_port, srxd->buffer, 
1142                                            srxd->gmsize, GM_LOW_PRIORITY, 0);   
1143         GMNAL_GM_UNLOCK(nal_data);
1144         
1145         return;
1146 }
1147
1148
1149 /*
1150  *      Called on target node.
1151  *      After pulling data from a source node
1152  *      send an ack message to indicate the large transmit is complete.
1153  */
1154 void 
1155 gmnal_large_tx_ack(gmnal_data_t *nal_data, gmnal_srxd_t *srxd)
1156 {
1157
1158         gmnal_stxd_t    *stxd;
1159         gmnal_msghdr_t *msghdr;
1160         void            *buffer = NULL;
1161         unsigned int    local_nid;
1162         gm_status_t     gm_status = GM_SUCCESS;
1163
1164         CDEBUG(D_TRACE, "srxd[%p] target_node [%u]\n", srxd, 
1165                srxd->gm_source_node);
1166
1167         GMNAL_GM_LOCK(nal_data);
1168         gm_status = gm_global_id_to_node_id(nal_data->gm_port, 
1169                                             srxd->gm_source_node, &local_nid);
1170         GMNAL_GM_UNLOCK(nal_data);
1171         if (gm_status != GM_SUCCESS) {
1172                 CDEBUG(D_ERROR, "Failed to obtain local id\n");
1173                 return;
1174         }
1175         CDEBUG(D_INFO, "Local Node_id is [%u][%x]\n", local_nid, local_nid);
1176
1177         stxd = gmnal_get_stxd(nal_data, 1);
1178         CDEBUG(D_TRACE, "gmnal_large_tx_ack got stxd[%p]\n", stxd);
1179
1180         stxd->nal_data = nal_data;
1181         stxd->type = GMNAL_LARGE_MESSAGE_ACK;
1182
1183         /*
1184          *      Copy gmnal_msg_hdr and portals header to the transmit buffer
1185          *      Then copy the data in
1186          */
1187         buffer = stxd->buffer;
1188         msghdr = (gmnal_msghdr_t*)buffer;
1189
1190         /*
1191          *      Add in the address of the original stxd from the sender node
1192          *      so it knows which thread to notify.
1193          */
1194         msghdr->magic = GMNAL_MAGIC;
1195         msghdr->type = GMNAL_LARGE_MESSAGE_ACK;
1196         msghdr->sender_node_id = nal_data->gm_global_nid;
1197         msghdr->stxd = srxd->source_stxd;
1198         CDEBUG(D_INFO, "processing msghdr at [%p]\n", buffer);
1199
1200         CDEBUG(D_INFO, "sending\n");
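        /*
         *      The ack message consists of the gmnal header only
         */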
1201         stxd->msg_size= sizeof(gmnal_msghdr_t);
1202
1203
1204         CDEBUG(D_NET, "Calling gm_send_to_peer port [%p] buffer [%p] "
1205                "gmsize [%lu] msize [%d] global_nid [%u] local_nid[%d] "
1206                "stxd [%p]\n", nal_data->gm_port, stxd->buffer, stxd->gm_size,
1207                stxd->msg_size, srxd->gm_source_node, local_nid, stxd);
1208         GMNAL_GM_LOCK(nal_data);
1209         stxd->gm_priority = GM_LOW_PRIORITY;
1210         stxd->gm_target_node = local_nid;
1211         gm_send_to_peer_with_callback(nal_data->gm_port, stxd->buffer, 
1212                                       stxd->gm_size, stxd->msg_size, 
1213                                       GM_LOW_PRIORITY, local_nid, 
1214                                       gmnal_large_tx_ack_callback, 
1215                                       (void*)stxd);
1216         
1217         GMNAL_GM_UNLOCK(nal_data);
1218         CDEBUG(D_INFO, "gmnal_large_tx_ack :: done\n");
1219                 
1220         return;
1221 }
1222
1223
1224 /*
1225  *      A callback to indicate the send of the large message ack
1226  *      is complete.
1227  *      Check for errors and try to deal with them.
1228  *      Return the stxd when finished with it (returns a send token).
1229  *      lib_finalize has already been called by the remote get callback.
1230  */
1231 void 
1232 gmnal_large_tx_ack_callback(gm_port_t *gm_port, void *context, 
1233                              gm_status_t status)
1234 {
1235         gmnal_stxd_t    *stxd = (gmnal_stxd_t*)context;
1236
1237         if (!stxd) {
1238                 CDEBUG(D_ERROR, "send completion event for unknown stxd\n");
1239                 return;
1240         }
1241         CDEBUG(D_TRACE, "send completion event for stxd [%p] status is [%d]\n",
1242                stxd, status);
1243
1244         gmnal_return_stxd(stxd->nal_data, stxd);
1245
1247         return;
1248 }
1249
1250 /*
1251  *      Indicates the large transmit operation is complete.
1252  *      Called on transmit side (means data has been pulled by receiver 
1253  *      or failed).
1254  *      Call lib_finalize to inform the client application that the send 
1255  *      is complete, deregister the memory and return the stxd. 
1256  *      Finally, repost the rx buffer that the ack message was delivered in.
1257  */
1258 void 
1259 gmnal_large_tx_ack_received(gmnal_data_t *nal_data, gmnal_srxd_t *srxd)
1260 {
1261         nal_cb_t        *nal_cb = nal_data->nal_cb;
1262         gmnal_stxd_t    *stxd = NULL;
1263         gmnal_msghdr_t  *msghdr = NULL;
1264         void            *buffer = NULL;
1265         struct  iovec   *iov;
1266
1267
1268         buffer = srxd->buffer;
1269
1270         CDEBUG(D_TRACE, "gmnal_large_tx_ack_received buffer [%p]\n", buffer);
1271         msghdr = (gmnal_msghdr_t*)buffer;
1272         stxd = msghdr->stxd;
1273
1274         CDEBUG(D_INFO, "gmnal_large_tx_ack_received stxd [%p]\n", stxd);
1275
1276         lib_finalize(nal_cb, stxd, stxd->cookie, PTL_OK);
1277
1278         /*
1279          *      extract the iovec from the stxd, deregister the memory.
1280          *      free the space used to store the iovec
1281          */
1282         iov = stxd->iov;
1283         while(stxd->niov--) {
1284                 CDEBUG(D_INFO, "deregister memory [%p] size ["LPSZ"]\n",
1285                        iov->iov_base, iov->iov_len);
1286                 GMNAL_GM_LOCK(nal_data);
1287                 gm_deregister_memory(nal_data->gm_port, iov->iov_base, 
1288                                      iov->iov_len);
1289                 GMNAL_GM_UNLOCK(nal_data);
1290                 iov++;
1291         }
1292
1293         /*
1294          *      return the send token
1295          *      TO DO Is it bad to hold onto the send token for so long?
1296          */
1297         gmnal_return_stxd(nal_data, stxd);
1298
1299
1300         /*
1301          *      requeue the receive buffer 
1302          */
1303         gmnal_rx_requeue_buffer(nal_data, srxd);
1304         
1305
1306         return;
1307 }