Whamcloud - gitweb
* Added portals/knals/ibnal
author eeb <eeb>
Tue, 4 Nov 2003 19:50:20 +0000 (19:50 +0000)
committer eeb <eeb>
Tue, 4 Nov 2003 19:50:20 +0000 (19:50 +0000)
26 files changed:
lnet/archdep.m4
lnet/include/linux/kp30.h
lnet/klnds/Makefile.am
lnet/klnds/iblnd/Makefile.am [new file with mode: 0644]
lnet/klnds/iblnd/ibnal.c [new file with mode: 0644]
lnet/klnds/iblnd/ibnal.h [new file with mode: 0644]
lnet/klnds/iblnd/ibnal_cb.c [new file with mode: 0644]
lnet/klnds/iblnd/ibnal_send_recv_self_testing.c [new file with mode: 0644]
lnet/klnds/iblnd/uagent.c [new file with mode: 0644]
lnet/libcfs/debug.c
lnet/libcfs/module.c
lnet/lnet/api-init.c
lnet/utils/portals.c
lustre/portals/archdep.m4
lustre/portals/include/linux/kp30.h
lustre/portals/knals/Makefile.am
lustre/portals/knals/ibnal/Makefile.am [new file with mode: 0644]
lustre/portals/knals/ibnal/ibnal.c [new file with mode: 0644]
lustre/portals/knals/ibnal/ibnal.h [new file with mode: 0644]
lustre/portals/knals/ibnal/ibnal_cb.c [new file with mode: 0644]
lustre/portals/knals/ibnal/ibnal_send_recv_self_testing.c [new file with mode: 0644]
lustre/portals/knals/ibnal/uagent.c [new file with mode: 0644]
lustre/portals/libcfs/debug.c
lustre/portals/libcfs/module.c
lustre/portals/portals/api-init.c
lustre/portals/utils/portals.c

index 7910823..6171555 100644 (file)
@@ -297,6 +297,28 @@ AC_SUBST(with_gm)
 AC_SUBST(GMNAL)
 
 
+#fixme: where are the default IB includes?
+default_ib_include_dir=/usr/local/ib/include
+an_ib_include_file=vapi.h
+
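+# typical usage (illustrative): ./configure --with-ib=/usr/local/ib/include,
+# or --with-ib=yes to probe the default include directory above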
+AC_ARG_WITH(ib, [ --with-ib=[yes/no/path] Path to IB includes], with_ib=$withval, with_ib=$default_ib)
+AC_MSG_CHECKING(if IB headers are present)
+if test "$with_ib" = yes; then
+    with_ib=$default_ib_include_dir
+fi
+if test "$with_ib" != no -a -f ${with_ib}/${an_ib_include_file}; then
+    AC_MSG_RESULT(yes)
+    IBNAL="ibnal"
+    with_ib="-I${with_ib}"
+else
+    AC_MSG_RESULT(no)
+    IBNAL=""
+    with_ib=""
+fi
+AC_SUBST(IBNAL)
+AC_SUBST(with_ib)
+
+
 def_scamac=/opt/scali/include
 AC_ARG_WITH(scamac, [  --with-scamac=[yes/no/path] Path to ScaMAC includes (default=/opt/scali/include)], with_scamac=$withval, with_scamac=$def_scamac)
 AC_MSG_CHECKING(if ScaMAC headers are present)
@@ -317,7 +339,7 @@ AC_SUBST(with_scamac)
 AC_SUBST(SCIMACNAL)
 
 CFLAGS="$KCFLAGS"
-CPPFLAGS="$KINCFLAGS $KCPPFLAGS $MFLAGS $enable_zerocopy $enable_affinity $with_quadrics $with_gm $with_scamac "
+CPPFLAGS="$KINCFLAGS $KCPPFLAGS $MFLAGS $enable_zerocopy $enable_affinity $with_quadrics $with_gm $with_scamac $with_ib"
 
 AC_SUBST(MOD_LINK)
 AC_SUBST(LINUX25)
index f152725..dfca5fb 100644 (file)
@@ -43,6 +43,7 @@ extern unsigned int portal_cerror;
 #define S_GMNAL       (1 << 19)
 #define S_PTLROUTER   (1 << 20)
 #define S_COBD        (1 << 21)
+#define S_IBNAL       (1 << 22)
 
 /* If you change these values, please keep portals/utils/debug.c
  * up to date! */
@@ -1034,6 +1035,7 @@ enum {
         TCPNAL,
         SCIMACNAL,
         ROUTER,
+        IBNAL,
         NAL_ENUM_END_MARKER
 };
 
@@ -1042,6 +1044,7 @@ extern ptl_handle_ni_t  kqswnal_ni;
 extern ptl_handle_ni_t  ksocknal_ni;
 extern ptl_handle_ni_t  ktoenal_ni;
 extern ptl_handle_ni_t  kgmnal_ni;
+extern ptl_handle_ni_t  kibnal_ni;
 extern ptl_handle_ni_t  kscimacnal_ni;
 #endif
 
index fed2785..25aab9d 100644 (file)
@@ -3,5 +3,5 @@
 # This code is issued under the GNU General Public License.
 # See the file COPYING in this distribution
 
-DIST_SUBDIRS= socknal toenal qswnal gmnal scimacnal 
-SUBDIRS= socknal toenal        @QSWNAL@ @GMNAL@ @SCIMACNAL@
+DIST_SUBDIRS= socknal toenal qswnal gmnal scimacnal ibnal
+SUBDIRS= socknal toenal        @QSWNAL@ @GMNAL@ @SCIMACNAL@ @IBNAL@
diff --git a/lnet/klnds/iblnd/Makefile.am b/lnet/klnds/iblnd/Makefile.am
new file mode 100644 (file)
index 0000000..788c641
--- /dev/null
@@ -0,0 +1,10 @@
+include ../../Rules.linux
+
+MODULE = kibnal
+modulenet_DATA = kibnal.o
+EXTRA_PROGRAMS = kibnal
+
+
+DEFS =
+CPPFLAGS=@CPPFLAGS@ @with_ib@
+kibnal_SOURCES = ibnal.h ibnal.c ibnal_cb.c
diff --git a/lnet/klnds/iblnd/ibnal.c b/lnet/klnds/iblnd/ibnal.c
new file mode 100644 (file)
index 0000000..948badf
--- /dev/null
@@ -0,0 +1,2146 @@
+/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
+ * vim:expandtab:shiftwidth=8:tabstop=8:
+ *
+ * Based on ksocknal, qswnal, and gmnal
+ *
+ * Copyright (C) 2003 LANL 
+ *   Author: HB Chen <hbchen@lanl.gov>
+ *   Los Alamos National Lab
+ *
+ *   Portals is free software; you can redistribute it and/or
+ *   modify it under the terms of version 2 of the GNU General Public
+ *   License as published by the Free Software Foundation.
+ *
+ *   Portals is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *   GNU General Public License for more details.
+ *
+ *   You should have received a copy of the GNU General Public License
+ *   along with Portals; if not, write to the Free Software
+ *   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *   
+ */
+
+#include "ibnal.h"
+
+// portal handle ID for this IB-NAL
+ptl_handle_ni_t kibnal_ni;
+
+// message send buffer mutex
+spinlock_t   MSBuf_mutex[NUM_MBUF];
+
+// message recv buffer mutex
+spinlock_t   MRBuf_mutex[NUM_MBUF];
+
+// IB-NAL API information 
+nal_t  kibnal_api; 
+
+// nal's private data 
+kibnal_data_t kibnal_data; 
+
+int ibnal_debug = 0;
+VAPI_pd_hndl_t      Pd_hndl;    
+unsigned int    Num_posted_recv_buf;
+
+// registered send buffer list
+Memory_buffer_info MSbuf_list[NUM_MBUF]; 
+
+// registered recv buffer list 
+Memory_buffer_info MRbuf_list[NUM_MBUF];
+
+//
+// for router 
+// currently there is no need for IBA
+//
+kpr_nal_interface_t kibnal_router_interface = {
+        kprni_nalid: IBNAL,
+        kprni_arg:   &kibnal_data,
+        kprni_fwd:   kibnal_fwd_packet, // forward data to the router
+                                        // when the router is involved
+                                        // in the data transmission
+};
+
+
+// Queue-pair list 
+QP_info QP_list[NUM_QPS];
+
+// information associated with a HCA 
+HCA_info        Hca_data;
+
+// something about HCA 
+VAPI_hca_hndl_t      Hca_hndl; // assume we only use one HCA now 
+VAPI_hca_vendor_t    Hca_vendor;
+VAPI_hca_cap_t       Hca_cap;
+VAPI_hca_port_t      Hca_port_1_props;
+VAPI_hca_port_t      Hca_port_2_props;
+VAPI_hca_attr_t      Hca_attr;
+VAPI_hca_attr_mask_t Hca_attr_mask;
+VAPI_cq_hndl_t       Cq_RQ_hndl;    // CQ's handle
+VAPI_cq_hndl_t       Cq_SQ_hndl;    // CQ's handle
+VAPI_cq_hndl_t       Cq_hndl;    // CQ's handle
+Remote_QP_Info       L_QP_data;
+Remote_QP_Info       R_QP_data;
+
+
+//
+// forward  API
+//
+int 
+kibnal_forward(nal_t   *nal,
+               int     id,
+               void    *args,  
+               size_t args_len,
+               void    *ret,   
+               size_t ret_len)
+{
+        kibnal_data_t *knal_data = nal->nal_data;
+        nal_cb_t      *nal_cb = knal_data->kib_cb;
+
+        // ASSERT checking 
+        LASSERT (nal == &kibnal_api);
+        LASSERT (knal_data == &kibnal_data);
+        LASSERT (nal_cb == &kibnal_lib);
+
+        // dispatch forward API function 
+        
+        CDEBUG(D_NET,"kibnal_forward: function id = %d\n", id);
+
+        lib_dispatch(nal_cb, knal_data, id, args, ret); 
+
+        CDEBUG(D_TRACE,"IBNAL- Done kibnal_forward\n");
+
+        return PTL_OK; // always return PTL_OK
+}
+
+//
+// lock API  
+//
+void 
+kibnal_lock(nal_t *nal, unsigned long *flags)
+{
+        kibnal_data_t *knal_data = nal->nal_data;
+        nal_cb_t      *nal_cb = knal_data->kib_cb;
+
+        // ASSERT checking 
+        LASSERT (nal == &kibnal_api);
+        LASSERT (knal_data == &kibnal_data);
+        LASSERT (nal_cb == &kibnal_lib);
+
+        // disable logical interrupt 
+        nal_cb->cb_cli(nal_cb,flags);
+
+        CDEBUG(D_TRACE,"IBNAL-Done kibnal_lock\n");
+
+}
+
+//
+// unlock API
+//
+void 
+kibnal_unlock(nal_t *nal, unsigned long *flags)
+{
+        kibnal_data_t *k = nal->nal_data;
+        nal_cb_t      *nal_cb = k->kib_cb;
+
+        // ASSERT checking
+        LASSERT (nal == &kibnal_api);
+        LASSERT (k == &kibnal_data);
+        LASSERT (nal_cb == &kibnal_lib);
+
+        // enable logical interrupt 
+        nal_cb->cb_sti(nal_cb,flags);
+
+        CDEBUG(D_TRACE,"IBNAL-Done kibnal_unlock");
+
+}
+
+//
+// shutdown API 
+//     shut down this network interface 
+//
+int
+kibnal_shutdown(nal_t *nal, int ni)
+{       
+        VAPI_ret_t          vstat;
+        kibnal_data_t *k = nal->nal_data;
+        nal_cb_t      *nal_cb = k->kib_cb;
+
+        // assert checking
+        LASSERT (nal == &kibnal_api);
+        LASSERT (k == &kibnal_data);
+        LASSERT (nal_cb == &kibnal_lib);
+
+        // take down this IB network interface 
+        // there is no corresponding cb function to handle this;
+        // do we actually need this one?
+        // reference to IB network interface shutdown 
+        //
+        
+        vstat = IB_Close_HCA();
+
+        if (vstat != VAPI_OK) {
+           CERROR("Failed to close HCA  - %s\n",VAPI_strerror(vstat));
+           return (~PTL_OK);
+        }
+
+        CDEBUG(D_TRACE,"IBNAL- Done kibnal_shutdown\n");
+
+        return PTL_OK;
+}
+
+//
+// yield 
+// when do we call this yield function 
+//
+void 
+kibnal_yield( nal_t *nal )
+{
+        kibnal_data_t *k = nal->nal_data;
+        nal_cb_t      *nal_cb = k->kib_cb;
+        
+        // assert checking
+        LASSERT (nal == &kibnal_api);
+        LASSERT (k    == &kibnal_data);
+        LASSERT (nal_cb == &kibnal_lib);
+
+        // check under what condition that we need to 
+        // call schedule()
+        // who set this need_resched 
+        if (current->need_resched)
+                schedule();
+
+        CDEBUG(D_TRACE,"IBNAL-Done kibnal_yield");
+
+        return;
+}
+
+//
+// ibnal init 
+//
+nal_t *
+kibnal_init(int             interface, // no use here 
+            ptl_pt_index_t  ptl_size,
+            ptl_ac_index_t  ac_size, 
+            ptl_pid_t       requested_pid // no use here
+           )
+{
+  nal_t         *nal       = NULL;
+  nal_cb_t      *nal_cb    = NULL;
+  kibnal_data_t *nal_data  = NULL;
+  int            rc;
+
+  unsigned int nnids = 1; // number of nids 
+                          // do we know how many nodes are in this
+                          // system related to this kib_nid  
+                          //
+
+  CDEBUG(D_NET, "kibnal_init:calling lib_init with nid 0x%u\n",
+                  kibnal_data.kib_nid);
+
+
+  CDEBUG(D_NET, "kibnal_init: interface [%d], ptl_size [%d], ac_size[%d]\n", 
+                 interface, ptl_size, ac_size);
+  CDEBUG(D_NET, "kibnal_init: &kibnal_lib  0x%X\n", &kibnal_lib);
+  CDEBUG(D_NET, "kibnal_init: kibnal_data.kib_nid  %d\n", kibnal_data.kib_nid);
+
+  rc = lib_init(&kibnal_lib, 
+                kibnal_data.kib_nid, 
+                0, // process id is set as 0  
+                nnids,
+                ptl_size, 
+                ac_size);
+
+  if(rc != PTL_OK) {
+     CERROR("kibnal_init: Failed lib_init with nid 0x%u, rc=%d\n",
+                                  kibnal_data.kib_nid,rc);
+  }
+  else {
+      CDEBUG(D_NET,"kibnal_init: DONE lib_init with nid 0x%x%x\n",
+                                  kibnal_data.kib_nid);
+  }
+
+  return &kibnal_api;
+
+}
+
+
+//
+// called before remove ibnal kernel module 
+//
+void __exit 
+kibnal_finalize(void) 
+{ 
+        struct list_head *tmp;
+
+        inter_module_unregister("kibnal_ni");
+
+        // release resources allocated to this Infiniband network interface 
+        PtlNIFini(kibnal_ni); 
+
+        lib_fini(&kibnal_lib); 
+
+        IB_Close_HCA();
+
+        // how much do we need to do here?
+        list_for_each(tmp, &kibnal_data.kib_list) {
+                kibnal_rx_t *conn;
+                conn = list_entry(tmp, kibnal_rx_t, krx_item);
+                CDEBUG(D_IOCTL, "freeing conn %p\n",conn);
+                tmp = tmp->next;
+                list_del(&conn->krx_item);
+                PORTAL_FREE(conn, sizeof(*conn));
+        }
+
+        CDEBUG(D_MALLOC,"done kmem %d\n",atomic_read(&portal_kmemory));
+        CDEBUG(D_TRACE,"IBNAL-Done kibnal_finalize\n");
+
+        return;
+}
+
+
+//
+// * k_server_thread is a kernel thread 
+//   use a shared memory to exchange HCA's data with a pthread in user 
+//   address space
+// * will be replaced when CM is used to handle communication management 
+//
+
+void k_server_thread(Remote_QP_Info *hca_data)
+{
+  int              segment_id;
+  const int        shared_segment_size = sizeof(Remote_QP_Info); 
+  key_t            key = HCA_EXCHANGE_SHM_KEY;
+  unsigned long    raddr;
+  int exchanged_done = NO;
+  int i;
+
+  Remote_QP_Info  *exchange_hca_data;
+
+  long *n;
+  long *uaddr;
+  long ret = 0;
+  // create a shared memory with pre-agreement key
+  segment_id =  sys_shmget(key,
+                           shared_segment_size,
+                           IPC_CREAT | 0666);
+
+
+  // attach to the shared memory;
+  // raddr points to a user address space,
+  // use this address to update the shared memory content 
+  ret = sys_shmat(segment_id, 0 , SHM_RND, &raddr);
+
+#ifdef IBNAL_DEBUG 
+  if(ret >= 0) {
+    CDEBUG(D_NET,"k_server_thread: Shared memory attach success ret = 0X%d,&raddr"
+                   " 0X%x (*(&raddr))=0x%x \n", ret, &raddr,  (*(&raddr)));
+    printk("k_server_thread: Shared memory attach success ret = 0X%d, &raddr"
+                   " 0X%x (*(&raddr))=0x%x \n", ret, &raddr,  (*(&raddr)));
+  }
+  else {
+    CERROR("k_server_thread: Shared memory attach failed ret = 0x%d \n", ret); 
+    printk("k_server_thread: Shared memory attach failed ret = 0x%d \n", ret); 
+    return;
+  }
+#endif
+
+  n = &raddr;
+  uaddr = *n; // get the U-address 
+  /* cast uaddr to exchange_hca_data */
+  exchange_hca_data = (Remote_QP_Info  *) uaddr; 
+  
+  /* copy data from local HCA to shared memory */
+  exchange_hca_data->opcode  = hca_data->opcode;
+  exchange_hca_data->length  = hca_data->length;
+
+  for(i=0; i < NUM_QPS; i++) {
+    exchange_hca_data->dlid[i]    = hca_data->dlid[i];
+    exchange_hca_data->rqp_num[i] = hca_data->rqp_num[i];
+  }
+
+  // periodically check the shared memory until the remote HCA's data
+  // has been updated by the user-mode pthread
+  while(exchanged_done == NO) {
+    if(exchange_hca_data->opcode == RECV_QP_INFO){
+       exchanged_done = YES;
+       /* copy data to local buffer from shared memory */
+       hca_data->opcode  = exchange_hca_data->opcode;
+       hca_data->length  = exchange_hca_data->length;
+
+       for(i=0; i < NUM_QPS; i++) {
+         hca_data->dlid[i]    = exchange_hca_data->dlid[i];
+         hca_data->rqp_num[i] = exchange_hca_data->rqp_num[i];
+       }
+       break;
+    }
+    else { 
+       schedule_timeout(1000);
+    }
+  }
+  
+  // detached shared memory 
+  sys_shmdt(uaddr);
+
+  CDEBUG(D_NET, "Exit from kernel thread: k_server_thread \n");
+  printk("Exit from kernel thread: k_server_thread \n");
+
+  return;
+
+}
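The user-space peer of this kernel thread lives in uagent.c (added by this change but not shown in this hunk). Below is a minimal sketch of that side only, assuming placeholder values for HCA_EXCHANGE_SHM_KEY, RECV_QP_INFO and NUM_QPS and a simplified Remote_QP_Info layout; the real definitions are in ibnal.h.

/* user-space sketch: attach to the segment created by k_server_thread(),
 * fill in the remote HCA's QP data, and flag it as RECV_QP_INFO */
#include <stddef.h>
#include <sys/ipc.h>
#include <sys/shm.h>

#define HCA_EXCHANGE_SHM_KEY 0x1000   /* assumed value; see ibnal.h */
#define RECV_QP_INFO         2        /* assumed value; see ibnal.h */
#define NUM_QPS              8        /* assumed value; see ibnal.h */

typedef struct {                      /* simplified stand-in for Remote_QP_Info */
        int opcode;
        int length;
        int dlid[NUM_QPS];
        int rqp_num[NUM_QPS];
} Remote_QP_Info;

int main(void)
{
        /* attach to the pre-agreed shared segment */
        int shmid = shmget(HCA_EXCHANGE_SHM_KEY, sizeof(Remote_QP_Info), 0666);
        Remote_QP_Info *info;

        if (shmid < 0)
                return 1;
        info = (Remote_QP_Info *) shmat(shmid, NULL, 0);
        if (info == (void *) -1)
                return 1;

        /* ... exchange info->dlid[] / info->rqp_num[] with the remote node
         * (e.g. over a socket), writing the remote values back here ... */

        info->opcode = RECV_QP_INFO;  /* k_server_thread() polls for this */
        shmdt(info);
        return 0;
}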
+
+//
+// create QP 
+// 
+VAPI_ret_t 
+create_qp(QP_info *qp, int qp_index)
+{
+
+  VAPI_ret_t          vstat;
+  VAPI_qp_init_attr_t qp_init_attr;
+  VAPI_qp_prop_t      qp_prop;
+
+  qp->hca_hndl = Hca_hndl;
+  qp->port     = 1; // default 
+  qp->slid     = Hca_port_1_props.lid;
+  qp->hca_port = Hca_port_1_props;
+
+
+  /* Queue Pair Creation Attributes */
+  qp_init_attr.cap.max_oust_wr_rq = NUM_WQE;
+  qp_init_attr.cap.max_oust_wr_sq = NUM_WQE;
+  qp_init_attr.cap.max_sg_size_rq = NUM_SG;
+  qp_init_attr.cap.max_sg_size_sq = NUM_SG;
+  qp_init_attr.pd_hndl            = qp->pd_hndl;
+  qp_init_attr.rdd_hndl           = 0;
+  qp_init_attr.rq_cq_hndl         = qp->rq_cq_hndl;
+  /* we use here polling */
+  //qp_init_attr.rq_sig_type        = VAPI_SIGNAL_REQ_WR;
+  qp_init_attr.rq_sig_type        = VAPI_SIGNAL_ALL_WR;
+  qp_init_attr.sq_cq_hndl         = qp->sq_cq_hndl;
+  /* we use here polling */
+  //qp_init_attr.sq_sig_type        = VAPI_SIGNAL_REQ_WR;
+  qp_init_attr.sq_sig_type        = VAPI_SIGNAL_ALL_WR;
+  // transport service - reliable connection
+
+  qp_init_attr.ts_type            = VAPI_TS_RC;
+          
+  vstat = VAPI_create_qp(qp->hca_hndl,   
+                         &qp_init_attr,      
+                         &qp->qp_hndl, &qp_prop); 
+
+  if (vstat != VAPI_OK) {
+     CERROR("Failed creating QP. Return Failed - %s\n",VAPI_strerror(vstat));
+     return vstat;
+  }
+  
+  qp->qp_num = qp_prop.qp_num; // the qp number 
+  qp->last_posted_send_id  = 0; // user defined work request ID
+  qp->last_posted_rcv_id   = 0; // user defined work request ID
+  qp->cur_send_outstanding = 0;
+  qp->cur_posted_rcv_bufs  = 0;
+  qp->snd_rcv_balance      = 0;
+  
+  CDEBUG(D_OTHER, "create_qp: qp_num = %d, slid = %d, qp_hndl = 0X%X", 
+                  qp->qp_num, qp->slid, qp->qp_hndl);
+
+  // initialize spin-lock mutex variables
+  spin_lock_init(&(qp->snd_mutex));
+  spin_lock_init(&(qp->rcv_mutex));
+  spin_lock_init(&(qp->bl_mutex));
+  spin_lock_init(&(qp->cln_mutex));
+  // number of outstanding requests on the send Q
+  qp->cur_send_outstanding = 0; 
+  // number of posted receive buffers
+  qp->cur_posted_rcv_bufs  = 0;  
+  qp->snd_rcv_balance      = 0;
+
+  return(VAPI_OK);
+
+}
+
+//
+// initialize a UD qp state to RTR and RTS 
+//
+VAPI_ret_t 
+init_qp_UD(QP_info *qp, int qp_index)
+{
+  VAPI_qp_attr_t      qp_attr;
+  VAPI_qp_init_attr_t qp_init_attr;
+  VAPI_qp_attr_mask_t qp_attr_mask;
+  VAPI_qp_cap_t       qp_cap;
+  VAPI_ret_t       vstat;
+
+  /* Move from RST to INIT */
+  /* Change QP to INIT */
+
+  CDEBUG(D_OTHER, "Changing QP state to INIT qp-index = %d\n", qp_index);
+
+  QP_ATTR_MASK_CLR_ALL(qp_attr_mask);
+
+  qp_attr.qp_state = VAPI_INIT;
+  QP_ATTR_MASK_SET(qp_attr_mask,QP_ATTR_QP_STATE);
+
+  CDEBUG(D_OTHER, "qp_state qp_attr_mask = 0X%x\n", qp_attr_mask);
+
+  qp_attr.pkey_ix  = 0;
+  QP_ATTR_MASK_SET(qp_attr_mask,QP_ATTR_PKEY_IX);
+
+  CDEBUG(D_OTHER, "pkey_ix qp_attr_mask = 0X%x\n", qp_attr_mask);
+
+  qp_attr.port     = qp->port;
+  QP_ATTR_MASK_SET(qp_attr_mask,QP_ATTR_PORT);
+
+  CDEBUG(D_OTHER, "port qp_attr_mask = 0X%x\n", qp_attr_mask);
+
+  qp_attr.qkey = 0;
+  QP_ATTR_MASK_SET(qp_attr_mask,QP_ATTR_QKEY);
+
+  CDEBUG(D_OTHER, "qkey qp_attr_mask = 0X%x\n", qp_attr_mask);
+
+  /* If I do not set this mask, I get an error from HH. QPM should catch it */
+
+  vstat = VAPI_modify_qp(qp->hca_hndl,
+                         qp->qp_hndl,
+                         &qp_attr,
+                         &qp_attr_mask,
+                         &qp_cap);
+
+  if (vstat != VAPI_OK) {
+     CERROR("Failed modifying QP from RST to INIT. %s\n",VAPI_strerror(vstat));
+     return(vstat);
+  }
+
+  CDEBUG(D_OTHER, "Modifying QP from RST to INIT.\n");
+
+  vstat= VAPI_query_qp(qp->hca_hndl,
+                       qp->qp_hndl,
+                       &qp_attr,
+                       &qp_attr_mask,
+                       &qp_init_attr);
+
+  if (vstat != VAPI_OK) {
+     CERROR("Failed query QP. %s\n",VAPI_strerror(vstat));
+     return(vstat);
+  }
+
+  /* Move from INIT to RTR */
+  /* Change QP to RTR */
+  CDEBUG(D_OTHER, "Changing QP state to RTR\n");
+
+  QP_ATTR_MASK_CLR_ALL(qp_attr_mask);
+
+  qp_attr.qp_state         = VAPI_RTR;  
+  QP_ATTR_MASK_SET(qp_attr_mask,QP_ATTR_QP_STATE);
+
+  CDEBUG(D_OTHER, "INIT to RTR- qp_state : qp_attr_mask = 0X%x\n", qp_attr_mask);
+
+  vstat = VAPI_modify_qp(qp->hca_hndl,
+                         qp->qp_hndl,
+                         &qp_attr,
+                         &qp_attr_mask,
+                         &qp_cap);
+
+  if (vstat != VAPI_OK) {
+     CERROR("Failed modifying QP from INIT to RTR. %s\n",VAPI_strerror(vstat));
+     return(vstat);
+  }
+  
+  CDEBUG(D_OTHER, "Modifying QP from INIT to RTR.\n");
+  
+  vstat= VAPI_query_qp(qp->hca_hndl,
+                       qp->qp_hndl,
+                       &qp_attr,
+                       &qp_attr_mask,
+                       &qp_init_attr);
+
+  if (vstat != VAPI_OK) {
+     CERROR("Failed query QP. %s\n",VAPI_strerror(vstat));
+     return(vstat);
+  }
+                                      
+  /* RTR to RTS - Change QP to RTS */
+  CDEBUG(D_OTHER, "Changing QP state to RTS\n");
+
+  QP_ATTR_MASK_CLR_ALL(qp_attr_mask);
+
+  qp_attr.qp_state        = VAPI_RTS;   
+  QP_ATTR_MASK_SET(qp_attr_mask,QP_ATTR_QP_STATE);
+  
+  qp_attr.sq_psn          = START_SQ_PSN;          
+  QP_ATTR_MASK_SET(qp_attr_mask,QP_ATTR_SQ_PSN);
+  
+  vstat = VAPI_modify_qp(qp->hca_hndl,
+                         qp->qp_hndl,
+                         &qp_attr,
+                         &qp_attr_mask,
+                         &qp_cap);
+
+  if (vstat != VAPI_OK) {
+     CERROR("Failed modifying QP from RTR to RTS. %s:%s\n",
+                          VAPI_strerror_sym(vstat), 
+                          VAPI_strerror(vstat));
+     return(vstat);
+  }
+
+  CDEBUG(D_OTHER, "Modifying QP from RTR to RTS. \n");
+                     
+  vstat= VAPI_query_qp(qp->hca_hndl,
+                       qp->qp_hndl,
+                       &qp_attr,
+                       &qp_attr_mask,
+                       &qp_init_attr);
+
+  if (vstat != VAPI_OK) {
+     CERROR("Failed query QP. %s\n",VAPI_strerror(vstat));
+     return(vstat);
+  }
+                        
+  //
+  // a QP is at RTS state NOW
+  //
+  CDEBUG(D_OTHER, "IBNAL- UD qp is at RTS NOW\n");
+  
+  return(vstat);
+
+}
+
+
+
+//
+// initialize a RC qp state to RTR and RTS 
+// RC transport service 
+//
+VAPI_ret_t 
+init_qp_RC(QP_info *qp, int qp_index)
+{
+  VAPI_qp_attr_t      qp_attr;
+  VAPI_qp_init_attr_t qp_init_attr;
+  VAPI_qp_attr_mask_t qp_attr_mask;
+  VAPI_qp_cap_t       qp_cap;
+  VAPI_ret_t       vstat;
+
+  /* Move from RST to INIT */
+  /* Change QP to INIT */
+  
+  CDEBUG(D_OTHER, "Changing QP state to INIT qp-index = %d\n", qp_index);
+
+  QP_ATTR_MASK_CLR_ALL(qp_attr_mask);
+
+  qp_attr.qp_state = VAPI_INIT;
+  QP_ATTR_MASK_SET(qp_attr_mask,QP_ATTR_QP_STATE);
+
+   CDEBUG(D_OTHER, "qp_state qp_attr_mask = 0X%x\n", qp_attr_mask);
+
+  qp_attr.pkey_ix  = 0;
+  QP_ATTR_MASK_SET(qp_attr_mask,QP_ATTR_PKEY_IX);
+
+  CDEBUG(D_OTHER, "pkey_ix qp_attr_mask = 0X%x\n", qp_attr_mask);
+
+  qp_attr.port     = qp->port;
+  QP_ATTR_MASK_SET(qp_attr_mask,QP_ATTR_PORT);
+
+  CDEBUG(D_OTHER, "port qp_attr_mask = 0X%x\n", qp_attr_mask);
+
+  qp_attr.remote_atomic_flags = VAPI_EN_REM_WRITE | VAPI_EN_REM_READ;
+  QP_ATTR_MASK_SET(qp_attr_mask,QP_ATTR_REMOTE_ATOMIC_FLAGS);
+
+  CDEBUG(D_OTHER, "remote_atomic_flags qp_attr_mask = 0X%x\n", qp_attr_mask);
+
+  /* If I do not set this mask, I get an error from HH. QPM should catch it */
+
+  vstat = VAPI_modify_qp(qp->hca_hndl,
+                         qp->qp_hndl,
+                         &qp_attr,
+                         &qp_attr_mask,
+                         &qp_cap);
+
+  if (vstat != VAPI_OK) {
+     CERROR("Failed modifying QP from RST to INIT. %s\n",VAPI_strerror(vstat));
+     return(vstat);
+  }
+
+  vstat= VAPI_query_qp(qp->hca_hndl,
+                       qp->qp_hndl,
+                       &qp_attr,
+                       &qp_attr_mask,
+                       &qp_init_attr);
+
+  if (vstat != VAPI_OK) {
+     CERROR("Failed query QP. %s\n",VAPI_strerror(vstat));
+     return(vstat);
+  }
+
+  /* Move from INIT to RTR */
+  /* Change QP to RTR */
+  CDEBUG(D_OTHER, "Changing QP state to RTR qp_index %d\n", qp_index);
+
+  QP_ATTR_MASK_CLR_ALL(qp_attr_mask);
+  qp_attr.qp_state         = VAPI_RTR;  
+
+  QP_ATTR_MASK_SET(qp_attr_mask,QP_ATTR_QP_STATE);
+
+  CDEBUG(D_OTHER, "qp_state qp_attr_mask = 0X%x\n", qp_attr_mask);
+
+  qp_attr.av.sl            = 0;/* RESPONDER_SL */
+  qp_attr.av.grh_flag      = FALSE;
+  qp_attr.av.dlid          = qp->dlid;/*RESPONDER_LID;*/
+  qp_attr.av.static_rate   = 0;
+  qp_attr.av.src_path_bits = 0;              
+  QP_ATTR_MASK_SET(qp_attr_mask,QP_ATTR_AV);
+
+  CDEBUG(D_OTHER, "qp_state qp_attr_mask = 0X%x\n", qp_attr_mask);
+
+  qp_attr.path_mtu         = MTU_2048;// default is MTU_2048             
+  QP_ATTR_MASK_SET(qp_attr_mask,QP_ATTR_PATH_MTU);
+
+  CDEBUG(D_OTHER, "qp_state qp_attr_mask = 0X%x\n", qp_attr_mask);
+
+  qp_attr.rq_psn           = START_RQ_PSN;              
+  QP_ATTR_MASK_SET(qp_attr_mask,QP_ATTR_RQ_PSN);
+
+  CDEBUG(D_OTHER, "qp_state qp_attr_mask = 0X%x\n", qp_attr_mask);
+
+  qp_attr.qp_ous_rd_atom   = NUM_WQE;        
+  QP_ATTR_MASK_SET(qp_attr_mask,QP_ATTR_QP_OUS_RD_ATOM);
+
+  CDEBUG(D_OTHER, "qp_state qp_attr_mask = 0X%x\n", qp_attr_mask);
+
+  qp_attr.pkey_ix          = 0;              
+  QP_ATTR_MASK_SET(qp_attr_mask,QP_ATTR_PKEY_IX);
+
+  CDEBUG(D_OTHER, "qp_state qp_attr_mask = 0X%x\n", qp_attr_mask);
+
+  qp_attr.min_rnr_timer    = 10;              
+  QP_ATTR_MASK_SET(qp_attr_mask,QP_ATTR_MIN_RNR_TIMER);
+
+  CDEBUG(D_OTHER, "qp_state qp_attr_mask = 0X%x\n", qp_attr_mask);
+
+  qp_attr.dest_qp_num = qp->rqp_num;                   
+
+  CDEBUG(D_OTHER, "remote qp num %d\n",  qp->rqp_num);
+
+  QP_ATTR_MASK_SET(qp_attr_mask,QP_ATTR_DEST_QP_NUM);
+
+  CDEBUG(D_OTHER, "qp_state qp_attr_mask = 0X%x\n", qp_attr_mask);
+
+  vstat = VAPI_modify_qp(qp->hca_hndl,
+                         qp->qp_hndl,
+                         &qp_attr,
+                         &qp_attr_mask,
+                         &qp_cap);
+
+
+  if (vstat != VAPI_OK) {
+     CERROR("Failed modifying QP from INIT to RTR. qp_index %d - %s\n",
+                                                qp_index, VAPI_strerror(vstat));
+     return(vstat);
+  }
+  
+  vstat= VAPI_query_qp(qp->hca_hndl,
+                       qp->qp_hndl,
+                       &qp_attr,
+                       &qp_attr_mask,
+                       &qp_init_attr);
+
+  if (vstat != VAPI_OK) {
+     CERROR("Failed query QP. %s\n",VAPI_strerror(vstat));
+     return(vstat);
+  }
+                                      
+  /* RTR to RTS - Change QP to RTS */
+  CDEBUG(D_OTHER, "Changing QP state to RTS\n");
+
+  QP_ATTR_MASK_CLR_ALL(qp_attr_mask);
+
+  qp_attr.qp_state        = VAPI_RTS;   
+  QP_ATTR_MASK_SET(qp_attr_mask,QP_ATTR_QP_STATE);
+
+  qp_attr.sq_psn          = START_SQ_PSN;          
+  QP_ATTR_MASK_SET(qp_attr_mask,QP_ATTR_SQ_PSN);
+
+  qp_attr.timeout         = 0x18;         
+  QP_ATTR_MASK_SET(qp_attr_mask,QP_ATTR_TIMEOUT);
+
+  qp_attr.retry_count     = 10;         
+  QP_ATTR_MASK_SET(qp_attr_mask,QP_ATTR_RETRY_COUNT);
+
+  qp_attr.rnr_retry       = 14;         
+  QP_ATTR_MASK_SET(qp_attr_mask,QP_ATTR_RNR_RETRY);
+
+  qp_attr.ous_dst_rd_atom = 100;        
+  QP_ATTR_MASK_SET(qp_attr_mask,QP_ATTR_OUS_DST_RD_ATOM);
+
+  qp_attr.min_rnr_timer   = 5;          
+  QP_ATTR_MASK_SET(qp_attr_mask,QP_ATTR_MIN_RNR_TIMER);
+
+  vstat = VAPI_modify_qp(qp->hca_hndl,
+                         qp->qp_hndl,
+                         &qp_attr,
+                         &qp_attr_mask,
+                         &qp_cap);
+
+  if (vstat != VAPI_OK) {
+     CERROR("Failed modifying QP from RTR to RTS. %s:%s\n",
+                   VAPI_strerror_sym(vstat), VAPI_strerror(vstat));
+     return(vstat);
+  }
+
+  vstat= VAPI_query_qp(qp->hca_hndl,
+                       qp->qp_hndl,
+                       &qp_attr,
+                       &qp_attr_mask,
+                       &qp_init_attr);
+
+  if (vstat != VAPI_OK) {
+     CERROR("Failed query QP. %s\n",VAPI_strerror(vstat));
+     return(vstat);
+  }
+                        
+  //
+  // a QP is at RTS state NOW
+  //
+   CDEBUG(D_OTHER, "IBNAL- RC qp is at RTS NOW\n");
+  
+  return(vstat);
+}
+
+
+
+VAPI_ret_t 
+IB_Open_HCA(kibnal_data_t *kib_data)
+{
+
+  VAPI_ret_t     vstat;
+  VAPI_cqe_num_t cqe_active_num;
+  QP_info        *qp; 
+  int            i;
+  int            Num_posted_recv_buf;
+
+  /* Open HCA */
+  CDEBUG(D_PORTALS, "Opening an HCA\n");
+
+  vstat = VAPI_open_hca(HCA_ID, &Hca_hndl);
+  vstat = EVAPI_get_hca_hndl(HCA_ID, &Hca_hndl);
+  if (vstat != VAPI_OK) {
+     CERROR("Failed opening the HCA: %s. %s...\n",HCA_ID,VAPI_strerror(vstat));
+     return(vstat);
+  } 
+
+  /* Get HCA CAP */
+  vstat = VAPI_query_hca_cap(Hca_hndl, &Hca_vendor, &Hca_cap);
+  if (vstat != VAPI_OK) {
+     CERROR("Failed query hca cap %s\n",VAPI_strerror(vstat));
+     return(vstat);
+  }
+
+  /* Get port 1 info */
+  vstat = VAPI_query_hca_port_prop(Hca_hndl, HCA_PORT_1 , &Hca_port_1_props);
+  if (vstat != VAPI_OK) {
+     CERROR("Failed query port cap %s\n",VAPI_strerror(vstat));
+     return(vstat);
+  }      
+
+  /* Get port 2 info */
+  vstat = VAPI_query_hca_port_prop(Hca_hndl, HCA_PORT_2, &Hca_port_2_props);
+  if (vstat != VAPI_OK) {
+     CERROR("Failed query port cap %s\n",VAPI_strerror(vstat));
+     return(vstat);
+  }      
+
+  // Get a PD 
+  CDEBUG(D_PORTALS, "Allocating PD \n");
+  vstat = VAPI_alloc_pd(Hca_hndl,&Pd_hndl);
+  if (vstat != VAPI_OK) {
+     CERROR("Failed allocating a PD. %s\n",VAPI_strerror(vstat));
+     return(vstat);
+  }
+
+  vstat = createMemRegion(Hca_hndl, Pd_hndl);
+  if (vstat != VAPI_OK) {
+     CERROR("Failed registering a memory region.%s\n",VAPI_strerror(vstat));
+     return(vstat);
+  }
+
+  /* Create CQ for RQ*/
+  CDEBUG(D_PORTALS, "Creating a send completion queue\n");
+
+  vstat = VAPI_create_cq(Hca_hndl,    
+                         NUM_CQE,    
+                         &Cq_hndl, 
+                         &cqe_active_num);
+
+  if (vstat != VAPI_OK) {
+     CERROR("Failed creating a CQ. %s\n",VAPI_strerror(vstat));
+     return(vstat);
+  }
+
+  if(NUM_CQE == cqe_active_num) {
+    CERROR("VAPI_create_cq: NUM_CQE EQ cqe_active_num \n");
+  }
+  else {
+    CDEBUG(D_NET, "VAPI_create_cq: NUM_CQE %d , actual cqe_active_num %d \n",
+                   NUM_CQE, cqe_active_num);
+  }
+
+  Cq_SQ_hndl     = Cq_hndl;
+  Cq_RQ_hndl     = Cq_hndl;
+
+  //
+  // create  QPs 
+  //
+  for(i=0; i < NUM_QPS; i++) {
+      QP_list[i].pd_hndl    = Pd_hndl;
+      QP_list[i].hca_hndl   = Hca_hndl;
+      // sq rq use the same Cq_hndl 
+      QP_list[i].sq_cq_hndl = Cq_hndl; 
+      QP_list[i].rq_cq_hndl = Cq_hndl;
+      vstat = create_qp(&QP_list[i], i);
+      if (vstat != VAPI_OK) {
+         CERROR("Failed creating a QP %d %s\n",i, VAPI_strerror(vstat));
+         return(vstat);
+      }
+  }      
+
+  //
+  // record HCA data 
+  //
+
+  Hca_data.hca_hndl     = Hca_hndl;      // HCA handle
+  Hca_data.pd_hndl      = Pd_hndl;       // protection domain
+  Hca_data.port         = 1;             // port number
+  Hca_data.num_qp       = NUM_QPS;        // number of qp used
+
+  for(i=0; i < NUM_QPS; i++) {
+    Hca_data.qp_ptr[i]    = &QP_list[i];   // point to QP_list
+  }
+
+  Hca_data.num_cq       = NUM_CQ;        // number of cq used
+  Hca_data.cq_hndl      = Cq_hndl;       // 
+  Hca_data.sq_cq_hndl   = Cq_SQ_hndl;    // 
+  Hca_data.rq_cq_hndl   = Cq_RQ_hndl;    // 
+  Hca_data.kib_data     = kib_data;       //
+  Hca_data.slid         = QP_list[0].slid;//
+
+  // prepare L_QP_data
+
+#ifdef USE_SHARED_MEMORY_AND_SOCKET
+
+  /*
+   *  + use a shared-memory between a user thread and a kernel thread 
+   *    for HCA's data exchange on the same node  
+   *  + use a socket in user mode to exchange HCA's data with a remote node 
+   */
+
+  
+  R_QP_data.opcode  = SEND_QP_INFO;
+  R_QP_data.length  = sizeof(L_QP_data);
+
+  for(i=0; i < NUM_QPS; i++) {
+    // my slid  will be used in a remote node as dlid 
+    R_QP_data.dlid[i]    = QP_list[i].slid;
+    // my qp_num will be used in the remote node as remote_qp_number 
+    // RC is used here so we need dlid and rqp_num  
+    R_QP_data.rqp_num[i] = QP_list[i].qp_num ;
+  }
+
+  // create a kernel thread for exchanging HCA's data 
+  // R_QP_data will be exchanged with a remote node
+
+  kernel_thread(k_server_thread, &R_QP_data, 0); // 
+  // check if the HCA's data has been updated by kernel_thread 
+  // loop until the HCA's data is updated 
+  // make sure that uagent is running 
+  
+  // QP info is exchanged with a remote node   
+  while (1) {
+    schedule_timeout(1000);
+    if(R_QP_data.opcode ==  RECV_QP_INFO) {
+       CDEBUG(D_NET, "HCA's data is being updated\n");
+       break;
+   }
+  }
+#endif
+
+#ifdef USE_SHARED_MEMORY_AND_MULTICAST
+
+  /*
+   *  + use a shared-memory between a user thread and a kernel thread 
+   *    for HCA's data exchange on the same node  
+   *  + use InfiniBand UR/multicast in user mode to exchange HCA's data
+   *    with a remote node 
+   */
+
+  // use CM, openSM 
+  
+#endif
+
+  // 
+  for(i=0; i < NUM_QPS; i++) {
+     qp = (QP_info *) &QP_list[i];
+     QP_list[i].rqp_num = R_QP_data.rqp_num[i]; // remote qp number 
+     QP_list[i].dlid    = R_QP_data.dlid[i];    // remote dlid 
+  }
+
+  // already have remote_qp_num and dlid information
+  // initialize QP to RTR/RTS state 
+  //
+  for(i=0; i < NUM_QPS; i++) {
+    vstat = init_qp_RC(&QP_list[i], i);
+    if (vstat != VAPI_OK) {
+       CERROR("Failed change a QP %d to RTS state%s\n",
+                    i,VAPI_strerror(vstat));
+       return(vstat);
+    }
+  }
+
+  // post receiving buffer before any send happened 
+  
+  Num_posted_recv_buf = post_recv_bufs( (VAPI_wr_id_t ) START_RECV_WRQ_ID); 
+
+  // for irregular completion event or some unexpected failure event 
+  vstat = IB_Set_Async_Event_Handler(Hca_data, &kibnal_data);
+  if (vstat != VAPI_OK) {
+     CERROR("IB_Set_Async_Event_Handler failed: %d\n", vstat);
+     return vstat;
+  }
+
+
+  CDEBUG(D_PORTALS, "IBNAL- done with IB_Open_HCA\n");
+
+  for(i=0;  i < NUM_MBUF; i++) {
+    spin_lock_init(&MSB_mutex[i]);
+  }
+
+  return(VAPI_OK);
+
+}
+
+
+/* 
+  Function:  IB_Set_Event_Handler()
+             
+             IN   Hca_info hca_data
+             IN   kibnal_data_t *kib_data  -- private data      
+             OUT  NONE
+
+        return: VAPI_OK - success
+                else    - fail 
+
+*/
+
+VAPI_ret_t 
+IB_Set_Event_Handler(HCA_info hca_data, kibnal_data_t *kib_data)
+{
+  VAPI_ret_t vstat;
+  EVAPI_compl_handler_hndl_t   comp_handler_hndl;
+
+  // register CQE_event_handler 
+  // VAPI function 
+  vstat = VAPI_set_comp_event_handler(hca_data.hca_hndl,
+                                      CQE_event_handler,
+                                      &hca_data);
+
+  /*
+  or use extended VAPI function 
+  vstat = EVAPI_set_comp_eventh(hca_data.hca_hndl,
+                                hca_data.cq_hndl,
+                                CQE_event_handler,
+                                &hca_data,
+                                &comp_handler_hndl
+                                );
+  */
+                                    
+  if (vstat != VAPI_OK) {
+      CERROR("IB_Set_Event_Handler: failed EVAPI_set_comp_eventh for"
+             " HCA ID = %s (%s).\n", HCA_ID, VAPI_strerror(vstat));
+      return vstat;
+  }
+
+  // issue a request for completion event notification 
+  vstat = VAPI_req_comp_notif(hca_data.hca_hndl, 
+                              hca_data.cq_hndl,
+                              VAPI_NEXT_COMP); 
+
+  if (vstat != VAPI_OK) {
+      CERROR("IB_Set_Event_Handler: failed VAPI_req_comp_notif for HCA ID"
+             " = %s (%s).\n", HCA_ID, VAPI_strerror(vstat));
+  }
+
+  return vstat;
+}
+
+
+
+/* 
+  Function:  IB_Set_Async_Event_Handler()
+             
+             IN   HCA_info hca_data
+             IN   kibnal_data_t *kib_data -- private data      
+             OUT  NONE
+
+        return: VAPI_OK - success
+                else    - fail 
+
+*/
+
+
+VAPI_ret_t 
+IB_Set_Async_Event_Handler(HCA_info hca_data, kibnal_data_t *kib_data)
+{
+  VAPI_ret_t    vstat;
+
+  //
+  // register an asynchronous event handler for this HCA 
+  //
+
+  vstat= VAPI_set_async_event_handler(hca_data.hca_hndl,
+                                      async_event_handler, 
+                                      kib_data);
+
+  if (vstat != VAPI_OK) {
+      CERROR("IB_Set_Async_Event_Handler: failed VAPI_set_async_comp_event_handler"
+             " for HCA ID = %s (%s).\n", HCA_ID, VAPI_strerror(vstat));
+  }
+
+  return vstat;
+}
+
+//
+// IB_Close_HCA
+// close this Infiniband HCA interface 
+// release allocated resources to system 
+//
+VAPI_ret_t 
+IB_Close_HCA(void )
+{
+        
+  VAPI_ret_t  vstat;
+  int         ok = 1;
+  int         i;
+            
+  /* Destroy QP */
+  CDEBUG(D_PORTALS, "Destroying QP\n");
+
+  for(i=0; i < NUM_QPS; i++) {
+     vstat = VAPI_destroy_qp(QP_list[i].hca_hndl, QP_list[i].qp_hndl);
+     if (vstat != VAPI_OK) {
+        CERROR("Failed destroying QP %d. %s\n", i, VAPI_strerror(vstat));
+        ok = 0;
+     }
+  }
+
+  if (ok) {
+     /* Destroy CQ */
+     CDEBUG(D_PORTALS, "Destroying CQ\n");
+     for(i=0; i < NUM_QPS; i++) {
+        // send_cq and receive_cq share the same CQ,
+        // so only destroy one of them 
+        vstat = VAPI_destroy_cq(QP_list[i].hca_hndl, QP_list[i].sq_cq_hndl);
+        if (vstat != VAPI_OK) {
+           CERROR("Failed destroying CQ %d. %s\n", i, VAPI_strerror(vstat));
+           ok = 0;
+        }
+     }
+  }
+
+  if (ok) {
+     /* Destroy Memory Region */
+     CDEBUG(D_PORTALS, "Deregistering MR\n");
+     for(i=0; i < NUM_QPS; i++) {
+        vstat = deleteMemRegion(&QP_list[i], i);
+        if (vstat != VAPI_OK) {
+           CERROR("Failed deregister mem reg %d. %s\n",i, VAPI_strerror(vstat));
+           ok = 0;
+           break;
+        }
+     }
+  }
+
+  if (ok) {
+     // finally 
+     /* Close HCA */
+     CDEBUG(D_PORTALS, "Closing HCA\n");
+     vstat = VAPI_close_hca(Hca_hndl);
+     if (vstat != VAPI_OK) {
+        CERROR("Failed to close HCA. %s\n", VAPI_strerror(vstat));
+        ok = 0;
+     }
+  }
+
+  CDEBUG(D_PORTALS, "IBNAL- Done with closing HCA \n");
+  
+  return vstat; 
+}
+
+
+VAPI_ret_t 
+createMemRegion(VAPI_hca_hndl_t hca_hndl, 
+                   VAPI_pd_hndl_t  pd_hndl) 
+{
+  VAPI_ret_t  vstat;
+  VAPI_mrw_t  mrw;
+  VAPI_mrw_t  rep_mr;   
+  VAPI_mr_hndl_t   rep_mr_hndl;
+  int         buf_size;
+  char        *bufptr;
+  int         i;
+
+  // send registered memory region 
+  for(i=0; i < NUM_ENTRY; i++) {
+    MSbuf_list[i].buf_size = KB_32; 
+    PORTAL_ALLOC(bufptr, MSbuf_list[i].buf_size);
+    if(bufptr == NULL) {
+       CDEBUG(D_MALLOC,"Failed to malloc a block of send memory, qix %d size %d\n",
+                                          i, MSbuf_list[i].buf_size);
+       CERROR("Failed to malloc a block of send memory, qix %d size %d\n",
+                                          i, MSbuf_list[i].buf_size);
+       return(VAPI_ENOMEM);
+    }
+
+    mrw.type   = VAPI_MR; 
+    mrw.pd_hndl= pd_hndl;
+    mrw.start  = MSbuf_list[i].buf_addr = (VAPI_virt_addr_t)(MT_virt_addr_t) bufptr;
+    mrw.size   = MSbuf_list[i].buf_size;
+    mrw.acl    = VAPI_EN_LOCAL_WRITE  | 
+                 VAPI_EN_REMOTE_WRITE | 
+                 VAPI_EN_REMOTE_READ;
+
+    // register send memory region  
+    vstat = VAPI_register_mr(hca_hndl, 
+                             &mrw, 
+                             &rep_mr_hndl, 
+                             &rep_mr);
+
+    // this memory region is going to be reused until deregister is called 
+    if(vstat != VAPI_OK) {
+       CERROR("Failed registering a mem region qix %d Addr=%p, Len=%d. %s\n",
+                          i, mrw.start, mrw.size, VAPI_strerror(vstat));
+       return(vstat);
+    }
+
+    MSbuf_list[i].mr        = rep_mr;
+    MSbuf_list[i].mr_hndl   = rep_mr_hndl;
+    MSbuf_list[i].bufptr    = bufptr;
+    MSbuf_list[i].buf_addr  = rep_mr.start;
+    MSbuf_list[i].status    = BUF_REGISTERED;
+    MSbuf_list[i].ref_count = 0;
+    MSbuf_list[i].buf_type  = REG_BUF;
+    MSbuf_list[i].raddr     = 0x0;
+    MSbuf_list[i].rkey      = 0x0;
+  }
+
+  // the remaining entries are RDMA buffers; they are left unregistered here
+  
+  for(i=NUM_ENTRY; i< NUM_MBUF; i++) {
+    MSbuf_list[i].status    = BUF_UNREGISTERED;
+    MSbuf_list[i].buf_type  = RDMA_BUF;
+  }
+
+
+  // recv registered memory region 
+  for(i=0; i < NUM_ENTRY; i++) {
+    MRbuf_list[i].buf_size = KB_32; 
+    PORTAL_ALLOC(bufptr, MRbuf_list[i].buf_size);
+
+    if(bufptr == NULL) {
+       CDEBUG(D_MALLOC, "Failed to malloc a block of recv memory, qix %d size %d\n",
+                      i, MRbuf_list[i].buf_size);
+       return(VAPI_ENOMEM);
+    }
+
+    mrw.type   = VAPI_MR; 
+    mrw.pd_hndl= pd_hndl;
+    mrw.start  = (VAPI_virt_addr_t)(MT_virt_addr_t) bufptr;
+    mrw.size   = MRbuf_list[i].buf_size;
+    mrw.acl    = VAPI_EN_LOCAL_WRITE  | 
+                 VAPI_EN_REMOTE_WRITE | 
+                 VAPI_EN_REMOTE_READ;
+
+    // register recv memory region  
+    vstat = VAPI_register_mr(hca_hndl, 
+                             &mrw, 
+                             &rep_mr_hndl, 
+                             &rep_mr);
+
+    // this memory region is going to be reused until deregister is called 
+    if(vstat != VAPI_OK) {
+       CERROR("Failed registering a mem region qix %d Addr=%p, Len=%d. %s\n",
+                          i, mrw.start, mrw.size, VAPI_strerror(vstat));
+       return(vstat);
+    }
+
+    MRbuf_list[i].mr        = rep_mr;
+    MRbuf_list[i].mr_hndl   = rep_mr_hndl;
+    MRbuf_list[i].bufptr    = bufptr;
+    MRbuf_list[i].buf_addr  = rep_mr.start;
+    MRbuf_list[i].status    = BUF_REGISTERED;
+    MRbuf_list[i].ref_count = 0;
+    MRbuf_list[i].buf_type  = REG_BUF;
+    MRbuf_list[i].raddr     = 0x0;
+    MRbuf_list[i].rkey      = rep_mr.r_key;
+    MRbuf_list[i].lkey      = rep_mr.l_key;
+  
+  }
+  // keep extra information for a qp 
+  for(i=0; i < NUM_QPS; i++) {
+    QP_list[i].mr_hndl    = MSbuf_list[i].mr_hndl; 
+    QP_list[i].mr         = MSbuf_list[i].mr;
+    QP_list[i].bufptr     = MSbuf_list[i].bufptr;
+    QP_list[i].buf_addr   = MSbuf_list[i].buf_addr;
+    QP_list[i].buf_size   = MSbuf_list[i].buf_size;
+    QP_list[i].raddr      = MSbuf_list[i].raddr;
+    QP_list[i].rkey       = MSbuf_list[i].rkey;
+    QP_list[i].lkey       = MSbuf_list[i].lkey;
+  }
+
+  CDEBUG(D_PORTALS, "IBNAL- done VAPI_ret_t createMemRegion \n");
+
+  return vstat;
+
+} /* createMemRegion */
+
+
+
+VAPI_ret_t  
+deleteMemRegion(QP_info *qp, int qix)
+{
+  VAPI_ret_t  vstat;
+
+  //
+  // free send memory associated with this memory region  
+  //
+  PORTAL_FREE(MSbuf_list[qix].bufptr, MSbuf_list[qix].buf_size);
+
+  // de-register it 
+  vstat =  VAPI_deregister_mr(qp->hca_hndl, MSbuf_list[qix].mr_hndl);
+
+  if(vstat != VAPI_OK) {
+     CERROR("Failed deregistering a send mem region qix %d %s\n",
+                         qix, VAPI_strerror(vstat));
+     return vstat;
+  }
+
+  //
+  // free recv memory associated with this memory region  
+  //
+  PORTAL_FREE(MRbuf_list[qix].bufptr, MRbuf_list[qix].buf_size);
+
+  // de-register it 
+  vstat =  VAPI_deregister_mr(qp->hca_hndl, MRbuf_list[qix].mr_hndl);
+
+  if(vstat != VAPI_OK) {
+     CERROR("Failed deregistering a recv mem region qix %d %s\n",
+                         qix, VAPI_strerror(vstat));
+     return vstat;
+  }
+
+  return vstat;
+}
+
+
+//
+// polling based event handling 
+// + a daemon process
+// + poll the CQ and check what is in the CQ 
+// + process incoming CQ event
+// + 
+//
+
+
+RDMA_Info_Exchange   Rdma_info;
+int                  Cts_Message_arrived = NO;
+
+void k_recv_thread(HCA_info *hca_data)
+{
+ VAPI_ret_t       vstat; 
+ VAPI_wc_desc_t   comp_desc;   
+ unsigned long    polling_count = 0;
+ u_int32_t        timeout_usec;
+ unsigned int     priority = 100;
+ unsigned int     length;
+ VAPI_wr_id_t     wrq_id;
+ u_int32_t        transferred_data_length; /* Num. of bytes transferred */
+ void             *bufdata;
+ VAPI_virt_addr_t bufaddr;
+ unsigned long    buf_size = 0;
+ QP_info          *qp;       // point to QP_list
+
+ kportal_daemonize("k_recv_thread"); // make it as a daemon process 
+
+ // tuning variable 
+ timeout_usec = 100; // how is the impact on the performance
+
+ // send Q and receive Q are using the same CQ 
+ // so only poll one CQ for both operations 
+ CDEBUG(D_NET, "IBNAL- enter kibnal_recv_thread\n");
+ CDEBUG(D_NET, "hca_hndl = 0X%x, cq_hndl=0X%x\n", 
+                         hca_data->hca_hndl,hca_data->cq_hndl); 
+
+ qp = hca_data->qp_ptr;
+ if(qp == NULL) {
+   CDEBUG(D_NET, "in recv_thread qp is NULL\n");
+   CDEBUG(D_NET, "Exit from  recv_thread qp is NULL\n");
+   return; 
+ }
+ else {
+   CDEBUG(D_NET, "in recv_thread qp is 0X%X\n", qp);
+ }
+
+ CDEBUG(D_NET, "kibnal_recv_thread - enter event driver polling loop\n");
+
+ //
+ // use event driver 
+ //
+
+
+ while(1) {
+    polling_count++;
+
+    //
+    // send Q and receive Q are using the same CQ 
+    // so only poll one CQ for both operations 
+    //
+
+    vstat = VAPI_poll_cq(hca_data->hca_hndl,hca_data->cq_hndl, &comp_desc);                      
+
+    if (vstat == VAPI_CQ_EMPTY) { 
+      // there is no event in CQE 
+      continue;
+    } 
+    else {
+      if (vstat != (VAPI_OK)) {
+        CERROR("error while polling completion queue, vstat %d \n", vstat);
+        return; 
+      }
+    }
+
+    // process the complete event 
+    switch(comp_desc.opcode) {
+      case   VAPI_CQE_SQ_SEND_DATA:
+        // about the Send Q ,POST SEND completion 
+        // who needs this information
+        // get wrq_id
+        // mark MSbuf_list[wr_id].status = BUF_REGISTERED 
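+        // (work request ids at or above RDMA_CTS_ID identify RTS/CTS
+        //  handshake messages or RDMA operations; they are rebased below
+        //  before indexing the buffer tables; see the RDMA_*_ID values
+        //  in ibnal.h)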
+               
+        wrq_id = comp_desc.id;
+
+        if(RDMA_OP_ID < wrq_id) {
+          // this RDMA message id, adjust it to the right entry       
+          wrq_id = wrq_id - RDMA_OP_ID;
+          vstat = VAPI_deregister_mr(qp->hca_hndl, Local_rdma_info.send_rdma_mr_hndl);
+        }
+        
+        if(vstat != VAPI_OK) {
+            CERROR("VAPI_CQE_SQ_SEND_DATA: Failed deregistering a RDMA recv"
+                   " mem region %s\n", VAPI_strerror(vstat));
+        }
+
+        if((RDMA_CTS_ID <= wrq_id) && (RDMA_OP_ID < wrq_id)) {
+          // RTS or CTS send complete, release send buffer 
+          if(wrq_id >= RDMA_RTS_ID)
+            wrq_id = wrq_id - RDMA_RTS_ID;
+          else 
+            wrq_id = wrq_id - RDMA_CTS_ID;
+        }
+
+        spin_lock(&MSB_mutex[(int) wrq_id]);
+        MRbuf_list[wrq_id].status = BUF_REGISTERED; 
+        spin_unlock(&MSB_mutex[(int) wrq_id]);
+
+        CDEBUG(D_NET, "CQE opcode-VAPI_CQE_SQ_SEND_DATA\n");  
+        break;
+
+      case   VAPI_CQE_SQ_RDMA_WRITE:
+        // about the Send Q,  RDMA write completion 
+        // who needs this information
+        // data is successfully written from source to destination 
+             
+        //  get wr_id
+        //  mark MSbuf_list[wr_id].status = BUF_REGISTERED 
+        //  de-register  rdma buffer 
+        //
+             
+        CDEBUG(D_NET, "CQE opcode-VAPI_CQE_SQ_RDMA_WRITE\n");  
+        break;
+
+      case   VAPI_CQE_SQ_RDMA_READ:
+        // about the Send Q
+        // RDMA read completion 
+        // who needs this information
+        // data is successfully read from destination to source 
+        CDEBUG(D_NET, "CQE opcode- VAPI_CQE_SQ_RDMA_READ\n");  
+        break;
+
+      case   VAPI_CQE_SQ_COMP_SWAP:
+        // about the Send Q
+        // RDMA write completion 
+        // who needs this information
+             
+        CDEBUG(D_NET, "CQE opcode-VAPI_CQE_SQ_COMP_SWAP\n");  
+        break;
+
+      case   VAPI_CQE_SQ_FETCH_ADD:
+        // about the Send Q
+        // RDMA write completion 
+        // who needs this information
+             
+        CDEBUG(D_NET, "CQE opcode-VAPI_CQE_SQ_FETCH_ADD\n");  
+        break;
+
+      case   VAPI_CQE_SQ_BIND_MRW:
+        // about the Send Q
+        // RDMA write completion 
+        // who needs this information
+             
+        CDEBUG(D_NET, "CQE opcode-VAPI_CQE_SQ_BIND_MRW\n");  
+        break;
+
+      case   VAPI_CQE_RQ_SEND_DATA:
+        // about the Receive Q
+        // process the incoming data and
+        // forward it to .....
+        // a completion receive event is arriving at CQ 
+        // issue a receive to get this arriving data out from CQ 
+        // pass the receiving data for further processing 
+        CDEBUG(D_NET, "CQE opcode-VAPI_CQE_RQ_SEND_DATA\n");  
+        wrq_id = comp_desc.id ;
+        transferred_data_length = comp_desc.byte_len;
+             
+        if((wrq_id >= RDMA_CTS_ID) && (wrq_id < RDMA_OP_ID)) {
+          // this is an RTS/CTS message;
+          // process it locally and don't pass it to the portals layer 
+          // adjust wrq_id to get the right entry in MRbuf_list 
+                   
+          if(wrq_id >= RDMA_RTS_ID)
+            wrq_id = wrq_id - RDMA_RTS_ID;
+          else 
+            wrq_id = wrq_id - RDMA_CTS_ID;
+
+          bufaddr = (VAPI_virt_addr_t)(MT_virt_addr_t) MRbuf_list[wrq_id].buf_addr; 
+          MRbuf_list[wrq_id].status = BUF_INUSE; 
+          memcpy(&Rdma_info, &bufaddr, sizeof(RDMA_Info_Exchange));    
+        
+          if(Ready_To_send == Rdma_info.opcode) 
+            // an RTS request message from remote node 
+            // prepare local RDMA buffer and send local rdma info to
+            // remote node 
+            CTS_handshaking_protocol(&Rdma_info);
+          else 
+            if((Clear_To_send == Rdma_info.opcode) && 
+                              (RDMA_BUFFER_RESERVED == Rdma_info.flag))
+               Cts_Message_arrived = YES;
+            else 
+              if(RDMA_BUFFER_UNAVAILABLE == Rdma_info.flag) 
+                  CERROR("RDMA operation abort-RDMA_BUFFER_UNAVAILABLE\n");
+        }
+        else {
+          //
+          // this is an incoming message for the portals layer 
+          // move to PORTALS layer for further processing 
+          //
+                     
+          bufaddr = (VAPI_virt_addr_t)(MT_virt_addr_t)
+                                       MRbuf_list[wrq_id].buf_addr; 
+
+          MRbuf_list[wrq_id].status = BUF_INUSE; 
+          transferred_data_length = comp_desc.byte_len;
+
+          kibnal_rx(hca_data->kib_data, 
+                    bufaddr, 
+                    transferred_data_length, 
+                    MRbuf_list[wrq_id].buf_size, 
+                    priority); 
+        }
+
+        // repost this receiving buffer and mark it as BUF_REGISTERED 
+
+        vstat = repost_recv_buf(qp, wrq_id);
+        if(vstat != (VAPI_OK)) {
+          CERROR("error while polling completion queue\n");
+        }
+        else {
+          MRbuf_list[wrq_id].status = BUF_REGISTERED; 
+        }
+
+        break;
+
+      case   VAPI_CQE_RQ_RDMA_WITH_IMM:
+        // about the Receive Q
+        CDEBUG(D_NET, "CQE opcode-VAPI_CQE_RQ_RDMA_WITH_IMM\n");  
+
+        wrq_id = comp_desc.id ;
+        transferred_data_length = comp_desc.byte_len;
+             
+        if(wrq_id ==  RDMA_OP_ID) {
+          // this is an RDMA op; locate the RDMA memory buffer address   
+               
+          bufaddr = (VAPI_virt_addr_t)(MT_virt_addr_t) Local_rdma_info.raddr;
+
+          transferred_data_length = comp_desc.byte_len;
+
+          kibnal_rx(hca_data->kib_data, 
+                    bufaddr, 
+                    transferred_data_length, 
+                    Local_rdma_info.buf_length, 
+                    priority); 
+
+          // de-register this RDMA receiving memory buffer
+          // too early ??    test & check 
+          vstat = VAPI_deregister_mr(qp->hca_hndl, Local_rdma_info.recv_rdma_mr_hndl);
+          if(vstat != VAPI_OK) {
+            CERROR("VAPI_CQE_RQ_RDMA_WITH_IMM: Failed deregistering a RDMA"
+                   " recv  mem region %s\n", VAPI_strerror(vstat));
+          }
+        }
+
+        CDEBUG(D_NET, "CQE opcode-VAPI_CQE_RQ_RDMA_WITH_IMM\n");  
+        break;
+
+      case   VAPI_CQE_INVAL_OPCODE:
+        //
+        CDEBUG(D_NET, "CQE opcode-VAPI_CQE_INVAL_OPCODE\n");  
+        break;
+
+      default :
+        CDEBUG(D_NET, "CQE opcode-unknown opcode\n");  
+             break;
+    } // switch 
+    
+    schedule_timeout(RECEIVING_THREAD_TIMEOUT);//how often do we need to poll CQ 
+
+  }// receiving while loop
+
+
+}
+
+
+void CQE_event_handler(VAPI_hca_hndl_t hca_hndl, 
+                       VAPI_cq_hndl_t  cq_hndl, 
+                       void           *private)
+{
+ VAPI_ret_t       vstat; 
+ VAPI_wc_desc_t   comp_desc;   
+ unsigned long    polling_count = 0;
+ u_int32_t        timeout_usec;
+ unsigned int     priority = 100;
+ unsigned int     length;
+ VAPI_wr_id_t     wrq_id;
+ u_int32_t        transferred_data_length; /* Num. of bytes transferred */
+ void             *bufdata;
+ VAPI_virt_addr_t bufaddr;
+ unsigned long    buf_size = 0;
+ QP_info          *qp;       // point to QP_list
+ HCA_info         *hca_data;
+
+ // send Q and receive Q are using the same CQ 
+ // so only poll one CQ for both operations 
+ CDEBUG(D_NET, "IBNAL- enter CQE_event_handler\n");
+ printk("IBNAL- enter CQE_event_handler\n");
+
+ hca_data  = (HCA_info *) private; 
+
+ //
+ // use event driven  
+ //
+
+ vstat = VAPI_poll_cq(hca_data->hca_hndl,hca_data->cq_hndl, &comp_desc);   
+
+ if (vstat == VAPI_CQ_EMPTY) { 
+   CDEBUG(D_NET, "CQE_event_handler: there is no event in CQE, how could"
+                  " this happen?\n");
+   printk("CQE_event_handler: there is no event in CQE, how could"
+                  " this happen?\n");
+
+ } 
+ else {
+   if (vstat != (VAPI_OK)) {
+     CDEBUG(D_NET, "error while polling completion queue vstat %d - %s\n", 
+                vstat, VAPI_strerror(vstat));
+     printk("error while polling completion queue vstat %d - %s\n", 
+                                               vstat, VAPI_strerror(vstat));
+     return; 
+   }
+ }
+
+ // process the complete event 
+ switch(comp_desc.opcode) {
+    case   VAPI_CQE_SQ_SEND_DATA:
+      // about the Send Q ,POST SEND completion 
+      // who needs this information
+      // get wrq_id
+      // mark MSbuf_list[wr_id].status = BUF_REGISTERED 
+               
+      wrq_id = comp_desc.id;
+
+#ifdef IBNAL_SELF_TESTING
+      if(wrq_id == SEND_RECV_TEST_ID) {
+        printk("IBNAL_SELF_TESTING - VAPI_CQE_SQ_SEND_DATA \n"); 
+      }
+#else  
+      if(RDMA_OP_ID < wrq_id) {
+        // this RDMA message id, adjust it to the right entry       
+        wrq_id = wrq_id - RDMA_OP_ID;
+        vstat = VAPI_deregister_mr(qp->hca_hndl, 
+                                   Local_rdma_info.send_rdma_mr_hndl);
+      }
+
+      if(vstat != VAPI_OK) {
+        CERROR(" VAPI_CQE_SQ_SEND_DATA: Failed deregistering a RDMA"
+               " recv  mem region %s\n", VAPI_strerror(vstat));
+      }
+
+      if((RDMA_CTS_ID <= wrq_id) && (RDMA_OP_ID < wrq_id)) {
+        // RTS or CTS send complete, release send buffer 
+        if(wrq_id >= RDMA_RTS_ID)
+          wrq_id = wrq_id - RDMA_RTS_ID;
+        else 
+          wrq_id = wrq_id - RDMA_CTS_ID;
+      }
+
+      spin_lock(&MSB_mutex[(int) wrq_id]);
+      MRbuf_list[wrq_id].status = BUF_REGISTERED; 
+      spin_unlock(&MSB_mutex[(int) wrq_id]);
+#endif 
+
+      CDEBUG(D_NET, "CQE opcode-VAPI_CQE_SQ_SEND_DATA\n");  
+
+      break;
+
+    case   VAPI_CQE_SQ_RDMA_WRITE:
+      // about the Send Q,  RDMA write completion 
+      // who needs this information
+      // data is successfully written from source to destination 
+             
+      //  get wr_id
+      //  mark MSbuf_list[wr_id].status = BUF_REGISTERED 
+      //  de-register  rdma buffer 
+      //
+             
+       CDEBUG(D_NET, "CQE opcode-VAPI_CQE_SQ_RDMA_WRITE\n");  
+       break;
+
+      case   VAPI_CQE_SQ_RDMA_READ:
+        // about the Send Q
+        // RDMA read completion 
+        // who needs this information
+        // data is successfully read from destination to source 
+         CDEBUG(D_NET, "CQE opcode- VAPI_CQE_SQ_RDMA_READ\n");  
+         break;
+
+      case   VAPI_CQE_SQ_COMP_SWAP:
+        // about the Send Q
+        // RDMA write completion 
+        // who needs this information
+            
+        CDEBUG(D_NET, "CQE opcode-VAPI_CQE_SQ_COMP_SWAP\n");  
+        break;
+
+      case   VAPI_CQE_SQ_FETCH_ADD:
+        // about the Send Q
+        // RDMA write completion 
+        // who needs this information
+             
+        CDEBUG(D_NET, "CQE opcode-VAPI_CQE_SQ_FETCH_ADD\n");  
+        break;
+
+      case   VAPI_CQE_SQ_BIND_MRW:
+        // about the Send Q
+        // RDMA write completion 
+        // who needs this information
+             
+        CDEBUG(D_NET, "CQE opcode-VAPI_CQE_SQ_BIND_MRW\n");  
+        break;
+
+      case   VAPI_CQE_RQ_SEND_DATA:
+        // about the Receive Q
+        // process the incoming data and
+        // forward it to .....
+        // a completion receive event is arriving at CQ 
+        // issue a receive to get this arriving data out from CQ 
+        // pass the receiving data for further processing 
+         
+         CDEBUG(D_NET, "CQE opcode-VAPI_CQE_RQ_SEND_DATA\n");  
+          
+         wrq_id = comp_desc.id ;
+
+#ifdef IBNAL_SELF_TESTING
+
+      char        rbuf[KB_32];
+      int i;
+
+      if(wrq_id == SEND_RECV_TEST_ID) {
+        printk("IBNAL_SELF_TESTING - VAPI_CQE_RQ_SEND_DATA\n"); 
+      }
+
+      bufaddr = (VAPI_virt_addr_t)(MT_virt_addr_t) 
+                       MRbuf_list[ SEND_RECV_TEST_BUF_ID].buf_addr; 
+      MRbuf_list[SEND_RECV_TEST_BUF_ID].status = BUF_INUSE; 
+      memcpy(&rbuf, &bufaddr, KB_32);    
+      
+
+      for(i=0; i < 16; i++)
+              printk("rbuf[%d]=%c, ", rbuf[i]);
+      printk("\n");
+
+      // repost this receiving buffer and mark it as BUF_REGISTERED 
+      vstat = repost_recv_buf(qp,SEND_RECV_TEST_BUF_ID);
+      if(vstat != (VAPI_OK)) {
+        printk("error while polling completion queue\n");
+      }
+      else {
+        MRbuf_list[SEND_RECV_TEST_BUF_ID].status = BUF_REGISTERED; 
+      }
+#else  
+         transferred_data_length = comp_desc.byte_len;
+             
+         if((wrq_id >= RDMA_CTS_ID) && (wrq_id < RDMA_OP_ID)) {
+           // this is an RTS/CTS message 
+           // process it locally and don't pass it to the portals layer 
+           // adjust wrq_id to get the right entry in MRbuf_list 
+                   
+           if(wrq_id >= RDMA_RTS_ID)
+             wrq_id = wrq_id - RDMA_RTS_ID;
+           else 
+             wrq_id = wrq_id - RDMA_CTS_ID;
+
+           bufaddr = (VAPI_virt_addr_t)(MT_virt_addr_t) 
+                                           MRbuf_list[wrq_id].buf_addr; 
+           MRbuf_list[wrq_id].status = BUF_INUSE; 
+           memcpy(&Rdma_info, &bufaddr, sizeof(RDMA_Info_Exchange));    
+        
+           if(Ready_To_send == Rdma_info.opcode) 
+             // an RTS request message from remote node 
+             // prepare local RDMA buffer and send local rdma info to
+             // remote node 
+             CTS_handshaking_protocol(&Rdma_info);
+           else 
+             if((Clear_To_send == Rdma_info.opcode) && 
+                                (RDMA_BUFFER_RESERVED == Rdma_info.flag))
+               Cts_Message_arrived = YES;
+             else 
+               if(RDMA_BUFFER_UNAVAILABLE == Rdma_info.flag) 
+                 CERROR("RDMA operation abort-RDMA_BUFFER_UNAVAILABLE\n");
+         }
+         else {
+           //
+           // this is an incoming message for the portals layer 
+           // move to PORTALS layer for further processing 
+           //
+                     
+           bufaddr = (VAPI_virt_addr_t)(MT_virt_addr_t)
+                                MRbuf_list[wrq_id].buf_addr; 
+
+           MRbuf_list[wrq_id].status = BUF_INUSE; 
+           transferred_data_length = comp_desc.byte_len;
+
+           kibnal_rx(hca_data->kib_data, 
+                     bufaddr, 
+                     transferred_data_length, 
+                     MRbuf_list[wrq_id].buf_size, 
+                     priority); 
+         }
+
+         // repost this receiving buffer and mark it as BUF_REGISTERED 
+         vstat = repost_recv_buf(qp, wrq_id);
+         if(vstat != (VAPI_OK)) {
+           CERROR("error while polling completion queue\n");
+         }
+         else {
+           MRbuf_list[wrq_id].status = BUF_REGISTERED; 
+         }
+#endif
+
+         break;
+
+      case   VAPI_CQE_RQ_RDMA_WITH_IMM:
+        // about the Receive Q
+        CDEBUG(D_NET, "CQE opcode-VAPI_CQE_RQ_RDMA_WITH_IMM\n");  
+
+        wrq_id = comp_desc.id ;
+        transferred_data_length = comp_desc.byte_len;
+             
+        if(wrq_id ==  RDMA_OP_ID) {
+          // this is an RDMA op; locate the RDMA memory buffer address   
+              
+          bufaddr = (VAPI_virt_addr_t)(MT_virt_addr_t) Local_rdma_info.raddr;
+
+          transferred_data_length = comp_desc.byte_len;
+
+          kibnal_rx(hca_data->kib_data, 
+                    bufaddr, 
+                    transferred_data_length, 
+                    Local_rdma_info.buf_length, 
+                    priority); 
+
+          // de-register this RDMA receiving memory buffer
+          // too early ??    test & check 
+          vstat = VAPI_deregister_mr(qp->hca_hndl, Local_rdma_info.recv_rdma_mr_hndl);
+          if(vstat != VAPI_OK) {
+            CERROR("VAPI_CQE_RQ_RDMA_WITH_IMM: Failed deregistering a RDMA"
+               " recv  mem region %s\n", VAPI_strerror(vstat));
+          }
+        }
+
+        CDEBUG(D_NET, "CQE opcode-VAPI_CQE_RQ_RDMA_WITH_IMM\n");  
+        break;
+
+      case   VAPI_CQE_INVAL_OPCODE:
+        //
+        CDEBUG(D_NET, "CQE opcode-VAPI_CQE_INVAL_OPCODE\n");  
+        break;
+
+      default :
+        CDEBUG(D_NET, "CQE opcode-unknown opcode\n");  
+
+        break;
+    } // switch 
+    
+  // issue a new request for completion event notification 
+  vstat = VAPI_req_comp_notif(hca_data->hca_hndl, 
+                              hca_data->cq_hndl,
+                              VAPI_NEXT_COMP); 
+
+
+  if(vstat != VAPI_OK) {
+    CERROR("PI_req_comp_notif: Failed %s\n", VAPI_strerror(vstat));
+  }
+
+  return; // end of event handler 
+
+}
+
+
+
+int
+kibnal_cmd(struct portal_ioctl_data * data, void * private)
+{
+  int rc ;
+
+  CDEBUG(D_NET, "kibnal_cmd \n");  
+
+  return YES;
+}
+
+
+
+void ibnal_send_recv_self_testing(int *my_role)
+{
+ VAPI_ret_t           vstat;
+ VAPI_sr_desc_t       sr_desc;
+ VAPI_sg_lst_entry_t  sr_sg;
+ QP_info              *qp;
+ VAPI_wr_id_t         send_id;
+ int                  buf_id;
+ char                 sbuf[KB_32];
+ char                 rbuf[KB_32];
+ int                  i;
+ int                  buf_length = KB_32;
+ VAPI_wc_desc_t       comp_desc;
+ int                  num_send = 1;
+ int                  loop_count = 0;
+
+ // make it as a daemon process 
+ // kportal_daemonize("ibnal_send_recv_self_testing");  
+
+ printk("My role is 0X%X\n", *my_role);
+
+if(*my_role ==  TEST_SEND_MESSAGE)  {
+ printk("Enter ibnal_send_recv_self_testing\n");
+
+ memset(&sbuf, 'a', KB_32);
+ memset(&rbuf, ' ', KB_32);
+ send_id = SEND_RECV_TEST_ID; 
+ buf_id = SEND_RECV_TEST_BUF_ID;
+
+ qp = &QP_list[buf_id];
+
+ sr_desc.opcode    = VAPI_SEND;
+ sr_desc.comp_type = VAPI_SIGNALED;
+ sr_desc.id        =  send_id;
+
+ // scatter and gather info
+ sr_sg.len  = KB_32;
+ sr_sg.lkey = MSbuf_list[buf_id].mr.l_key; // use send MR
+ sr_sg.addr = (VAPI_virt_addr_t)(MT_virt_addr_t) MSbuf_list[buf_id].buf_addr;
+
+ // copy data to register send buffer
+ memcpy(&sr_sg.addr, &sbuf, buf_length);
+
+ sr_desc.sg_lst_p = &sr_sg;
+ sr_desc.sg_lst_len = 1; // only 1 entry is used
+ sr_desc.fence = TRUE;
+ sr_desc.set_se = FALSE;
+
+ /*
+ // call VAPI_post_sr to send out this data
+ vstat = VAPI_post_sr(qp->hca_hndl, qp->qp_hndl, &sr_desc);
+
+ if (vstat != VAPI_OK) {
+   printk("VAPI_post_sr failed (%s).\n",VAPI_strerror(vstat));
+ }
+
+ printk("VAPI_post_sr success.\n");
+ */
+
+ }
+else {
+  printk("I am a receiver and doing nothing here\n"); 
+}
+         
+ printk("ibnal_send_recv_self_testing thread exit \n");
+
+ return;
+
+}
+
+
+//
+// ibnal initialize process  
+//
+// 1.  Bring up Infiniband network interface 
+//     * 
+// 2.  Initialize a PORTALS nal interface 
+// 
+//
+int __init 
+kibnal_initialize(void)
+{
+   int           rc;
+   int           ntok;
+   unsigned long sizemask;
+   unsigned int  nid;
+   VAPI_ret_t    vstat;
+
+
+   portals_debug_set_level(IBNAL_DEBUG_LEVEL_1);
+
+   CDEBUG(D_MALLOC, "start kmem %d\n", atomic_read (&portal_kmemory));
+
+   CDEBUG(D_PORTALS, "kibnal_initialize: Enter kibnal_initialize\n");
+
+   // set api functional pointers 
+   kibnal_api.forward    = kibnal_forward;
+   kibnal_api.shutdown   = kibnal_shutdown;
+   kibnal_api.yield      = kibnal_yield;
+   kibnal_api.validate   = NULL; /* our api validate is a NOOP */
+   kibnal_api.lock       = kibnal_lock;
+   kibnal_api.unlock     = kibnal_unlock;
+   kibnal_api.nal_data   = &kibnal_data; // this is so called private data 
+   kibnal_api.refct      = 1;
+   kibnal_api.timeout    = NULL;
+   kibnal_lib.nal_data   = &kibnal_data;
+  
+   memset(&kibnal_data, 0, sizeof(kibnal_data));
+
+   // initialize kib_list list data structure 
+   INIT_LIST_HEAD(&kibnal_data.kib_list);
+
+   kibnal_data.kib_cb = &kibnal_lib;
+
+   spin_lock_init(&kibnal_data.kib_dispatch_lock);
+
+
+   //  
+   // bring up the IB inter-connect network interface 
+   // setup QP, CQ 
+   //
+   vstat = IB_Open_HCA(&kibnal_data);
+
+   if(vstat != VAPI_OK) {
+     CERROR("kibnal_initialize: IB_Open_HCA failed: %d- %s\n", 
+                                                vstat, VAPI_strerror(vstat));
+
+     printk("kibnal_initialize: IB_Open_HCA failed: %d- %s\n", 
+                                                vstat, VAPI_strerror(vstat));
+     return NO;
+   }
+
+   kibnal_data.kib_nid = (__u64 )Hca_hndl;//convert Hca_hndl to 64-bit format
+   kibnal_data.kib_init = 1;
+
+   CDEBUG(D_NET, " kibnal_data.kib_nid 0x%x%x\n", kibnal_data.kib_nid);
+   printk(" kibnal_data.kib_nid 0x%x%x\n", kibnal_data.kib_nid);
+
+   /* Network interface ready to initialise */
+   // get an entry in the PORTALS table for this IB protocol 
+
+   CDEBUG(D_PORTALS,"Call PtlNIInit to register this Infiniband Interface\n");
+   printk("Call PtlNIInit to register this Infiniband Interface\n");
+
+   rc = PtlNIInit(kibnal_init, 32, 4, 0, &kibnal_ni);
+
+   if(rc != PTL_OK) {
+     CERROR("kibnal_initialize: PtlNIInit failed %d\n", rc);
+     printk("kibnal_initialize: PtlNIInit failed %d\n", rc);
+     kibnal_finalize();
+     return (-ENOMEM);
+   }
+
+   CDEBUG(D_PORTALS,"kibnal_initialize: PtlNIInit DONE\n");
+   printk("kibnal_initialize: PtlNIInit DONE\n");
+
+
+
+#ifdef  POLL_BASED_CQE_HANDLING 
+   // create a receiving thread: main loop
+   // this is the polling-based main loop   
+   kernel_thread(k_recv_thread, &Hca_data, 0);
+#endif
+
+#ifdef EVENT_BASED_CQE_HANDLING
+  // for completion event handling,  this is event based CQE handling 
+  vstat = IB_Set_Event_Handler(Hca_data, &kibnal_data);
+
+  if (vstat != VAPI_OK) {
+     CERROR("IB_Set_Event_Handler failed: %d - %s \n", 
+                                           vstat, VAPI_strerror(vstat));
+     return vstat;
+  }
+
+  CDEBUG(D_PORTALS,"IB_Set_Event_Handler Done \n");
+  printk("IB_Set_Event_Handler Done \n");
+  
+#endif
+
+   PORTAL_SYMBOL_REGISTER(kibnal_ni);
+
+#ifdef IBNAL_SELF_TESTING
+  //
+  // test HCA send recv before normal event handling 
+  //
+  int  my_role;
+  my_role = TEST_SEND_MESSAGE;
+
+  printk("my role is TEST_RECV_MESSAGE\n");
+
+  // kernel_thread(ibnal_send_recv_self_testing, &my_role, 0);
+   
+  ibnal_send_recv_self_testing(&my_role);
+
+#endif 
+
+  return 0;
+
+}
+
+
+
+MODULE_AUTHOR("Hsingbung(HB) Chen <hbchen@lanl.gov>");
+MODULE_DESCRIPTION("Kernel Infiniband NAL v0.1");
+MODULE_LICENSE("GPL");
+
+module_init (kibnal_initialize);
+module_exit (kibnal_finalize);
+
+EXPORT_SYMBOL(kibnal_ni);
+
diff --git a/lnet/klnds/iblnd/ibnal.h b/lnet/klnds/iblnd/ibnal.h
new file mode 100644 (file)
index 0000000..ff5aeb3
--- /dev/null
@@ -0,0 +1,564 @@
+#ifndef _IBNAL_H
+#define _IBNAL_H
+
+#include <asm/system.h>
+#include <asm/uaccess.h>
+#include <asm/segment.h>
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/string.h>
+
+#include <linux/ipc.h>
+#include <linux/shm.h>
+
+#include <linux/stat.h>
+#include <linux/errno.h>
+#include <linux/locks.h>
+#include <linux/unistd.h>
+#include <linux/init.h>
+#include <linux/fs.h>
+#include <linux/file.h>
+#include <linux/list.h>
+#include <linux/in.h>
+#include <unistd.h>
+
+#define DEBUG_SUBSYSTEM S_IBNAL
+
+#include <portals/p30.h>
+#include <portals/lib-p30.h>
+#include <linux/kp30.h>
+
+// Infiniband VAPI/EVAPI header files  
+// Mellanox MT23108 VAPI
+#include <vapi.h>
+#include <vapi_types.h>
+#include <vapi_common.h>
+#include <evapi.h>
+
+// pick a port for this RDMA information exchange between two hosts
+#define HOST_PORT           11211 
+#define QUEUE_SIZE          1024
+#define HCA_PORT_1          1
+#define HCA_PORT_2          2 
+#define DEBUG_SUBSYSTEM S_IBNAL
+
+#define START_SEND_WRQ_ID        0
+#define START_RECV_WRQ_ID        0
+#define START_RDMA_WRQ_ID        0  
+
+#define DEFAULT_PRIORITY         100
+
+#define WAIT_FOT_R_RDMA_TIMEOUT 10000
+#define MAX_NUM_TRY      3000 
+
+#define MAX_NUM_POLL     300 
+#define MAX_LOOP_COUNT   500
+
+#define MAX_GID          32 
+#define MCG_BUF_LENGTH   128
+
+#define SHARED_SEGMENT_SIZE   0x10000   
+#define HCA_EXCHANGE_SHM_KEY  999 // shared memory key for HCA data exchange 
+
+// some internal opcodes for IB operations used in IBNAL
+#define SEND_QP_INFO          0X00000001 
+#define RECV_QP_INFO          0X00000010 
+
+// Mellanox InfiniHost MT23108 
+// QP/CQ related information
+//
+
+#define MTU_256     1 /* 1-256,2-512,3-1024,4-2048 */
+#define MTU_512     2 /* 1-256,2-512,3-1024,4-2048 */
+#define MTU_1024    3 /* 1-256,2-512,3-1024,4-2048 */
+#define MTU_2048    4 /* 1-256,2-512,3-1024,4-2048 */
+
+// number of entries for each CQ and WQ 
+// how much do we need ?
+#define NUM_CQE        1024
+#define NUM_WQE        1024 
+#define MAX_OUT_SQ     64 
+#define MAX_OUT_RQ     64
+
+#define NUM_MBUF       256 
+#define NUM_RDMA_RESERVED_ENTRY 128 
+#define NUM_QPS        256 
+
+#define INVALID_WR_ID  ((VAPI_wr_id_t) -1)
+
+
+// for Vector IO 
+// scatter and gather 
+// Portals can support up to 64 IO-Vectors 
+// how much do we need ? 
+#define NUM_SGE        1 
+#define NUM_SG         1 
+#define NUM_CQ        1        
+
+#define ONE_KB    1024
+#define ONE_MB    (1024 * ONE_KB) 
+#define ONE_GB    (1024 * ONE_MB) 
+
+
+#define KB_4      (1024 * 4) 
+#define KB_8      (1024 * 8) 
+#define KB_16     (1024 * 16)
+#define KB_32     (1024 * 32)
+#define KB_64     (1024 * 64)
+#define KB_128    (1024 * 128) 
+#define KB_256    (1024 * 256) 
+
+// 256 entries in the registered buffer list 
+// small-size messages 
+#define Num_4_KB       64 
+#define Num_8_KB       64 
+#define Num_16_KB      40 
+#define Num_32_KB      40 
+#define Num_64_KB      40 
+#define Num_128_KB     4 
+#define Num_256_KB     4 
+
+#define SMALL_MSG_SIZE KB_32     
+
+#define MAX_MSG_SIZE   (ONE_MB * 512)   
+
+//   128 64KB buffers for send
+//   128 64KB buffers for recv  
+//   used in RDMA operations only 
+
+#define NUM_ENTRY      128 
+
+#define End_4_kb        Num_4_KB 
+#define End_8_kb        End_4_kb  + Num_8_KB 
+#define End_16_kb       End_8_kb  + Num_16_KB
+#define End_32_kb       End_16_kb + Num_32_KB
+#define End_64_kb       End_32_kb + Num_64_KB
+#define End_128_kb      End_64_kb + Num_128_KB
+#define End_256_kb      End_128_kb+ Num_256_KB
+
+
+#define SEND_BUF_SIZE   KB_32
+#define RECV_BUF_SIZE   SEND_BUF_SIZE
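+
+// A minimal sketch (illustrative only, assuming the KB_* constants above;
+// ibnal_pool_buf_size is a hypothetical helper, not referenced elsewhere):
+// round a payload length up to the smallest registered buffer size that
+// can hold it, matching the pool sizes listed in the Num_*_KB table.
+static inline int ibnal_pool_buf_size(int len)
+{
+        if (len <= KB_4)   return KB_4;
+        if (len <= KB_8)   return KB_8;
+        if (len <= KB_16)  return KB_16;
+        if (len <= KB_32)  return KB_32;
+        if (len <= KB_64)  return KB_64;
+        if (len <= KB_128) return KB_128;
+        return KB_256;
+}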
+
+// #define POLL_BASED_CQE_HANDLING     1
+#define EVENT_BASED_CQE_HANDLING        1
+#define IBNAL_SELF_TESTING             1
+
+#ifdef  IBNAL_SELF_TESTING
+#undef  IBNAL_SELF_TESTING
+#endif
+
+
+#define MSG_SIZE_SMALL 1 
+#define MSG_SIZE_LARGE 2 
+
+
+
+// some default configuration values for early testing 
+#define DEFAULT_DLID   1  // default destination LID
+#define DEFAULT_QP_NUM 4  // default QP number 
+#define P_KEY          0xFFFF // do we need default value
+#define PKEY_IX        0x0 // do we need default value
+#define Q_KEY          0x012  // do we need default value 
+#define L_KEY          0x12345678 // do we need default value 
+#define R_KEY          0x87654321 // do we need default value 
+#define HCA_ID         "InfiniHost0" // default 
+#define START_PSN      0
+#define START_SQ_PSN   0
+#define START_RQ_PSN   0
+
+
+#define __u_long_long   unsigned long long
+
+#define         IBNAL_DEBUG      1
+
+#define         USE_SHARED_MEMORY_AND_SOCKET 1
+
+// operation type
+#define TRY_SEND_ONLY    1
+
+#define YES     1  
+#define NO      0 
+
+//
+// a common data structure for IB QP's operation
+// each QP is associated with an QP_info structure 
+//
+typedef struct QP_info 
+{
+  VAPI_hca_hndl_t       hca_hndl;      // HCA handle
+  IB_port_t             port;          // port number 
+  VAPI_qp_hndl_t        qp_hndl;       // QP's handle list 
+  VAPI_qp_state_t       qp_state;      // QP's current state 
+  VAPI_pd_hndl_t        pd_hndl;       // protection domain
+  VAPI_cq_hndl_t        cq_hndl;       // completion queue handle 
+  VAPI_cq_hndl_t        sq_cq_hndl;    // send-queue CQ's handle 
+  VAPI_cq_hndl_t        rq_cq_hndl;    // receive-queue CQ's handle
+  VAPI_ud_av_hndl_t     av_hndl;       // UD address vector handle
+  VAPI_qp_init_attr_t   qp_init_attr;  // QP's init attribute 
+  VAPI_qp_attr_t        qp_attr;       // QP's attribute - dlid 
+  VAPI_qp_prop_t        qp_prop;       // QP's properties
+  VAPI_hca_port_t       hca_port;  
+  VAPI_qp_num_t         qp_num;    // QP's number 
+  VAPI_qp_num_t         rqp_num;       // remote QP's number 
+  IB_lid_t              slid;
+  IB_lid_t              dlid;
+  VAPI_gid_t            src_gid;
+
+  u_int32_t            buf_size;
+  VAPI_virt_addr_t      buf_addr;
+  char                *bufptr;
+  VAPI_mrw_t            mr;       
+  VAPI_mr_hndl_t        mr_hndl;
+  VAPI_virt_addr_t      raddr;
+  VAPI_rkey_t           rkey;
+  VAPI_lkey_t           lkey;
+
+  VAPI_wr_id_t          last_posted_send_id; // user defined work request ID 
+  VAPI_wr_id_t          last_posted_rcv_id;  // user defined work request ID
+  VAPI_mw_hndl_t        mw_hndl;       // memory window handle 
+  VAPI_rkey_t           mw_rkey;       // memory window rkey
+  VAPI_sg_lst_entry_t   sg_lst[256];       // scatter and gather list 
+  int                   sg_list_sz;    // set as NUM_SGE
+  VAPI_wr_id_t          wr_id;         //
+  spinlock_t            snd_mutex;
+  spinlock_t            rcv_mutex;
+  spinlock_t            bl_mutex;
+  spinlock_t            cln_mutex;
+  int                   cur_RDMA_outstanding;
+  int                   cur_send_outstanding;
+  int                   cur_posted_rcv_bufs;
+  int                   snd_rcv_balance;
+} QP_info; 
+
+
+// buffer status 
+#define  BUF_REGISTERED   0x10000000 
+#define  BUF_INUSE       0x01000000  
+#define  BUF_UNREGISTERED 0x00100000 
+
+// buffer type 
+#define  REG_BUF          0x10000000
+#define  RDMA_BUF         0x01000000 
+
+//
+// IMM data 
+// 
+#define   IMM_000         (0 << 32) 
+#define   IMM_001         (1 << 32) 
+#define   IMM_002         (2 << 32) 
+#define   IMM_003         (3 << 32) 
+#define   IMM_004         (4 << 32) 
+#define   IMM_005         (5 << 32) 
+#define   IMM_006         (6 << 32) 
+#define   IMM_007         (7 << 32) 
+#define   IMM_008         (8 << 32) 
+#define   IMM_009         (9 << 32) 
+#define   IMM_010         (10 << 32) 
+#define   IMM_011         (11 << 32) 
+#define   IMM_012         (12 << 32) 
+#define   IMM_013         (13 << 32) 
+#define   IMM_014         (14 << 32) 
+#define   IMM_015         (15 << 32) 
+#define   IMM_016         (16 << 32) 
+#define   IMM_017         (17 << 32) 
+#define   IMM_018         (18 << 32) 
+#define   IMM_019         (19 << 32) 
+#define   IMM_020         (20 << 32) 
+#define   IMM_021         (21 << 32) 
+#define   IMM_022         (22 << 32) 
+#define   IMM_023         (23 << 32) 
+#define   IMM_024         (24 << 32) 
+#define   IMM_025         (25 << 32) 
+#define   IMM_026         (26 << 32) 
+#define   IMM_027         (27 << 32) 
+#define   IMM_028         (28 << 32) 
+#define   IMM_029         (29 << 32) 
+#define   IMM_030         (30 << 32) 
+#define   IMM_031         (31 << 32) 
+
+
+typedef struct Memory_buffer_info{
+       u_int32_t        buf_size;
+       VAPI_virt_addr_t buf_addr;
+       char             *bufptr;
+       VAPI_mrw_t       mr;       
+       VAPI_mr_hndl_t   mr_hndl;
+        int              status;
+       int              ref_count;  
+        int              buf_type;
+       VAPI_virt_addr_t raddr;
+       VAPI_rkey_t      rkey;
+       VAPI_lkey_t      lkey;
+} Memory_buffer_info;
+
+typedef struct RDMA_Info_Exchange {
+       int               opcode;
+       int               buf_length;
+       VAPI_mrw_t        recv_rdma_mr;
+       VAPI_mr_hndl_t    recv_rdma_mr_hndl;
+       VAPI_mrw_t        send_rdma_mr;
+       VAPI_mr_hndl_t    send_rdma_mr_hndl;
+       VAPI_virt_addr_t  raddr;
+       VAPI_rkey_t       rkey;
+       int               flag;
+}  RDMA_Info_Exchange;
+
+// opcode for Rdma info exchange RTS/CTS 
+#define  Ready_To_send     0x10000000
+#define  Clear_To_send     0x01000000
+
+#define  RDMA_RTS_ID      5555 
+#define  RDMA_CTS_ID      7777 
+#define  RDMA_OP_ID       9999 
+#define  SEND_RECV_TEST_ID 2222 
+#define  SEND_RECV_TEST_BUF_ID 0 
+
+#define  TEST_SEND_MESSAGE 0x00000001 
+#define  TEST_RECV_MESSAGE 0x00000002
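+
+// A minimal sketch (illustrative only, assuming the ID bases above;
+// ibnal_wrq_to_buf_index is a hypothetical helper, not referenced
+// elsewhere): recover the buffer index encoded in a completion wr_id.
+// CQE_event_handler() open-codes the same arithmetic.
+static inline int ibnal_wrq_to_buf_index(VAPI_wr_id_t wrq_id)
+{
+        if (wrq_id >= RDMA_OP_ID)       // RDMA read/write completion
+                return (int) (wrq_id - RDMA_OP_ID);
+        if (wrq_id >= RDMA_CTS_ID)      // CTS handshake message
+                return (int) (wrq_id - RDMA_CTS_ID);
+        if (wrq_id >= RDMA_RTS_ID)      // RTS handshake message
+                return (int) (wrq_id - RDMA_RTS_ID);
+        return (int) wrq_id;            // plain small-message send/recv
+}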
+
+
+#define  RTS_CTS_TIMEOUT           50
+#define  RECEIVING_THREAD_TIMEOUT  50 
+#define  WAIT_FOR_SEND_BUF_TIMEOUT 50
+
+#define  IBNAL_DEBUG_LEVEL_1   0XFFFFFFFF  
+#define  IBNAL_DEBUG_LEVEL_2   (D_PORTALS | D_NET   | D_WARNING | D_MALLOC | \
+                                D_ERROR   | D_OTHER | D_TRACE   | D_INFO)
+
+
+// flag for Rdma info exchange 
+#define  RDMA_BUFFER_RESERVED       0x10000000
+#define  RDMA_BUFFER_UNAVAILABLE    0x01000000
+
+
+// receiving data structure 
+typedef struct {
+        ptl_hdr_t         *krx_buffer; // pointer to receiving buffer
+        unsigned long     krx_len;  // length of buffer
+        unsigned int      krx_size; // 
+        unsigned int      krx_priority; // do we need this 
+        struct list_head  krx_item;
+}  kibnal_rx_t;
+
+// transmitting data structure 
+typedef struct {
+        nal_cb_t      *ktx_nal;
+        void          *ktx_private;
+        lib_msg_t     *ktx_cookie;
+        char          *ktx_buffer;
+        size_t         ktx_len;
+        unsigned long  ktx_size;
+        int            ktx_ndx;
+        unsigned int   ktx_priority;
+        unsigned int   ktx_tgt_node;
+        unsigned int   ktx_tgt_port_id;
+}  kibnal_tx_t;
+
+
+typedef struct {
+        char              kib_init;
+        char              kib_shuttingdown;
+        IB_port_t         port_num; // IB port information
+        struct list_head  kib_list;
+        ptl_nid_t         kib_nid;
+        nal_t            *kib_nal; 
+        nal_cb_t         *kib_cb;
+        struct kib_trans *kib_trans; // do I need this 
+        struct tq_struct  kib_ready_tq;
+        spinlock_t        kib_dispatch_lock;
+}  kibnal_data_t;
+
+
+//
+// A data structure for keeping the HCA information in system
+// information related to HCA and hca_handle will be kept here 
+//
+typedef struct HCA_Info 
+{
+  VAPI_hca_hndl_t       hca_hndl;     // HCA handle
+  VAPI_pd_hndl_t        pd_hndl;      // protection domain
+  IB_port_t             port;         // port number 
+  int                   num_qp;       // number of qp used  
+  QP_info               *qp_ptr[NUM_QPS]; // point to QP_list
+  int                   num_cq;       // number of cq used 
+  VAPI_cq_hndl_t        cq_hndl;   
+  VAPI_cq_hndl_t        sq_cq_hndl;   
+  VAPI_cq_hndl_t        rq_cq_hndl;   
+  IB_lid_t              dlid;
+  IB_lid_t              slid;
+  kibnal_data_t         *kib_data; // for PORTALS operations
+} HCA_info;
+
+
+
+
+// remote HCA information 
+typedef struct Remote_HCA_Info {
+        unsigned long     opcode;
+        unsigned long     length; 
+        IB_lid_t          dlid[NUM_QPS];
+        VAPI_qp_num_t     rqp_num[NUM_QPS];
+} Remote_QP_Info;
+
+typedef struct  Bucket_index{
+     int start;
+     int end;
+} Bucket_index;
+
+// functional prototypes 
+// infiniband initialization 
+int kib_init(kibnal_data_t *);
+
+// receiving thread 
+void kibnal_recv_thread(HCA_info *);
+void recv_thread(HCA_info *);
+
+// forward data packet 
+void kibnal_fwd_packet (void *, kpr_fwd_desc_t *);
+
+// global data structures 
+extern kibnal_data_t        kibnal_data;
+extern ptl_handle_ni_t      kibnal_ni;
+extern nal_t                kibnal_api;
+extern nal_cb_t             kibnal_lib;
+extern QP_info              QP_list[];
+extern QP_info              CQ_list[];
+extern HCA_info             Hca_data;
+extern VAPI_hca_hndl_t      Hca_hndl; 
+extern VAPI_pd_hndl_t       Pd_hndl;
+extern VAPI_hca_vendor_t    Hca_vendor;
+extern VAPI_hca_cap_t       Hca_cap;
+extern VAPI_hca_port_t      Hca_port_1_props;
+extern VAPI_hca_port_t      Hca_port_2_props;
+extern VAPI_hca_attr_t      Hca_attr;
+extern VAPI_hca_attr_mask_t Hca_attr_mask;
+extern VAPI_cq_hndl_t       Cq_SQ_hndl;   
+extern VAPI_cq_hndl_t       Cq_RQ_hndl;   
+extern VAPI_cq_hndl_t       Cq_hndl;   
+extern unsigned long        User_Defined_Small_Msg_Size;
+extern Remote_QP_Info      L_HCA_RDMA_Info;  
+extern Remote_QP_Info      R_HCA_RDMA_Info; 
+extern unsigned int         Num_posted_recv_buf;
+extern int                  R_RDMA_DATA_ARRIVED;
+extern Memory_buffer_info   MRbuf_list[];
+extern Memory_buffer_info   MSbuf_list[];
+extern Bucket_index         Bucket[]; 
+extern RDMA_Info_Exchange   Rdma_info;
+extern int                  Cts_Message_arrived;
+extern RDMA_Info_Exchange   Local_rdma_info;
+extern spinlock_t          MSB_mutex[];
+
+
+
+// kernel NAL API function prototype 
+int  kibnal_forward(nal_t *,int ,void *,size_t ,void *,size_t );
+void kibnal_lock(nal_t *, unsigned long *);
+void kibnal_unlock(nal_t *, unsigned long *);
+int  kibnal_shutdown(nal_t *, int );
+void kibnal_yield( nal_t * );
+void kibnal_invalidate(nal_cb_t *,void *,size_t ,void *);
+int  kibnal_validate(nal_cb_t *,void *,size_t ,void  **);
+
+
+
+nal_t *kibnal_init(int , ptl_pt_index_t , ptl_ac_index_t , ptl_pid_t );
+void __exit kibnal_finalize(void ); 
+VAPI_ret_t create_qp(QP_info *, int );
+VAPI_ret_t init_qp(QP_info *, int );
+VAPI_ret_t IB_Open_HCA(kibnal_data_t *);
+VAPI_ret_t IB_Close_HCA(void );
+VAPI_ret_t createMemRegion(VAPI_hca_hndl_t, VAPI_pd_hndl_t); 
+VAPI_ret_t  deleteMemRegion(QP_info *, int );
+
+void ibnal_send_recv_self_testing(int *);
+
+int  __init kibnal_initialize(void);
+
+
+
+/* CB NAL functions */
+int kibnal_send(nal_cb_t *, 
+                void *, 
+                lib_msg_t *, 
+                ptl_hdr_t *,
+                int, 
+                ptl_nid_t, 
+                ptl_pid_t, 
+                unsigned int, 
+                ptl_kiov_t *, 
+                size_t);
+
+int kibnal_send_pages(nal_cb_t *, 
+                      void *, 
+                      lib_msg_t *, 
+                      ptl_hdr_t *,
+                      int, 
+                      ptl_nid_t, 
+                      ptl_pid_t, 
+                      unsigned int, 
+                      ptl_kiov_t *, 
+                      size_t);
+int kibnal_recv(nal_cb_t *, void *, lib_msg_t *,
+                        unsigned int, struct iovec *, size_t, size_t);
+int kibnal_recv_pages(nal_cb_t *, void *, lib_msg_t *,
+                        unsigned int, ptl_kiov_t *, size_t, size_t);
+int  kibnal_read(nal_cb_t *,void *,void *,user_ptr ,size_t );
+int  kibnal_write(nal_cb_t *,void *,user_ptr ,void *,size_t );
+int  kibnal_callback(nal_cb_t * , void *, lib_eq_t *, ptl_event_t *);
+void *kibnal_malloc(nal_cb_t *,size_t );
+void kibnal_free(nal_cb_t *,void *,size_t );
+int  kibnal_map(nal_cb_t *, unsigned int , struct iovec *, void **);
+void kibnal_unmap(nal_cb_t *, unsigned int , struct iovec *, void **);
+int  kibnal_map_pages(nal_cb_t *, unsigned int , ptl_kiov_t *, void **);
+void kibnal_unmap_pages(nal_cb_t * , unsigned int , ptl_kiov_t *, void **);
+void kibnal_printf(nal_cb_t *, const char *, ...);
+void kibnal_cli(nal_cb_t *,unsigned long *); 
+void kibnal_sti(nal_cb_t *,unsigned long *);
+int  kibnal_dist(nal_cb_t *,ptl_nid_t ,unsigned long *);
+
+void kibnal_fwd_packet (void *, kpr_fwd_desc_t *);
+void kibnal_rx(kibnal_data_t *, 
+               VAPI_virt_addr_t ,
+               u_int32_t,
+               u_int32_t,
+               unsigned int);
+                
+int  kibnal_end(kibnal_data_t *);
+
+void async_event_handler(VAPI_hca_hndl_t , VAPI_event_record_t *,void *);
+
+void CQE_event_handler(VAPI_hca_hndl_t ,VAPI_cq_hndl_t , void  *);
+
+
+VAPI_ret_t Send_Small_Msg(char *, int );
+VAPI_ret_t Send_Large_Msg(char *, int );
+
+VAPI_ret_t repost_recv_buf(QP_info *, VAPI_wr_id_t );
+int post_recv_bufs(VAPI_wr_id_t );
+int  server_listen_thread(void *);
+VAPI_wr_id_t RTS_handshaking_protocol(int );
+VAPI_wr_id_t CTS_handshaking_protocol(RDMA_Info_Exchange *);
+
+VAPI_ret_t createMemRegion_RDMA(VAPI_hca_hndl_t ,
+                               VAPI_pd_hndl_t  ,
+                               char         *,
+                               int             , 
+                               VAPI_mr_hndl_t  *,
+                               VAPI_mrw_t      *);
+
+
+VAPI_ret_t IB_Set_Event_Handler(HCA_info , kibnal_data_t *);
+
+VAPI_ret_t IB_Set_Async_Event_Handler(HCA_info ,kibnal_data_t *);
+
+VAPI_wr_id_t find_available_buf(int );
+VAPI_wr_id_t search_send_buf(int );
+VAPI_wr_id_t find_filler_list(int ,int );
+int insert_MRbuf_list(int );
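+
+// A minimal sketch (illustrative only; ibnal_recycle_recv_buf is a
+// hypothetical helper, not referenced elsewhere): the lifecycle of a
+// registered receive buffer around one receive completion, as performed
+// inline by CQE_event_handler().
+static inline void ibnal_recycle_recv_buf(QP_info *qp, VAPI_wr_id_t wrq_id)
+{
+        MRbuf_list[wrq_id].status = BUF_INUSE;   // claimed while the payload
+                                                 // is handed to kibnal_rx()
+        if (repost_recv_buf(qp, wrq_id) == VAPI_OK)
+                MRbuf_list[wrq_id].status = BUF_REGISTERED; // ready for reuse
+}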
+
+
+#endif  /* _IBNAL_H */
diff --git a/lnet/klnds/iblnd/ibnal_cb.c b/lnet/klnds/iblnd/ibnal_cb.c
new file mode 100644 (file)
index 0000000..2c07cc4
--- /dev/null
@@ -0,0 +1,1288 @@
+/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
+ * vim:expandtab:shiftwidth=8:tabstop=8:
+ *
+ * Based on ksocknal and qswnal
+ *
+ *  Author: Hsing-bung Chen <hbchen@lanl.gov>
+ *
+ *   This file is part of Portals, http://www.sf.net/projects/sandiaportals/
+ *
+ *   Portals is free software; you can redistribute it and/or
+ *   modify it under the terms of version 2 of the GNU General Public
+ *   License as published by the Free Software Foundation.
+ *
+ *   Portals is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *   GNU General Public License for more details.
+ *
+ *   You should have received a copy of the GNU General Public License
+ *   along with Portals; if not, write to the Free Software
+ *   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+
+#include "ibnal.h"
+
+
+
+
+RDMA_Info_Exchange   Rdma_nfo;
+int  Cts_Msg_Arrived = NO;
+
+
+/*
+ *  LIB functions follow
+ */
+
+//
+// read
+// copy a block of data from scr_addr to dst_addr 
+// it all happens in kernel space - dst_addr and src_addr 
+//
+// the original definition is to read a block of data from a 
+// specified user address  
+// 
+// cb_read
+
+int kibnal_read (nal_cb_t *nal, 
+                 void     *private, 
+                 void     *dst_addr, 
+                 user_ptr src_addr, 
+                 size_t   len)
+{
+        CDEBUG(D_NET, "kibnal_read: 0x%Lx: reading %ld bytes from %p -> %p\n",
+               nal->ni.nid, (long)len, src_addr, dst_addr );
+
+        memcpy( dst_addr, src_addr, len );
+
+        return 0;
+}
+
+//
+// it seems that read and write are doing the same thing
+// because they both happen in kernel space 
+// why do we need two functions like read and write 
+// to keep the PORTALS API compatible 
+//
+
+//
+// write 
+// copy a block of data from scr_addr to dst_addr 
+// it all happens in kernel space - dst_addr and src_addr 
+//
+// the original definition is to write a block of data to a 
+// specified user address  
+// 
+// cb_write
+
+int kibnal_write(nal_cb_t   *nal, 
+                 void       *private, 
+                 user_ptr   dst_addr, 
+                 void       *src_addr, 
+                 size_t     len)
+{
+        CDEBUG(D_NET, "kibnal_write: 0x%Lx: writing %ld bytes from %p -> %p\n",
+               nal->ni.nid, (long)len, src_addr, dst_addr );
+
+
+        memcpy( dst_addr, src_addr, len );
+
+        return 0;
+}
+
+//
+// malloc
+//
+// either vmalloc or kmalloc is used 
+// dynamically allocate a block of memory based on the size of buffer  
+//
+// cb_malloc
+
+void * kibnal_malloc(nal_cb_t *nal, size_t length)
+{
+        void *buffer;
+
+        // PORTAL_ALLOC will do the job 
+        // allocate a buffer with size "length"
+        PORTAL_ALLOC(buffer, length);
+
+        return buffer;
+}
+
+//
+// free
+// release a dynamically allocated memory pointed by buffer pointer 
+//
+// cb_free
+
+void kibnal_free(nal_cb_t *nal, void *buffer, size_t length)
+{
+        //
+        // release allocated buffer to system 
+        //
+        PORTAL_FREE(buffer, length);
+}
+
+//
+// invalidate 
+// because everything is in kernel space (LUSTRE)
+// there is no need to mark a piece of user memory as no longer in use by
+// the system
+//
+// cb_invalidate
+
+void kibnal_invalidate(nal_cb_t      *nal, 
+                              void          *base, 
+                              size_t        extent, 
+                              void          *addrkey)
+{
+  // do nothing 
+  CDEBUG(D_NET, "kibnal_invalidate: 0x%Lx: invalidating %p : %d\n", 
+                                        nal->ni.nid, base, extent);
+  return;
+}
+
+
+//
+// validate 
+// because everything is in kernel space (LUSTRE)
+// there is no need to mark a piece of user memory in use by
+// the system
+//
+// cb_validate
+
+int kibnal_validate(nal_cb_t        *nal,  
+                           void            *base, 
+                           size_t          extent, 
+                           void            **addrkey)
+{
+  // do nothing 
+  CDEBUG(D_NET, "kibnal_validate: 0x%Lx: validating %p : %d\n", 
+                                        nal->ni.nid, base, extent);
+
+  return 0;
+}
+
+
+//
+// log messages from kernel space 
+// printk() is used 
+//
+// cb_printf
+
+void kibnal_printf(nal_cb_t *nal, const char *fmt, ...)
+{
+        va_list ap;
+        char    msg[256];
+
+        if (portal_debug & D_NET) {
+                va_start( ap, fmt );
+                vsnprintf( msg, sizeof(msg), fmt, ap );
+                va_end( ap );
+
+                printk("CPUId: %d %s",smp_processor_id(), msg);
+        }
+}
+
+//
+// clear interrupt
+// use spin_lock to lock protected area such as MD, ME...
+// so a process can enter a protected area and do some works
+// this won't physically disable interrupts but uses a software 
+// spin-lock to control some protected areas 
+//
+// cb_cli 
+
+void kibnal_cli(nal_cb_t *nal, unsigned long *flags) 
+{ 
+        kibnal_data_t *data= nal->nal_data;
+
+        CDEBUG(D_NET, "kibnal_cli \n");
+
+        spin_lock_irqsave(&data->kib_dispatch_lock,*flags);
+
+}
+
+//
+// set interrupt
+// use spin_lock to unlock protected area such as MD, ME...
+// this won't physically enable interrupts but uses a software 
+// spin-lock to control some protected areas 
+//
+// cb_sti
+
+void kibnal_sti(nal_cb_t *nal, unsigned long *flags)
+{
+        kibnal_data_t *data= nal->nal_data;
+
+        CDEBUG(D_NET, "kibnal_sti \n");
+
+        spin_unlock_irqrestore(&data->kib_dispatch_lock,*flags);
+}
+
+
+
+//
+// nic distance 
+// 
+// network distance doesn't mean much for this nal 
+// here we only indicate 
+//      0 - the operation happens on the same node 
+//      1 - the operation happens on different nodes 
+//          router will handle the data routing 
+//
+// cb_dist
+
+int kibnal_dist(nal_cb_t *nal, ptl_nid_t nid, unsigned long *dist)
+{
+        CDEBUG(D_NET, "kibnal_dist \n");
+
+        if ( nal->ni.nid == nid ) {
+                *dist = 0;
+        } 
+        else {
+                *dist = 1;
+        }
+
+        return 0; // always return 0 
+}
+
+
+//
+// This is the cb_send() on IB based interconnect system
+// prepare a data package and use VAPI_post_sr() to send it
+// down-link out-going message 
+//
+
+
+int
+kibnal_send(nal_cb_t        *nal,
+            void            *private,
+            lib_msg_t       *cookie,
+            ptl_hdr_t       *hdr,
+            int              type,
+            ptl_nid_t        nid,
+            ptl_pid_t        pid,
+            unsigned int     niov,
+            ptl_kiov_t      *iov,
+            size_t           len)
+{
+        
+        int           rc=0;
+        void         *buf = NULL; 
+        unsigned long buf_length = sizeof(ptl_hdr_t) + len;
+        int           expected_buf_size = 0;
+        VAPI_ret_t    vstat;
+
+        PROF_START(kibnal_send); // time stamp send start 
+
+        CDEBUG(D_NET,"kibnal_send: sending %d bytes from %p to nid: 0x%Lx pid %d\n",
+               buf_length, iov, nid, HCA_PORT_1);
+
+
+        // do I need to check the gateway information
+        // do I have problem to send direct 
+        // do I have to forward a data packet to gateway
+        // 
+        // The current connection is back-to-back 
+        // I always know that data will be send from one-side to
+        // the other side
+        //
+        
+        //
+        //  check data buffer size 
+        //
+        //  MSG_SIZE_SMALL 
+        //      regular post send 
+        //  
+        //  MSG_SIZE_LARGE
+        //      rdma write
+        
+        if(buf_length <= SMALL_MSG_SIZE) {  
+           expected_buf_size = MSG_SIZE_SMALL;
+        } 
+        else { 
+          if(buf_length > MAX_MSG_SIZE) { 
+             CERROR("kibnal_send:request exceeds Transmit data size (%d).\n",
+                      MAX_MSG_SIZE);
+             rc = -1;
+             return rc;
+          }
+          else {
+             expected_buf_size = MSG_SIZE_LARGE; // this is a large data package 
+          } 
+        }
+                
+        // prepare data packet for send operation 
+        //
+        // allocate a data buffer "buf" with size of buf_length (header + payload)
+        //                 ---------------
+        //  buf            | hdr         |  size = sizeof(ptl_hdr_t)
+        //                 ---------------
+        //                 |payload data |  size = len
+        //                 ---------------
+        
+        PORTAL_ALLOC(buf, buf_length);
+        if (buf == NULL) {
+                CERROR("kibnal_send: failed to allocate a %lu byte send buffer\n",
+                       buf_length);
+                return -1;
+        }
+
+        // copy header to buf 
+        memcpy(buf, hdr, sizeof(ptl_hdr_t));
+
+        // copy payload data from iov to buf
+        // use portals library function lib_copy_iov2buf()
+        
+        if (len != 0)
+           lib_copy_iov2buf(((char *)buf) + sizeof (ptl_hdr_t),
+                            niov, 
+                            iov, 
+                            len);
+
+        // buf is ready to do a post send 
+        // the send method is base on the buf_size 
+
+        CDEBUG(D_NET,"ib_send %d bytes (size %d) from %p to nid: 0x%Lx "
+               " port %d\n", buf_length, expected_buf_size, iov, nid, HCA_PORT_1);
+
+        switch(expected_buf_size) {
+          case MSG_SIZE_SMALL:
+            // send small message 
+            if((vstat = Send_Small_Msg(buf, buf_length)) != VAPI_OK){
+                CERROR("Send_Small_Msg() is failed\n");
+            } 
+            break;
+
+          case MSG_SIZE_LARGE:
+            // send large message 
+            if((vstat = Send_Large_Msg(buf, buf_length)) != VAPI_OK){
+                CERROR("Send_Large_Msg() is failed\n");
+            } 
+            break;
+
+          default:
+            CERROR("Unknown message size %d\n", expected_buf_size);
+            break;
+        }
+
+        PROF_FINISH(kibnal_send); // time stamp of send operation 
+
+        rc = 1;
+
+        return rc; 
+}
+
+//
+// kibnal_send_pages
+//
+// no support 
+//
+// do you need this 
+//
+int kibnal_send_pages(nal_cb_t * nal, 
+                      void *private, 
+                      lib_msg_t * cookie,
+                      ptl_hdr_t * hdr, 
+                      int type, 
+                      ptl_nid_t nid, 
+                      ptl_pid_t pid,
+                      unsigned int niov, 
+                      ptl_kiov_t *iov, 
+                      size_t mlen)
+{
+   int rc = 1;
+
+   CDEBUG(D_NET, "kibnal_send_pages\n");
+
+   // do nothing now for Infiniband 
+   
+   return rc;
+}
+
+
+
+
+
+//
+// kibnal_fwd_packet 
+//
+// no support 
+//
+// do you need this 
+//
+void kibnal_fwd_packet (void *arg, kpr_fwd_desc_t *fwd)
+{
+        CDEBUG(D_NET, "forwarding not implemented\n");
+        return;
+      
+}
+
+//
+// kibnal_callback 
+//
+// no support 
+//
+// do you need this 
+//
+int kibnal_callback(nal_cb_t * nal, 
+                           void *private, 
+                           lib_eq_t *eq,
+                           ptl_event_t *ev)
+{
+        CDEBUG(D_NET,  "callback not implemented\n");
+        return PTL_OK;
+}
+
+
+/* Process a received portals packet */
+//
+//  convert received data into a PORTALS header 
+//
+
+void kibnal_rx(kibnal_data_t    *kib, 
+                      VAPI_virt_addr_t buffer_addr,
+                      u_int32_t        buffer_len,
+                      u_int32_t        buffer_size,
+                      unsigned int     priority) 
+{
+        ptl_hdr_t  *hdr = (ptl_hdr_t *)  buffer_addr; // cast to ptl header format 
+        kibnal_rx_t krx;
+
+        CDEBUG(D_NET,"kibnal_rx: buf %p, len %ld\n", buffer_addr, buffer_len);
+
+        if ( buffer_len < sizeof( ptl_hdr_t ) ) {
+                /* XXX what's this for? */
+                if (kib->kib_shuttingdown)
+                        return;
+                CERROR("kibnal_rx: did not receive complete portal header, "
+                       "len= %ld", buffer_len);
+
+                return;
+        }
+
+       // typedef struct {
+       //         char             *krx_buffer; // pointer to receiving buffer
+       //         unsigned long     krx_len;  // length of buffer
+       //         unsigned int      krx_size; //
+       //         unsigned int      krx_priority; // do we need this
+       //         struct list_head  krx_item;
+       // } kibnal_rx_t;
+       //
+        krx.krx_buffer    = hdr;
+        krx.krx_len       = buffer_len;
+        krx.krx_size      = buffer_size;
+        krx.krx_priority  = priority;
+
+        if ( hdr->dest_nid == kibnal_lib.ni.nid ) {
+           // this is my data 
+           PROF_START(lib_parse);
+
+           lib_parse(&kibnal_lib, (ptl_hdr_t *)krx.krx_buffer, &krx);
+
+           PROF_FINISH(lib_parse);
+        } else {
+           /* forward to gateway */
+           // Do we expect this happened ?
+           //      
+           CERROR("kibnal_rx: forwarding not implemented yet");
+        }
+
+        return;
+}
+
+
+
+
+//
+// kibnal_recv_pages 
+//
+// no support 
+//
+// do you need this 
+//
+int
+kibnal_recv_pages(nal_cb_t * nal, 
+                  void *private, 
+                  lib_msg_t * cookie,
+                  unsigned int niov, 
+                  ptl_kiov_t *iov, 
+                  size_t mlen,
+                  size_t rlen)
+{
+
+  CDEBUG(D_NET, "recv_pages not implemented\n");
+  return PTL_OK;
+       
+}
+
+
+int 
+kibnal_recv(nal_cb_t     *nal,
+            void         *private,
+            lib_msg_t    *cookie,
+            unsigned int  niov,
+            struct iovec *iov,
+            size_t        mlen,
+            size_t        rlen)
+{
+        kibnal_rx_t *krx = private;
+
+        CDEBUG(D_NET,"kibnal_recv: mlen=%d, rlen=%d\n", mlen, rlen);
+
+        /* What was actually received must be >= what sender claims to
+         * have sent.  This is an LASSERT, since lib-move doesn't
+         * check cb return code yet. */
+        LASSERT (krx->krx_len >= sizeof (ptl_hdr_t) + rlen);
+        LASSERT (mlen <= rlen);
+
+        PROF_START(kibnal_recv);
+
+        if(mlen != 0) {
+                PROF_START(memcpy);
+                lib_copy_buf2iov (niov, iov, krx->krx_buffer +
+                                  sizeof (ptl_hdr_t), mlen);
+                PROF_FINISH(memcpy);
+        }
+
+        PROF_START(lib_finalize);
+        
+        lib_finalize(nal, private, cookie);
+        
+        PROF_FINISH(lib_finalize);
+        PROF_FINISH(kibnal_recv);
+
+        return rlen;
+}
+
+//
+// kibnal_map 
+// no support 
+// do you need this 
+//
+int kibnal_map(nal_cb_t * nal, 
+               unsigned int niov, 
+               struct iovec *iov,
+               void **addrkey)
+{
+  CDEBUG(D_NET, "map not implemented\n");
+  return PTL_OK; 
+}
+
+
+
+//
+// kibnal_unmap
+//
+// no support 
+//
+// do you need this 
+//
+void kibnal_unmap(nal_cb_t * nal, 
+                  unsigned int niov, 
+                  struct iovec *iov,
+                  void **addrkey)
+{
+  CDEBUG(D_NET, "unmap not implemented\n");
+  return;
+}
+
+
+
+//
+// kibnal_map_pages 
+// no support 
+// do you need this 
+/* as (un)map, but with a set of page fragments */
+int kibnal_map_pages(nal_cb_t * nal, 
+                     unsigned int niov, 
+                     ptl_kiov_t *iov,
+                     void **addrkey)
+{
+  CDEBUG(D_NET, "map_pages not implemented\n");
+  return PTL_OK;
+}
+
+
+
+//
+// kibnal_unmap_pages 
+//
+// no support 
+//
+// do you need this 
+//
+void kibnal_unmap_pages(nal_cb_t * nal, 
+                               unsigned int niov, 
+                               ptl_kiov_t *iov,
+                               void **addrkey)
+{
+  CDEBUG(D_NET, "unmap_pages not implemented\n");
+  return ;
+}
+
+
+int kibnal_end(kibnal_data_t *kib)
+{
+
+  /* wait for sends to finish ? */
+  /* remove receive buffers */
+  /* shutdown receive thread */
+
+  CDEBUG(D_NET, "kibnal_end\n");
+  IB_Close_HCA();
+
+  return 0;
+}
+
+
+//
+//
+//  asynchronous event handler: responds to some unexpected operation errors 
+//    
+//  void async_event_handler(VAPI_hca_hndl_t      hca_hndl,
+//                           VAPI_event_record_t *event_record_p,
+//                           void*                private_data)
+//  the HCA driver will prepare event_record_p                        
+//
+//  this handler is registered with VAPI_set_async_event_handler()
+//  VAPI_set_async_event_handler() is issued when an HCA is created 
+//
+//
+void async_event_handler(VAPI_hca_hndl_t      hca_hndl,
+                         VAPI_event_record_t *event_record_p,  
+                         void*                private_data)
+{
+  //
+  // * event_record_p is prepared by the system when an async
+  //   event happened
+  // * what to do with private_data 
+  // * do we expect more async events happened if so what are they 
+  //
+  //   only log ERROR message now 
+
+  switch (event_record_p->type) {
+    case VAPI_PORT_ERROR:
+         printk("Got PORT_ERROR event. port number=%d\n", 
+                 event_record_p->modifier.port_num);
+         break;
+    case VAPI_PORT_ACTIVE:
+         printk("Got PORT_ACTIVE event. port number=%d\n", 
+                 event_record_p->modifier.port_num);
+         break;
+    case VAPI_QP_PATH_MIGRATED:    /*QP*/
+         printk("Got P_PATH_MIGRATED event. qp_hndl=%lu\n", 
+                 event_record_p->modifier.qp_hndl);
+         break;
+    case VAPI_EEC_PATH_MIGRATED:   /*EEC*/
+         printk("Got EEC_PATH_MIGRATED event. eec_hndl=%d\n", 
+                 event_record_p->modifier.eec_hndl);
+         break;
+    case VAPI_QP_COMM_ESTABLISHED: /*QP*/
+         printk("Got QP_COMM_ESTABLISHED event. qp_hndl=%lu\n", 
+                 event_record_p->modifier.qp_hndl);
+         break;
+    case VAPI_EEC_COMM_ESTABLISHED: /*EEC*/
+         printk("Got EEC_COMM_ESTABLISHED event. eec_hndl=%d\n",
+                 event_record_p->modifier.eec_hndl);
+         break;
+    case VAPI_SEND_QUEUE_DRAINED:  /*QP*/
+         printk("Got SEND_QUEUE_DRAINED event. qp_hndl=%lu\n", 
+                 event_record_p->modifier.qp_hndl);
+         break;
+    case VAPI_CQ_ERROR:            /*CQ*/
+         printk("Got CQ_ERROR event. cq_hndl=%lu\n", 
+                 event_record_p->modifier.cq_hndl);
+         break;
+    case VAPI_LOCAL_WQ_INV_REQUEST_ERROR: /*QP*/
+         printk("Got LOCAL_WQ_INV_REQUEST_ERROR event. qp_hndl=%lu\n", 
+                 event_record_p->modifier.qp_hndl);
+         break;
+    case VAPI_LOCAL_WQ_ACCESS_VIOL_ERROR: /*QP*/
+         printk("Got LOCAL_WQ_ACCESS_VIOL_ERROR event. qp_hndl=%lu\n", 
+                 event_record_p->modifier.qp_hndl);
+         break;
+    case VAPI_LOCAL_WQ_CATASTROPHIC_ERROR: /*QP*/
+         printk("Got LOCAL_WQ_CATASTROPHIC_ERROR event. qp_hndl=%lu\n", 
+                 event_record_p->modifier.qp_hndl);
+         break;
+    case VAPI_PATH_MIG_REQ_ERROR:  /*QP*/
+         printk("Got PATH_MIG_REQ_ERROR event. qp_hndl=%lu\n", 
+                 event_record_p->modifier.qp_hndl);
+         break;
+    case VAPI_LOCAL_CATASTROPHIC_ERROR: /*none*/
+         printk("Got LOCAL_CATASTROPHIC_ERROR event. \n");
+         break;
+    default:
+         printk(":got non-valid event type=%d. IGNORING\n",
+                    event_record_p->type);
+  }
+
+}
+
+
+
+
+VAPI_wr_id_t 
+search_send_buf(int buf_length)
+{
+  VAPI_wr_id_t send_id = -1;
+  u_int32_t    i;
+  int          flag = NO;
+  int          loop_count = 0;  
+
+  CDEBUG(D_NET, "search_send_buf \n");
+  
+  while((flag == NO) && (loop_count < MAX_LOOP_COUNT)) {
+    for(i=0; i < NUM_ENTRY; i++) {
+      // problem about using spinlock
+      spin_lock(&MSB_mutex[i]);
+      if(MSbuf_list[i].status == BUF_REGISTERED)  {
+        MSbuf_list[i].status = BUF_INUSE;// make send buf as inuse
+        flag =  YES;
+        spin_unlock(&MSB_mutex[i]);
+        break;
+      }
+      else
+        spin_unlock(&MSB_mutex[i]); 
+    }
+
+    loop_count++;
+    schedule_timeout(200); // wait for a while 
+  }
+   
+  if(flag == NO)  {
+    CDEBUG(D_NET, "search_send_buf: could not locate an entry in MSbuf_list\n");
+  }
+
+  send_id = (VAPI_wr_id_t ) i;
+
+  return send_id;
+}
+
+
+
+VAPI_wr_id_t 
+search_RDMA_recv_buf(int buf_length)
+{
+  VAPI_wr_id_t recv_id = -1;
+  u_int32_t    i;
+  int          flag = NO;
+  int          loop_count = 0;  
+
+  CDEBUG(D_NET, "search_RDMA_recv_buf\n");
+
+  while((flag == NO) && (loop_count < MAX_LOOP_COUNT)) {
+
+    for(i=NUM_ENTRY; i < NUM_MBUF; i++) {
+
+      spin_lock(&MSB_mutex[i]);
+
+      if((MRbuf_list[i].status == BUF_REGISTERED)  &&
+         (MRbuf_list[i].buf_size >= buf_length)) {
+          MRbuf_list[i].status = BUF_INUSE; // mark recv buf as in use
+          flag =  YES;
+          spin_unlock(&MSB_mutex[i]);
+          break;
+      }
+      else
+        spin_unlock(&MSB_mutex[i]);
+    }
+
+    loop_count++;
+
+    schedule_timeout(200); // wait for a while 
+  }
+   
+  if(flag == NO)  {
+    CERROR("search_RDMA_recv_buf: could not locate an entry in MBbuf_list\n");
+  }
+
+  recv_id = (VAPI_wr_id_t ) i;
+
+  return recv_id;
+
+}
+
+
+
+
+
+
+
+VAPI_ret_t Send_Small_Msg(char *buf, int buf_length)
+{
+ VAPI_ret_t           vstat;
+ VAPI_sr_desc_t       sr_desc;
+ VAPI_sg_lst_entry_t  sr_sg;
+ QP_info              *qp;
+ VAPI_wr_id_t         send_id;
+
+ CDEBUG(D_NET, "Send_Small_Msg\n");
+
+ send_id = search_send_buf(buf_length); 
+
+ if(send_id < 0){
+   CERROR("Send_Small_Msg: Can not find a QP \n");
+   return(~VAPI_OK);
+ }
+
+ qp = &QP_list[(int) send_id];
+
+ // find a suitable/registered send_buf from MSbuf_list
+ CDEBUG(D_NET, "Send_Small_Msg: current send id  %d \n", send_id);
+
+ sr_desc.opcode    = VAPI_SEND;
+ sr_desc.comp_type = VAPI_SIGNALED;
+ sr_desc.id        =  send_id;
+
+
+ // scatter and gather info 
+ sr_sg.len  = buf_length;
+ sr_sg.lkey = MSbuf_list[send_id].mr.l_key; // use send MR 
+
+ sr_sg.addr = (VAPI_virt_addr_t)(MT_virt_addr_t) MSbuf_list[send_id].buf_addr;
+
+ // copy data to register send buffer 
+ memcpy(&sr_sg.addr, buf, buf_length);
+
+ sr_desc.sg_lst_p = &sr_sg;
+ sr_desc.sg_lst_len = 1; // only 1 entry is used 
+ sr_desc.fence = TRUE;
+ sr_desc.set_se = FALSE;
+
+ // call VAPI_post_sr to send out this data 
+ vstat = VAPI_post_sr(qp->hca_hndl, qp->qp_hndl, &sr_desc);
+
+ if (vstat != VAPI_OK) {
+    CERROR("VAPI_post_sr failed (%s).\n",VAPI_strerror(vstat));
+ }
+
+ CDEBUG(D_NET, "VAPI_post_sr success.\n");
+
+ return (vstat);
+
+}
+
+
+
+
+VAPI_wr_id_t
+RTS_handshaking_protocol(int buf_length) 
+{
+
+ VAPI_ret_t           vstat;
+ VAPI_sr_desc_t       sr_desc;
+ VAPI_sg_lst_entry_t  sr_sg;
+ VAPI_wr_id_t         send_id;
+
+ RDMA_Info_Exchange   rdma_info;
+ QP_info              *qp;
+
+ rdma_info.opcode     = Ready_To_send;
+ rdma_info.buf_length = buf_length; 
+ rdma_info.raddr      = (VAPI_virt_addr_t) 0;
+ rdma_info.rkey       = (VAPI_rkey_t) 0 ; 
+
+ CDEBUG(D_NET, "RTS_handshaking_protocol\n");
+
+ // find a suitable/registered send_buf from MSbuf_list
+ send_id = search_send_buf(sizeof(RDMA_Info_Exchange));   
+
+ qp = &QP_list[(int) send_id];
+
+ CDEBUG(D_NET, "RTS_CTS: current send id  %d \n", send_id);
+ sr_desc.opcode    = VAPI_SEND;
+ sr_desc.comp_type = VAPI_SIGNALED;
+ sr_desc.id        = send_id + RDMA_RTS_ID;// this RTS mesage ID 
+
+ // scatter and gather info 
+ sr_sg.len  = sizeof(RDMA_Info_Exchange);
+ sr_sg.lkey = MSbuf_list[send_id].mr.l_key; // use send MR 
+ sr_sg.addr = (VAPI_virt_addr_t)(MT_virt_addr_t) MSbuf_list[send_id].buf_addr;
+
+ // copy data to register send buffer 
+ memcpy(&sr_sg.addr, &rdma_info, sizeof(RDMA_Info_Exchange));
+
+ sr_desc.sg_lst_p = &sr_sg;
+ sr_desc.sg_lst_len = 1; // only 1 entry is used 
+ sr_desc.fence = TRUE;
+ sr_desc.set_se = FALSE;
+
+ // call VAPI_post_sr to send out this RTS message data 
+ vstat = VAPI_post_sr(qp->hca_hndl, qp->qp_hndl, &sr_desc);
+
+ if (vstat != VAPI_OK) {
+    CERROR("RTS: VAPI_post_sr failed (%s).\n",VAPI_strerror_sym(vstat));
+ }
+
+ return send_id;
+
+}
+
+
+
+// create local receiving Memory Region for a HCA
+VAPI_ret_t
+createMemRegion_RDMA(VAPI_hca_hndl_t  hca_hndl,
+                     VAPI_pd_hndl_t   pd_hndl,
+                     char            *bufptr,
+                     int              buf_length,
+                     VAPI_mr_hndl_t   *rep_mr_hndl,
+                     VAPI_mrw_t       *rep_mr)
+{
+  VAPI_ret_t      vstat;
+  VAPI_mrw_t      mrw;
+  
+  CDEBUG(D_NET, "createMemRegion_RDMA\n");
+
+  // memory region address and size of memory region
+  // allocate a block of memory for this HCA 
+  // RDMA data buffer
+  
+  
+  if(bufptr == NULL) {
+    // need to allocate a local buffer to receive data from a
+    // remote VAPI_RDMA_WRITE_IMM
+    PORTAL_ALLOC(bufptr, buf_length);
+  }
+
+  if(bufptr == NULL) {
+    CDEBUG(D_MALLOC, "Failed to malloc a block of RDMA receiving memory, size %d\n",
+                                    buf_length);
+    return(VAPI_ENOMEM);
+  }
+
+  /* Register RDMA data memory region */
+  CDEBUG(D_NET, "Register a RDMA data memory region\n");
+
+  mrw.type   = VAPI_MR;
+  mrw.pd_hndl= pd_hndl;
+  mrw.start  = (VAPI_virt_addr_t )(MT_virt_addr_t )bufptr;
+  mrw.size   = buf_length;
+  mrw.acl    = VAPI_EN_LOCAL_WRITE  | 
+               VAPI_EN_REMOTE_WRITE | 
+               VAPI_EN_REMOTE_READ;
+
+  // register the RDMA data memory region
+  vstat = VAPI_register_mr(hca_hndl,
+                           &mrw,
+                           rep_mr_hndl,
+                           rep_mr);
+
+  // this memory region is going to be reused until deregister is called
+  if (vstat != VAPI_OK) {
+     CERROR("Failed registering a mem region Addr=%p, Len=%d. %s\n",
+             bufptr, buf_length, VAPI_strerror(vstat));
+  }
+
+  return(vstat);
+
+}
+
+
+
+RDMA_Info_Exchange  Local_rdma_info;
+
+int insert_MRbuf_list(int buf_length)
+{
+  int  recv_id = NUM_ENTRY;      
+
+  CDEBUG(D_NET, "insert_MRbuf_list\n");
+
+  for(recv_id= NUM_ENTRY; recv_id < NUM_MBUF; recv_id++){
+       if(BUF_UNREGISTERED == MRbuf_list[recv_id].status)  {
+         MRbuf_list[recv_id].status   = BUF_UNREGISTERED;
+         MRbuf_list[recv_id].buf_size = buf_length;
+         break;
+       }
+  }
+
+  return recv_id;
+
+}  
+
+VAPI_wr_id_t
+CTS_handshaking_protocol(RDMA_Info_Exchange *rdma_info) 
+{
+
+ VAPI_ret_t           vstat;
+ VAPI_sr_desc_t       sr_desc;
+ VAPI_sg_lst_entry_t  sr_sg;
+ QP_info             *qp;
+ VAPI_wr_id_t         send_id;
+ VAPI_mr_hndl_t       rep_mr_hndl;
+ VAPI_mrw_t           rep_mr;
+ int                  recv_id;
+ char                *bufptr = NULL;
+
+ // search MRbuf_list for an available entry that
+ // has a registered data buffer with size equal to rdma_info->buf_length
+
+ CDEBUG(D_NET, "CTS_handshaking_protocol\n");
+
+ // register memory buffer for RDMA operation
+
+ vstat = createMemRegion_RDMA(Hca_hndl,
+                              Pd_hndl,
+                              bufptr, 
+                              rdma_info->buf_length,
+                              &rep_mr_hndl,
+                              &rep_mr);
+
+
+ Local_rdma_info.opcode            = Clear_To_send;
+ Local_rdma_info.recv_rdma_mr      = rep_mr;
+ Local_rdma_info.recv_rdma_mr_hndl = rep_mr_hndl;
+
+ if (vstat != VAPI_OK) {
+    CERROR("CST_handshaking_protocol: Failed registering a mem region"
+           "Len=%d. %s\n", rdma_info->buf_length, VAPI_strerror(vstat));
+    Local_rdma_info.flag = RDMA_BUFFER_UNAVAILABLE;
+ }
+ else {
+    // successfully allocated the reserved RDMA data buffer
+    recv_id = insert_MRbuf_list(rdma_info->buf_length);   
+
+    if(recv_id < NUM_MBUF) { 
+      MRbuf_list[recv_id].buf_addr     = rep_mr.start;
+      MRbuf_list[recv_id].mr           = rep_mr;
+      MRbuf_list[recv_id].mr_hndl      = rep_mr_hndl;
+      MRbuf_list[recv_id].ref_count    = 0;
+      Local_rdma_info.flag             = RDMA_BUFFER_RESERVED;
+      Local_rdma_info.buf_length       = rdma_info->buf_length; 
+      Local_rdma_info.raddr            = rep_mr.start;
+      Local_rdma_info.rkey             = rep_mr.r_key; 
+    }
+    else {
+      CERROR("Can not find an entry in MRbuf_list - how could this happen\n");  
+    }
+ }
+
+ // find a suitable/registered send_buf from MSbuf_list
+ send_id = search_send_buf(sizeof(RDMA_Info_Exchange)); 
+ CDEBUG(D_NET, "CTS: current send id  %d \n", send_id);
+ sr_desc.opcode    = VAPI_SEND;
+ sr_desc.comp_type = VAPI_SIGNALED;
+ sr_desc.id        = send_id + RDMA_CTS_ID; // this CTS message ID 
+
+ // scatter and gather info 
+ sr_sg.len  = sizeof(RDMA_Info_Exchange);
+ sr_sg.lkey = MSbuf_list[send_id].mr.l_key; // use send MR 
+ sr_sg.addr = (VAPI_virt_addr_t)(MT_virt_addr_t) MSbuf_list[send_id].buf_addr;
+
+ // copy data to register send buffer 
+ memcpy(&sr_sg.addr, &Local_rdma_info, sizeof(RDMA_Info_Exchange));
+
+ sr_desc.sg_lst_p   = &sr_sg;
+ sr_desc.sg_lst_len = 1; // only 1 entry is used 
+ sr_desc.fence = TRUE;
+ sr_desc.set_se = FALSE;
+
+ // use the QP associated with this send buffer
+ // (same convention as Send_Large_Msg below)
+ qp = &QP_list[send_id];
+
+ // call VAPI_post_sr to send out this CTS message data 
+ vstat = VAPI_post_sr(qp->hca_hndl, qp->qp_hndl, &sr_desc);
+
+ if (vstat != VAPI_OK) {
+    CERROR("CTS: VAPI_post_sr failed (%s).\n",VAPI_strerror(vstat));
+ }
+
+ return send_id;
+}
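+
+/*
+ * Large-message flow, pieced together from the routines in this file
+ * (a sketch, not a formal protocol description):
+ *
+ *   sender                                    receiver
+ *   ------                                    --------
+ *   RTS_handshaking_protocol(len)   --RTS-->  CTS_handshaking_protocol()
+ *                                             registers an RDMA buffer and
+ *                                   <--CTS--  returns its raddr/rkey
+ *   Send_Large_Msg(buf, len)
+ *     VAPI_RDMA_WRITE_WITH_IMM      --data->  RQ completion carrying imm_data
+ */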
+
+
+
+VAPI_ret_t Send_Large_Msg(char *buf, int buf_length)
+{
+  VAPI_ret_t           vstat;
+  VAPI_sr_desc_t       sr_desc;
+  VAPI_sg_lst_entry_t  sr_sg;
+  QP_info             *qp;
+  VAPI_mrw_t           rep_mr; 
+  VAPI_mr_hndl_t       rep_mr_hndl;
+  int                  send_id;
+  VAPI_imm_data_t      imm_data = 0XAAAA5555;
+
+
+  CDEBUG(D_NET, "Send_Large_Msg: Enter\n");
+
+  // register this large buf 
+  // don't need to copy this buf to send buffer
+  vstat = createMemRegion_RDMA(Hca_hndl,
+                               Pd_hndl,
+                               buf,
+                               buf_length,
+                               &rep_mr_hndl,
+                               &rep_mr);
+
+  if (vstat != VAPI_OK) {
+    CERROR("Send_Large_M\sg:  createMemRegion_RDMAi() failed (%s).\n",
+                        VAPI_strerror(vstat));
+  }
+  
+
+  Local_rdma_info.send_rdma_mr      = rep_mr;
+  Local_rdma_info.send_rdma_mr_hndl = rep_mr_hndl;
+
+  //
+  //     Prepare descriptor for send queue
+  //
+  // ask for a remote rdma buffer with size buf_length
+  send_id = RTS_handshaking_protocol(buf_length); 
+
+  qp = &QP_list[send_id];
+
+  // wait for the CTS message from the remote node 
+  while(1){
+     if(YES == Cts_Message_arrived) {
+        // received the CTS message from the remote node;
+        // Rdma_info is available for use
+        break;
+     }
+     schedule_timeout(RTS_CTS_TIMEOUT);
+  }
+  
+  sr_desc.id        = send_id + RDMA_OP_ID;
+  sr_desc.opcode    = VAPI_RDMA_WRITE_WITH_IMM;
+  sr_desc.comp_type = VAPI_SIGNALED;
+
+  // scatter and gather info 
+  sr_sg.len  = buf_length;
+
+  // rdma mr 
+  sr_sg.lkey = rep_mr.l_key;  
+  sr_sg.addr = (VAPI_virt_addr_t)(MT_virt_addr_t) rep_mr.start;
+  sr_desc.sg_lst_p = &sr_sg;
+  sr_desc.sg_lst_len = 1; // only 1 entry is used 
+
+  // immediate data - delivered to the peer in its RQ completion
+  sr_desc.imm_data = imm_data;
+  sr_desc.fence = TRUE;
+  sr_desc.set_se = FALSE;
+
+  // RDMA operation only
+  // raddr and rkey were received from the remote node  
+  sr_desc.remote_addr = Rdma_info.raddr;
+  sr_desc.r_key       = Rdma_info.rkey;
+
+  // call VAPI_post_sr to send out this data 
+  vstat = VAPI_post_sr(qp->hca_hndl, qp->qp_hndl, &sr_desc);
+
+  if (vstat != VAPI_OK) {
+     CERROR("Send_Large_Msg: VAPI_post_sr failed (%s).\n",VAPI_strerror_sym(vstat));
+  }
+
+  return vstat;
+}
+
+
+
+
+
+
+//
+//  repost_recv_buf
+//  post a used recv buffer back to recv WQE list 
+//  wrq_id is used to indicate the starting position of recv-buffer 
+//
+VAPI_ret_t 
+repost_recv_buf(QP_info      *qp,
+                VAPI_wr_id_t  wrq_id) 
+{
+  VAPI_rr_desc_t       rr;
+  VAPI_sg_lst_entry_t  sg_entry;
+  VAPI_ret_t           ret;
+
+  CDEBUG(D_NET, "repost_recv_buf\n");
+
+  sg_entry.lkey = MRbuf_list[wrq_id].mr.l_key;
+  sg_entry.len  = MRbuf_list[wrq_id].buf_size;
+  sg_entry.addr = (VAPI_virt_addr_t)(MT_virt_addr_t) MRbuf_list[wrq_id].buf_addr;
+  rr.opcode     = VAPI_RECEIVE;
+  rr.comp_type  = VAPI_SIGNALED; /* All with CQE (IB compliant) */
+  rr.sg_lst_len = 1; /* single buffers */
+  rr.sg_lst_p   = &sg_entry;
+  rr.id         = wrq_id; /* WQE id used is the index to buffers ptr array */
+
+  ret= VAPI_post_rr(qp->hca_hndl,qp->qp_hndl,&rr);
+     
+  if (ret != VAPI_OK){
+     CERROR("failed reposting RQ WQE (%s) buffer \n",VAPI_strerror_sym(ret));
+     return ret;
+  }
+
+  CDEBUG(D_NET, "Successfully reposting an RQ WQE %d recv bufer \n", wrq_id);
+
+  return ret ;
+}
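+
+/*
+ * Illustrative use of repost_recv_buf(): it is meant to be called after an
+ * RQ completion has been consumed, so the same registered buffer goes back
+ * on the receive queue.  The real dispatch happens in CQE_event_handler();
+ * the snippet below only shows the intended pattern.
+ *
+ *   VAPI_wc_desc_t wc;
+ *
+ *   if (VAPI_poll_cq(qp->hca_hndl, qp->rq_cq_hndl, &wc) == VAPI_OK &&
+ *       wc.opcode == VAPI_CQE_RQ_SEND_DATA) {
+ *           // ... hand the received data up to the portals lib ...
+ *           repost_recv_buf(qp, wc.id);
+ *   }
+ */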
+                       
+//
+// post_recv_bufs
+//     post NUM_ENTRY buffers for receiving data
+//     each receive buffer (starting address, size) comes from MRbuf_list
+//     and each posted WQE is given the id start_id + i
+//
+int 
+post_recv_bufs(VAPI_wr_id_t  start_id)
+{
+  int i;
+  VAPI_rr_desc_t       rr;
+  VAPI_sg_lst_entry_t  sg_entry;
+  VAPI_ret_t           ret;
+
+  CDEBUG(D_NET, "post_recv_bufs\n");
+
+  for(i=0; i< NUM_ENTRY; i++) {
+    sg_entry.lkey = MRbuf_list[i].mr.l_key;
+    sg_entry.len  = MRbuf_list[i].buf_size;
+    sg_entry.addr = (VAPI_virt_addr_t)(MT_virt_addr_t) MRbuf_list[i].buf_addr;
+    rr.opcode     = VAPI_RECEIVE;
+    rr.comp_type  = VAPI_SIGNALED;  /* All with CQE (IB compliant) */
+    rr.sg_lst_len = 1; /* single buffers */
+    rr.sg_lst_p   = &sg_entry;
+    rr.id         = start_id+i; /* WQE id used is the index to buffers ptr array */
+
+    ret= VAPI_post_rr(QP_list[i].hca_hndl,QP_list[i].qp_hndl, &rr);
+    if (ret != VAPI_OK) {
+       CERROR("failed posting RQ WQE (%s)\n",VAPI_strerror_sym(ret));
+       return i;
+    } 
+  }
+
+  return i; /* num of buffers posted */
+}
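+
+/*
+ * post_recv_bufs() is called once from IB_Open_HCA() in ibnal.c, before any
+ * send is issued:
+ *
+ *   Num_posted_recv_buf = post_recv_bufs((VAPI_wr_id_t) START_RECV_WRQ_ID);
+ *
+ * so receive WQEs are already outstanding when the first RTS/CTS message
+ * arrives from the peer.
+ */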
+                       
+int 
+post_RDMA_bufs(QP_info      *qp, 
+               void         *buf_array,
+               unsigned int  num_bufs,
+               unsigned int  buf_size,
+               VAPI_wr_id_t  start_id)
+{
+
+  CDEBUG(D_NET, "post_RDMA_bufs \n");
+  return YES;
+}
+
+
+
+//
+// LIB NAL
+// assign function pointers to their corresponding entries
+//
+
+nal_cb_t kibnal_lib = {
+        nal_data:       &kibnal_data,  /* NAL private data */
+        cb_send:        kibnal_send,
+        cb_send_pages:  NULL, // not implemented  
+        cb_recv:        kibnal_recv,
+        cb_recv_pages:  NULL, // not implemented 
+        cb_read:        kibnal_read,
+        cb_write:       kibnal_write,
+        cb_callback:    NULL, // not implemented 
+        cb_malloc:      kibnal_malloc,
+        cb_free:        kibnal_free,
+        cb_map:         NULL, // not implemented 
+        cb_unmap:       NULL, // not implemented 
+        cb_map_pages:   NULL, // not implemented 
+        cb_unmap_pages: NULL, // not implemented 
+        cb_printf:      kibnal_printf,
+        cb_cli:         kibnal_cli,
+        cb_sti:         kibnal_sti,
+        cb_dist:        kibnal_dist // not used at this moment 
+};
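+
+/*
+ * kibnal_lib is handed to lib_init() from kibnal_init() in ibnal.c;
+ * lib_dispatch() (see kibnal_forward()) then calls back through the
+ * cb_send/cb_recv/... entries above.  The NULL entries are callbacks this
+ * NAL does not implement yet.
+ */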
diff --git a/lnet/klnds/iblnd/ibnal_send_recv_self_testing.c b/lnet/klnds/iblnd/ibnal_send_recv_self_testing.c
new file mode 100644 (file)
index 0000000..82defdb
--- /dev/null
@@ -0,0 +1,116 @@
+/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
+ * vim:expandtab:shiftwidth=8:tabstop=8:
+ *
+ * Based on ksocknal, qswnal, and gmnal
+ *
+ * Copyright (C) 2003 LANL
+ *   Author: HB Chen <hbchen@lanl.gov>
+ *   Los Alamos National Lab
+ *
+ *   Portals is free software; you can redistribute it and/or
+ *   modify it under the terms of version 2 of the GNU General Public
+ *   License as published by the Free Software Foundation.
+ *
+ *   Portals is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *   GNU General Public License for more details.
+ *
+ *   You should have received a copy of the GNU General Public License
+ *   along with Portals; if not, write to the Free Software
+ *   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#include "ibnal.h"
+
+
+
+VAPI_ret_t ibnal_send_recv_self_testing()
+{
+ VAPI_ret_t           vstat;
+ VAPI_sr_desc_t       sr_desc;
+ VAPI_sg_lst_entry_t  sr_sg;
+ QP_info              *qp;
+ VAPI_wr_id_t         send_id;
+ int                  buf_id;
+ char                 sbuf[KB_32];
+ char                 rbuf[KB_32];
+ int                  i;
+ int                  buf_length = KB_32;
+ VAPI_wc_desc_t       comp_desc;
+ int                  num_send = 1;
+ int                  loop_count = 0;
+
+
+ printk("ibnal_send_recv_self_testing\n");
+
+ memset(&sbuf, 'a', KB_32);
+ memset(&rbuf, ' ', KB_32);
+ send_id = 2222; 
+ buf_id = 0;
+
+ qp = &QP_list[0];
+
+ sr_desc.opcode    = VAPI_SEND;
+ sr_desc.comp_type = VAPI_SIGNALED;
+
+ // scatter and gather info
+ sr_sg.len  = KB_32;
+ sr_sg.lkey = MSbuf_list[buf_id].mr.l_key; // use send MR
+ sr_sg.addr = (VAPI_virt_addr_t)(MT_virt_addr_t) MSbuf_list[buf_id].buf_addr;
+
+ // copy data to register send buffer
+ memcpy(&sr_sg.addr, &buf, buf_length);
+
+ sr_desc.sg_lst_p = &sr_sg;
+ sr_desc.sg_lst_len = 1; // only 1 entry is used
+ sr_desc.fence = TRUE;
+ sr_desc.set_se = FALSE;
+
+
+ // call VAPI_post_sr to send out this data
+ vstat = VAPI_post_sr(qp->hca_hndl, qp->qp_hndl, &sr_desc);
+
+ if (vstat != VAPI_OK) {
+   printk("VAPI_post_sr failed (%s).\n",VAPI_strerror(vstat));
+ }
+ else {
+   printk("VAPI_post_sr success.\n");
+ }
+
+ // poll for completion
+
+ while( loop_count < 100 ){
+   vstat = VAPI_poll_cq(qp->hca_hndl, qp->cq_hndl, &comp_desc);
+   if( vstat == VAPI_OK ) {
+       if(comp_desc.opcode == VAPI_CQE_SQ_SEND_DATA ) {
+          /* SEND completion */
+         printk("received SQ completion\n");
+       }
+       else {
+          if(comp_desc.opcode == VAPI_CQE_RQ_SEND_DATA ) {
+             /* RECEIVE completion */
+             printk("received RQ completion\n");
+             memcpy(&rbuf, (char *) MRbuf_list[buf_id].buf_addr, KB_32);
+
+             printk("compare sbuf and rbuf n = %d\n",
+                    memcmp(&sbuf, &rbuf, KB_32));
+          }
+          else {
+             printk("unexpected completion opcode %d \n", comp_desc.opcode);
+          }
+       }
+   }
+
+   loop_count++; 
+   schedule_timeout(500);
+ }
+
+ printk("end of ibnal_self_send_recv_testing\n");
+
+
+}
diff --git a/lnet/klnds/iblnd/uagent.c b/lnet/klnds/iblnd/uagent.c
new file mode 100644 (file)
index 0000000..d7e939a
--- /dev/null
@@ -0,0 +1,391 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <pthread.h>
+
+
+#include <linux/shm.h>
+#include <linux/ipc.h>
+#include <linux/stat.h>
+#include <linux/types.h>
+
+#include <sys/socket.h>
+#include <netinet/in.h>
+#include <arpa/inet.h>
+#include <unistd.h>
+
+// Infiniband VAPI/EVAPI header files Mellanox MT23108 VAPI
+#include <vapi.h>
+#include <vapi_types.h>
+#include <vapi_common.h>
+#include <evapi.h>
+
+// Remote HCA Info information
+ typedef struct Remote_HCA_Info {
+       unsigned long     opcode;
+       unsigned long     length;
+       IB_lid_t          dlid[256];
+       VAPI_qp_num_t     rqp_num[256];
+       VAPI_rkey_t       rkey;   // for remote RDMA request
+       unsigned long     vaddr1; // virtual address first 4 bytes
+       unsigned long     vaddr2; // virtual address second 4 bytes
+       u_int32_t         size;   // size of RDMA memory buffer
+       char              dest_ip[256]; //destination server IP address 
+ } Remote_HCA_Info;
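+
+// Every multi-byte field of this structure is converted to network byte
+// order before it goes on the socket and converted back on receipt (see
+// client_thread()/server_thread() below), so the exchange also works
+// between hosts of different endianness.  For example:
+//
+//   out.opcode  = htonl(in->opcode);      // 32-bit fields
+//   out.dlid[i] = htons(in->dlid[i]);     // 16-bit LIDs
+//   ...
+//   in->opcode  = ntohl(out.opcode);      // on the receiving side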
+
+#define SHARED_SEGMENT_SIZE  0x10000 // 64KB shared memory between user and kernel space
+
+// some internals opcodes for IB operations used in IBNAL
+#define SEND_QP_INFO          0X00000001
+#define RECV_QP_INFO          0X00000010
+#define DEFAULT_SOCKET_PORT   11211 
+#define LISTEN_QUEUE_SIZE     2048 
+#define DEST_IP                      "10.128.105.26"
+
+// server_thread
+// + wait for an incoming connection from the remote node
+// + receive the remote HCA's data
+//
+void *server_thread(void *vargp)
+{
+  Remote_HCA_Info   *hca_data;
+  Remote_HCA_Info   hca_data_buffer;
+  
+  int    serverfd;
+  int    infd;
+  struct hostent  *hp;
+  struct sockaddr_in serveraddr;
+  struct sockaddr_in clientaddr;
+  socklen_t sin_size = sizeof(struct sockaddr_in);
+  int   bytes_recv;
+  int    i;
+
+
+  hca_data = (Remote_HCA_Info *) vargp;
+  
+  if((serverfd = socket(AF_INET, SOCK_STREAM, 0)) < 0) {
+    printf("server_thread couldnot create a socket \n");
+    pthread_exit((void *) 0);
+  }
+  printf("server_thread create a socket \n");
+
+  bzero((char *) &serveraddr, sizeof(serveraddr));
+
+  serveraddr.sin_family = AF_INET;
+  serveraddr.sin_addr.s_addr = htonl(INADDR_ANY);
+  serveraddr.sin_port = htons((unsigned short) DEFAULT_SOCKET_PORT);
+  
+  if(bind(serverfd,(struct sockaddr *)&serveraddr,sizeof(struct sockaddr)) < 0) {
+    printf("server_thread couldnot bind to a socket \n");
+    pthread_exit((void *) 0);
+  }
+
+  printf("server_thread bind to a socket \n");
+
+  if(listen(serverfd, LISTEN_QUEUE_SIZE) < 0) {
+    printf("server_thread couldnot listen to a socket \n");
+    pthread_exit((void *) 0);
+  }
+
+  printf("server_thread listen to a socket \n");
+
+  //
+  // I only expect to receive one HCA data from a remote HCA 
+  //
+  printf("server_thread: Waiting for a connection\n");
+  infd= accept(serverfd,(struct sockaddr*)&clientaddr,&sin_size);
+  printf("server_thread: Got an incoming connection");
+
+  /* receive data from socket into buffer */
+  bytes_recv = recv(infd,
+                    &hca_data_buffer,  
+                    sizeof(Remote_HCA_Info),
+                   0);
+
+  if(bytes_recv > 0) {
+/*       
+      printf("server_thread receive data\n");
+      printf("opcode is 0x%X\n", hca_data_buffer.opcode);
+      printf("length is 0x%X\n", hca_data_buffer.length);
+
+      for(i=0; i < 256; i++) {
+        printf("dlid %d is 0x%X\n", i, hca_data_buffer.dlid[i]);
+        printf("rqp_num %d is 0x%X\n", hca_data_buffer.rqp_num[i]);
+      }
+
+      printf("rkey is 0x%X\n", hca_data_buffer.rkey);
+      printf("vaddr1 is 0x%X\n", hca_data_buffer.vaddr1);
+      printf("vaddr2 is 0x%X\n", hca_data_buffer.vaddr2);
+      printf("size is 0x%X\n", hca_data_buffer.size);
+      printf("After conversion hton \n");
+      printf("opcode is 0x%X\n", htonl(hca_data_buffer.opcode));
+      printf("length is 0x%X\n", htonl(hca_data_buffer.length));
+
+      for(i=0; i < 256; i++) {
+        printf("dlid %d is 0x%X\n", htons(hca_data_buffer.dlid[i]));
+        printf("rqp_num %d is 0x%X\n", htonl(hca_data_buffer.rqp_num[i]));
+      }
+
+      printf("rkey is 0x%X\n", htonl(hca_data_buffer.rkey));
+      printf("vaddr1 is 0x%X\n", htonl(hca_data_buffer.vaddr1));
+      printf("vaddr2 is 0x%X\n", htonl(hca_data_buffer.vaddr2));
+      printf("size is 0x%X\n", htonl(hca_data_buffer.size));
+*/     
+
+      hca_data->opcode  = ntohl(hca_data_buffer.opcode); // long 
+      hca_data->length  = ntohl(hca_data_buffer.length); // long
+
+      for(i=0; i < 256; i++) {
+        hca_data->dlid[i]    = ntohs(hca_data_buffer.dlid[i]);   // u_int16
+        hca_data->rqp_num[i] = ntohl(hca_data_buffer.rqp_num[i]);// u_int32
+      }
+
+      hca_data->rkey    = ntohl(hca_data_buffer.rkey);   // u_int32
+      hca_data->vaddr1  = ntohl(hca_data_buffer.vaddr1); // first word u_int32
+      hca_data->vaddr2  = ntohl(hca_data_buffer.vaddr2); // second word u_int32
+      hca_data->size    = ntohl(hca_data_buffer.size);   // u_int32
+  }
+  else {
+    printf("server_thread receive ERROR bytes_recv = %d\n", bytes_recv);
+  }
+
+  close(infd);
+  close(serverfd);
+
+  printf("server_thread EXIT \n");
+      
+  pthread_exit((void *) 0);
+
+}
+
+//
+// client_thread 
+// + connect to a remote server_thread
+// + send local HCA's data to remote server_thread
+//
+void *client_thread(void *vargp)
+{
+
+  Remote_HCA_Info   *hca_data;
+  Remote_HCA_Info   hca_data_buffer;
+
+  int    clientfd;
+  struct hostent  *hp;
+  struct sockaddr_in clientaddr;
+  int    bytes_send;
+  int    i;
+  
+  hca_data = (Remote_HCA_Info *) vargp;
+
+  if((clientfd = socket(AF_INET, SOCK_STREAM, 0)) < 0) {
+    printf("client_thread couldnot create a socket \n");
+    pthread_exit((void *) 0);
+  }
+  printf("client_thread create a socket \n");
+  
+  bzero((char *) &clientaddr, sizeof(clientaddr));
+
+  clientaddr.sin_family = AF_INET;
+  clientaddr.sin_addr.s_addr = inet_addr(hca_data->dest_ip);
+  printf("client_thread get server Ip address = %s\n", hca_data->dest_ip);
+  clientaddr.sin_port = htons((unsigned short) DEFAULT_SOCKET_PORT);
+  memset(&(clientaddr.sin_zero), '\0', 8);
+
+  if(connect(clientfd, (struct sockaddr *) &clientaddr, sizeof(struct sockaddr)) < 0) {
+    printf("client_thread could not connect to server IP address = %s\n", hca_data->dest_ip);
+    pthread_exit((void *) 0);
+  }
+
+  printf("client_thread connected to server IP address = %s\n", hca_data->dest_ip);
+
+  hca_data_buffer.opcode  = htonl(hca_data->opcode); // long 
+  hca_data_buffer.length  = htonl(hca_data->length); // long
+
+  for(i=0; i < 256; i++) {
+    hca_data_buffer.dlid[i]    = htons(hca_data->dlid[i]);   // u_int16
+    hca_data_buffer.rqp_num[i] = htonl(hca_data->rqp_num[i]);// u_int32
+  }
+
+  hca_data_buffer.rkey    = htonl(hca_data->rkey);   // u_int32
+  hca_data_buffer.vaddr1  = htonl(hca_data->vaddr1); // first word u_int32
+  hca_data_buffer.vaddr2  = htonl(hca_data->vaddr2); // second word u_int32
+  hca_data_buffer.size    = htonl(hca_data->size);   // u_int32
+  bytes_send = send(clientfd, & hca_data_buffer, sizeof(Remote_HCA_Info), 0); 
+  
+  if(bytes_send == sizeof(Remote_HCA_Info)) {
+    printf("client_thread: send successfully \n");
+  }
+  else {
+    printf("client_thread: send failed \n");
+  }
+
+  printf("client_thread EXIT \n");
+
+  pthread_exit((void *) 0);
+}
+
+
+//
+//  main 
+//  + create a shared-memory between this main()/user address and
+//    a kernel thread/kernel address space associated with inbal 
+//    kernel module 
+//  + access local HCA's data through this shared memory 
+//
+//  + create a server_thread for receiving remote HCA's data
+//  + create a client_thread for sending out local HCA's data
+//  + after receiving remote HCA's data update this shared memory
+//
+int  main(int argc , char *argv[])
+{
+  int              segment_id;
+  struct shmid_ds  shmbuffer;
+  int              segment_size;
+  const int        shared_segment_size = sizeof(Remote_HCA_Info);
+  key_t            key = 999;
+  unsigned long    raddr;
+  Remote_HCA_Info  *shared_memory;
+  Remote_HCA_Info  exchange_hca_data;
+  Remote_HCA_Info  remote_hca_data;
+  int i; 
+
+  /* pthread */
+  pthread_t          sid;
+  pthread_t          cid;
+  pthread_attr_t     attr; 
+  int                rc, status;
+
+  char dest_ip[256];
+
+  if(argc != 2) {
+         printf("USAGE:   uagent   server_ip_address\n");
+         printf("argc = %d \n", argc);
+         exit(1);
+  }
+
+  strcpy(&exchange_hca_data.dest_ip[0], argv[1]);
+  printf("the destinational server IP address = %s\n", 
+                                      &exchange_hca_data.dest_ip); 
+
+  segment_id =  shmget(key, shared_segment_size, IPC_CREAT | 0666);
+
+  printf("sys_shmget is done segment_id = %d\n", segment_id);
+
+  shared_memory = (Remote_HCA_Info *) shmat(segment_id, 0, 0);
+
+  if(shared_memory == (Remote_HCA_Info *) -1) {
+    printf("Shared memory attach failed shared_memory=%p\n",shared_memory);
+    exit(0);
+  }
+
+  printf("shared menory attached at address %p\n", shared_memory);
+
+  while (1) {
+    if(shared_memory->opcode ==  SEND_QP_INFO) {
+      printf("Local HCA data received from kernel thread\n");
+      break;
+    }
+    usleep(1000);
+    continue;
+  }
+
+  printf("Local HCA data received from kernel thread\n");
+
+  // save local HCA's data in exchange_hca_data
+  //
+  exchange_hca_data.opcode  = shared_memory->opcode;
+  exchange_hca_data.length  = shared_memory->length;
+
+  for(i=0; i < 256; i++) {
+    exchange_hca_data.dlid[i]    = shared_memory->dlid[i];
+    exchange_hca_data.rqp_num[i] = shared_memory->rqp_num[i];
+  }
+
+  exchange_hca_data.rkey    = shared_memory->rkey;
+  exchange_hca_data.vaddr1  = shared_memory->vaddr1;
+  exchange_hca_data.vaddr2  = shared_memory->vaddr2;
+  exchange_hca_data.size    = shared_memory->size;
+
+  /* Initialize and set thread detached attribute */
+  pthread_attr_init(&attr);
+  pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE);
+
+  /* create a server thread for processing incoming remote node socket data */
+  // 
+  pthread_create(&sid, 
+                 &attr, 
+                 server_thread,
+                 (Remote_HCA_Info *) &remote_hca_data);
+
+  printf("Main: created a server thread \n");
+
+  sleep(10);
+  
+  /* create a client thread to send out local HCA data to the remote node */
+  pthread_create(&cid, 
+                 &attr, 
+                 client_thread,
+                 (Remote_HCA_Info *) &exchange_hca_data);
+
+  printf("Main: created a client  thread \n");
+
+  /* wait for both server_thread and client_thread to finish */
+  pthread_attr_destroy(&attr);
+
+  rc = pthread_join(sid, (void **) &status);
+  if(rc) {
+    printf("Error: return code from pthread_join() is %d\n", rc);
+    exit(-1);
+  }
+
+  printf("completed join with thread %d status = %d\n", sid, status);
+
+  rc = pthread_join(cid, (void **) &status);
+  if(rc) {
+    printf("Error: return code from pthread_join() is %d\n", rc);
+    exit(-1);
+  }
+  printf("completed join with thread %d status = %d\n", cid, status);
+
+  // update shared memory with remote HCA's data 
+
+  shared_memory->opcode = RECV_QP_INFO;
+  shared_memory->length = remote_hca_data.length;
+  for(i=0; i < 256; i++) {
+    shared_memory->dlid[i]   = remote_hca_data.dlid[i];
+    shared_memory->rqp_num[i]= remote_hca_data.rqp_num[i];
+  }
+  shared_memory->rkey   = remote_hca_data.rkey;
+  shared_memory->vaddr1 = remote_hca_data.vaddr1;
+  shared_memory->vaddr2 = remote_hca_data.vaddr2;
+  shared_memory->size   = remote_hca_data.size;
+
+  sleep(5);
+
+  shared_memory->opcode = RECV_QP_INFO;
+  shared_memory->length = remote_hca_data.length;
+  for(i=0; i < 256; i++) {
+    shared_memory->dlid[i]   = remote_hca_data.dlid[i];
+    shared_memory->rqp_num[i]= remote_hca_data.rqp_num[i];
+  }
+  
+  shared_memory->rkey   = remote_hca_data.rkey;
+  shared_memory->vaddr1 = remote_hca_data.vaddr1;
+  shared_memory->vaddr2 = remote_hca_data.vaddr2;
+  shared_memory->size   = remote_hca_data.size;
+
+  sleep(10);
+  
+//  shmdt(shared_memory);
+   
+  printf("uagent is DONE \n");
+  
+
+  exit(0);
+
+}
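+
+/*
+ * uagent is a stand-alone user-space helper: it attaches to the shared
+ * memory segment published by the ibnal kernel thread (key 999 here) and
+ * swaps QP/LID data with the uagent on the peer node over TCP port
+ * DEFAULT_SOCKET_PORT (11211).  Only the VAPI type definitions are used,
+ * so a build line along these lines should be enough (the include path is
+ * the default probed by archdep.m4 and is site-specific):
+ *
+ *   gcc -o uagent uagent.c -I/usr/local/ib/include -lpthread
+ *
+ * Run it on both nodes once the kibnal module is loaded, passing the
+ * peer's IP address:
+ *
+ *   ./uagent 10.128.105.26
+ */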
+
index aca06a6..8b02d26 100644 (file)
@@ -930,6 +930,7 @@ char *portals_nid2str(int nal, ptl_nid_t nid, char *str)
                 break;
         case QSWNAL:
         case GMNAL:
+        case IBNAL:
         case TOENAL:
         case SCIMACNAL:
                 sprintf(str, "%u:%u", (__u32)(nid >> 32), (__u32)nid);
index d233903..a15ce6a 100644 (file)
@@ -320,6 +320,8 @@ kportal_get_ni (int nal)
                 return  (PORTAL_SYMBOL_GET(ktoenal_ni));
         case GMNAL:
                 return  (PORTAL_SYMBOL_GET(kgmnal_ni));
+        case IBNAL:
+                return  (PORTAL_SYMBOL_GET(kibnal_ni));
         case TCPNAL:
                 /* userspace NAL */
                 return (NULL);
@@ -350,6 +352,9 @@ kportal_put_ni (int nal)
         case GMNAL:
                 PORTAL_SYMBOL_PUT(kgmnal_ni);
                 break;
+        case IBNAL:
+                PORTAL_SYMBOL_PUT(kibnal_ni);
+                break;
         case TCPNAL:
                 /* A lesson to a malicious caller */
                 LBUG ();
index f77a439..020a2a9 100644 (file)
@@ -26,7 +26,7 @@
 #include <portals/api-support.h>
 
 int ptl_init;
-unsigned int portal_subsystem_debug = ~0 - (S_PORTALS | S_QSWNAL | S_SOCKNAL | S_GMNAL);
+unsigned int portal_subsystem_debug = ~0 - (S_PORTALS | S_QSWNAL | S_SOCKNAL | S_GMNAL | S_IBNAL);
 unsigned int portal_debug = ~0;
 unsigned int portal_cerror = 1;
 unsigned int portal_printk;
index af34cba..5309eb4 100644 (file)
@@ -66,6 +66,7 @@ static name2num_t nalnames[] = {
         {"toe",                TOENAL},
         {"elan",       QSWNAL},
         {"gm",         GMNAL},
+        {"ib",         IBNAL},
         {"scimac",      SCIMACNAL},
         {NULL,         -1}
 };
index 7910823..6171555 100644 (file)
@@ -297,6 +297,28 @@ AC_SUBST(with_gm)
 AC_SUBST(GMNAL)
 
 
+#fixme: where are the default IB includes?
+default_ib_include_dir=/usr/local/ib/include
+an_ib_include_file=vapi.h
+
+AC_ARG_WITH(ib, [ --with-ib=[yes/no/path] Path to IB includes], with_ib=$withval, with_ib=$default_ib)
+AC_MSG_CHECKING(if IB headers are present)
+if test "$with_ib" = yes; then
+    with_ib=$default_ib_include_dir
+fi
+if test "$with_ib" != no -a -f ${with_ib}/${an_ib_include_file}; then
+    AC_MSG_RESULT(yes)
+    IBNAL="ibnal"
+    with_ib="-I${with_ib}"
+else
+    AC_MSG_RESULT(no)
+    IBNAL=""
+    with_ib=""
+fi
+AC_SUBST(IBNAL)
+AC_SUBST(with_ib)
+
+
 def_scamac=/opt/scali/include
 AC_ARG_WITH(scamac, [  --with-scamac=[yes/no/path] Path to ScaMAC includes (default=/opt/scali/include)], with_scamac=$withval, with_scamac=$def_scamac)
 AC_MSG_CHECKING(if ScaMAC headers are present)
@@ -317,7 +339,7 @@ AC_SUBST(with_scamac)
 AC_SUBST(SCIMACNAL)
 
 CFLAGS="$KCFLAGS"
-CPPFLAGS="$KINCFLAGS $KCPPFLAGS $MFLAGS $enable_zerocopy $enable_affinity $with_quadrics $with_gm $with_scamac "
+CPPFLAGS="$KINCFLAGS $KCPPFLAGS $MFLAGS $enable_zerocopy $enable_affinity $with_quadrics $with_gm $with_scamac $with_ib"
 
 AC_SUBST(MOD_LINK)
 AC_SUBST(LINUX25)
index f152725..dfca5fb 100644 (file)
@@ -43,6 +43,7 @@ extern unsigned int portal_cerror;
 #define S_GMNAL       (1 << 19)
 #define S_PTLROUTER   (1 << 20)
 #define S_COBD        (1 << 21)
+#define S_IBNAL       (1 << 22)
 
 /* If you change these values, please keep portals/utils/debug.c
  * up to date! */
@@ -1034,6 +1035,7 @@ enum {
         TCPNAL,
         SCIMACNAL,
         ROUTER,
+        IBNAL,
         NAL_ENUM_END_MARKER
 };
 
@@ -1042,6 +1044,7 @@ extern ptl_handle_ni_t  kqswnal_ni;
 extern ptl_handle_ni_t  ksocknal_ni;
 extern ptl_handle_ni_t  ktoenal_ni;
 extern ptl_handle_ni_t  kgmnal_ni;
+extern ptl_handle_ni_t  kibnal_ni;
 extern ptl_handle_ni_t  kscimacnal_ni;
 #endif
 
index fed2785..25aab9d 100644 (file)
@@ -3,5 +3,5 @@
 # This code is issued under the GNU General Public License.
 # See the file COPYING in this distribution
 
-DIST_SUBDIRS= socknal toenal qswnal gmnal scimacnal 
-SUBDIRS= socknal toenal        @QSWNAL@ @GMNAL@ @SCIMACNAL@
+DIST_SUBDIRS= socknal toenal qswnal gmnal scimacnal ibnal
+SUBDIRS= socknal toenal        @QSWNAL@ @GMNAL@ @SCIMACNAL@ @IBNAL@
diff --git a/lustre/portals/knals/ibnal/Makefile.am b/lustre/portals/knals/ibnal/Makefile.am
new file mode 100644 (file)
index 0000000..788c641
--- /dev/null
@@ -0,0 +1,10 @@
+include ../../Rules.linux
+
+MODULE = kibnal
+modulenet_DATA = kibnal.o
+EXTRA_PROGRAMS = kibnal
+
+
+DEFS =
+CPPFLAGS=@CPPFLAGS@ @with_ib@
+kibnal_SOURCES = ibnal.h ibnal.c ibnal_cb.c
diff --git a/lustre/portals/knals/ibnal/ibnal.c b/lustre/portals/knals/ibnal/ibnal.c
new file mode 100644 (file)
index 0000000..948badf
--- /dev/null
@@ -0,0 +1,2146 @@
+/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
+ * vim:expandtab:shiftwidth=8:tabstop=8:
+ *
+ * Based on ksocknal, qswnal, and gmnal
+ *
+ * Copyright (C) 2003 LANL 
+ *   Author: HB Chen <hbchen@lanl.gov>
+ *   Los Alamos National Lab
+ *
+ *   Portals is free software; you can redistribute it and/or
+ *   modify it under the terms of version 2 of the GNU General Public
+ *   License as published by the Free Software Foundation.
+ *
+ *   Portals is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *   GNU General Public License for more details.
+ *
+ *   You should have received a copy of the GNU General Public License
+ *   along with Portals; if not, write to the Free Software
+ *   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *   
+ */
+
+#include "ibnal.h"
+
+// portal handle ID for this IB-NAL
+ptl_handle_ni_t kibnal_ni;
+
+// message send buffer mutex
+spinlock_t   MSBuf_mutex[NUM_MBUF];
+
+// message recv buffer mutex
+spinlock_t   MRBuf_mutex[NUM_MBUF];
+
+// IB-NAL API information 
+nal_t  kibnal_api; 
+
+// nal's private data 
+kibnal_data_t kibnal_data; 
+
+int ibnal_debug = 0;
+VAPI_pd_hndl_t      Pd_hndl;    
+unsigned int    Num_posted_recv_buf;
+
+// registered send buffer list
+Memory_buffer_info MSbuf_list[NUM_MBUF]; 
+
+// registered recv buffer list 
+Memory_buffer_info MRbuf_list[NUM_MBUF];
+
+//
+// for router 
+// currently there is no need for it with IBA
+//
+kpr_nal_interface_t kibnal_router_interface = {
+        kprni_nalid: IBNAL,
+        kprni_arg:   &kibnal_data,
+        kprni_fwd:   kibnal_fwd_packet, // forward data to the router
+                                        // when the router is involved in
+                                        // the data transmission
+};
+
+
+// Queue-pair list 
+QP_info QP_list[NUM_QPS];
+
+// information associated with a HCA 
+HCA_info        Hca_data;
+
+// something about HCA 
+VAPI_hca_hndl_t      Hca_hndl; // assume we only use one HCA now 
+VAPI_hca_vendor_t    Hca_vendor;
+VAPI_hca_cap_t       Hca_cap;
+VAPI_hca_port_t      Hca_port_1_props;
+VAPI_hca_port_t      Hca_port_2_props;
+VAPI_hca_attr_t      Hca_attr;
+VAPI_hca_attr_mask_t Hca_attr_mask;
+VAPI_cq_hndl_t       Cq_RQ_hndl;    // CQ's handle
+VAPI_cq_hndl_t       Cq_SQ_hndl;    // CQ's handle
+VAPI_cq_hndl_t       Cq_hndl;    // CQ's handle
+Remote_QP_Info       L_QP_data;
+Remote_QP_Info       R_QP_data;
+
+
+//
+// forward  API
+//
+int 
+kibnal_forward(nal_t   *nal,
+               int     id,
+               void    *args,  
+               size_t args_len,
+               void    *ret,   
+               size_t ret_len)
+{
+        kibnal_data_t *knal_data = nal->nal_data;
+        nal_cb_t      *nal_cb = knal_data->kib_cb;
+
+        // ASSERT checking 
+        LASSERT (nal == &kibnal_api);
+        LASSERT (knal_data == &kibnal_data);
+        LASSERT (nal_cb == &kibnal_lib);
+
+        // dispatch forward API function 
+        
+        CDEBUG(D_NET,"kibnal_forward: function id = %d\n", id);
+
+        lib_dispatch(nal_cb, knal_data, id, args, ret); 
+
+        CDEBUG(D_TRACE,"IBNAL- Done kibnal_forward\n");
+
+        return PTL_OK; // always return PTL_OK
+}
+
+//
+// lock API  
+//
+void 
+kibnal_lock(nal_t *nal, unsigned long *flags)
+{
+        kibnal_data_t *knal_data = nal->nal_data;
+        nal_cb_t      *nal_cb = knal_data->kib_cb;
+
+        // ASSERT checking 
+        LASSERT (nal == &kibnal_api);
+        LASSERT (knal_data == &kibnal_data);
+        LASSERT (nal_cb == &kibnal_lib);
+
+        // disable logical interrupt 
+        nal_cb->cb_cli(nal_cb,flags);
+
+        CDEBUG(D_TRACE,"IBNAL-Done kibnal_lock\n");
+
+}
+
+//
+// unlock API
+//
+void 
+kibnal_unlock(nal_t *nal, unsigned long *flags)
+{
+        kibnal_data_t *k = nal->nal_data;
+        nal_cb_t      *nal_cb = k->kib_cb;
+
+        // ASSERT checking
+        LASSERT (nal == &kibnal_api);
+        LASSERT (k == &kibnal_data);
+        LASSERT (nal_cb == &kibnal_lib);
+
+        // enable logical interrupt 
+        nal_cb->cb_sti(nal_cb,flags);
+
+        CDEBUG(D_TRACE,"IBNAL-Done kibnal_unlock");
+
+}
+
+//
+// shutdown API 
+//     shut down this network interface 
+//
+int
+kibnal_shutdown(nal_t *nal, int ni)
+{       
+        VAPI_ret_t          vstat;
+        kibnal_data_t *k = nal->nal_data;
+        nal_cb_t      *nal_cb = k->kib_cb;
+
+        // assert checking
+        LASSERT (nal == &kibnal_api);
+        LASSERT (k == &kibnal_data);
+        LASSERT (nal_cb == &kibnal_lib);
+
+        // take down this IB network interface 
+        // there is no corresponding cb function to handle this,
+        // so the HCA is simply closed here
+        //
+        
+        vstat = IB_Close_HCA();
+
+        if (vstat != VAPI_OK) {
+           CERROR("Failed to close HCA  - %s\n",VAPI_strerror(vstat));
+           return (~PTL_OK);
+        }
+
+        CDEBUG(D_TRACE,"IBNAL- Done kibnal_shutdown\n");
+
+        return PTL_OK;
+}
+
+//
+// yield 
+// called to give other tasks a chance to run 
+//
+void 
+kibnal_yield( nal_t *nal )
+{
+        kibnal_data_t *k = nal->nal_data;
+        nal_cb_t      *nal_cb = k->kib_cb;
+        
+        // assert checking
+        LASSERT (nal == &kibnal_api);
+        LASSERT (k    == &kibnal_data);
+        LASSERT (nal_cb == &kibnal_lib);
+
+        // give up the CPU if the scheduler has flagged this task
+        // for rescheduling (need_resched)
+        if (current->need_resched)
+                schedule();
+
+        CDEBUG(D_TRACE,"IBNAL-Done kibnal_yield\n");
+
+        return;
+}
+
+//
+// ibnal init 
+//
+nal_t *
+kibnal_init(int             interface, // not used here 
+            ptl_pt_index_t  ptl_size,
+            ptl_ac_index_t  ac_size, 
+            ptl_pid_t       requested_pid // not used here
+           )
+{
+  nal_t         *nal       = NULL;
+  nal_cb_t      *nal_cb    = NULL;
+  kibnal_data_t *nal_data  = NULL;
+  int            rc;
+
+  unsigned int nnids = 1; // number of nids
+                          // currently a single nid is assumed for
+                          // this kib_nid
+                          //
+
+  CDEBUG(D_NET, "kibnal_init:calling lib_init with nid 0x%u\n",
+                  kibnal_data.kib_nid);
+
+
+  CDEBUG(D_NET, "kibnal_init: interface [%d], ptl_size [%d], ac_size[%d]\n", 
+                 interface, ptl_size, ac_size);
+  CDEBUG(D_NET, "kibnal_init: &kibnal_lib  0x%X\n", &kibnal_lib);
+  CDEBUG(D_NET, "kibnal_init: kibnal_data.kib_nid  %d\n", kibnal_data.kib_nid);
+
+  rc = lib_init(&kibnal_lib, 
+                kibnal_data.kib_nid, 
+                0, // process id is set as 0  
+                nnids,
+                ptl_size, 
+                ac_size);
+
+  if(rc != PTL_OK) {
+     CERROR("kibnal_init: Failed lib_init with nid 0x%u, rc=%d\n",
+                                  kibnal_data.kib_nid,rc);
+  }
+  else {
+      CDEBUG(D_NET,"kibnal_init: DONE lib_init with nid 0x%x%x\n",
+                                  kibnal_data.kib_nid);
+  }
+
+  return &kibnal_api;
+
+}
+
+
+//
+// called before removing the ibnal kernel module 
+//
+void __exit 
+kibnal_finalize(void) 
+{ 
+        struct list_head *tmp;
+
+        inter_module_unregister("kibnal_ni");
+
+        // release resources allocated to this Infiniband network interface 
+        PtlNIFini(kibnal_ni); 
+
+        lib_fini(&kibnal_lib); 
+
+        IB_Close_HCA();
+
+        // how much do we need to do here?
+        list_for_each(tmp, &kibnal_data.kib_list) {
+                kibnal_rx_t *conn;
+                conn = list_entry(tmp, kibnal_rx_t, krx_item);
+                CDEBUG(D_IOCTL, "freeing conn %p\n",conn);
+                tmp = tmp->next;
+                list_del(&conn->krx_item);
+                PORTAL_FREE(conn, sizeof(*conn));
+        }
+
+        CDEBUG(D_MALLOC,"done kmem %d\n",atomic_read(&portal_kmemory));
+        CDEBUG(D_TRACE,"IBNAL-Done kibnal_finalize\n");
+
+        return;
+}
+
+
+//
+// * k_server_thread is a kernel thread 
+//   use a shared memory to exchange HCA's data with a pthread in user 
+//   address space
+// * will be replaced when CM is used to handle communication management 
+//
+
+void k_server_thread(Remote_QP_Info *hca_data)
+{
+  int              segment_id;
+  const int        shared_segment_size = sizeof(Remote_QP_Info); 
+  key_t            key = HCA_EXCHANGE_SHM_KEY;
+  unsigned long    raddr;
+  int exchanged_done = NO;
+  int i;
+
+  Remote_QP_Info  *exchange_hca_data;
+
+  long *n;
+  long *uaddr;
+  long ret = 0;
+  // create a shared memory with pre-agreement key
+  segment_id =  sys_shmget(key,
+                           shared_segment_size,
+                           IPC_CREAT | 0666);
+
+
+  // attach to the shared memory
+  // raddr points to a user address space
+  // use this address to update the shared memory content
+  ret = sys_shmat(segment_id, 0 , SHM_RND, &raddr);
+
+#ifdef IBNAL_DEBUG 
+  if(ret >= 0) {
+    CDEBUG(D_NET,"k_server_thread: Shared memory attach success ret = 0X%d,&raddr"
+                   " 0X%x (*(&raddr))=0x%x \n", ret, &raddr,  (*(&raddr)));
+    printk("k_server_thread: Shared memory attach success ret = 0X%d, &raddr"
+                   " 0X%x (*(&raddr))=0x%x \n", ret, &raddr,  (*(&raddr)));
+  }
+  else {
+    CERROR("k_server_thread: Shared memory attach failed ret = 0x%d \n", ret); 
+    printk("k_server_thread: Shared memory attach failed ret = 0x%d \n", ret); 
+    return;
+  }
+#endif
+
+  n = &raddr;
+  uaddr = *n; // get the U-address 
+  /* cast uaddr to exchange_hca_data */
+  exchange_hca_data = (Remote_QP_Info  *) uaddr; 
+  
+  /* copy data from local HCA to shared memory */
+  exchange_hca_data->opcode  = hca_data->opcode;
+  exchange_hca_data->length  = hca_data->length;
+
+  for(i=0; i < NUM_QPS; i++) {
+    exchange_hca_data->dlid[i]    = hca_data->dlid[i];
+    exchange_hca_data->rqp_num[i] = hca_data->rqp_num[i];
+  }
+
+  // periodically check the shared memory until the remote HCA's data
+  // has been updated by the user mode pthread
+  while(exchanged_done == NO) {
+    if(exchange_hca_data->opcode == RECV_QP_INFO){
+       exchanged_done = YES;
+       /* copy data to local buffer from shared memory */
+       hca_data->opcode  = exchange_hca_data->opcode;
+       hca_data->length  = exchange_hca_data->length;
+
+       for(i=0; i < NUM_QPS; i++) {
+         hca_data->dlid[i]    = exchange_hca_data->dlid[i];
+         hca_data->rqp_num[i] = exchange_hca_data->rqp_num[i];
+       }
+       break;
+    }
+    else { 
+       schedule_timeout(1000);
+    }
+  }
+  
+  // detached shared memory 
+  sys_shmdt(uaddr);
+
+  CDEBUG(D_NET, "Exit from kernel thread: k_server_thread \n");
+  printk("Exit from kernel thread: k_server_thread \n");
+
+  return;
+
+}
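+
+/*
+ * Shared-memory handshake between k_server_thread and the user-space
+ * uagent (sketch):
+ *
+ *   kernel (k_server_thread)              user (uagent)
+ *   ------------------------              -------------
+ *   sys_shmget/sys_shmat                  shmget/shmat on the same key
+ *   write local QP info,
+ *   opcode = SEND_QP_INFO
+ *                                         read it, exchange it with the
+ *                                         peer node over TCP, write the
+ *                                         peer's QP info back,
+ *                                         opcode = RECV_QP_INFO
+ *   poll until opcode == RECV_QP_INFO,
+ *   copy the peer's dlid/rqp_num out
+ */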
+
+//
+// create QP 
+// 
+VAPI_ret_t 
+create_qp(QP_info *qp, int qp_index)
+{
+
+  VAPI_ret_t          vstat;
+  VAPI_qp_init_attr_t qp_init_attr;
+  VAPI_qp_prop_t      qp_prop;
+
+  qp->hca_hndl = Hca_hndl;
+  qp->port     = 1; // default 
+  qp->slid     = Hca_port_1_props.lid;
+  qp->hca_port = Hca_port_1_props;
+
+
+  /* Queue Pair Creation Attributes */
+  qp_init_attr.cap.max_oust_wr_rq = NUM_WQE;
+  qp_init_attr.cap.max_oust_wr_sq = NUM_WQE;
+  qp_init_attr.cap.max_sg_size_rq = NUM_SG;
+  qp_init_attr.cap.max_sg_size_sq = NUM_SG;
+  qp_init_attr.pd_hndl            = qp->pd_hndl;
+  qp_init_attr.rdd_hndl           = 0;
+  qp_init_attr.rq_cq_hndl         = qp->rq_cq_hndl;
+  /* we use here polling */
+  //qp_init_attr.rq_sig_type        = VAPI_SIGNAL_REQ_WR;
+  qp_init_attr.rq_sig_type        = VAPI_SIGNAL_ALL_WR;
+  qp_init_attr.sq_cq_hndl         = qp->sq_cq_hndl;
+  /* we use here polling */
+  //qp_init_attr.sq_sig_type        = VAPI_SIGNAL_REQ_WR;
+  qp_init_attr.sq_sig_type        = VAPI_SIGNAL_ALL_WR;
+  // transport service - reliable connection
+
+  qp_init_attr.ts_type            = VAPI_TS_RC;
+          
+  vstat = VAPI_create_qp(qp->hca_hndl,   
+                         &qp_init_attr,      
+                         &qp->qp_hndl, &qp_prop); 
+
+  if (vstat != VAPI_OK) {
+     CERROR("Failed creating QP. Return Failed - %s\n",VAPI_strerror(vstat));
+     return vstat;
+  }
+  
+  qp->qp_num = qp_prop.qp_num; // the qp number 
+  qp->last_posted_send_id  = 0; // user defined work request ID
+  qp->last_posted_rcv_id   = 0; // user defined work request ID
+  qp->cur_send_outstanding = 0;
+  qp->cur_posted_rcv_bufs  = 0;
+  qp->snd_rcv_balance      = 0;
+  
+  CDEBUG(D_OTHER, "create_qp: qp_num = %d, slid = %d, qp_hndl = 0X%X", 
+                  qp->qp_num, qp->slid, qp->qp_hndl);
+
+  // initialize spin-lock mutex variables
+  spin_lock_init(&(qp->snd_mutex));
+  spin_lock_init(&(qp->rcv_mutex));
+  spin_lock_init(&(qp->bl_mutex));
+  spin_lock_init(&(qp->cln_mutex));
+  // number of outstanding requests on the send Q
+  qp->cur_send_outstanding = 0; 
+  // number of posted receive buffers
+  qp->cur_posted_rcv_bufs  = 0;  
+  qp->snd_rcv_balance      = 0;
+
+  return(VAPI_OK);
+
+}
+
+//
+// initialize a UD qp state to RTR and RTS 
+//
+VAPI_ret_t 
+init_qp_UD(QP_info *qp, int qp_index)
+{
+  VAPI_qp_attr_t      qp_attr;
+  VAPI_qp_init_attr_t qp_init_attr;
+  VAPI_qp_attr_mask_t qp_attr_mask;
+  VAPI_qp_cap_t       qp_cap;
+  VAPI_ret_t       vstat;
+
+  /* Move from RST to INIT */
+  /* Change QP to INIT */
+
+  CDEBUG(D_OTHER, "Changing QP state to INIT qp-index = %d\n", qp_index);
+
+  QP_ATTR_MASK_CLR_ALL(qp_attr_mask);
+
+  qp_attr.qp_state = VAPI_INIT;
+  QP_ATTR_MASK_SET(qp_attr_mask,QP_ATTR_QP_STATE);
+
+  CDEBUG(D_OTHER, "qp_state qp_attr_mask = 0X%x\n", qp_attr_mask);
+
+  qp_attr.pkey_ix  = 0;
+  QP_ATTR_MASK_SET(qp_attr_mask,QP_ATTR_PKEY_IX);
+
+  CDEBUG(D_OTHER, "pkey_ix qp_attr_mask = 0X%x\n", qp_attr_mask);
+
+  qp_attr.port     = qp->port;
+  QP_ATTR_MASK_SET(qp_attr_mask,QP_ATTR_PORT);
+
+  CDEBUG(D_OTHER, "port qp_attr_mask = 0X%x\n", qp_attr_mask);
+
+  qp_attr.qkey = 0;
+  QP_ATTR_MASK_SET(qp_attr_mask,QP_ATTR_QKEY);
+
+  CDEBUG(D_OTHER, "qkey qp_attr_mask = 0X%x\n", qp_attr_mask);
+
+  /* If I do not set this mask, I get an error from HH. QPM should catch it */
+
+  vstat = VAPI_modify_qp(qp->hca_hndl,
+                         qp->qp_hndl,
+                         &qp_attr,
+                         &qp_attr_mask,
+                         &qp_cap);
+
+  if (vstat != VAPI_OK) {
+     CERROR("Failed modifying QP from RST to INIT. %s\n",VAPI_strerror(vstat));
+     return(vstat);
+  }
+
+  CDEBUG(D_OTHER, "Modifying QP from RST to INIT.\n");
+
+  vstat= VAPI_query_qp(qp->hca_hndl,
+                       qp->qp_hndl,
+                       &qp_attr,
+                       &qp_attr_mask,
+                       &qp_init_attr);
+
+  if (vstat != VAPI_OK) {
+     CERROR("Failed query QP. %s\n",VAPI_strerror(vstat));
+     return(vstat);
+  }
+
+  /* Move from INIT to RTR */
+  /* Change QP to RTR */
+  CDEBUG(D_OTHER, "Changing QP state to RTR\n");
+
+  QP_ATTR_MASK_CLR_ALL(qp_attr_mask);
+
+  qp_attr.qp_state         = VAPI_RTR;  
+  QP_ATTR_MASK_SET(qp_attr_mask,QP_ATTR_QP_STATE);
+
+  CDEBUG(D_OTHER, "INIT to RTR- qp_state : qp_attr_mask = 0X%x\n", qp_attr_mask);
+
+  vstat = VAPI_modify_qp(qp->hca_hndl,
+                         qp->qp_hndl,
+                         &qp_attr,
+                         &qp_attr_mask,
+                         &qp_cap);
+
+  if (vstat != VAPI_OK) {
+     CERROR("Failed modifying QP from INIT to RTR. %s\n",VAPI_strerror(vstat));
+     return(vstat);
+  }
+  
+  CDEBUG(D_OTHER, "Modifying QP from INIT to RTR.\n");
+  
+  vstat= VAPI_query_qp(qp->hca_hndl,
+                       qp->qp_hndl,
+                       &qp_attr,
+                       &qp_attr_mask,
+                       &qp_init_attr);
+
+  if (vstat != VAPI_OK) {
+     CERROR("Failed query QP. %s\n",VAPI_strerror(vstat));
+     return(vstat);
+  }
+                                      
+  /* RTR to RTS - Change QP to RTS */
+  CDEBUG(D_OTHER, "Changing QP state to RTS\n");
+
+  QP_ATTR_MASK_CLR_ALL(qp_attr_mask);
+
+  qp_attr.qp_state        = VAPI_RTS;   
+  QP_ATTR_MASK_SET(qp_attr_mask,QP_ATTR_QP_STATE);
+  
+  qp_attr.sq_psn          = START_SQ_PSN;          
+  QP_ATTR_MASK_SET(qp_attr_mask,QP_ATTR_SQ_PSN);
+  
+  vstat = VAPI_modify_qp(qp->hca_hndl,
+                         qp->qp_hndl,
+                         &qp_attr,
+                         &qp_attr_mask,
+                         &qp_cap);
+
+  if (vstat != VAPI_OK) {
+     CERROR("Failed modifying QP from RTR to RTS. %s:%s\n",
+                          VAPI_strerror_sym(vstat), 
+                          VAPI_strerror(vstat));
+     return(vstat);
+  }
+
+  CDEBUG(D_OTHER, "Modifying QP from RTR to RTS. \n");
+                     
+  vstat= VAPI_query_qp(qp->hca_hndl,
+                       qp->qp_hndl,
+                       &qp_attr,
+                       &qp_attr_mask,
+                       &qp_init_attr);
+
+  if (vstat != VAPI_OK) {
+     CERROR("Failed query QP. %s\n",VAPI_strerror(vstat));
+     return(vstat);
+  }
+                        
+  //
+  // a QP is at RTS state NOW
+  //
+  CDEBUG(D_OTHER, "IBNAL- UD qp is at RTS NOW\n");
+  
+  return(vstat);
+
+}
+
+
+
+//
+// initialize a RC qp state to RTR and RTS 
+// RC transport service 
+//
+VAPI_ret_t 
+init_qp_RC(QP_info *qp, int qp_index)
+{
+  VAPI_qp_attr_t      qp_attr;
+  VAPI_qp_init_attr_t qp_init_attr;
+  VAPI_qp_attr_mask_t qp_attr_mask;
+  VAPI_qp_cap_t       qp_cap;
+  VAPI_ret_t       vstat;
+
+  /* Move from RST to INIT */
+  /* Change QP to INIT */
+  
+  CDEBUG(D_OTHER, "Changing QP state to INIT qp-index = %d\n", qp_index);
+
+  QP_ATTR_MASK_CLR_ALL(qp_attr_mask);
+
+  qp_attr.qp_state = VAPI_INIT;
+  QP_ATTR_MASK_SET(qp_attr_mask,QP_ATTR_QP_STATE);
+
+   CDEBUG(D_OTHER, "qp_state qp_attr_mask = 0X%x\n", qp_attr_mask);
+
+  qp_attr.pkey_ix  = 0;
+  QP_ATTR_MASK_SET(qp_attr_mask,QP_ATTR_PKEY_IX);
+
+  CDEBUG(D_OTHER, "pkey_ix qp_attr_mask = 0X%x\n", qp_attr_mask);
+
+  qp_attr.port     = qp->port;
+  QP_ATTR_MASK_SET(qp_attr_mask,QP_ATTR_PORT);
+
+  CDEBUG(D_OTHER, "port qp_attr_mask = 0X%x\n", qp_attr_mask);
+
+  qp_attr.remote_atomic_flags = VAPI_EN_REM_WRITE | VAPI_EN_REM_READ;
+  QP_ATTR_MASK_SET(qp_attr_mask,QP_ATTR_REMOTE_ATOMIC_FLAGS);
+
+  CDEBUG(D_OTHER, "remote_atomic_flags qp_attr_mask = 0X%x\n", qp_attr_mask);
+
+  /* If I do not set this mask, I get an error from HH. QPM should catch it */
+
+  vstat = VAPI_modify_qp(qp->hca_hndl,
+                         qp->qp_hndl,
+                         &qp_attr,
+                         &qp_attr_mask,
+                         &qp_cap);
+
+  if (vstat != VAPI_OK) {
+     CERROR("Failed modifying QP from RST to INIT. %s\n",VAPI_strerror(vstat));
+     return(vstat);
+  }
+
+  vstat= VAPI_query_qp(qp->hca_hndl,
+                       qp->qp_hndl,
+                       &qp_attr,
+                       &qp_attr_mask,
+                       &qp_init_attr);
+
+  if (vstat != VAPI_OK) {
+     CERROR("Failed query QP. %s\n",VAPI_strerror(vstat));
+     return(vstat);
+  }
+
+  /* Move from INIT to RTR */
+  /* Change QP to RTR */
+  CDEBUG(D_OTHER, "Changing QP state to RTR qp_indexi %d\n", qp_index);
+
+  QP_ATTR_MASK_CLR_ALL(qp_attr_mask);
+  qp_attr.qp_state         = VAPI_RTR;  
+
+  QP_ATTR_MASK_SET(qp_attr_mask,QP_ATTR_QP_STATE);
+
+  CDEBUG(D_OTHER, "qp_state qp_attr_mask = 0X%x\n", qp_attr_mask);
+
+  qp_attr.av.sl            = 0;/* RESPONDER_SL */
+  qp_attr.av.grh_flag      = FALSE;
+  qp_attr.av.dlid          = qp->dlid;/*RESPONDER_LID;*/
+  qp_attr.av.static_rate   = 0;
+  qp_attr.av.src_path_bits = 0;              
+  QP_ATTR_MASK_SET(qp_attr_mask,QP_ATTR_AV);
+
+  CDEBUG(D_OTHER, "qp_state qp_attr_mask = 0X%x\n", qp_attr_mask);
+
+  qp_attr.path_mtu         = MTU_2048;// default is MTU_2048             
+  QP_ATTR_MASK_SET(qp_attr_mask,QP_ATTR_PATH_MTU);
+
+  CDEBUG(D_OTHER, "qp_state qp_attr_mask = 0X%x\n", qp_attr_mask);
+
+  qp_attr.rq_psn           = START_RQ_PSN;              
+  QP_ATTR_MASK_SET(qp_attr_mask,QP_ATTR_RQ_PSN);
+
+  CDEBUG(D_OTHER, "qp_state qp_attr_mask = 0X%x\n", qp_attr_mask);
+
+  qp_attr.qp_ous_rd_atom   = NUM_WQE;        
+  QP_ATTR_MASK_SET(qp_attr_mask,QP_ATTR_QP_OUS_RD_ATOM);
+
+  CDEBUG(D_OTHER, "qp_state qp_attr_mask = 0X%x\n", qp_attr_mask);
+
+  qp_attr.pkey_ix          = 0;              
+  QP_ATTR_MASK_SET(qp_attr_mask,QP_ATTR_PKEY_IX);
+
+  CDEBUG(D_OTHER, "qp_state qp_attr_mask = 0X%x\n", qp_attr_mask);
+
+  qp_attr.min_rnr_timer    = 10;              
+  QP_ATTR_MASK_SET(qp_attr_mask,QP_ATTR_MIN_RNR_TIMER);
+
+  CDEBUG(D_OTHER, "qp_state qp_attr_mask = 0X%x\n", qp_attr_mask);
+
+  qp_attr.dest_qp_num = qp->rqp_num;                   
+
+  CDEBUG(D_OTHER, "remore qp num %d\n",  qp->rqp_num);
+
+  QP_ATTR_MASK_SET(qp_attr_mask,QP_ATTR_DEST_QP_NUM);
+
+  CDEBUG(D_OTHER, "qp_state qp_attr_mask = 0X%x\n", qp_attr_mask);
+
+  vstat = VAPI_modify_qp(qp->hca_hndl,
+                         qp->qp_hndl,
+                         &qp_attr,
+                         &qp_attr_mask,
+                         &qp_cap);
+
+
+  if (vstat != VAPI_OK) {
+     CERROR("Failed modifying QP from INIT to RTR. qp_index %d - %s\n",
+                                                qp_index, VAPI_strerror(vstat));
+     return(vstat);
+  }
+  
+  vstat= VAPI_query_qp(qp->hca_hndl,
+                       qp->qp_hndl,
+                       &qp_attr,
+                       &qp_attr_mask,
+                       &qp_init_attr);
+
+  if (vstat != VAPI_OK) {
+     CERROR("Failed query QP. %s\n",VAPI_strerror(vstat));
+     return(vstat);
+  }
+                                      
+  /* RTR to RTS - Change QP to RTS */
+  CDEBUG(D_OTHER, "Changing QP state to RTS\n");
+
+  QP_ATTR_MASK_CLR_ALL(qp_attr_mask);
+
+  qp_attr.qp_state        = VAPI_RTS;   
+  QP_ATTR_MASK_SET(qp_attr_mask,QP_ATTR_QP_STATE);
+
+  qp_attr.sq_psn          = START_SQ_PSN;          
+  QP_ATTR_MASK_SET(qp_attr_mask,QP_ATTR_SQ_PSN);
+
+  qp_attr.timeout         = 0x18;         
+  QP_ATTR_MASK_SET(qp_attr_mask,QP_ATTR_TIMEOUT);
+
+  qp_attr.retry_count     = 10;         
+  QP_ATTR_MASK_SET(qp_attr_mask,QP_ATTR_RETRY_COUNT);
+
+  qp_attr.rnr_retry       = 14;         
+  QP_ATTR_MASK_SET(qp_attr_mask,QP_ATTR_RNR_RETRY);
+
+  qp_attr.ous_dst_rd_atom = 100;        
+  QP_ATTR_MASK_SET(qp_attr_mask,QP_ATTR_OUS_DST_RD_ATOM);
+
+  qp_attr.min_rnr_timer   = 5;          
+  QP_ATTR_MASK_SET(qp_attr_mask,QP_ATTR_MIN_RNR_TIMER);
+
+  vstat = VAPI_modify_qp(qp->hca_hndl,
+                         qp->qp_hndl,
+                         &qp_attr,
+                         &qp_attr_mask,
+                         &qp_cap);
+
+  if (vstat != VAPI_OK) {
+     CERROR("Failed modifying QP from RTR to RTS. %s:%s\n",
+                   VAPI_strerror_sym(vstat), VAPI_strerror(vstat));
+     return(vstat);
+  }
+
+  vstat= VAPI_query_qp(qp->hca_hndl,
+                       qp->qp_hndl,
+                       &qp_attr,
+                       &qp_attr_mask,
+                       &qp_init_attr);
+
+  if (vstat != VAPI_OK) {
+     CERROR("Failed query QP. %s\n",VAPI_strerror(vstat));
+     return(vstat);
+  }
+                        
+  //
+  // a QP is at RTS state NOW
+  //
+   CDEBUG(D_OTHER, "IBNAL- RC qp is at RTS NOW\n");
+  
+  return(vstat);
+}
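+
+/*
+ * Summary of the RC bring-up performed above (values taken from this code,
+ * not from any external specification):
+ *
+ *   RST  -> INIT : pkey_ix, port, remote_atomic_flags (remote read/write)
+ *   INIT -> RTR  : av.dlid, path_mtu = MTU_2048, rq_psn = START_RQ_PSN,
+ *                  dest_qp_num = peer's qp_num, qp_ous_rd_atom,
+ *                  min_rnr_timer
+ *   RTR  -> RTS  : sq_psn = START_SQ_PSN, timeout, retry_count, rnr_retry,
+ *                  ous_dst_rd_atom, min_rnr_timer
+ *
+ * dlid and dest_qp_num come from the peer via the k_server_thread/uagent
+ * exchange in IB_Open_HCA().
+ */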
+
+
+
+VAPI_ret_t 
+IB_Open_HCA(kibnal_data_t *kib_data)
+{
+
+  VAPI_ret_t     vstat;
+  VAPI_cqe_num_t cqe_active_num;
+  QP_info        *qp; 
+  int            i;
+  int            Num_posted_recv_buf;
+
+  /* Open HCA */
+  CDEBUG(D_PORTALS, "Opening an HCA\n");
+
+  vstat = VAPI_open_hca(HCA_ID, &Hca_hndl);
+  vstat = EVAPI_get_hca_hndl(HCA_ID, &Hca_hndl);
+  if (vstat != VAPI_OK) {
+     CERROR("Failed opening the HCA: %s. %s...\n",HCA_ID,VAPI_strerror(vstat));
+     return(vstat);
+  } 
+
+  /* Get HCA CAP */
+  vstat = VAPI_query_hca_cap(Hca_hndl, &Hca_vendor, &Hca_cap);
+  if (vstat != VAPI_OK) {
+     CERROR("Failed query hca cap %s\n",VAPI_strerror(vstat));
+     return(vstat);
+  }
+
+  /* Get port 1 info */
+  vstat = VAPI_query_hca_port_prop(Hca_hndl, HCA_PORT_1 , &Hca_port_1_props);
+  if (vstat != VAPI_OK) {
+     CERROR("Failed query port cap %s\n",VAPI_strerror(vstat));
+     return(vstat);
+  }      
+
+  /* Get port 2 info */
+  vstat = VAPI_query_hca_port_prop(Hca_hndl, HCA_PORT_2, &Hca_port_2_props);
+  if (vstat != VAPI_OK) {
+     CERROR("Failed query port cap %s\n",VAPI_strerror(vstat));
+     return(vstat);
+  }      
+
+  // Get a PD 
+  CDEBUG(D_PORTALS, "Allocating PD \n");
+  vstat = VAPI_alloc_pd(Hca_hndl,&Pd_hndl);
+  if (vstat != VAPI_OK) {
+     CERROR("Failed allocating a PD. %s\n",VAPI_strerror(vstat));
+     return(vstat);
+  }
+
+  vstat = createMemRegion(Hca_hndl, Pd_hndl);
+  if (vstat != VAPI_OK) {
+     CERROR("Failed registering a memory region.%s\n",VAPI_strerror(vstat));
+     return(vstat);
+  }
+
+  /* Create CQ for RQ*/
+  CDEBUG(D_PORTALS, "Creating a send completion queue\n");
+
+  vstat = VAPI_create_cq(Hca_hndl,    
+                         NUM_CQE,    
+                         &Cq_hndl, 
+                         &cqe_active_num);
+
+  if (vstat != VAPI_OK) {
+     CERROR("Failed creating a CQ. %s\n",VAPI_strerror(vstat));
+     return(vstat);
+  }
+
+  if(NUM_CQE == cqe_active_num) {
+    CERROR("VAPI_create_cq: NUM_CQE EQ cqe_active_num \n");
+  }
+  else {
+    CDEBUG(D_NET, "VAPI_create_cq: NUM_CQE %d , actual cqe_active_num %d \n",
+                   NUM_CQE, cqe_active_num);
+  }
+
+  Cq_SQ_hndl     = Cq_hndl;
+  Cq_RQ_hndl     = Cq_hndl;
+
+  //
+  // create  QPs 
+  //
+  for(i=0; i < NUM_QPS; i++) {
+      QP_list[i].pd_hndl    = Pd_hndl;
+      QP_list[i].hca_hndl   = Hca_hndl;
+      // sq rq use the same Cq_hndl 
+      QP_list[i].sq_cq_hndl = Cq_hndl; 
+      QP_list[i].rq_cq_hndl = Cq_hndl;
+      vstat = create_qp(&QP_list[i], i);
+      if (vstat != VAPI_OK) {
+         CERROR("Failed creating a QP %d %s\n",i, VAPI_strerror(vstat));
+         return(vstat);
+      }
+  }      
+
+  //
+  // record HCA data 
+  //
+
+  Hca_data.hca_hndl     = Hca_hndl;      // HCA handle
+  Hca_data.pd_hndl      = Pd_hndl;       // protection domain
+  Hca_data.port         = 1;             // port number
+  Hca_data.num_qp       = NUM_QPS;        // number of qp used
+
+  for(i=0; i < NUM_QPS; i++) {
+    Hca_data.qp_ptr[i]    = &QP_list[i];   // point to QP_list
+  }
+
+  Hca_data.num_cq       = NUM_CQ;        // number of cq used
+  Hca_data.cq_hndl      = Cq_hndl;       // 
+  Hca_data.sq_cq_hndl   = Cq_SQ_hndl;    // 
+  Hca_data.rq_cq_hndl   = Cq_RQ_hndl;    // 
+  Hca_data.kib_data     = kib_data;       //
+  Hca_data.slid         = QP_list[0].slid;//
+
+  // prepare L_QP_data
+
+#ifdef USE_SHARED_MEMORY_AND_SOCKET
+
+  /*
+   *  + use a shared-memory between a user thread and a kernel thread 
+   *    for HCA's data exchange on the same node  
+   *  + use a socket in user mode to exchange HCA's data with a remote node 
+   */
+
+  
+  R_QP_data.opcode  = SEND_QP_INFO;
+  R_QP_data.length  = sizeof(L_QP_data);
+
+  for(i=0; i < NUM_QPS; i++) {
+    // my slid  will be used in a remote node as dlid 
+    R_QP_data.dlid[i]    = QP_list[i].slid;
+    // my qp_num will be used in the remote node as remote_qp_number 
+    // RC is used here so we need dlid and rqp_num  
+    R_QP_data.rqp_num[i] = QP_list[i].qp_num ;
+  }
+
+  // create a kernel thread for exchanging HCA's data 
+  // R_QP_data will be exchanged with a remote node
+
+  kernel_thread(k_server_thread, &R_QP_data, 0);
+  // check if the HCA's data has been updated by kernel_thread 
+  // loop until the HCA's data is updated 
+  // make sure that uagent is running 
+  
+  // QP info is exchanged with a remote node   
+  while (1) {
+    schedule_timeout(1000);
+    if(R_QP_data.opcode ==  RECV_QP_INFO) {
+       CDEBUG(D_NET, "HCA's data is being updated\n");
+       break;
+   }
+  }
+#endif
+
+#ifdef USE_SHARED_MEMORY_AND_MULTICAST
+
+  /*
+   *  + use a shared-memory between a user thread and a kernel thread 
+   *    for HCA's data exchange on the same node  
+   *  + use InfiniBand UD/multicast in user mode to exchange HCA's data with
+   *    a remote node 
+   */
+
+  // use CM, openSM 
+  
+#endif
+
+  // 
+  for(i=0; i < NUM_QPS; i++) {
+     qp = (QP_info *) &QP_list[i];
+     QP_list[i].rqp_num = R_QP_data.rqp_num[i]; // remote qp number 
+     QP_list[i].dlid    = R_QP_data.dlid[i];    // remote dlid 
+  }
+
+  // already have remote_qp_num and dlid information
+  // initialize QP to RTR/RTS state 
+  //
+  for(i=0; i < NUM_QPS; i++) {
+    vstat = init_qp_RC(&QP_list[i], i);
+    if (vstat != VAPI_OK) {
+       CERROR("Failed change a QP %d to RTS state%s\n",
+                    i,VAPI_strerror(vstat));
+       return(vstat);
+    }
+  }
+
+  // post receiving buffer before any send happened 
+  
+  Num_posted_recv_buf = post_recv_bufs( (VAPI_wr_id_t ) START_RECV_WRQ_ID); 
+
+  // for irregular completion event or some unexpected failure event 
+  vstat = IB_Set_Async_Event_Handler(Hca_data, &kibnal_data);
+  if (vstat != VAPI_OK) {
+     CERROR("IB_Set_Async_Event_Handler failed: %d\n", vstat);
+     return vstat;
+  }
+
+
+  CDEBUG(D_PORTALS, "IBNAL- done with IB_Open_HCA\n");
+
+  for(i=0;  i < NUM_MBUF; i++) {
+    spin_lock_init(&MSB_mutex[i]);
+  }
+
+  return(VAPI_OK);
+
+}
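+
+/*
+ * Summary of the bring-up sequence implemented above (descriptive only):
+ * create one CQ shared by all send/receive queues -> create NUM_QPS RC QPs
+ * on that CQ -> record the HCA/PD/CQ/QP handles in Hca_data -> exchange
+ * slid/qp_num with the peer via k_server_thread and the user-space uagent ->
+ * fill in each QP's dlid/rqp_num -> move the QPs to RTR/RTS -> post the
+ * initial receive buffers -> install the asynchronous event handler.
+ */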
+
+
+/* 
+  Function:  IB_Set_Event_Handler()
+             
+             IN   HCA_info hca_data
+             IN   kibnal_data_t *kib_data  -- private data      
+             OUT  NONE
+
+        return: VAPI_OK - success
+                else    - fail 
+
+*/
+
+VAPI_ret_t 
+IB_Set_Event_Handler(HCA_info hca_data, kibnal_data_t *kib_data)
+{
+  VAPI_ret_t vstat;
+  EVAPI_compl_handler_hndl_t   comp_handler_hndl;
+
+  // register CQE_event_handler 
+  // VAPI function 
+  vstat = VAPI_set_comp_event_handler(hca_data.hca_hndl,
+                                      CQE_event_handler,
+                                      &hca_data);
+
+  /*
+  or use extended VAPI function 
+  vstat = EVAPI_set_comp_eventh(hca_data.hca_hndl,
+                                hca_data.cq_hndl,
+                                CQE_event_handler,
+                                &hca_data,
+                                &comp_handler_hndl
+                                );
+  */
+                                    
+  if (vstat != VAPI_OK) {
+      CERROR("IB_Set_Event_Handler: failed EVAPI_set_comp_eventh for"
+             " HCA ID = %s (%s).\n", HCA_ID, VAPI_strerror(vstat));
+      return vstat;
+  }
+
+  // issue a request for completion event notification 
+  vstat = VAPI_req_comp_notif(hca_data.hca_hndl, 
+                              hca_data.cq_hndl,
+                              VAPI_NEXT_COMP); 
+
+  if (vstat != VAPI_OK) {
+      CERROR("IB_Set_Event_Handler: failed VAPI_req_comp_notif for HCA ID"
+             " = %s (%s).\n", HCA_ID, VAPI_strerror(vstat));
+  }
+
+  return vstat;
+}
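+
+/*
+ * Note on re-arming (illustrative sketch only): VAPI_req_comp_notif() arms
+ * the CQ for the *next* completion, so a completion handler must drain the
+ * CQ and then re-arm before returning, e.g.
+ *
+ *   while (VAPI_poll_cq(hca_hndl, cq_hndl, &comp_desc) == VAPI_OK)
+ *           handle(&comp_desc);
+ *   VAPI_req_comp_notif(hca_hndl, cq_hndl, VAPI_NEXT_COMP);
+ *
+ * CQE_event_handler() below follows this pattern (poll once, then re-arm).
+ */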
+
+
+
+/* 
+  Function:  IB_Set_Async_Event_Handler()
+             
+             IN   HCA_info hca_data
+             IN   kibnal_data_t *kib_data -- private data      
+             OUT  NONE
+
+        return: VAPI_OK - success
+                else    - fail 
+
+*/
+
+
+VAPI_ret_t 
+IB_Set_Async_Event_Handler(HCA_info hca_data, kibnal_data_t *kib_data)
+{
+  VAPI_ret_t    vstat;
+
+  //
+  // register an asynchronous event handler for this HCA 
+  //
+
+  vstat= VAPI_set_async_event_handler(hca_data.hca_hndl,
+                                      async_event_handler, 
+                                      kib_data);
+
+  if (vstat != VAPI_OK) {
+      CERROR("IB_Set_Async_Event_Handler: failed VAPI_set_async_comp_event_handler"
+             " for HCA ID = %s (%s).\n", HCA_ID, VAPI_strerror(vstat));
+  }
+
+  return vstat;
+}
+
+//
+// IB_Close_HCA
+// close this Infiniband HCA interface 
+// release allocated resources to system 
+//
+VAPI_ret_t 
+IB_Close_HCA(void )
+{
+        
+  VAPI_ret_t  vstat;
+  int         ok = 1;
+  int         i;
+            
+  /* Destroy QP */
+  CDEBUG(D_PORTALS, "Destroying QP\n");
+
+  for(i=0; i < NUM_QPS; i++) {
+     vstat = VAPI_destroy_qp(QP_list[i].hca_hndl, QP_list[i].qp_hndl);
+     if (vstat != VAPI_OK) {
+        CERROR("Failed destroying QP %d. %s\n", i, VAPI_strerror(vstat));
+        ok = 0;
+     }
+  }
+
+  if (ok) {
+     /* Destroy CQ */
+     CDEBUG(D_PORTALS, "Destroying CQ\n");
+     for(i=0; i < NUM_QPS; i++) {
+        // send_cq and receive_cq share the same CQ,
+        // so only destroy one of them 
+        vstat = VAPI_destroy_cq(QP_list[i].hca_hndl, QP_list[i].sq_cq_hndl);
+        if (vstat != VAPI_OK) {
+           CERROR("Failed destroying CQ %d. %s\n", i, VAPI_strerror(vstat));
+           ok = 0;
+        }
+     }
+  }
+
+  if (ok) {
+     /* Destroy Memory Region */
+     CDEBUG(D_PORTALS, "Deregistering MR\n");
+     for(i=0; i < NUM_QPS; i++) {
+        vstat = deleteMemRegion(&QP_list[i], i);
+        if (vstat != VAPI_OK) {
+           CERROR("Failed deregister mem reg %d. %s\n",i, VAPI_strerror(vstat));
+           ok = 0;
+           break;
+        }
+     }
+  }
+
+  if (ok) {
+     // finally 
+     /* Close HCA */
+     CDEBUG(D_PORTALS, "Closing HCA\n");
+     vstat = VAPI_close_hca(Hca_hndl);
+     if (vstat != VAPI_OK) {
+        CERROR("Failed to close HCA. %s\n", VAPI_strerror(vstat));
+        ok = 0;
+     }
+  }
+
+  CDEBUG(D_PORTALS, "IBNAL- Done with closing HCA \n");
+  
+  return vstat; 
+}
+
+
+VAPI_ret_t 
+createMemRegion(VAPI_hca_hndl_t hca_hndl, 
+                   VAPI_pd_hndl_t  pd_hndl) 
+{
+  VAPI_ret_t  vstat;
+  VAPI_mrw_t  mrw;
+  VAPI_mrw_t  rep_mr;   
+  VAPI_mr_hndl_t   rep_mr_hndl;
+  int         buf_size;
+  char        *bufptr;
+  int         i;
+
+  // send registered memory region 
+  for(i=0; i < NUM_ENTRY; i++) {
+    MSbuf_list[i].buf_size = KB_32; 
+    PORTAL_ALLOC(bufptr, MSbuf_list[i].buf_size);
+    if(bufptr == NULL) {
+       CDEBUG(D_MALLOC,"Failed to malloc a block of send memory, qix %d size %d\n",
+                                          i, MSbuf_list[i].buf_size);
+       CERROR("Failed to malloc a block of send memory, qix %d size %d\n",
+                                          i, MSbuf_list[i].buf_size);
+       return(VAPI_ENOMEM);
+    }
+
+    mrw.type   = VAPI_MR; 
+    mrw.pd_hndl= pd_hndl;
+    mrw.start  = MSbuf_list[i].buf_addr = (VAPI_virt_addr_t)(MT_virt_addr_t) bufptr;
+    mrw.size   = MSbuf_list[i].buf_size;
+    mrw.acl    = VAPI_EN_LOCAL_WRITE  | 
+                 VAPI_EN_REMOTE_WRITE | 
+                 VAPI_EN_REMOTE_READ;
+
+    // register send memory region  
+    vstat = VAPI_register_mr(hca_hndl, 
+                             &mrw, 
+                             &rep_mr_hndl, 
+                             &rep_mr);
+
+    // this memory region is going to be reused until deregister is called 
+    if(vstat != VAPI_OK) {
+       CERROR("Failed registering a mem region qix %d Addr=%p, Len=%d. %s\n",
+                          i, mrw.start, mrw.size, VAPI_strerror(vstat));
+       return(vstat);
+    }
+
+    MSbuf_list[i].mr        = rep_mr;
+    MSbuf_list[i].mr_hndl   = rep_mr_hndl;
+    MSbuf_list[i].bufptr    = bufptr;
+    MSbuf_list[i].buf_addr  = rep_mr.start;
+    MSbuf_list[i].status    = BUF_REGISTERED;
+    MSbuf_list[i].ref_count = 0;
+    MSbuf_list[i].buf_type  = REG_BUF;
+    MSbuf_list[i].raddr     = 0x0;
+    MSbuf_list[i].rkey      = 0x0;
+  }
+
+  // remaining entries are reserved for RDMA WRITE/READ and are not pre-registered
+  
+  for(i=NUM_ENTRY; i< NUM_MBUF; i++) {
+    MSbuf_list[i].status    = BUF_UNREGISTERED;
+    MSbuf_list[i].buf_type  = RDMA_BUF;
+  }
+
+
+  // recv registered memory region 
+  for(i=0; i < NUM_ENTRY; i++) {
+    MRbuf_list[i].buf_size = KB_32; 
+    PORTAL_ALLOC(bufptr, MRbuf_list[i].buf_size);
+
+    if(bufptr == NULL) {
+       CDEBUG(D_MALLOC, "Failed to malloc a block of recv memory, qix %d size %d\n",
+                      i, MRbuf_list[i].buf_size);
+       return(VAPI_ENOMEM);
+    }
+
+    mrw.type   = VAPI_MR; 
+    mrw.pd_hndl= pd_hndl;
+    mrw.start  = (VAPI_virt_addr_t)(MT_virt_addr_t) bufptr;
+    mrw.size   = MRbuf_list[i].buf_size;
+    mrw.acl    = VAPI_EN_LOCAL_WRITE  | 
+                 VAPI_EN_REMOTE_WRITE | 
+                 VAPI_EN_REMOTE_READ;
+
+    // register recv memory region  
+    vstat = VAPI_register_mr(hca_hndl, 
+                             &mrw, 
+                             &rep_mr_hndl, 
+                             &rep_mr);
+
+    // this memory region is going to be reused until deregister is called 
+    if(vstat != VAPI_OK) {
+       CERROR("Failed registering a mem region qix %d Addr=%p, Len=%d. %s\n",
+                          i, mrw.start, mrw.size, VAPI_strerror(vstat));
+       return(vstat);
+    }
+
+    MRbuf_list[i].mr        = rep_mr;
+    MRbuf_list[i].mr_hndl   = rep_mr_hndl;
+    MRbuf_list[i].bufptr    = bufptr;
+    MRbuf_list[i].buf_addr  = rep_mr.start;
+    MRbuf_list[i].status    = BUF_REGISTERED;
+    MRbuf_list[i].ref_count = 0;
+    MRbuf_list[i].buf_type  = REG_BUF;
+    MRbuf_list[i].raddr     = 0x0;
+    MRbuf_list[i].rkey      = rep_mr.r_key;
+    MRbuf_list[i].lkey      = rep_mr.l_key;
+  
+  }
+  // keep extra information for a qp 
+  for(i=0; i < NUM_QPS; i++) {
+    QP_list[i].mr_hndl    = MSbuf_list[i].mr_hndl; 
+    QP_list[i].mr         = MSbuf_list[i].mr;
+    QP_list[i].bufptr     = MSbuf_list[i].bufptr;
+    QP_list[i].buf_addr   = MSbuf_list[i].buf_addr;
+    QP_list[i].buf_size   = MSbuf_list[i].buf_size;
+    QP_list[i].raddr      = MSbuf_list[i].raddr;
+    QP_list[i].rkey       = MSbuf_list[i].rkey;
+    QP_list[i].lkey       = MSbuf_list[i].lkey;
+  }
+
+  CDEBUG(D_PORTALS, "IBNAL- done VAPI_ret_t createMemRegion \n");
+
+  return vstat;
+
+} /* createMemRegion */
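+
+/*
+ * Entries between NUM_ENTRY and NUM_MBUF are left BUF_UNREGISTERED above and
+ * are registered on demand for RDMA WRITE/READ.  A hedged sketch of such an
+ * on-demand registration (createMemRegion_RDMA() is the real helper; its body
+ * is not shown here, so this is an assumption about its shape):
+ *
+ *   VAPI_mrw_t mrw;
+ *   mrw.type    = VAPI_MR;
+ *   mrw.pd_hndl = pd_hndl;
+ *   mrw.start   = (VAPI_virt_addr_t)(MT_virt_addr_t) rdma_buffer;   // hypothetical buffer
+ *   mrw.size    = rdma_buffer_length;
+ *   mrw.acl     = VAPI_EN_LOCAL_WRITE | VAPI_EN_REMOTE_WRITE | VAPI_EN_REMOTE_READ;
+ *   vstat = VAPI_register_mr(hca_hndl, &mrw, &rep_mr_hndl, &rep_mr);
+ *   // advertise rep_mr.r_key and the buffer address to the peer in the
+ *   // RTS/CTS exchange, then VAPI_deregister_mr() once the completion arrives
+ */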
+
+
+
+VAPI_ret_t  
+deleteMemRegion(QP_info *qp, int qix)
+{
+  VAPI_ret_t  vstat;
+
+  //
+  // free send memory associated with this memory region  
+  //
+  PORTAL_FREE(MSbuf_list[qix].bufptr, MSbuf_list[qix].buf_size);
+
+  // de-register it 
+  vstat =  VAPI_deregister_mr(qp->hca_hndl, MSbuf_list[qix].mr_hndl);
+
+  if(vstat != VAPI_OK) {
+     CERROR("Failed deregistering a send mem region qix %d %s\n",
+                         qix, VAPI_strerror(vstat));
+     return vstat;
+  }
+
+  //
+  // free recv memory associated with this memory region  
+  //
+  PORTAL_FREE(MRbuf_list[qix].bufptr, MRbuf_list[qix].buf_size);
+
+  // de-register it 
+  vstat =  VAPI_deregister_mr(qp->hca_hndl, MRbuf_list[qix].mr_hndl);
+
+  if(vstat != VAPI_OK) {
+     CERROR("Failed deregistering a recv mem region qix %d %s\n",
+                         qix, VAPI_strerror(vstat));
+     return vstat;
+  }
+
+  return vstat;
+}
+
+
+//
+// polling based event handling 
+// + a daemon process
+// + poll the CQ and check what is in the CQ 
+// + process incoming CQ event
+// + 
+//
+
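+// A distilled sketch of the loop implemented by k_recv_thread() below
+// (illustrative only; assumes the same HCA_info layout):
+//
+//   for (;;) {
+//       vstat = VAPI_poll_cq(hca->hca_hndl, hca->cq_hndl, &comp_desc);
+//       if (vstat == VAPI_CQ_EMPTY) {
+//           schedule_timeout(RECEIVING_THREAD_TIMEOUT);  // nothing yet, back off
+//           continue;
+//       }
+//       if (vstat != VAPI_OK)
+//           break;                                       // give up on poll errors
+//       // dispatch on comp_desc.opcode / comp_desc.id as in the switch below
+//   }
+//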
+
+RDMA_Info_Exchange   Rdma_info;
+int                  Cts_Message_arrived = NO;
+
+void k_recv_thread(HCA_info *hca_data)
+{
+ VAPI_ret_t       vstat; 
+ VAPI_wc_desc_t   comp_desc;   
+ unsigned long    polling_count = 0;
+ u_int32_t        timeout_usec;
+ unsigned int     priority = 100;
+ unsigned int     length;
+ VAPI_wr_id_t     wrq_id;
+ u_int32_t        transferred_data_length; /* Num. of bytes transferred */
+ void             *bufdata;
+ VAPI_virt_addr_t bufaddr;
+ unsigned long    buf_size = 0;
+ QP_info          *qp;       // point to QP_list
+
+ kportal_daemonize("k_recv_thread"); // make it as a daemon process 
+
+ // tuning variable 
+ timeout_usec = 100; // how is the impact on the performance
+
+ // send Q and receive Q are using the same CQ 
+ // so only poll one CQ for both operations 
+ CDEBUG(D_NET, "IBNAL- enter kibnal_recv_thread\n");
+ CDEBUG(D_NET, "hca_hndl = 0X%x, cq_hndl=0X%x\n", 
+                         hca_data->hca_hndl,hca_data->cq_hndl); 
+
+ qp = hca_data->qp_ptr[0];   // first QP in the list (qp_ptr is an array of QP_info pointers)
+ if(qp == NULL) {
+   CDEBUG(D_NET, "in recv_thread qp is NULL\n");
+   CDEBUG(D_NET, "Exit from  recv_thread qp is NULL\n");
+   return; 
+ }
+ else {
+   CDEBUG(D_NET, "in recv_thread qp is 0X%X\n", qp);
+ }
+
+ CDEBUG(D_NET, "kibnal_recv_thread - enter event driver polling loop\n");
+
+ //
+ // use event driver 
+ //
+
+
+ while(1) {
+    polling_count++;
+
+    //
+    // send Q and receive Q are using the same CQ 
+    // so only poll one CQ for both operations 
+    //
+
+    vstat = VAPI_poll_cq(hca_data->hca_hndl,hca_data->cq_hndl, &comp_desc);                      
+
+    if (vstat == VAPI_CQ_EMPTY) { 
+      // there is no event in CQE 
+      continue;
+    } 
+    else {
+      if (vstat != (VAPI_OK)) {
+        CERROR("error while polling completion queuei vstat %d \n", vstat);
+        return; 
+      }
+    }
+
+    // process the complete event 
+    switch(comp_desc.opcode) {
+      case   VAPI_CQE_SQ_SEND_DATA:
+        // about the Send Q ,POST SEND completion 
+        // who needs this information
+        // get wrq_id
+        // mark MSbuf_list[wr_id].status = BUF_REGISTERED 
+               
+        wrq_id = comp_desc.id;
+
+        if(RDMA_OP_ID < wrq_id) {
+          // this is an RDMA op id; adjust it to the right entry
+          wrq_id = wrq_id - RDMA_OP_ID;
+          vstat = VAPI_deregister_mr(qp->hca_hndl, Local_rdma_info.send_rdma_mr_hndl);
+        }
+        
+        if(vstat != VAPI_OK) {
+            CERROR("VAPI_CQE_SQ_SEND_DATA: Failed deregistering a RDMAi recv"                   " mem region %s\n", VAPI_strerror(vstat));
+        }
+
+        if((RDMA_CTS_ID <= wrq_id) && (wrq_id < RDMA_OP_ID)) {
+          // RTS or CTS send complete, release send buffer 
+          if(wrq_id >= RDMA_RTS_ID)
+            wrq_id = wrq_id - RDMA_RTS_ID;
+          else 
+            wrq_id = wrq_id - RDMA_CTS_ID;
+        }
+
+        spin_lock(&MSB_mutex[(int) wrq_id]);
+        MRbuf_list[wrq_id].status = BUF_REGISTERED; 
+        spin_unlock(&MSB_mutex[(int) wrq_id]);
+
+        CDEBUG(D_NET, "CQE opcode-VAPI_CQE_SQ_SEND_DATA\n");  
+        break;
+
+      case   VAPI_CQE_SQ_RDMA_WRITE:
+        // about the Send Q,  RDMA write completion 
+        // who needs this information
+        // data was successfully written from source to destination 
+             
+        //  get wr_id
+        //  mark MSbuf_list[wr_id].status = BUF_REGISTERED 
+        //  de-register  rdma buffer 
+        //
+             
+        CDEBUG(D_NET, "CQE opcode-VAPI_CQE_SQ_RDMA_WRITE\n");  
+        break;
+
+      case   VAPI_CQE_SQ_RDMA_READ:
+        // about the Send Q
+        // RDMA read completion 
+        // who needs this information
+        // data was successfully read from destination to source 
+        CDEBUG(D_NET, "CQE opcode- VAPI_CQE_SQ_RDMA_READ\n");  
+        break;
+
+      case   VAPI_CQE_SQ_COMP_SWAP:
+        // about the Send Q
+        // RDMA write completion 
+        // who needs this information
+             
+        CDEBUG(D_NET, "CQE opcode-VAPI_CQE_SQ_COMP_SWAP\n");  
+        break;
+
+      case   VAPI_CQE_SQ_FETCH_ADD:
+        // about the Send Q
+        // RDMA write completion 
+        // who needs this information
+             
+        CDEBUG(D_NET, "CQE opcode-VAPI_CQE_SQ_FETCH_ADD\n");  
+        break;
+
+      case   VAPI_CQE_SQ_BIND_MRW:
+        // about the Send Q
+        // RDMA write completion 
+        // who needs this information
+             
+        CDEBUG(D_NET, "CQE opcode-VAPI_CQE_SQ_BIND_MRW\n");  
+        break;
+
+      case   VAPI_CQE_RQ_SEND_DATA:
+        // about the Receive Q
+        // process the incoming data and
+        // forward it to .....
+        // a receive completion event has arrived at the CQ 
+        // issue a receive to get the arriving data out of the CQ 
+        // pass the received data on for further processing 
+        CDEBUG(D_NET, "CQE opcode-VAPI_CQE_RQ_SEND_DATA\n");  
+        wrq_id = comp_desc.id ;
+        transferred_data_length = comp_desc.byte_len;
+             
+        if((wrq_id >= RDMA_CTS_ID) && (wrq_id < RDMA_OP_ID)) {
+          // this is RTS/CTS message 
+          // process it locally and don't pass it to portals layer 
+          // adjust wrq_id to get the right entry in MRbuf_list 
+                   
+          if(wrq_id >= RDMA_RTS_ID)
+            wrq_id = wrq_id - RDMA_RTS_ID;
+          else 
+            wrq_id = wrq_id - RDMA_CTS_ID;
+
+          bufaddr = (VAPI_virt_addr_t)(MT_virt_addr_t) MRbuf_list[wrq_id].buf_addr; 
+          MRbuf_list[wrq_id].status = BUF_INUSE; 
+          memcpy(&Rdma_info, &bufaddr, sizeof(RDMA_Info_Exchange));    
+        
+          if(Ready_To_send == Rdma_info.opcode) 
+            // an RTS request message from remote node 
+            // prepare local RDMA buffer and send local rdma info to
+            // remote node 
+            CTS_handshaking_protocol(&Rdma_info);
+          else 
+            if((Clear_To_send == Rdma_info.opcode) && 
+                              (RDMA_BUFFER_RESERVED == Rdma_info.flag))
+               Cts_Message_arrived = YES;
+            else 
+              if(RDMA_BUFFER_UNAVAILABLE == Rdma_info.flag) 
+                  CERROR("RDMA operation abort-RDMA_BUFFER_UNAVAILABLE\n");
+        }
+        else {
+          //
+          // this is an incoming message for the portals layer 
+          // move to PORTALS layer for further processing 
+          //
+                     
+          bufaddr = (VAPI_virt_addr_t)(MT_virt_addr_t)
+                                       MRbuf_list[wrq_id].buf_addr; 
+
+          MRbuf_list[wrq_id].status = BUF_INUSE; 
+          transferred_data_length = comp_desc.byte_len;
+
+          kibnal_rx(hca_data->kib_data, 
+                    bufaddr, 
+                    transferred_data_length, 
+                    MRbuf_list[wrq_id].buf_size, 
+                    priority); 
+        }
+
+        // repost this receive buffer and mark it as BUF_REGISTERED 
+
+        vstat = repost_recv_buf(qp, wrq_id);
+        if(vstat != (VAPI_OK)) {
+          CERROR("error while polling completion queue\n");
+        }
+        else {
+          MRbuf_list[wrq_id].status = BUF_REGISTERED; 
+        }
+
+        break;
+
+      case   VAPI_CQE_RQ_RDMA_WITH_IMM:
+        // about the Receive Q
+        CDEBUG(D_NET, "CQE opcode-VAPI_CQE_RQ_RDMA_WITH_IMM\n");  
+
+        wrq_id = comp_desc.id ;
+        transferred_data_length = comp_desc.byte_len;
+             
+        if(wrq_id ==  RDMA_OP_ID) {
+          // this is an RDMA op; locate the RDMA memory buffer address 
+               
+          bufaddr = (VAPI_virt_addr_t)(MT_virt_addr_t) Local_rdma_info.raddr;
+
+          transferred_data_length = comp_desc.byte_len;
+
+          kibnal_rx(hca_data->kib_data, 
+                    bufaddr, 
+                    transferred_data_length, 
+                    Local_rdma_info.buf_length, 
+                    priority); 
+
+          // de-register this RDMA receive memory buffer
+          // too early ??    test & check 
+          vstat = VAPI_deregister_mr(qp->hca_hndl, Local_rdma_info.recv_rdma_mr_hndl);
+          if(vstat != VAPI_OK) {
+            CERROR("VAPI_CQE_RQ_RDMA_WITH_IMM: Failed deregistering a RDMA"
+                   " recv  mem region %s\n", VAPI_strerror(vstat));
+          }
+        }
+
+        CDEBUG(D_NET, "CQE opcode-VAPI_CQE_RQ_RDMA_WITH_IMM\n");  
+        break;
+
+      case   VAPI_CQE_INVAL_OPCODE:
+        //
+        CDEBUG(D_NET, "CQE opcode-VAPI_CQE_INVAL_OPCODE\n");  
+        break;
+
+      default :
+        CDEBUG(D_NET, "CQE opcode-unknown opcode\n");  
+             break;
+    } // switch 
+    
+    schedule_timeout(RECEIVING_THREAD_TIMEOUT);//how often do we need to poll CQ 
+
+  }// receiving while loop
+
+
+}
+
+
+void CQE_event_handler(VAPI_hca_hndl_t hca_hndl, 
+                       VAPI_cq_hndl_t  cq_hndl, 
+                       void           *private)
+{
+ VAPI_ret_t       vstat; 
+ VAPI_wc_desc_t   comp_desc;   
+ unsigned long    polling_count = 0;
+ u_int32_t        timeout_usec;
+ unsigned int     priority = 100;
+ unsigned int     length;
+ VAPI_wr_id_t     wrq_id;
+ u_int32_t        transferred_data_length; /* Num. of bytes transferred */
+ void             *bufdata;
+ VAPI_virt_addr_t bufaddr;
+ unsigned long    buf_size = 0;
+ QP_info          *qp;       // point to QP_list
+ HCA_info         *hca_data;
+
+ // send Q and receive Q are using the same CQ 
+ // so only poll one CQ for both operations 
+ CDEBUG(D_NET, "IBNAL- enter CQE_event_handler\n");
+ printk("IBNAL- enter CQE_event_handler\n");
+
+ hca_data  = (HCA_info *) private; 
+ // assumption: use the first QP in the HCA's list; qp was previously read
+ // below without ever being initialized
+ qp        = hca_data->qp_ptr[0];
+
+ //
+ // use event driven  
+ //
+
+ vstat = VAPI_poll_cq(hca_data->hca_hndl,hca_data->cq_hndl, &comp_desc);   
+
+ if (vstat == VAPI_CQ_EMPTY) { 
+   CDEBUG(D_NET, "CQE_event_handler: there is no event in CQE, how could"
+                  " this " "happened \n");
+   printk("CQE_event_handler: there is no event in CQE, how could"
+                  " this " "happened \n");
+
+ } 
+ else {
+   if (vstat != (VAPI_OK)) {
+     CDEBUG(D_NET, "error while polling completion queue vstat %d - %s\n", 
+                vstat, VAPI_strerror(vstat));
+     printk("error while polling completion queue vstat %d - %s\n", 
+                                               vstat, VAPI_strerror(vstat));
+     return; 
+   }
+ }
+
+ // process the complete event 
+ switch(comp_desc.opcode) {
+    case   VAPI_CQE_SQ_SEND_DATA:
+      // about the Send Q ,POST SEND completion 
+      // who needs this information
+      // get wrq_id
+      // mark MSbuf_list[wr_id].status = BUF_REGISTERED 
+               
+      wrq_id = comp_desc.id;
+
+#ifdef IBNAL_SELF_TESTING
+      if(wrq_id == SEND_RECV_TEST_ID) {
+        printk("IBNAL_SELF_TESTING - VAPI_CQE_SQ_SEND_DATA \n"); 
+      }
+#else  
+      if(RDMA_OP_ID < wrq_id) {
+        // this is an RDMA op id; adjust it to the right entry
+        wrq_id = wrq_id - RDMA_OP_ID;
+        vstat = VAPI_deregister_mr(qp->hca_hndl, 
+                                   Local_rdma_info.send_rdma_mr_hndl);
+      }
+
+      if(vstat != VAPI_OK) {
+        CERROR(" VAPI_CQE_SQ_SEND_DATA: Failed deregistering a RDMA"
+               " recv  mem region %s\n", VAPI_strerror(vstat));
+      }
+
+      if((RDMA_CTS_ID <= wrq_id) && (wrq_id < RDMA_OP_ID)) {
+        // RTS or CTS send complete, release send buffer 
+        if(wrq_id >= RDMA_RTS_ID)
+          wrq_id = wrq_id - RDMA_RTS_ID;
+        else 
+          wrq_id = wrq_id - RDMA_CTS_ID;
+      }
+
+      spin_lock(&MSB_mutex[(int) wrq_id]);
+      MRbuf_list[wrq_id].status = BUF_REGISTERED; 
+      spin_unlock(&MSB_mutex[(int) wrq_id]);
+#endif 
+
+      CDEBUG(D_NET, "CQE opcode-VAPI_CQE_SQ_SEND_DATA\n");  
+
+      break;
+
+    case   VAPI_CQE_SQ_RDMA_WRITE:
+      // about the Send Q,  RDMA write completion 
+      // who needs this information
+      // data was successfully written from source to destination 
+             
+      //  get wr_id
+      //  mark MSbuf_list[wr_id].status = BUF_REGISTERED 
+      //  de-register  rdma buffer 
+      //
+             
+       CDEBUG(D_NET, "CQE opcode-VAPI_CQE_SQ_RDMA_WRITE\n");  
+       break;
+
+      case   VAPI_CQE_SQ_RDMA_READ:
+        // about the Send Q
+        // RDMA read completion 
+        // who needs this information
+        // data was successfully read from destination to source 
+         CDEBUG(D_NET, "CQE opcode- VAPI_CQE_SQ_RDMA_READ\n");  
+         break;
+
+      case   VAPI_CQE_SQ_COMP_SWAP:
+        // about the Send Q
+        // RDMA write completion 
+        // who needs this information
+            
+        CDEBUG(D_NET, "CQE opcode-VAPI_CQE_SQ_COMP_SWAP\n");  
+        break;
+
+      case   VAPI_CQE_SQ_FETCH_ADD:
+        // about the Send Q
+        // RDMA write completion 
+        // who needs this information
+             
+        CDEBUG(D_NET, "CQE opcode-VAPI_CQE_SQ_FETCH_ADD\n");  
+        break;
+
+      case   VAPI_CQE_SQ_BIND_MRW:
+        // about the Send Q
+        // RDMA write completion 
+        // who needs this information
+             
+        CDEBUG(D_NET, "CQE opcode-VAPI_CQE_SQ_BIND_MRW\n");  
+        break;
+
+      case   VAPI_CQE_RQ_SEND_DATA:
+        // about the Receive Q
+        // process the incoming data and
+        // forward it to .....
+        // a receive completion event has arrived at the CQ 
+        // issue a receive to get the arriving data out of the CQ 
+        // pass the received data on for further processing 
+         
+         CDEBUG(D_NET, "CQE opcode-VAPI_CQE_RQ_SEND_DATA\n");  
+          
+         wrq_id = comp_desc.id ;
+
+#ifdef IBNAL_SELF_TESTING
+
+      char        rbuf[KB_32];
+      int i;
+
+      if(wrq_id == SEND_RECV_TEST_ID) {
+        printk("IBNAL_SELF_TESTING - VAPI_CQE_RQ_SEND_DATA\n"); 
+      }
+
+      bufaddr = (VAPI_virt_addr_t)(MT_virt_addr_t) 
+                       MRbuf_list[ SEND_RECV_TEST_BUF_ID].buf_addr; 
+      MRbuf_list[SEND_RECV_TEST_BUF_ID].status = BUF_INUSE; 
+      memcpy(&rbuf, &bufaddr, KB_32);    
+      
+
+      for(i=0; i < 16; i++)
+              printk("rbuf[%d]=%c, ", rbuf[i]);
+      printk("\n");
+
+      // repost this receive buffer and mark it as BUF_REGISTERED 
+      vstat = repost_recv_buf(qp,SEND_RECV_TEST_BUF_ID);
+      if(vstat != (VAPI_OK)) {
+        printk("error while polling completion queue\n");
+      }
+      else {
+        MRbuf_list[SEND_RECV_TEST_BUF_ID].status = BUF_REGISTERED; 
+      }
+#else  
+         transferred_data_length = comp_desc.byte_len;
+             
+         if((wrq_id >= RDMA_CTS_ID) && (wrq_id < RDMA_OP_ID)) {
+           // this is RTS/CTS message 
+           // process it locally and don't pass it to portals layer 
+           // adjust wrq_id to get the right entry in MRbuf_list 
+                   
+           if(wrq_id >= RDMA_RTS_ID)
+             wrq_id = wrq_id - RDMA_RTS_ID;
+           else 
+             wrq_id = wrq_id - RDMA_CTS_ID;
+
+           bufaddr = (VAPI_virt_addr_t)(MT_virt_addr_t) 
+                                           MRbuf_list[wrq_id].buf_addr; 
+           MRbuf_list[wrq_id].status = BUF_INUSE; 
+           memcpy(&Rdma_info, &bufaddr, sizeof(RDMA_Info_Exchange));    
+        
+           if(Ready_To_send == Rdma_info.opcode) 
+             // an RTS request message from remote node 
+             // prepare local RDMA buffer and send local rdma info to
+             // remote node 
+             CTS_handshaking_protocol(&Rdma_info);
+           else 
+             if((Clear_To_send == Rdma_info.opcode) && 
+                                (RDMA_BUFFER_RESERVED == Rdma_info.flag))
+               Cts_Message_arrived = YES;
+             else 
+               if(RDMA_BUFFER_UNAVAILABLE == Rdma_info.flag) 
+                 CERROR("RDMA operation abort-RDMA_BUFFER_UNAVAILABLE\n");
+         }
+         else {
+           //
+           // this is an incoming message for the portals layer 
+           // move to PORTALS layer for further processing 
+           //
+                     
+           bufaddr = (VAPI_virt_addr_t)(MT_virt_addr_t)
+                                MRbuf_list[wrq_id].buf_addr; 
+
+           MRbuf_list[wrq_id].status = BUF_INUSE; 
+           transferred_data_length = comp_desc.byte_len;
+
+           kibnal_rx(hca_data->kib_data, 
+                     bufaddr, 
+                     transferred_data_length, 
+                     MRbuf_list[wrq_id].buf_size, 
+                     priority); 
+         }
+
+         // repost this receive buffer and mark it as BUF_REGISTERED 
+         vstat = repost_recv_buf(qp, wrq_id);
+         if(vstat != (VAPI_OK)) {
+           CERROR("error while polling completion queue\n");
+         }
+         else {
+           MRbuf_list[wrq_id].status = BUF_REGISTERED; 
+         }
+#endif
+
+         break;
+
+      case   VAPI_CQE_RQ_RDMA_WITH_IMM:
+        // about the Receive Q
+        CDEBUG(D_NET, "CQE opcode-VAPI_CQE_RQ_RDMA_WITH_IMM\n");  
+
+        wrq_id = comp_desc.id ;
+        transferred_data_length = comp_desc.byte_len;
+             
+        if(wrq_id ==  RDMA_OP_ID) {
+          // this is an RDMA op; locate the RDMA memory buffer address 
+              
+          bufaddr = (VAPI_virt_addr_t)(MT_virt_addr_t) Local_rdma_info.raddr;
+
+          transferred_data_length = comp_desc.byte_len;
+
+          kibnal_rx(hca_data->kib_data, 
+                    bufaddr, 
+                    transferred_data_length, 
+                    Local_rdma_info.buf_length, 
+                    priority); 
+
+          // de-register this RDMA receive memory buffer
+          // too early ??    test & check 
+          vstat = VAPI_deregister_mr(qp->hca_hndl, Local_rdma_info.recv_rdma_mr_hndl);
+          if(vstat != VAPI_OK) {
+            CERROR("VAPI_CQE_RQ_RDMA_WITH_IMM: Failed deregistering a RDMA"
+               " recv  mem region %s\n", VAPI_strerror(vstat));
+          }
+        }
+
+        CDEBUG(D_NET, "CQE opcode-VAPI_CQE_RQ_RDMA_WITH_IMM\n");  
+        break;
+
+      case   VAPI_CQE_INVAL_OPCODE:
+        //
+        CDEBUG(D_NET, "CQE opcode-VAPI_CQE_INVAL_OPCODE\n");  
+        break;
+
+      default :
+        CDEBUG(D_NET, "CQE opcode-unknown opcode\n");  
+
+        break;
+    } // switch 
+    
+  // issue a new request for completion event notification 
+  vstat = VAPI_req_comp_notif(hca_data->hca_hndl, 
+                              hca_data->cq_hndl,
+                              VAPI_NEXT_COMP); 
+
+
+  if(vstat != VAPI_OK) {
+    CERROR("PI_req_comp_notif: Failed %s\n", VAPI_strerror(vstat));
+  }
+
+  return; // end of event handler 
+
+}
+
+
+
+int
+kibnal_cmd(struct portal_ioctl_data * data, void * private)
+{
+  int rc ;
+
+  CDEBUG(D_NET, "kibnal_cmd \n");  
+
+  return YES;
+}
+
+
+
+void ibnal_send_recv_self_testing(int *my_role)
+{
+ VAPI_ret_t           vstat;
+ VAPI_sr_desc_t       sr_desc;
+ VAPI_sg_lst_entry_t  sr_sg;
+ QP_info              *qp;
+ VAPI_wr_id_t         send_id;
+ int                  buf_id;
+ char                 sbuf[KB_32];
+ char                 rbuf[KB_32];
+ int                  i;
+ int                  buf_length = KB_32;
+ VAPI_wc_desc_t       comp_desc;
+ int                  num_send = 1;
+ int                  loop_count = 0;
+
+ // make it as a daemon process 
+ // kportal_daemonize("ibnal_send_recv_self_testing");  
+
+ printk("My role is 0X%X\n", *my_role);
+
+if(*my_role ==  TEST_SEND_MESSAGE)  {
+ printk("Enter ibnal_send_recv_self_testing\n");
+
+ memset(&sbuf, 'a', KB_32);
+ memset(&rbuf, ' ', KB_32);
+ send_id = SEND_RECV_TEST_ID; 
+ buf_id = SEND_RECV_TEST_BUF_ID;
+
+ qp = &QP_list[buf_id];
+
+ sr_desc.opcode    = VAPI_SEND;
+ sr_desc.comp_type = VAPI_SIGNALED;
+ sr_desc.id        =  send_id;
+
+ // scatter and gather info
+ sr_sg.len  = KB_32;
+ sr_sg.lkey = MSbuf_list[buf_id].mr.l_key; // use send MR
+ sr_sg.addr = (VAPI_virt_addr_t)(MT_virt_addr_t) MSbuf_list[buf_id].buf_addr;
+
+ // copy data into the registered send buffer (the buffer address, not &sr_sg.addr)
+ memcpy((void *)(unsigned long)sr_sg.addr, sbuf, buf_length);
+
+ sr_desc.sg_lst_p = &sr_sg;
+ sr_desc.sg_lst_len = 1; // only 1 entry is used
+ sr_desc.fence = TRUE;
+ sr_desc.set_se = FALSE;
+
+ /*
+ // call VAPI_post_sr to send out this data
+ vstat = VAPI_post_sr(qp->hca_hndl, qp->qp_hndl, &sr_desc);
+
+ if (vstat != VAPI_OK) {
+   printk("VAPI_post_sr failed (%s).\n",VAPI_strerror(vstat));
+ }
+
+ printk("VAPI_post_sr success.\n");
+ */
+
+ }
+else {
+  printk("I am a receiver and doing nothing here\n"); 
+}
+         
+ printk("ibnal_send_recv_self_testing thread exit \n");
+
+ return;
+
+}
+
+
+//
+// ibnal initialize process  
+//
+// 1.  Bring up Infiniband network interface 
+//     * 
+// 2.  Initialize a PORTALS nal interface 
+// 
+//
+int __init 
+kibnal_initialize(void)
+{
+   int           rc;
+   int           ntok;
+   unsigned long sizemask;
+   unsigned int  nid;
+   VAPI_ret_t    vstat;
+
+
+   portals_debug_set_level(IBNAL_DEBUG_LEVEL_1);
+
+   CDEBUG(D_MALLOC, "start kmem %d\n", atomic_read (&portal_kmemory));
+
+   CDEBUG(D_PORTALS, "kibnal_initialize: Enter kibnal_initialize\n");
+
+   // set api functional pointers 
+   kibnal_api.forward    = kibnal_forward;
+   kibnal_api.shutdown   = kibnal_shutdown;
+   kibnal_api.yield      = kibnal_yield;
+   kibnal_api.validate   = NULL; /* our api validate is a NOOP */
+   kibnal_api.lock       = kibnal_lock;
+   kibnal_api.unlock     = kibnal_unlock;
+   kibnal_api.nal_data   = &kibnal_data; // this is so called private data 
+   kibnal_api.refct      = 1;
+   kibnal_api.timeout    = NULL;
+   kibnal_lib.nal_data   = &kibnal_data;
+  
+   memset(&kibnal_data, 0, sizeof(kibnal_data));
+
+   // initialize kib_list list data structure 
+   INIT_LIST_HEAD(&kibnal_data.kib_list);
+
+   kibnal_data.kib_cb = &kibnal_lib;
+
+   spin_lock_init(&kibnal_data.kib_dispatch_lock);
+
+
+   //  
+   // bring up the IB inter-connect network interface 
+   // setup QP, CQ 
+   //
+   vstat = IB_Open_HCA(&kibnal_data);
+
+   if(vstat != VAPI_OK) {
+     CERROR("kibnal_initialize: IB_Open_HCA failed: %d- %s\n", 
+                                                vstat, VAPI_strerror(vstat));
+
+     printk("kibnal_initialize: IB_Open_HCA failed: %d- %s\n", 
+                                                vstat, VAPI_strerror(vstat));
+     return NO;
+   }
+
+   kibnal_data.kib_nid = (__u64 )Hca_hndl;//convert Hca_hndl to 64-bit format
+   kibnal_data.kib_init = 1;
+
+   CDEBUG(D_NET, " kibnal_data.kib_nid 0x%x%x\n", kibnal_data.kib_nid);
+   printk(" kibnal_data.kib_nid 0x%x%x\n", kibnal_data.kib_nid);
+
+   /* Network interface ready to initialise */
+   // get an entry in the PORTALS table for this IB protocol 
+
+   CDEBUG(D_PORTALS,"Call PtlNIInit to register this Infiniband Interface\n");
+   printk("Call PtlNIInit to register this Infiniband Interface\n");
+
+   rc = PtlNIInit(kibnal_init, 32, 4, 0, &kibnal_ni);
+
+   if(rc != PTL_OK) {
+     CERROR("kibnal_initialize: PtlNIInit failed %d\n", rc);
+     printk("kibnal_initialize: PtlNIInit failed %d\n", rc);
+     kibnal_finalize();
+     return (-ENOMEM);
+   }
+
+   CDEBUG(D_PORTALS,"kibnal_initialize: PtlNIInit DONE\n");
+   printk("kibnal_initialize: PtlNIInit DONE\n");
+
+
+
+#ifdef  POLL_BASED_CQE_HANDLING 
+   // create a receiving thread: main loop
+   // this is the polling-based main loop   
+   kernel_thread(k_recv_thread, &Hca_data, 0);
+#endif
+
+#ifdef EVENT_BASED_CQE_HANDLING
+  // for completion event handling,  this is event based CQE handling 
+  vstat = IB_Set_Event_Handler(Hca_data, &kibnal_data);
+
+  if (vstat != VAPI_OK) {
+     CERROR("IB_Set_Event_Handler failed: %d - %s \n", 
+                                           vstat, VAPI_strerror(vstat));
+     return vstat;
+  }
+
+  CDEBUG(D_PORTALS,"IB_Set_Event_Handler Done \n");
+  printk("IB_Set_Event_Handler Done \n");
+  
+#endif
+
+   PORTAL_SYMBOL_REGISTER(kibnal_ni);
+
+#ifdef IBNAL_SELF_TESTING
+  //
+  // test HCA send recv before normal event handling 
+  //
+  int  my_role;
+  my_role = TEST_SEND_MESSAGE;
+
+  printk("my role is TEST_RECV_MESSAGE\n");
+
+  // kernel_thread(ibnal_send_recv_self_testing, &my_role, 0);
+   
+  ibnal_send_recv_self_testing(&my_role);
+
+#endif 
+
+  return 0;
+
+}
+
+
+
+MODULE_AUTHOR("Hsingbung(HB) Chen <hbchen@lanl.gov>");
+MODULE_DESCRIPTION("Kernel Infiniband NAL v0.1");
+MODULE_LICENSE("GPL");
+
+module_init (kibnal_initialize);
+module_exit (kibnal_finalize);
+
+EXPORT_SYMBOL(kibnal_ni);
+
diff --git a/lustre/portals/knals/ibnal/ibnal.h b/lustre/portals/knals/ibnal/ibnal.h
new file mode 100644 (file)
index 0000000..ff5aeb3
--- /dev/null
@@ -0,0 +1,564 @@
+#ifndef _IBNAL_H
+#define _IBNAL_H
+
+#include <asm/system.h>
+#include <asm/uaccess.h>
+#include <asm/segment.h>
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/string.h>
+
+#include <linux/ipc.h>
+#include <linux/shm.h>
+
+#include <linux/stat.h>
+#include <linux/errno.h>
+#include <linux/locks.h>
+#include <linux/unistd.h>
+#include <linux/init.h>
+#include <linux/fs.h>
+#include <linux/file.h>
+#include <linux/list.h>
+#include <linux/in.h>
+#include <unistd.h>
+
+#define DEBUG_SUBSYSTEM S_IBNAL
+
+#include <portals/p30.h>
+#include <portals/lib-p30.h>
+#include <linux/kp30.h>
+
+// Infiniband VAPI/EVAPI header files  
+// Mellanox MT23108 VAPI
+#include <vapi.h>
+#include <vapi_types.h>
+#include <vapi_common.h>
+#include <evapi.h>
+
+// pick a port for this RDMA information exhange between two hosts
+#define HOST_PORT           11211 
+#define QUEUE_SIZE          1024
+#define HCA_PORT_1          1
+#define HCA_PORT_2          2 
+#define DEBUG_SUBSYSTEM S_IBNAL
+
+#define START_SEND_WRQ_ID        0
+#define START_RECV_WRQ_ID        0
+#define START_RDMA_WRQ_ID        0  
+
+#define DEFAULT_PRIORITY         100
+
+#define WAIT_FOT_R_RDMA_TIMEOUT 10000
+#define MAX_NUM_TRY      3000 
+
+#define MAX_NUM_POLL     300 
+#define MAX_LOOP_COUNT   500
+
+#define MAX_GID          32 
+#define MCG_BUF_LENGTH   128
+
+#define SHARED_SEGMENT_SIZE   0x10000   
+#define HCA_EXCHANGE_SHM_KEY  999 // shared memory key for HCA data exchange 
+
+// some internals opcodes for IB operations used in IBNAL
+#define SEND_QP_INFO          0X00000001 
+#define RECV_QP_INFO          0X00000010 
+
+// Mellanox InfiniHost MT23108 
+// QP/CQ related information
+//
+
+#define MTU_256     1 /* 1-256,2-512,3-1024,4-2048 */
+#define MTU_512     2 /* 1-256,2-512,3-1024,4-2048 */
+#define MTU_1024    3 /* 1-256,2-512,3-1024,4-2048 */
+#define MTU_2048    4 /* 1-256,2-512,3-1024,4-2048 */
+
+// number of entries for each CQ and WQ 
+// how much do we need ?
+#define NUM_CQE        1024
+#define NUM_WQE        1024 
+#define MAX_OUT_SQ     64 
+#define MAX_OUT_RQ     64
+
+#define NUM_MBUF       256 
+#define NUM_RDMA_RESERVED_ENTRY 128 
+#define NUM_QPS        256 
+
+#define INVALID_WR_ID  ((VAPI_wr_id_t) -1)
+
+
+// for Vector IO 
+// scatter and gather 
+// Portals can support up to 64 IO-Vectors 
+// how much do we need ? 
+#define NUM_SGE        1 
+#define NUM_SG         1 
+#define NUM_CQ        1        
+
+#define ONE_KB    1024
+#define ONE_MB    (1024 * ONE_KB)
+#define ONE_GB    (1024 * ONE_MB)
+
+
+#define KB_4      (1024 * 4)
+#define KB_8      (1024 * 8)
+#define KB_16     (1024 * 16)
+#define KB_32     (1024 * 32)
+#define KB_64     (1024 * 64)
+#define KB_128    (1024 * 128)
+#define KB_256    (1024 * 256)
+
+// 256 entry in registered buffer list 
+// small size message 
+#define Num_4_KB       64 
+#define Num_8_KB       64 
+#define Num_16_KB      40 
+#define Num_32_KB      40 
+#define Num_64_KB      40 
+#define Num_128_KB     4 
+#define Num_256_KB     4 
+
+#define SMALL_MSG_SIZE KB_32     
+
+#define MAX_MSG_SIZE   (ONE_MB * 512)
+
+//   128 64KB buffers for send
+//   128 64KB buffers for recv  
+//   used in RDMA operations only 
+
+#define NUM_ENTRY      128 
+
+#define End_4_kb        Num_4_KB 
+#define End_8_kb        End_4_kb  + Num_8_KB 
+#define End_16_kb       End_8_kb  + Num_16_KB
+#define End_32_kb       End_16_kb + Num_32_KB
+#define End_64_kb       End_32_kb + Num_64_KB
+#define End_128_kb      End_64_kb + Num_128_KB
+#define End_256_kb      End_128_kb+ Num_256_KB
+
+
+#define SEND_BUF_SIZE   KB_32
+#define RECV_BUF_SIZE   SEND_BUF_SIZE
+
+// #define POLL_BASED_CQE_HANDLING     1
+#define EVENT_BASED_CQE_HANDLING        1
+#define IBNAL_SELF_TESTING             1
+
+#ifdef  IBNAL_SELF_TESTING
+#undef  IBNAL_SELF_TESTING
+#endif
+
+
+#define MSG_SIZE_SMALL 1 
+#define MSG_SIZE_LARGE 2 
+
+
+
+// some default configuration values for early testing 
+#define DEFAULT_DLID   1  // default destination link ID
+#define DEFAULT_QP_NUM 4  // default QP number 
+#define P_KEY          0xFFFF // do we need default value
+#define PKEY_IX        0x0 // do we need default value
+#define Q_KEY          0x012  // do we need default value 
+#define L_KEY          0x12345678 // do we need default value 
+#define R_KEY          0x87654321 // do we need default value 
+#define HCA_ID         "InfiniHost0" // default 
+#define START_PSN      0
+#define START_SQ_PSN   0
+#define START_RQ_PSN   0
+
+
+#define __u_long_long   unsigned long long
+
+#define         IBNAL_DEBUG      1
+
+#define         USE_SHARED_MEMORY_AND_SOCKET 1
+
+// operation type
+#define TRY_SEND_ONLY    1
+
+#define YES     1  
+#define NO      0 
+
+//
+// a common data structure for IB QP's operation
+// each QP is associated with an QP_info structure 
+//
+typedef struct QP_info 
+{
+  VAPI_hca_hndl_t       hca_hndl;      // HCA handle
+  IB_port_t             port;          // port number 
+  VAPI_qp_hndl_t        qp_hndl;       // QP's handle list 
+  VAPI_qp_state_t       qp_state;      // QP's current state 
+  VAPI_pd_hndl_t        pd_hndl;       // protection domain
+  VAPI_cq_hndl_t        cq_hndl;       // CQ handle (SQ and RQ share one CQ)
+  VAPI_cq_hndl_t        sq_cq_hndl;    // send-queue CQ's handle 
+  VAPI_cq_hndl_t        rq_cq_hndl;    // receive-queue CQ's handle
+  VAPI_ud_av_hndl_t     av_hndl;       // UD address-vector handle
+  VAPI_qp_init_attr_t   qp_init_attr;  // QP's init attribute 
+  VAPI_qp_attr_t        qp_attr;       // QP's attribute - dlid 
+  VAPI_qp_prop_t        qp_prop;       // QP's properties
+  VAPI_hca_port_t       hca_port;  
+  VAPI_qp_num_t         qp_num;    // QP's number 
+  VAPI_qp_num_t         rqp_num;       // remote QP's number 
+  IB_lid_t              slid;
+  IB_lid_t              dlid;
+  VAPI_gid_t            src_gid;
+
+  u_int32_t            buf_size;
+  VAPI_virt_addr_t      buf_addr;
+  char                *bufptr;
+  VAPI_mrw_t            mr;       
+  VAPI_mr_hndl_t        mr_hndl;
+  VAPI_virt_addr_t      raddr;
+  VAPI_rkey_t           rkey;
+  VAPI_lkey_t           lkey;
+
+  VAPI_wr_id_t          last_posted_send_id; // user defined work request ID 
+  VAPI_wr_id_t          last_posted_rcv_id;  // user defined work request ID
+  VAPI_mw_hndl_t        mw_hndl;       // memory window handle 
+  VAPI_rkey_t           mw_rkey;       // memory window rkey
+  VAPI_sg_lst_entry_t   sg_lst[256];       // scatter and gather list 
+  int                   sg_list_sz;    // set as NUM_SGE
+  VAPI_wr_id_t          wr_id;         //
+  spinlock_t            snd_mutex;
+  spinlock_t            rcv_mutex;
+  spinlock_t            bl_mutex;
+  spinlock_t            cln_mutex;
+  int                   cur_RDMA_outstanding;
+  int                   cur_send_outstanding;
+  int                   cur_posted_rcv_bufs;
+  int                   snd_rcv_balance;
+} QP_info; 
+
+
+// buffer status 
+#define  BUF_REGISTERED   0x10000000 
+#define  BUF_INUSE       0x01000000  
+#define  BUF_UNREGISTERED 0x00100000 
+
+// buffer type 
+#define  REG_BUF          0x10000000
+#define  RDMA_BUF         0x01000000 
+
+//
+// IMM data 
+// 
+#define   IMM_000         (0 << 32)
+#define   IMM_001         (1 << 32)
+#define   IMM_002         (2 << 32)
+#define   IMM_003         (3 << 32)
+#define   IMM_004         (4 << 32)
+#define   IMM_005         (5 << 32)
+#define   IMM_006         (6 << 32)
+#define   IMM_007         (7 << 32)
+#define   IMM_008         (8 << 32)
+#define   IMM_009         (9 << 32)
+#define   IMM_010         (10 << 32)
+#define   IMM_011         (11 << 32)
+#define   IMM_012         (12 << 32)
+#define   IMM_013         (13 << 32)
+#define   IMM_014         (14 << 32)
+#define   IMM_015         (15 << 32)
+#define   IMM_016         (16 << 32)
+#define   IMM_017         (17 << 32)
+#define   IMM_018         (18 << 32)
+#define   IMM_019         (19 << 32)
+#define   IMM_020         (20 << 32)
+#define   IMM_021         (21 << 32)
+#define   IMM_022         (22 << 32)
+#define   IMM_023         (23 << 32)
+#define   IMM_024         (24 << 32)
+#define   IMM_025         (25 << 32)
+#define   IMM_026         (26 << 32)
+#define   IMM_027         (27 << 32)
+#define   IMM_028         (28 << 32)
+#define   IMM_029         (29 << 32)
+#define   IMM_030         (30 << 32)
+#define   IMM_031         (31 << 32)
+
+
+typedef struct Memory_buffer_info{
+       u_int32_t        buf_size;
+       VAPI_virt_addr_t buf_addr;
+       char             *bufptr;
+       VAPI_mrw_t       mr;       
+       VAPI_mr_hndl_t   mr_hndl;
+        int              status;
+       int              ref_count;  
+        int              buf_type;
+       VAPI_virt_addr_t raddr;
+       VAPI_rkey_t      rkey;
+       VAPI_lkey_t      lkey;
+} Memory_buffer_info;
+
+typedef struct RDMA_Info_Exchange {
+       int               opcode;
+       int               buf_length;
+       VAPI_mrw_t        recv_rdma_mr;
+       VAPI_mr_hndl_t    recv_rdma_mr_hndl;
+       VAPI_mrw_t        send_rdma_mr;
+       VAPI_mr_hndl_t    send_rdma_mr_hndl;
+       VAPI_virt_addr_t  raddr;
+       VAPI_rkey_t       rkey;
+       int               flag;
+}  RDMA_Info_Exchange;
+
+// opcode for Rdma info exchange RTS/CTS 
+#define  Ready_To_send     0x10000000
+#define  Clear_To_send     0x01000000
+
+#define  RDMA_RTS_ID      5555 
+#define  RDMA_CTS_ID      7777 
+#define  RDMA_OP_ID       9999 
+#define  SEND_RECV_TEST_ID 2222 
+#define  SEND_RECV_TEST_BUF_ID 0 
+
+#define  TEST_SEND_MESSAGE 0x00000001 
+#define  TEST_RECV_MESSAGE 0x00000002
+
+
+#define  RTS_CTS_TIMEOUT           50
+#define  RECEIVING_THREAD_TIMEOUT  50 
+#define  WAIT_FOR_SEND_BUF_TIMEOUT 50
+
+#define  IBNAL_DEBUG_LEVEL_1   0XFFFFFFFF  
+#define  IBNAL_DEBUG_LEVEL_2   D_PORTALS | D_NET   | D_WARNING | D_MALLOC | \ 
+                              D_ERROR   | D_OTHER | D_TRACE   | D_INFO
+                              
+
+// flag for Rdma info exchange 
+#define  RDMA_BUFFER_RESERVED       0x10000000
+#define  RDMA_BUFFER_UNAVAILABLE    0x01000000
+
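+/*
+ * Sketch of the RTS/CTS exchange these constants drive (assumed flow, pieced
+ * together from the handlers in this NAL; RTS_handshaking_protocol() and
+ * CTS_handshaking_protocol() are authoritative):
+ *
+ *   sender:    RTS_handshaking_protocol(len);       // post RTS (Ready_To_send)
+ *   receiver:  CTS_handshaking_protocol(&Rdma_info);
+ *                  // register an RDMA buffer and reply CTS (Clear_To_send)
+ *                  // with raddr/rkey and flag = RDMA_BUFFER_RESERVED
+ *   sender:    wait until Cts_Message_arrived == YES, then RDMA-write the
+ *              payload to the advertised raddr/rkey using the RDMA_OP_ID
+ *              work request; RDMA_BUFFER_UNAVAILABLE aborts the transfer.
+ */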
+
+// receiving data structure 
+typedef struct {
+        ptl_hdr_t         *krx_buffer; // pointer to receiving buffer
+        unsigned long     krx_len;  // length of buffer
+        unsigned int      krx_size; // 
+        unsigned int      krx_priority; // do we need this 
+        struct list_head  krx_item;
+}  kibnal_rx_t;
+
+// transmitting data structure 
+typedef struct {
+        nal_cb_t      *ktx_nal;
+        void          *ktx_private;
+        lib_msg_t     *ktx_cookie;
+        char          *ktx_buffer;
+        size_t         ktx_len;
+        unsigned long  ktx_size;
+        int            ktx_ndx;
+        unsigned int   ktx_priority;
+        unsigned int   ktx_tgt_node;
+        unsigned int   ktx_tgt_port_id;
+}  kibnal_tx_t;
+
+
+typedef struct {
+        char              kib_init;
+        char              kib_shuttingdown;
+        IB_port_t         port_num; // IB port information
+        struct list_head  kib_list;
+        ptl_nid_t         kib_nid;
+        nal_t            *kib_nal; 
+        nal_cb_t         *kib_cb;
+        struct kib_trans *kib_trans; // do I need this 
+        struct tq_struct  kib_ready_tq;
+        spinlock_t        kib_dispatch_lock;
+}  kibnal_data_t;
+
+
+//
+// A data structure for keeping the HCA information in system
+// information related to HCA and hca_handle will be kept here 
+//
+typedef struct HCA_Info 
+{
+  VAPI_hca_hndl_t       hca_hndl;     // HCA handle
+  VAPI_pd_hndl_t        pd_hndl;      // protection domain
+  IB_port_t             port;         // port number 
+  int                   num_qp;       // number of qp used  
+  QP_info               *qp_ptr[NUM_QPS]; // point to QP_list
+  int                   num_cq;       // number of cq used 
+  VAPI_cq_hndl_t        cq_hndl;   
+  VAPI_cq_hndl_t        sq_cq_hndl;   
+  VAPI_cq_hndl_t        rq_cq_hndl;   
+  IB_lid_t              dlid;
+  IB_lid_t              slid;
+  kibnal_data_t         *kib_data; // for PORTALS operations
+} HCA_info;
+
+
+
+
+// Remote HCA Info information 
+typedef struct Remote_HCA_Info {
+        unsigned long     opcode;
+        unsigned long     length; 
+        IB_lid_t          dlid[NUM_QPS];
+        VAPI_qp_num_t     rqp_num[NUM_QPS];
+} Remote_QP_Info;
+
+typedef struct  Bucket_index{
+     int start;
+     int end;
+} Bucket_index;
+
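+/*
+ * Illustrative only (an assumption about how Bucket[] is meant to be used,
+ * not code the NAL calls): the Num_*_KB/End_*_kb constants above partition
+ * the pre-registered small-message buffers into size classes, e.g.
+ *
+ *   Bucket[0] = { 0,          End_4_kb   };   // 4KB buffers
+ *   Bucket[1] = { End_4_kb,   End_8_kb   };   // 8KB buffers
+ *   ...
+ *   Bucket[6] = { End_128_kb, End_256_kb };   // 256KB buffers
+ *
+ * find_available_buf()/search_send_buf() can then look for a free MSbuf_list
+ * entry inside the bucket that matches the message size.
+ */
+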
+// functional prototypes 
+// infiniband initialization 
+int kib_init(kibnal_data_t *);
+
+// receiving thread 
+void kibnal_recv_thread(HCA_info *);
+void recv_thread(HCA_info *);
+
+// forward data packet 
+void kibnal_fwd_packet (void *, kpr_fwd_desc_t *);
+
+// global data structures 
+extern kibnal_data_t        kibnal_data;
+extern ptl_handle_ni_t      kibnal_ni;
+extern nal_t                kibnal_api;
+extern nal_cb_t             kibnal_lib;
+extern QP_info              QP_list[];
+extern QP_info              CQ_list[];
+extern HCA_info             Hca_data;
+extern VAPI_hca_hndl_t      Hca_hndl; 
+extern VAPI_pd_hndl_t       Pd_hndl;
+extern VAPI_hca_vendor_t    Hca_vendor;
+extern VAPI_hca_cap_t       Hca_cap;
+extern VAPI_hca_port_t      Hca_port_1_props;
+extern VAPI_hca_port_t      Hca_port_2_props;
+extern VAPI_hca_attr_t      Hca_attr;
+extern VAPI_hca_attr_mask_t Hca_attr_mask;
+extern VAPI_cq_hndl_t       Cq_SQ_hndl;   
+extern VAPI_cq_hndl_t       Cq_RQ_hndl;   
+extern VAPI_cq_hndl_t       Cq_hndl;   
+extern unsigned long        User_Defined_Small_Msg_Size;
+extern Remote_QP_Info      L_HCA_RDMA_Info;  
+extern Remote_QP_Info      R_HCA_RDMA_Info; 
+extern unsigned int         Num_posted_recv_buf;
+extern int                  R_RDMA_DATA_ARRIVED;
+extern Memory_buffer_info   MRbuf_list[];
+extern Memory_buffer_info   MSbuf_list[];
+extern Bucket_index         Bucket[]; 
+extern RDMA_Info_Exchange   Rdma_info;
+extern int                  Cts_Message_arrived;
+extern RDMA_Info_Exchange   Local_rdma_info;
+extern spinlock_t          MSB_mutex[];
+
+
+
+// kernel NAL API function prototype 
+int  kibnal_forward(nal_t *,int ,void *,size_t ,void *,size_t );
+void kibnal_lock(nal_t *, unsigned long *);
+void kibnal_unlock(nal_t *, unsigned long *);
+int  kibnal_shutdown(nal_t *, int );
+void kibnal_yield( nal_t * );
+void kibnal_invalidate(nal_cb_t *,void *,size_t ,void *);
+int  kibnal_validate(nal_cb_t *,void *,size_t ,void  **);
+
+
+
+nal_t *kibnal_init(int , ptl_pt_index_t , ptl_ac_index_t , ptl_pid_t );
+void __exit kibnal_finalize(void ); 
+VAPI_ret_t create_qp(QP_info *, int );
+VAPI_ret_t init_qp(QP_info *, int );
+VAPI_ret_t IB_Open_HCA(kibnal_data_t *);
+VAPI_ret_t IB_Close_HCA(void );
+VAPI_ret_t createMemRegion(VAPI_hca_hndl_t, VAPI_pd_hndl_t); 
+VAPI_ret_t  deleteMemRegion(QP_info *, int );
+
+void ibnal_send_recv_self_testing(int *);
+
+int  __init kibnal_initialize(void);
+
+
+
+/* CB NAL functions */
+int kibnal_send(nal_cb_t *, 
+                void *, 
+                lib_msg_t *, 
+                ptl_hdr_t *,
+                int, 
+                ptl_nid_t, 
+                ptl_pid_t, 
+                unsigned int, 
+                ptl_kiov_t *, 
+                size_t);
+
+int kibnal_send_pages(nal_cb_t *, 
+                      void *, 
+                      lib_msg_t *, 
+                      ptl_hdr_t *,
+                      int, 
+                      ptl_nid_t, 
+                      ptl_pid_t, 
+                      unsigned int, 
+                      ptl_kiov_t *, 
+                      size_t);
+int kibnal_recv(nal_cb_t *, void *, lib_msg_t *,
+                        unsigned int, struct iovec *, size_t, size_t);
+int kibnal_recv_pages(nal_cb_t *, void *, lib_msg_t *,
+                        unsigned int, ptl_kiov_t *, size_t, size_t);
+int  kibnal_read(nal_cb_t *,void *,void *,user_ptr ,size_t );
+int  kibnal_write(nal_cb_t *,void *,user_ptr ,void *,size_t );
+int  kibnal_callback(nal_cb_t * , void *, lib_eq_t *, ptl_event_t *);
+void *kibnal_malloc(nal_cb_t *,size_t );
+void kibnal_free(nal_cb_t *,void *,size_t );
+int  kibnal_map(nal_cb_t *, unsigned int , struct iovec *, void **);
+void kibnal_unmap(nal_cb_t *, unsigned int , struct iovec *, void **);
+int  kibnal_map_pages(nal_cb_t *, unsigned int , ptl_kiov_t *, void **);
+void kibnal_unmap_pages(nal_cb_t * , unsigned int , ptl_kiov_t *, void **);
+void kibnal_printf(nal_cb_t *, const char *, ...);
+void kibnal_cli(nal_cb_t *,unsigned long *); 
+void kibnal_sti(nal_cb_t *,unsigned long *);
+int  kibnal_dist(nal_cb_t *,ptl_nid_t ,unsigned long *);
+
+void kibnal_fwd_packet (void *, kpr_fwd_desc_t *);
+void kibnal_rx(kibnal_data_t *, 
+               VAPI_virt_addr_t ,
+               u_int32_t,
+               u_int32_t,
+               unsigned int);
+                
+int  kibnal_end(kibnal_data_t *);
+
+void async_event_handler(VAPI_hca_hndl_t , VAPI_event_record_t *,void *);
+
+void CQE_event_handler(VAPI_hca_hndl_t ,VAPI_cq_hndl_t , void  *);
+
+
+VAPI_ret_t Send_Small_Msg(char *, int );
+VAPI_ret_t Send_Large_Msg(char *, int );
+
+VAPI_ret_t repost_recv_buf(QP_info *, VAPI_wr_id_t );
+int post_recv_bufs(VAPI_wr_id_t );
+int  server_listen_thread(void *);
+VAPI_wr_id_t RTS_handshaking_protocol(int );
+VAPI_wr_id_t CTS_handshaking_protocol(RDMA_Info_Exchange *);
+
+VAPI_ret_t createMemRegion_RDMA(VAPI_hca_hndl_t ,
+                               VAPI_pd_hndl_t  ,
+                               char         *,
+                               int             , 
+                               VAPI_mr_hndl_t  *,
+                               VAPI_mrw_t      *);
+
+
+VAPI_ret_t IB_Set_Event_Handler(HCA_info , kibnal_data_t *);
+
+VAPI_ret_t IB_Set_Async_Event_Handler(HCA_info ,kibnal_data_t *);
+
+VAPI_wr_id_t find_available_buf(int );
+VAPI_wr_id_t search_send_buf(int );
+VAPI_wr_id_t find_filler_list(int ,int );
+int insert_MRbuf_list(int );
+
+
+#endif  /* _IBNAL_H */
diff --git a/lustre/portals/knals/ibnal/ibnal_cb.c b/lustre/portals/knals/ibnal/ibnal_cb.c
new file mode 100644 (file)
index 0000000..2c07cc4
--- /dev/null
@@ -0,0 +1,1288 @@
+/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
+ * vim:expandtab:shiftwidth=8:tabstop=8:
+ *
+ * Based on ksocknal and qswnal
+ *
+ *  Author: Hsing-bung Chen <hbchen@lanl.gov>
+ *
+ *   This file is part of Portals, http://www.sf.net/projects/sandiaportals/
+ *
+ *   Portals is free software; you can redistribute it and/or
+ *   modify it under the terms of version 2 of the GNU General Public
+ *   License as published by the Free Software Foundation.
+ *
+ *   Portals is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *   GNU General Public License for more details.
+ *
+ *   You should have received a copy of the GNU General Public License
+ *   along with Portals; if not, write to the Free Software
+ *   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+
+#include "ibnal.h"
+
+
+
+
+RDMA_Info_Exchange   Rdma_nfo;
+int  Cts_Msg_Arrived = NO;
+
+
+/*
+ *  LIB functions follow
+ */
+
+//
+// read
+// copy a block of data from src_addr to dst_addr 
+// it all happens in kernel space - dst_addr and src_addr 
+//
+// original definition is to read a block of data from a 
+// specified user address  
+// 
+// cb_read
+
+int kibnal_read (nal_cb_t *nal, 
+                 void     *private, 
+                 void     *dst_addr, 
+                 user_ptr src_addr, 
+                 size_t   len)
+{
+        CDEBUG(D_NET, "kibnal_read: 0x%Lx: reading %ld bytes from %p -> %p\n",
+               nal->ni.nid, (long)len, src_addr, dst_addr );
+
+        memcpy( dst_addr, src_addr, len );
+
+        return 0;
+}
+
+//
+// it seems that read and write are doing the same thing
+// because they all happen in kernel space 
+// why do we need two functions like read and write 
+// to make the PORTALS API compatible 
+//
+
+//
+// write 
+// copy a block of data from src_addr to dst_addr 
+// it all happens in kernel space - dst_addr and src_addr 
+//
+// original definition is to write a block of data to a 
+// specified user address  
+// 
+// cb_write
+
+int kibnal_write(nal_cb_t   *nal, 
+                 void       *private, 
+                 user_ptr   dst_addr, 
+                 void       *src_addr, 
+                 size_t     len)
+{
+        CDEBUG(D_NET, "kibnal_write: 0x%Lx: writing %ld bytes from %p -> %p\n",
+               nal->ni.nid, (long)len, src_addr, dst_addr );
+
+
+        memcpy( dst_addr, src_addr, len );
+
+        return 0;
+}
+
+//
+// malloc
+//
+// either vmalloc or kmalloc is used 
+// dynamically allocate a block of memory based on the size of buffer  
+//
+// cb_malloc
+
+void * kibnal_malloc(nal_cb_t *nal, size_t length)
+{
+        void *buffer;
+
+        // PORTAL_ALLOC will do the job 
+        // allocate a buffer with size "length"
+        PORTAL_ALLOC(buffer, length);
+
+        return buffer;
+}
+
+//
+// free
+// release a dynamically allocated memory pointed by buffer pointer 
+//
+// cb_free
+
+void kibnal_free(nal_cb_t *nal, void *buffer, size_t length)
+{
+        //
+        // release allocated buffer to system 
+        //
+        PORTAL_FREE(buffer, length);
+}
+
+//
+// invalidate 
+// because everything is in kernel space (LUSTRE)
+// there is no need to mark a piece of user memory as no longer in use by
+// the system
+//
+// cb_invalidate
+
+void kibnal_invalidate(nal_cb_t      *nal, 
+                              void          *base, 
+                              size_t        extent, 
+                              void          *addrkey)
+{
+  // do nothing 
+  CDEBUG(D_NET, "kibnal_invalidate: 0x%Lx: invalidating %p : %d\n", 
+                                        nal->ni.nid, base, extent);
+  return;
+}
+
+
+//
+// validate 
+// because everything is in kernel space (LUSTRE)
+// there is no need to mark a piece of user memory in use by
+// the system
+//
+// cb_validate
+
+int kibnal_validate(nal_cb_t        *nal,  
+                           void            *base, 
+                           size_t          extent, 
+                           void            **addrkey)
+{
+  // do nothing 
+  CDEBUG(D_NET, "kibnal_validate: 0x%Lx: validating %p : %d\n", 
+                                        nal->ni.nid, base, extent);
+
+  return 0;
+}
+
+
+//
+// log messages from kernel space 
+// printk() is used 
+//
+// cb_printf
+
+void kibnal_printf(nal_cb_t *nal, const char *fmt, ...)
+{
+        va_list ap;
+        char    msg[256];
+
+        if (portal_debug & D_NET) {
+                va_start( ap, fmt );
+                vsnprintf( msg, sizeof(msg), fmt, ap );
+                va_end( ap );
+
+                printk("CPUId: %d %s",smp_processor_id(), msg);
+        }
+}
+
+//
+// clear interrupt
+// use spin_lock to lock protected area such as MD, ME...
+// so a process can enter a protected area and do some work
+// this won't physically disable interrupts but uses a software 
+// spin-lock to control some protected areas 
+//
+// cb_cli 
+
+void kibnal_cli(nal_cb_t *nal, unsigned long *flags) 
+{ 
+        kibnal_data_t *data= nal->nal_data;
+
+        CDEBUG(D_NET, "kibnal_cli \n");
+
+        spin_lock_irqsave(&data->kib_dispatch_lock,*flags);
+
+}
+
+//
+// set interrupt
+// use spin_lock to unlock protected area such as MD, ME...
+// this won't physically enable interrupts but uses a software 
+// spin-lock to control some protected areas 
+//
+// cb_sti
+
+void kibnal_sti(nal_cb_t *nal, unsigned long *flags)
+{
+        kibnal_data_t *data= nal->nal_data;
+
+        CDEBUG(D_NET, "kibnal_sti \n");
+
+        spin_unlock_irqrestore(&data->kib_dispatch_lock,*flags);
+}
+
+
+
+//
+// nic distance 
+// 
+// network distance doesn't mean much for this nal 
+// here we only indicate 
+//      0 - the operation happens on the same node 
+//      1 - the operation happens on different nodes 
+//          router will handle the data routing 
+//
+// cb_dist
+
+int kibnal_dist(nal_cb_t *nal, ptl_nid_t nid, unsigned long *dist)
+{
+        CDEBUG(D_NET, "kibnal_dist \n");
+
+        if ( nal->ni.nid == nid ) {
+                *dist = 0;
+        } 
+        else {
+                *dist = 1;
+        }
+
+        return 0; // always return 0 
+}
+
+
+//
+// This is the cb_send() on IB based interconnect system
+// prepare a data package and use VAPI_post_sr() to send it
+// down-link out-going message 
+//
+
+
+int
+kibnal_send(nal_cb_t        *nal,
+            void            *private,
+            lib_msg_t       *cookie,
+            ptl_hdr_t       *hdr,
+            int              type,
+            ptl_nid_t        nid,
+            ptl_pid_t        pid,
+            unsigned int     niov,
+            ptl_kiov_t      *iov,
+            size_t           len)
+{
+        
+        int           rc=0;
+        void         *buf = NULL; 
+        unsigned long buf_length = sizeof(ptl_hdr_t) + len;
+        int           expected_buf_size = 0;
+        VAPI_ret_t    vstat;
+
+        PROF_START(kibnal_send); // time stamp send start 
+
+        CDEBUG(D_NET,"kibnal_send: sending %d bytes from %p to nid: 0x%Lx pid %d\n",
+               buf_length, iov, nid, HCA_PORT_1);
+
+
+        // do I need to check the gateway information
+        // do I have a problem sending directly 
+        // do I have to forward a data packet to gateway
+        // 
+        // The current connection is back-to-back 
+        // I always know that data will be sent from one side to
+        // the other side
+        //
+        
+        //
+        //  check data buffer size 
+        //
+        //  MSG_SIZE_SMALL 
+        //      regular post send 
+        //  
+        //  MSG_SIZE_LARGE
+        //      rdma write
+        
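+        //  In short: a message that fits in a pre-registered send buffer
+        //  (<= SMALL_MSG_SIZE) is copied and posted directly with
+        //  VAPI_post_sr() in Send_Small_Msg(); anything larger goes through
+        //  the RTS/CTS handshake and an RDMA write in Send_Large_Msg().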
+        if(buf_length <= SMALL_MSG_SIZE) {  
+           expected_buf_size = MSG_SIZE_SMALL;
+        } 
+        else { 
+          if(buf_length > MAX_MSG_SIZE) { 
+             CERROR("kibnal_send:request exceeds Transmit data size (%d).\n",
+                      MAX_MSG_SIZE);
+             rc = -1;
+             return rc;
+          }
+          else {
+             expected_buf_size = MSG_SIZE_LARGE; // this is a large data package 
+          } 
+        }
+                
+        // prepare data packet for send operation 
+        //
+        // allocate a data buffer "buf" with size of buf_len(header + payload)
+        //                 ---------------
+        //  buf            | hdr         |  size = sizeof(ptl_hdr_t)
+        //                 --------------
+        //                 |payload data |  size = len
+        //                 ---------------
+        
+        // allocate the send staging buffer (header + payload)
+        PORTAL_ALLOC(buf, buf_length);
+        if (buf == NULL) {
+                CERROR("kibnal_send: failed to allocate %lu byte send buffer\n",
+                       buf_length);
+                return -1;
+        }
+
+        // copy header to buf 
+        memcpy(buf, hdr, sizeof(ptl_hdr_t));
+
+        // copy payload data from iov to buf
+        // use portals library function lib_copy_iov2buf()
+        
+        if (len != 0)
+           lib_copy_iov2buf(((char *)buf) + sizeof (ptl_hdr_t),
+                            niov, 
+                            iov, 
+                            len);
+
+        // buf is ready to do a post send 
+        // the send method is based on the buf_size 
+
+        CDEBUG(D_NET,"ib_send %d bytes (size %d) from %p to nid: 0x%Lx "
+               " port %d\n", buf_length, expected_buf_size, iov, nid, HCA_PORT_1);
+
+        switch(expected_buf_size) {
+          case MSG_SIZE_SMALL:
+            // send small message 
+            if((vstat = Send_Small_Msg(buf, buf_length)) != VAPI_OK){
+                CERROR("Send_Small_Msg() is failed\n");
+            } 
+            break;
+
+          case MSG_SIZE_LARGE:
+            // send large message 
+            if((vstat = Send_Large_Msg(buf, buf_length)) != VAPI_OK){
+                CERROR("Send_Large_Msg() failed\n");
+            } 
+            break;
+
+          default:
+            CERROR("Unknown message size %d\n", expected_buf_size);
+            break;
+        }
+
+        PROF_FINISH(kibnal_send); // time stamp of send operation 
+
+        rc = 1;
+
+        return rc; 
+}
+
+//
+// kibnal_send_pages
+//
+// no support 
+//
+// do you need this 
+//
+int kibnal_send_pages(nal_cb_t * nal, 
+                      void *private, 
+                      lib_msg_t * cookie,
+                      ptl_hdr_t * hdr, 
+                      int type, 
+                      ptl_nid_t nid, 
+                      ptl_pid_t pid,
+                      unsigned int niov, 
+                      ptl_kiov_t *iov, 
+                      size_t mlen)
+{
+   int rc = 1;
+
+   CDEBUG(D_NET, "kibnal_send_pages\n");
+
+   // do nothing now for Infiniband 
+   
+   return rc;
+}
+
+
+
+
+
+//
+// kibnal_fwd_packet 
+//
+// no support 
+//
+// do you need this 
+//
+void kibnal_fwd_packet (void *arg, kpr_fwd_desc_t *fwd)
+{
+        CDEBUG(D_NET, "forwarding not implemented\n");
+        return;
+      
+}
+
+//
+// kibnal_callback 
+//
+// no support 
+//
+// do you need this 
+//
+int kibnal_callback(nal_cb_t * nal, 
+                           void *private, 
+                           lib_eq_t *eq,
+                           ptl_event_t *ev)
+{
+        CDEBUG(D_NET,  "callback not implemented\n");
+        return PTL_OK;
+}
+
+
+/* Process a received portals packet */
+//
+//  convert received data into a PORTALS header 
+//
+
+void kibnal_rx(kibnal_data_t    *kib, 
+                      VAPI_virt_addr_t buffer_addr,
+                      u_int32_t        buffer_len,
+                      u_int32_t        buffer_size,
+                      unsigned int     priority) 
+{
+        ptl_hdr_t  *hdr = (ptl_hdr_t *)(unsigned long) buffer_addr; // cast to ptl header format 
+        kibnal_rx_t krx;
+
+        CDEBUG(D_NET,"kibnal_rx: buf %p, len %ld\n", buffer_addr, buffer_len);
+
+        if ( buffer_len < sizeof( ptl_hdr_t ) ) {
+                /* XXX what's this for? */
+                if (kib->kib_shuttingdown)
+                        return;
+                CERROR("kibnal_rx: did not receive complete portal header, "
+                       "len= %ld", buffer_len);
+
+                return;
+        }
+
+       // typedef struct {
+       //         char             *krx_buffer; // pointer to receiving buffer
+       //         unsigned long     krx_len;  // length of buffer
+       //         unsigned int      krx_size; //
+       //         unsigned int      krx_priority; // do we need this
+       //         struct list_head  krx_item;
+       // } kibnal_rx_t;
+       //
+        krx.krx_buffer    = hdr;
+        krx.krx_len       = buffer_len;
+        krx.krx_size      = buffer_size;
+        krx.krx_priority  = priority;
+
+        if ( hdr->dest_nid == kibnal_lib.ni.nid ) {
+           // this is my data 
+           PROF_START(lib_parse);
+
+           lib_parse(&kibnal_lib, (ptl_hdr_t *)krx.krx_buffer, &krx);
+
+           PROF_FINISH(lib_parse);
+        } else {
+           /* forward to gateway */
+           // Do we expect this to happen?
+           //      
+           CERROR("kibnal_rx: forwarding not implemented yet\n");
+        }
+
+        return;
+}
+
+
+
+
+//
+// kibnal_recv_pages 
+//
+// no support 
+//
+// do you need this 
+//
+int
+kibnal_recv_pages(nal_cb_t * nal, 
+                  void *private, 
+                  lib_msg_t * cookie,
+                  unsigned int niov, 
+                  ptl_kiov_t *iov, 
+                  size_t mlen,
+                  size_t rlen)
+{
+
+  CDEBUG(D_NET, "recv_pages not implemented\n");
+  return PTL_OK;
+       
+}
+
+
+int 
+kibnal_recv(nal_cb_t     *nal,
+            void         *private,
+            lib_msg_t    *cookie,
+            unsigned int  niov,
+            struct iovec *iov,
+            size_t        mlen,
+            size_t        rlen)
+{
+        kibnal_rx_t *krx = private;
+
+        CDEBUG(D_NET,"kibnal_recv: mlen=%d, rlen=%d\n", mlen, rlen);
+
+        /* What was actually received must be >= what sender claims to
+         * have sent.  This is an LASSERT, since lib-move doesn't
+         * check cb return code yet. */
+        LASSERT (krx->krx_len >= sizeof (ptl_hdr_t) + rlen);
+        LASSERT (mlen <= rlen);
+
+        PROF_START(kibnal_recv);
+
+        if(mlen != 0) {
+                PROF_START(memcpy);
+                lib_copy_buf2iov (niov, iov, krx->krx_buffer +
+                                  sizeof (ptl_hdr_t), mlen);
+                PROF_FINISH(memcpy);
+        }
+
+        PROF_START(lib_finalize);
+        
+        lib_finalize(nal, private, cookie);
+        
+        PROF_FINISH(lib_finalize);
+        PROF_FINISH(kibnal_recv);
+
+        return rlen;
+}
+
+//
+// kibnal_map 
+// no support 
+// do you need this 
+//
+int kibnal_map(nal_cb_t * nal, 
+               unsigned int niov, 
+               struct iovec *iov,
+               void **addrkey)
+{
+  CDEBUG(D_NET, "map not implemented\n");
+  return PTL_OK; 
+}
+
+
+
+//
+// kibnal_unmap
+//
+// no support 
+//
+// do you need this 
+//
+void kibnal_unmap(nal_cb_t * nal, 
+                  unsigned int niov, 
+                  struct iovec *iov,
+                  void **addrkey)
+{
+  CDEBUG(D_NET, "unmap not implemented\n");
+  return;
+}
+
+
+
+//
+// kibnal_map_pages 
+// no support 
+// do you need this 
+/* as (un)map, but with a set of page fragments */
+int kibnal_map_pages(nal_cb_t * nal, 
+                     unsigned int niov, 
+                     ptl_kiov_t *iov,
+                     void **addrkey)
+{
+  CDEBUG(D_NET, "map_pages not implemented\n");
+  return PTL_OK;
+}
+
+
+
+//
+// kibnal_unmap_pages 
+//
+// no support 
+//
+// do you need this 
+//
+void kibnal_unmap_pages(nal_cb_t * nal, 
+                               unsigned int niov, 
+                               ptl_kiov_t *iov,
+                               void **addrkey)
+{
+  CDEBUG(D_NET, "unmap_pages not implemented\n");
+  return ;
+}
+
+
+int kibnal_end(kibnal_data_t *kib)
+{
+
+  /* wait for sends to finish ? */
+  /* remove receive buffers */
+  /* shutdown receive thread */
+
+  CDEBUG(D_NET, "kibnal_end\n");
+  IB_Close_HCA();
+
+  return 0;
+}
+
+
+//
+//
+//  asynchronous event handler: responds to some unexpected operation errors 
+//    
+//  void async_event_handler(VAPI_hca_hndl_t      hca_hndl,
+//                           VAPI_event_record_t *event_record_p,
+//                           void*                private_data)
+//  the HCA driver will prepare event_record_p                        
+//
+//  this handler is registered with VAPI_set_async_event_handler()
+//  VAPI_set_async_event_handler() is issued when an HCA is created 
+//
+//
+void async_event_handler(VAPI_hca_hndl_t      hca_hndl,
+                         VAPI_event_record_t *event_record_p,  
+                         void*                private_data)
+{
+  //
+  // * event_record_p is prepared by the system when an async
+  //   event happened
+  // * what to do with private_data 
+  // * do we expect more async events happened if so what are they 
+  //
+  //   only log ERROR message now 
+
+  switch (event_record_p->type) {
+    case VAPI_PORT_ERROR:
+         printk("Got PORT_ERROR event. port number=%d\n", 
+                 event_record_p->modifier.port_num);
+         break;
+    case VAPI_PORT_ACTIVE:
+         printk("Got PORT_ACTIVE event. port number=%d\n", 
+                 event_record_p->modifier.port_num);
+         break;
+    case VAPI_QP_PATH_MIGRATED:    /*QP*/
+         printk("Got P_PATH_MIGRATED event. qp_hndl=%lu\n", 
+                 event_record_p->modifier.qp_hndl);
+         break;
+    case VAPI_EEC_PATH_MIGRATED:   /*EEC*/
+         printk("Got EEC_PATH_MIGRATED event. eec_hndl=%d\n", 
+                 event_record_p->modifier.eec_hndl);
+         break;
+    case VAPI_QP_COMM_ESTABLISHED: /*QP*/
+         printk("Got QP_COMM_ESTABLISHED event. qp_hndl=%lu\n", 
+                 event_record_p->modifier.qp_hndl);
+         break;
+    case VAPI_EEC_COMM_ESTABLISHED: /*EEC*/
+         printk("Got EEC_COMM_ESTABLISHED event. eec_hndl=%d\n",
+                 event_record_p->modifier.eec_hndl);
+         break;
+    case VAPI_SEND_QUEUE_DRAINED:  /*QP*/
+         printk("Got SEND_QUEUE_DRAINED event. qp_hndl=%lu\n", 
+                 event_record_p->modifier.qp_hndl);
+         break;
+    case VAPI_CQ_ERROR:            /*CQ*/
+         printk("Got CQ_ERROR event. cq_hndl=%lu\n", 
+                 event_record_p->modifier.cq_hndl);
+         break;
+    case VAPI_LOCAL_WQ_INV_REQUEST_ERROR: /*QP*/
+         printk("Got LOCAL_WQ_INV_REQUEST_ERROR event. qp_hndl=%lu\n", 
+                 event_record_p->modifier.qp_hndl);
+         break;
+    case VAPI_LOCAL_WQ_ACCESS_VIOL_ERROR: /*QP*/
+         printk("Got LOCAL_WQ_ACCESS_VIOL_ERROR event. qp_hndl=%lu\n", 
+                 event_record_p->modifier.qp_hndl);
+         break;
+    case VAPI_LOCAL_WQ_CATASTROPHIC_ERROR: /*QP*/
+         printk("Got LOCAL_WQ_CATASTROPHIC_ERROR event. qp_hndl=%lu\n", 
+                 event_record_p->modifier.qp_hndl);
+         break;
+    case VAPI_PATH_MIG_REQ_ERROR:  /*QP*/
+         printk("Got PATH_MIG_REQ_ERROR event. qp_hndl=%lu\n", 
+                 event_record_p->modifier.qp_hndl);
+         break;
+    case VAPI_LOCAL_CATASTROPHIC_ERROR: /*none*/
+         printk("Got LOCAL_CATASTROPHIC_ERROR event. \n");
+         break;
+    default:
+         printk(":got non-valid event type=%d. IGNORING\n",
+                    event_record_p->type);
+  }
+
+}
+
+
+
+
+VAPI_wr_id_t 
+search_send_buf(int buf_length)
+{
+  VAPI_wr_id_t send_id = -1;
+  u_int32_t    i;
+  int          flag = NO;
+  int          loop_count = 0;  
+
+  CDEBUG(D_NET, "search_send_buf \n");
+  
+  while((flag == NO) && (loop_count < MAX_LOOP_COUNT)) {
+    for(i=0; i < NUM_ENTRY; i++) {
+      // problem about using spinlock
+      spin_lock(&MSB_mutex[i]);
+      if(MSbuf_list[i].status == BUF_REGISTERED)  {
+        MSbuf_list[i].status = BUF_INUSE;// make send buf as inuse
+        flag =  YES;
+        spin_unlock(&MSB_mutex[i]);
+        break;
+      }
+      else
+        spin_unlock(&MSB_mutex[i]); 
+    }
+
+    loop_count++;
+    schedule_timeout(200); // wait for a while 
+  }
+   
+  if(flag == NO)  {
+    CDEBUG(D_NET, "search_send_buf: could not locate an entry in MSbuf_list\n");
+    return -1; // callers treat a negative id as "no send buffer available"
+  }
+
+  send_id = (VAPI_wr_id_t ) i;
+
+  return send_id;
+}
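+
+// Note: callers use the index returned by search_send_buf() both to pick the
+// send buffer (MSbuf_list[send_id]) and the QP (QP_list[send_id]), so send
+// buffers and QPs are effectively paired one-to-one by index.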
+
+
+
+VAPI_wr_id_t 
+search_RDMA_recv_buf(int buf_length)
+{
+  VAPI_wr_id_t recv_id = -1;
+  u_int32_t    i;
+  int          flag = NO;
+  int          loop_count = 0;  
+
+  CDEBUG(D_NET, "search_RDMA_recv_buf\n");
+
+  while((flag == NO) && (loop_count < MAX_LOOP_COUNT)) {
+
+    for(i=NUM_ENTRY; i < NUM_MBUF; i++) {
+
+      spin_lock(&MSB_mutex[i]);
+
+      if((MRbuf_list[i].status == BUF_REGISTERED)  &&
+         (MRbuf_list[i].buf_size >= buf_length)) {
+          MRbuf_list[i].status = BUF_INUSE;// mark this recv buf as in use
+          flag =  YES;
+          spin_unlock(&MSB_mutex[i]);
+          break;
+      }
+      else
+        spin_unlock(&MSB_mutex[i]);
+    }
+
+    loop_count++;
+
+    schedule_timeout(200); // wait for a while 
+  }
+   
+  if(flag == NO)  {
+    CERROR("search_RDMA_recv_buf: could not locate an entry in MBbuf_list\n");
+  }
+
+  recv_id = (VAPI_wr_id_t ) i;
+
+  return recv_id;
+
+}
+
+
+
+
+
+
+
+VAPI_ret_t Send_Small_Msg(char *buf, int buf_length)
+{
+ VAPI_ret_t           vstat;
+ VAPI_sr_desc_t       sr_desc;
+ VAPI_sg_lst_entry_t  sr_sg;
+ QP_info              *qp;
+ VAPI_wr_id_t         send_id;
+
+ CDEBUG(D_NET, "Send_Small_Msg\n");
+
+ send_id = search_send_buf(buf_length); 
+
+ if(send_id < 0){
+   CERROR("Send_Small_Msg: Can not find a QP \n");
+   return(~VAPI_OK);
+ }
+
+ qp = &QP_list[(int) send_id];
+
+ // find a suitable/registered send_buf from MSbuf_list
+ CDEBUG(D_NET, "Send_Small_Msg: current send id  %d \n", send_id);
+
+ sr_desc.opcode    = VAPI_SEND;
+ sr_desc.comp_type = VAPI_SIGNALED;
+ sr_desc.id        =  send_id;
+
+
+ // scatter and gather info 
+ sr_sg.len  = buf_length;
+ sr_sg.lkey = MSbuf_list[send_id].mr.l_key; // use send MR 
+
+ sr_sg.addr = (VAPI_virt_addr_t)(MT_virt_addr_t) MSbuf_list[send_id].buf_addr;
+
+ // copy data into the registered send buffer 
+ memcpy((void *)(unsigned long) sr_sg.addr, buf, buf_length);
+
+ sr_desc.sg_lst_p = &sr_sg;
+ sr_desc.sg_lst_len = 1; // only 1 entry is used 
+ sr_desc.fence = TRUE;
+ sr_desc.set_se = FALSE;
+
+ // call VAPI_post_sr to send out this data 
+ vstat = VAPI_post_sr(qp->hca_hndl, qp->qp_hndl, &sr_desc);
+
+ if (vstat != VAPI_OK) {
+    CERROR("VAPI_post_sr failed (%s).\n",VAPI_strerror(vstat));
+ }
+ else {
+    CDEBUG(D_NET, "VAPI_post_sr success.\n");
+ }
+
+ return (vstat);
+
+}
+
+
+
+
+VAPI_wr_id_t
+RTS_handshaking_protocol(int buf_length) 
+{
+
+ VAPI_ret_t           vstat;
+ VAPI_sr_desc_t       sr_desc;
+ VAPI_sg_lst_entry_t  sr_sg;
+ VAPI_wr_id_t         send_id;
+ QP_info             *qp;
+ RDMA_Info_Exchange   rdma_info;
+
+ rdma_info.opcode     = Ready_To_send;
+ rdma_info.buf_length = buf_length; 
+ rdma_info.raddr      = (VAPI_virt_addr_t) 0;
+ rdma_info.rkey       = (VAPI_rkey_t) 0 ; 
+
+ CDEBUG(D_NET, "RTS_handshaking_protocol\n");
+
+ // find a suitable/registered send_buf from MSbuf_list
+ send_id = search_send_buf(sizeof(RDMA_Info_Exchange));   
+
+ qp = &QP_list[(int) send_id];
+
+ CDEBUG(D_NET, "RTS_CTS: current send id  %d \n", send_id);
+ sr_desc.opcode    = VAPI_SEND;
+ sr_desc.comp_type = VAPI_SIGNALED;
+ sr_desc.id        = send_id + RDMA_RTS_ID;// this RTS message ID 
+
+ // scatter and gather info 
+ sr_sg.len  = sizeof(RDMA_Info_Exchange);
+ sr_sg.lkey = MSbuf_list[send_id].mr.l_key; // use send MR 
+ sr_sg.addr = (VAPI_virt_addr_t)(MT_virt_addr_t) MSbuf_list[send_id].buf_addr;
+
+ // copy the RTS message into the registered send buffer 
+ memcpy((void *)(unsigned long) sr_sg.addr, &rdma_info, sizeof(RDMA_Info_Exchange));
+
+ sr_desc.sg_lst_p = &sr_sg;
+ sr_desc.sg_lst_len = 1; // only 1 entry is used 
+ sr_desc.fence = TRUE;
+ sr_desc.set_se = FALSE;
+
+ // call VAPI_post_sr to send out this RTS message data 
+ vstat = VAPI_post_sr(qp->hca_hndl, qp->qp_hndl, &sr_desc);
+
+ if (vstat != VAPI_OK) {
+    CERROR("RTS: VAPI_post_sr failed (%s).\n",VAPI_strerror_sym(vstat));
+ }
+
+ return send_id;
+
+}
+
+
+
+// create local receiving Memory Region for a HCA
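+//
+// If no buffer is supplied, the routine allocates one with PORTAL_ALLOC and
+// registers it with local-write and remote-read/write access so that the
+// peer can RDMA directly into (or out of) it.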
+VAPI_ret_t
+createMemRegion_RDMA(VAPI_hca_hndl_t  hca_hndl,
+                     VAPI_pd_hndl_t   pd_hndl,
+                     char            *bufptr,
+                     int              buf_length,
+                     VAPI_mr_hndl_t   *rep_mr_hndl,
+                     VAPI_mrw_t       *rep_mr)
+{
+  VAPI_ret_t      vstat;
+  VAPI_mrw_t      mrw;
+  
+  CDEBUG(D_NET, "createMemRegion_RDMA\n");
+
+  // memory region address and size of memory region
+  // allocate a block of memory for this HCA 
+  // RDMA data buffer
+  
+  
+  if(bufptr == NULL) {
+    // need to allocate a local buffer to receive data from a
+    // remote VAPI_RDMA_WRITE_IMM
+    PORTAL_ALLOC(bufptr, buf_length);
+  }
+
+  if(bufptr == NULL) {
+    CDEBUG(D_MALLOC, "Failed to malloc a block of RDMA receiving memory, size %d\n",
+                                    buf_length);
+    return(VAPI_ENOMEM);
+  }
+
+  /* Register RDMA data Memory region */
+  CDEBUG(D_NET, "Register a RDMA data memory region\n");
+
+  mrw.type   = VAPI_MR;
+  mrw.pd_hndl= pd_hndl;
+  mrw.start  = (VAPI_virt_addr_t )(MT_virt_addr_t )bufptr;
+  mrw.size   = buf_length;
+  mrw.acl    = VAPI_EN_LOCAL_WRITE  | 
+               VAPI_EN_REMOTE_WRITE | 
+               VAPI_EN_REMOTE_READ;
+
+  // register the RDMA memory region
+  vstat = VAPI_register_mr(hca_hndl,
+                           &mrw,
+                           rep_mr_hndl,
+                           rep_mr);
+
+  // this memory region is going to be reused until deregister is called
+  if (vstat != VAPI_OK) {
+     CERROR("Failed registering a mem region Addr=%p, Len=%d. %s\n",
+             bufptr, buf_length, VAPI_strerror(vstat));
+  }
+
+  return(vstat);
+
+}
+
+
+
+RDMA_Info_Exchange  Local_rdma_info;
+
+int insert_MRbuf_list(int buf_length)
+{
+  int  recv_id = NUM_ENTRY;      
+
+  CDEBUG(D_NET, "insert_MRbuf_list\n");
+
+  for(recv_id= NUM_ENTRY; recv_id < NUM_MBUF; recv_id++){
+       if(BUF_UNREGISTERED == MRbuf_list[recv_id].status)  {
+         // claim this free slot; the caller fills in the registered MR
+         MRbuf_list[recv_id].status   = BUF_REGISTERED;
+         MRbuf_list[recv_id].buf_size = buf_length;
+         break;
+       }
+  }
+
+  return recv_id;
+
+}  
+
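+//
+// CTS_handshaking_protocol
+//
+// reply to an incoming RTS: allocate and register a local RDMA buffer of the
+// requested length, record it in MRbuf_list, then send a CTS message carrying
+// the buffer's remote address and rkey back to the sender
+//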
+VAPI_wr_id_t
+CTS_handshaking_protocol(RDMA_Info_Exchange *rdma_info) 
+{
+
+ VAPI_ret_t           vstat;
+ VAPI_sr_desc_t       sr_desc;
+ VAPI_sg_lst_entry_t  sr_sg;
+ QP_info             *qp;
+ VAPI_wr_id_t         send_id;
+ VAPI_mr_hndl_t       rep_mr_hndl;
+ VAPI_mrw_t           rep_mr;
+ int                  recv_id;
+ char                *bufptr = NULL;
+
+ // search MRbuf_list for an available entry that
+ // has a registered data buffer with size equal to rdma_info->buf_length
+
+ CDEBUG(D_NET, "CTS_handshaking_protocol\n");
+
+ // register memory buffer for RDMA operation
+
+ vstat = createMemRegion_RDMA(Hca_hndl,
+                              Pd_hndl,
+                              bufptr, 
+                              rdma_info->buf_length,
+                              &rep_mr_hndl,
+                              &rep_mr);
+
+
+ Local_rdma_info.opcode            = Clear_To_send;
+ Local_rdma_info.recv_rdma_mr      = rep_mr;
+ Local_rdma_info.recv_rdma_mr_hndl = rep_mr_hndl;
+
+ if (vstat != VAPI_OK) {
+    CERROR("CST_handshaking_protocol: Failed registering a mem region"
+           "Len=%d. %s\n", rdma_info->buf_length, VAPI_strerror(vstat));
+    Local_rdma_info.flag = RDMA_BUFFER_UNAVAILABLE;
+ }
+ else {
+    // successfully allocated a reserved RDMA data buffer 
+    recv_id = insert_MRbuf_list(rdma_info->buf_length);   
+
+    if(recv_id >=  NUM_ENTRY) { 
+      MRbuf_list[recv_id].buf_addr     = rep_mr.start;
+      MRbuf_list[recv_id].mr           = rep_mr;
+      MRbuf_list[recv_id].mr_hndl      = rep_mr_hndl;
+      MRbuf_list[recv_id].ref_count    = 0;
+      Local_rdma_info.flag             = RDMA_BUFFER_RESERVED;
+      Local_rdma_info.buf_length       = rdma_info->buf_length; 
+      Local_rdma_info.raddr            = rep_mr.start;
+      Local_rdma_info.rkey             = rep_mr.r_key; 
+    }
+    else {
+      CERROR("Can not find an entry in MRbuf_list - how could this happen\n");  
+    }
+ }
+
+ // find a suitable/registered send_buf from MSbuf_list
+ send_id = search_send_buf(sizeof(RDMA_Info_Exchange)); 
+ qp = &QP_list[(int) send_id];
+
+ CDEBUG(D_NET, "CTS: current send id  %d \n", send_id);
+ sr_desc.opcode    = VAPI_SEND;
+ sr_desc.comp_type = VAPI_SIGNALED;
+ sr_desc.id        = send_id + RDMA_CTS_ID; // this CTS message ID 
+
+ // scatter and gather info 
+ sr_sg.len  = sizeof(RDMA_Info_Exchange);
+ sr_sg.lkey = MSbuf_list[send_id].mr.l_key; // use send MR 
+ sr_sg.addr = (VAPI_virt_addr_t)(MT_virt_addr_t) MSbuf_list[send_id].buf_addr;
+
+ // copy the CTS message into the registered send buffer 
+ memcpy((void *)(unsigned long) sr_sg.addr, &Local_rdma_info, sizeof(RDMA_Info_Exchange));
+
+ sr_desc.sg_lst_p   = &sr_sg;
+ sr_desc.sg_lst_len = 1; // only 1 entry is used 
+ sr_desc.fence = TRUE;
+ sr_desc.set_se = FALSE;
+
+ // call VAPI_post_sr to send out this CTS message data 
+ vstat = VAPI_post_sr(qp->hca_hndl, qp->qp_hndl, &sr_desc);
+
+ if (vstat != VAPI_OK) {
+    CERROR("CTS: VAPI_post_sr failed (%s).\n",VAPI_strerror(vstat));
+ }
+
+ return send_id;
+}
+
+
+
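+//
+// Send_Large_Msg
+//
+// large-message path: register the payload buffer itself as an RDMA memory
+// region, post an RTS describing its length, wait for the peer's CTS (which
+// carries the remote buffer address and rkey), then post a
+// VAPI_RDMA_WRITE_WITH_IMM directly into the peer's buffer
+//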
+VAPI_ret_t Send_Large_Msg(char *buf, int buf_length)
+{
+  VAPI_ret_t           vstat;
+  VAPI_sr_desc_t       sr_desc;
+  VAPI_sg_lst_entry_t  sr_sg;
+  QP_info             *qp;
+  VAPI_mrw_t           rep_mr; 
+  VAPI_mr_hndl_t       rep_mr_hndl;
+  int                  send_id;
+  VAPI_imm_data_t      imm_data = 0XAAAA5555;
+
+
+  CDEBUG(D_NET, "Send_Large_Msg: Enter\n");
+
+  // register this large buf 
+  // don't need to copy this buf to send buffer
+  vstat = createMemRegion_RDMA(Hca_hndl,
+                               Pd_hndl,
+                               buf,
+                               buf_length,
+                               &rep_mr_hndl,
+                               &rep_mr);
+
+  if (vstat != VAPI_OK) {
+    CERROR("Send_Large_M\sg:  createMemRegion_RDMAi() failed (%s).\n",
+                        VAPI_strerror(vstat));
+  }
+  
+
+  Local_rdma_info.send_rdma_mr      = rep_mr;
+  Local_rdma_info.send_rdma_mr_hndl = rep_mr_hndl;
+
+  //
+  //     Prepare descriptor for send queue
+  //
+  // ask for a remote rdma buffer with size buf_length
+  send_id = RTS_handshaking_protocol(buf_length); 
+
+  qp = &QP_list[send_id];
+
+  // wait for the CTS message to arrive from the remote node 
+  while(1){
+     if(YES == Cts_Msg_Arrived) {
+        // received CTS message from remote node 
+        // Rdma_info is available for use
+        break;
+     }
+     schedule_timeout(RTS_CTS_TIMEOUT);
+  }
+  
+  sr_desc.id        = send_id + RDMA_OP_ID;
+  sr_desc.opcode    = VAPI_RDMA_WRITE_WITH_IMM;
+  sr_desc.comp_type = VAPI_SIGNALED;
+
+  // scatter and gather info 
+  sr_sg.len  = buf_length;
+
+  // rdma mr 
+  sr_sg.lkey = rep_mr.l_key;  
+  sr_sg.addr = (VAPI_virt_addr_t)(MT_virt_addr_t) rep_mr.start;
+  sr_desc.sg_lst_p = &sr_sg;
+  sr_desc.sg_lst_len = 1; // only 1 entry is used 
+
+  // immediate data - not used here 
+  sr_desc.imm_data = imm_data;
+  sr_desc.fence = TRUE;
+  sr_desc.set_se = FALSE;
+
+  // RDMA operation only
+  // raddr and rkey were received from the remote node (CTS) 
+  sr_desc.remote_addr = Rdma_info.raddr;
+  sr_desc.r_key       = Rdma_info.rkey;
+
+  // call VAPI_post_sr to send out this data 
+  vstat = VAPI_post_sr(qp->hca_hndl, qp->qp_hndl, &sr_desc);
+
+  if (vstat != VAPI_OK) {
+     CERROR("VAPI_post_sr failed (%s).\n",VAPI_strerror_sym(vstat));
+  }
+
+  return vstat;
+}
+
+
+
+
+
+
+//
+//  repost_recv_buf
+//  post a used recv buffer back to recv WQE list 
+//  wrq_id is used to indicate the starting position of recv-buffer 
+//
+VAPI_ret_t 
+repost_recv_buf(QP_info      *qp,
+                VAPI_wr_id_t  wrq_id) 
+{
+  VAPI_rr_desc_t       rr;
+  VAPI_sg_lst_entry_t  sg_entry;
+  VAPI_ret_t           ret;
+
+  CDEBUG(D_NET, "repost_recv_buf\n");
+
+  sg_entry.lkey = MRbuf_list[wrq_id].mr.l_key;
+  sg_entry.len  = MRbuf_list[wrq_id].buf_size;
+  sg_entry.addr = (VAPI_virt_addr_t)(MT_virt_addr_t) MRbuf_list[wrq_id].buf_addr;
+  rr.opcode     = VAPI_RECEIVE;
+  rr.comp_type  = VAPI_SIGNALED; /* All with CQE (IB compliant) */
+  rr.sg_lst_len = 1; /* single buffers */
+  rr.sg_lst_p   = &sg_entry;
+  rr.id         = wrq_id; /* WQE id used is the index to buffers ptr array */
+
+  ret= VAPI_post_rr(qp->hca_hndl,qp->qp_hndl,&rr);
+     
+  if (ret != VAPI_OK){
+     CERROR("failed reposting RQ WQE (%s) buffer \n",VAPI_strerror_sym(ret));
+     return ret;
+  }
+
+  CDEBUG(D_NET, "Successfully reposting an RQ WQE %d recv bufer \n", wrq_id);
+
+  return ret ;
+}
+                       
+//
+// post_recv_bufs
+//     post "num_o_bufs" for receiving data
+//      each receiving buf (buffer starting address, size of buffer)
+//      each buffer is associated with an id 
+//
+int 
+post_recv_bufs(VAPI_wr_id_t  start_id)
+{
+  int i;
+  VAPI_rr_desc_t       rr;
+  VAPI_sg_lst_entry_t  sg_entry;
+  VAPI_ret_t           ret;
+
+  CDEBUG(D_NET, "post_recv_bufs\n");
+
+  for(i=0; i< NUM_ENTRY; i++) {
+    sg_entry.lkey = MRbuf_list[i].mr.l_key;
+    sg_entry.len  = MRbuf_list[i].buf_size;
+    sg_entry.addr = (VAPI_virt_addr_t)(MT_virt_addr_t) MRbuf_list[i].buf_addr;
+    rr.opcode     = VAPI_RECEIVE;
+    rr.comp_type  = VAPI_SIGNALED;  /* All with CQE (IB compliant) */
+    rr.sg_lst_len = 1; /* single buffers */
+    rr.sg_lst_p   = &sg_entry;
+    rr.id         = start_id+i; /* WQE id used is the index to buffers ptr array */
+
+    ret= VAPI_post_rr(QP_list[i].hca_hndl,QP_list[i].qp_hndl, &rr);
+    if (ret != VAPI_OK) {
+       CERROR("failed posting RQ WQE (%s)\n",VAPI_strerror_sym(ret));
+       return i;
+    } 
+  }
+
+  return i; /* num of buffers posted */
+}
+                       
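+//
+// post_RDMA_bufs
+//
+// placeholder only: RDMA buffers appear to be registered on demand by
+// CTS_handshaking_protocol() rather than pre-posted here
+//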
+int 
+post_RDMA_bufs(QP_info      *qp, 
+               void         *buf_array,
+               unsigned int  num_bufs,
+               unsigned int  buf_size,
+               VAPI_wr_id_t  start_id)
+{
+
+  CDEBUG(D_NET, "post_RDMA_bufs \n");
+  return YES;
+}
+
+
+
+//
+// LIB NAL
+// assign function pointers to their corresponding entries
+//
+
+nal_cb_t kibnal_lib = {
+        nal_data:       &kibnal_data,  /* NAL private data */
+        cb_send:        kibnal_send,
+        cb_send_pages:  NULL, // not implemented  
+        cb_recv:        kibnal_recv,
+        cb_recv_pages:  NULL, // not implemented 
+        cb_read:        kibnal_read,
+        cb_write:       kibnal_write,
+        cb_callback:    NULL, // not implemented 
+        cb_malloc:      kibnal_malloc,
+        cb_free:        kibnal_free,
+        cb_map:         NULL, // not implemented 
+        cb_unmap:       NULL, // not implemented 
+        cb_map_pages:   NULL, // not implemented 
+        cb_unmap_pages: NULL, // not implemented 
+        cb_printf:      kibnal_printf,
+        cb_cli:         kibnal_cli,
+        cb_sti:         kibnal_sti,
+        cb_dist:        kibnal_dist // not used at this moment 
+};
diff --git a/lustre/portals/knals/ibnal/ibnal_send_recv_self_testing.c b/lustre/portals/knals/ibnal/ibnal_send_recv_self_testing.c
new file mode 100644 (file)
index 0000000..82defdb
--- /dev/null
@@ -0,0 +1,116 @@
+/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
+ * vim:expandtab:shiftwidth=8:tabstop=8:
+ *
+ * Based on ksocknal, qswnal, and gmnal
+ *
+ * Copyright (C) 2003 LANL
+ *   Author: HB Chen <hbchen@lanl.gov>
+ *   Los Alamos National Lab
+ *
+ *   Portals is free software; you can redistribute it and/or
+ *   modify it under the terms of version 2 of the GNU General Public
+ *   License as published by the Free Software Foundation.
+ *
+ *   Portals is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *   GNU General Public License for more details.
+ *
+ *   You should have received a copy of the GNU General Public License
+ *   along with Portals; if not, write to the Free Software
+ *   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include "ibnal.h"
+
+
+
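+//
+// ibnal_send_recv_self_testing
+//
+// loopback sanity check: post a KB_32 send on QP_list[0], poll the CQ for the
+// send and receive completions, and compare the received data against the
+// pattern that was sent
+//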
+VAPI_ret_t ibnal_send_recv_self_testing()
+{
+ VAPI_ret_t           vstat;
+ VAPI_sr_desc_t       sr_desc;
+ VAPI_sg_lst_entry_t  sr_sg;
+ QP_info              *qp;
+ VAPI_wr_id_t         send_id;
+ int                  buf_id;
+ char                 sbuf[KB_32];
+ char                 rbuf[KB_32];
+ int                  i;
+ int                  buf_length = KB_32;
+ VAPI_wc_desc_t       comp_desc;
+ int                  num_send = 1;
+ int                  loop_count = 0;
+
+
+ printk("ibnal_send_recv_self_testing\n");
+
+ memset(&sbuf, 'a', KB_32);
+ memset(&rbuf, ' ', KB_32);
+ send_id = 2222; 
+ buf_id = 0;
+
+ qp = &QP_list[0];
+
+ sr_desc.opcode    = VAPI_SEND;
+ sr_desc.comp_type = VAPI_SIGNALED;
+ sr_desc.id        = send_id;
+
+ // scatter and gather info
+ sr_sg.len  = KB_32;
+ sr_sg.lkey = MSbuf_list[buf_id].mr.l_key; // use send MR
+ sr_sg.addr = (VAPI_virt_addr_t)(MT_virt_addr_t) MSbuf_list[buf_id].buf_addr;
+
+ // copy the test pattern into the registered send buffer
+ memcpy((void *)(unsigned long) sr_sg.addr, sbuf, buf_length);
+
+ sr_desc.sg_lst_p = &sr_sg;
+ sr_desc.sg_lst_len = 1; // only 1 entry is used
+ sr_desc.fence = TRUE;
+ sr_desc.set_se = FALSE;
+
+
+ // call VAPI_post_sr to send out this data
+ vstat = VAPI_post_sr(qp->hca_hndl, qp->qp_hndl, &sr_desc);
+
+ if (vstat != VAPI_OK) {
+   printk("VAPI_post_sr failed (%s).\n",VAPI_strerror(vstat));
+ }
+
+ printk("VAPI_post_sr success.\n");
+
+ // poll for completion
+
+ while( loop_count < 100 ){
+   vstat = VAPI_poll_cq(qp->hca_hndl, qp->cq_hndl, &comp_desc);
+   if( vstat == VAPI_OK ) {
+       if(comp_desc.opcode == VAPI_CQE_SQ_SEND_DATA ) {
+          /* SEND completion */
+         printk("received SQ completion\n");
+       }
+       else { 
+          if(comp_desc.opcode == VAPI_CQE_RQ_SEND_DATA ) {
+           /* RECEIVE completion */
+            int n;
+
+            printk("received RQ completion\n");
+            memcpy(&rbuf, (char *) MRbuf_list[buf_id].buf_addr, KB_32);
+
+            n = memcmp(&sbuf, &rbuf, KB_32);
+            printk("compare sbuf and rbuf  n = %d\n", n); 
+           
+          }
+                 else  {
+            printk("unexpected completion opcode %d \n", comp_desc.opcode);
+         }
+       }
+   }
+
+   loop_count++; 
+   schedule_timeout(500);
+ }
+
+ printk("end of ibnal_self_send_recv_testing\n");
+
+ return vstat;
+}
diff --git a/lustre/portals/knals/ibnal/uagent.c b/lustre/portals/knals/ibnal/uagent.c
new file mode 100644 (file)
index 0000000..d7e939a
--- /dev/null
@@ -0,0 +1,391 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <pthread.h>
+
+
+#include <string.h>
+#include <strings.h>
+
+#include <sys/shm.h>
+#include <sys/ipc.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+
+#include <sys/socket.h>
+#include <netinet/in.h>
+#include <arpa/inet.h>
+#include <unistd.h>
+
+// Infiniband VAPI/EVAPI header files Mellanox MT23108 VAPI
+#include <vapi.h>
+#include <vapi_types.h>
+#include <vapi_common.h>
+#include <evapi.h>
+
+// Remote HCA Info information
+ typedef struct Remote_HCA_Info {
+       unsigned long     opcode;
+       unsigned long     length;
+       IB_lid_t          dlid[256];
+       VAPI_qp_num_t     rqp_num[256];
+       VAPI_rkey_t       rkey;   // for remote RDMA request
+       unsigned long     vaddr1; // virtual address first 4 bytes
+       unsigned long     vaddr2; // virtual address second 4 bytes
+       u_int32_t         size;   // size of RDMA memory buffer
+       char              dest_ip[256]; //destination server IP address 
+ } Remote_HCA_Info;
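+
+// Remote_HCA_Info is the unit of exchange: main() reads the local HCA's copy
+// from the shared-memory segment, the server/client threads swap it with the
+// peer uagent over TCP, and the remote copy is written back into shared
+// memory for the ibnal kernel module to pick up.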
+
+#define SHARED_SEGMENT_SIZE  0x10000 // 64KB shared memory between U and K
+
+// some internals opcodes for IB operations used in IBNAL
+#define SEND_QP_INFO          0X00000001
+#define RECV_QP_INFO          0X00000010
+#define DEFAULT_SOCKET_PORT   11211 
+#define LISTEN_QUEUE_SIZE     2048 
+#define DEST_IP                      "10.128.105.26"
+
+// server_thread
+// + wait for an incoming connection from remote node 
+// + receive remote HCA's data 
+//
+void *server_thread(void *vargp)
+{
+  Remote_HCA_Info   *hca_data;
+  Remote_HCA_Info   hca_data_buffer;
+  
+  int    serverfd;
+  int    infd;
+  struct hostent  *hp;
+  struct sockaddr_in serveraddr;
+  struct sockaddr_in clientaddr;
+  int    sin_size=sizeof(struct sockaddr_in);
+  int   bytes_recv;
+  int    i;
+
+
+  hca_data = (Remote_HCA_Info *) vargp;
+  
+  if((serverfd = socket(AF_INET, SOCK_STREAM, 0)) < 0) {
+    printf("server_thread couldnot create a socket \n");
+    pthread_exit((void *) 0);
+  }
+  printf("server_thread create a socket \n");
+
+  bzero((char *) &serveraddr, sizeof(serveraddr));
+
+  serveraddr.sin_family = AF_INET;
+  serveraddr.sin_addr.s_addr = htonl(INADDR_ANY);
+  serveraddr.sin_port = htons((unsigned short) DEFAULT_SOCKET_PORT);
+  
+  if(bind(serverfd,(struct sockaddr *)&serveraddr,sizeof(struct sockaddr)) < 0) {
+    printf("server_thread couldnot bind to a socket \n");
+    pthread_exit((void *) 0);
+  }
+
+  printf("server_thread bind to a socket \n");
+
+  if(listen(serverfd, LISTEN_QUEUE_SIZE) < 0) {
+    printf("server_thread couldnot listen to a socket \n");
+    pthread_exit((void *) 0);
+  }
+
+  printf("server_thread listen to a socket \n");
+
+  //
+  // I only expect to receive one HCA data from a remote HCA 
+  //
+  printf("server_thread: Waiting for a connection\n");
+  infd= accept(serverfd,(struct sockaddr*)&clientaddr,&sin_size);
+  printf("server_thread: Got an incoming connection");
+
+  /* receive data from socket into buffer */
+  bytes_recv = recv(infd,
+                    &hca_data_buffer,  
+                    sizeof(Remote_HCA_Info),
+                   0);
+
+  if(bytes_recv > 0) {
+/*       
+      printf("server_thread receive data\n");
+      printf("opcode is 0x%X\n", hca_data_buffer.opcode);
+      printf("length is 0x%X\n", hca_data_buffer.length);
+
+      for(i=0; i < 256; i++) {
+        printf("dlid %d is 0x%X\n", i, hca_data_buffer.dlid[i]);
+        printf("rqp_num %d is 0x%X\n", hca_data_buffer.rqp_num[i]);
+      }
+
+      printf("rkey is 0x%X\n", hca_data_buffer.rkey);
+      printf("vaddr1 is 0x%X\n", hca_data_buffer.vaddr1);
+      printf("vaddr2 is 0x%X\n", hca_data_buffer.vaddr2);
+      printf("size is 0x%X\n", hca_data_buffer.size);
+      printf("After conversion hton \n");
+      printf("opcode is 0x%X\n", htonl(hca_data_buffer.opcode));
+      printf("length is 0x%X\n", htonl(hca_data_buffer.length));
+
+      for(i=0; i < 256; i++) {
+        printf("dlid %d is 0x%X\n", htons(hca_data_buffer.dlid[i]));
+        printf("rqp_num %d is 0x%X\n", htonl(hca_data_buffer.rqp_num[i]));
+      }
+
+      printf("rkey is 0x%X\n", htonl(hca_data_buffer.rkey));
+      printf("vaddr1 is 0x%X\n", htonl(hca_data_buffer.vaddr1));
+      printf("vaddr2 is 0x%X\n", htonl(hca_data_buffer.vaddr2));
+      printf("size is 0x%X\n", htonl(hca_data_buffer.size));
+*/     
+
+      hca_data->opcode  = ntohl(hca_data_buffer.opcode); // long 
+      hca_data->length  = ntohl(hca_data_buffer.length); // long
+
+      for(i=0; i < 256; i++) {
+        hca_data->dlid[i]    = ntohs(hca_data_buffer.dlid[i]);   // u_int16
+        hca_data->rqp_num[i] = ntohl(hca_data_buffer.rqp_num[i]);// u_int32
+      }
+
+      hca_data->rkey    = ntohl(hca_data_buffer.rkey);   // u_int32
+      hca_data->vaddr1  = ntohl(hca_data_buffer.vaddr1); // first word u_int32
+      hca_data->vaddr2  = ntohl(hca_data_buffer.vaddr2); // second word u_int32
+      hca_data->size    = ntohl(hca_data_buffer.size);   // u_int32
+    }
+    else {
+      printf("server_thread receive ERROR bytes_recv = %d\n", bytes_recv);
+    }
+
+    close(infd);
+    close(serverfd);
+
+  printf("server_thread EXIT \n");
+      
+  pthread_exit((void *) 0);
+
+}
+
+//
+// client_thread 
+// + connect to a remote server_thread
+// + send local HCA's data to remote server_thread
+//
+void *client_thread(void *vargp)
+{
+
+  Remote_HCA_Info   *hca_data;
+  Remote_HCA_Info   hca_data_buffer;
+
+  int    clientfd;
+  struct hostent  *hp;
+  struct sockaddr_in clientaddr;
+  int    bytes_send;
+  int    i;
+  
+  hca_data = (Remote_HCA_Info *) vargp;
+
+  if((clientfd = socket(AF_INET, SOCK_STREAM, 0)) < 0) {
+    printf("client_thread couldnot create a socket \n");
+    pthread_exit((void *) 0);
+  }
+  printf("client_thread create a socket \n");
+  
+  bzero((char *) &clientaddr, sizeof(clientaddr));
+
+  clientaddr.sin_family = AF_INET;
+  clientaddr.sin_addr.s_addr = inet_addr(hca_data->dest_ip);
+  printf("client_thread get server Ip address = %s\n", hca_data->dest_ip);
+  clientaddr.sin_port = htons((unsigned short) DEFAULT_SOCKET_PORT);
+  memset(&(clientaddr.sin_zero), '\0', 8);
+
+  if(connect(clientfd, (struct sockaddr *) &clientaddr, sizeof(struct sockaddr)) < 0) {
+    printf("client_thread could not connect to server IP address = %s\n", hca_data->dest_ip);
+    close(clientfd);
+    pthread_exit((void *) 0);
+  }
+
+  printf("client_thread connected to server IP address = %s\n", hca_data->dest_ip);
+
+  hca_data_buffer.opcode  = htonl(hca_data->opcode); // long 
+  hca_data_buffer.length  = htonl(hca_data->length); // long
+
+  for(i=0; i < 256; i++) {
+    hca_data_buffer.dlid[i]    = htons(hca_data->dlid[i]);   // u_int16
+    hca_data_buffer.rqp_num[i] = htonl(hca_data->rqp_num[i]);// u_int32
+  }
+
+  hca_data_buffer.rkey    = htonl(hca_data->rkey);   // u_int32
+  hca_data_buffer.vaddr1  = htonl(hca_data->vaddr1); // first word u_int32
+  hca_data_buffer.vaddr2  = htonl(hca_data->vaddr2); // second word u_int32
+  hca_data_buffer.size    = htonl(hca_data->size);   // u_int32
+  bytes_send = send(clientfd, & hca_data_buffer, sizeof(Remote_HCA_Info), 0); 
+  
+  if(bytes_send == sizeof(Remote_HCA_Info)) {
+    printf("client_thread: send successfully \n");
+  }
+  else {
+    printf("client_thread: send failed \n");
+  }
+
+  printf("client_thread EXIT \n");
+
+  pthread_exit((void *) 0);
+}
+
+
+//
+//  main 
+//  + create a shared-memory between this main()/user address and
+//    a kernel thread/kernel address space associated with the ibnal 
+//    kernel module 
+//  + access local HCA's data through this shared memory 
+//
+//  + create a server_thread for receiving remote HCA's data
+//  + create a client_thread for sending out local HCA's data
+//  + after receiving remote HCA's data update this shared memory
+//
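+//
+//  The handshake over the shared segment is polled: main() spins until the
+//  opcode becomes SEND_QP_INFO (presumably written by the ibnal kernel
+//  module), and signals completion by writing the remote HCA's data back
+//  with the opcode set to RECV_QP_INFO.
+//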
+int  main(int argc , char *argv[])
+{
+  int              segment_id;
+  struct shmid_ds  shmbuffer;
+  int              segment_size;
+  const int        shared_segment_size = sizeof(Remote_HCA_Info);
+  key_t            key = 999;
+  unsigned long    raddr;
+  Remote_HCA_Info  *shared_memory;
+  Remote_HCA_Info  exchange_hca_data;
+  Remote_HCA_Info  remote_hca_data;
+  int i; 
+
+  /* pthread */
+  pthread_t          sid;
+  pthread_t          cid;
+  pthread_attr_t     attr; 
+  int                rc, status;
+
+  char dest_ip[256];
+
+  if(argc != 2) {
+         printf("USAGE:   uagent   server_ip_address\n");
+         printf("argc = %d \n", argc);
+         exit(1);
+  }
+
+  strcpy(&exchange_hca_data.dest_ip[0], argv[1]);
+  printf("the destinational server IP address = %s\n", 
+                                      &exchange_hca_data.dest_ip); 
+
+  segment_id =  shmget(key, shared_segment_size, IPC_CREAT | 0666);
+
+  printf("sys_shmget is done segment_id = %d\n", segment_id);
+
+  shared_memory = (Remote_HCA_Info *) shmat(segment_id, 0, 0);
+
+  if(shared_memory == (Remote_HCA_Info *) -1) {
+    printf("Shared memory attach failed shared_memory=%p\n",shared_memory);
+    exit(0);
+  }
+
+  printf("shared menory attached at address %p\n", shared_memory);
+
+  while (1) {
+    if(shared_memory->opcode ==  SEND_QP_INFO) {
+      printf("Local HCA data received from kernel thread\n");
+      break;
+    }
+    usleep(1000);
+    continue;
+  }
+
+  printf("Local HCA data received from kernel thread\n");
+
+  // save local HCA's data in exchange_hca_data
+  //
+  exchange_hca_data.opcode  = shared_memory->opcode;
+  exchange_hca_data.length  = shared_memory->length;
+
+  for(i=0; i < 256; i++) {
+    exchange_hca_data.dlid[i]    = shared_memory->dlid[i];
+    exchange_hca_data.rqp_num[i] = shared_memory->rqp_num[i];
+  }
+
+  exchange_hca_data.rkey    = shared_memory->rkey;
+  exchange_hca_data.vaddr1  = shared_memory->vaddr1;
+  exchange_hca_data.vaddr2  = shared_memory->vaddr2;
+  exchange_hca_data.size    = shared_memory->size;
+
+  /* Initialize and set thread detached attribute */
+  pthread_attr_init(&attr);
+  pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE);
+
+  /* create a server thread for processing incoming remote node socket data */
+  // 
+  pthread_create(&sid, 
+                 &attr, 
+                 server_thread,
+                 (Remote_HCA_Info *) &remote_hca_data);
+
+  printf("Main: created a server thread \n");
+
+  sleep(10);
+  
+  /* create a client thread to send out local HCA data to remote node */
+  pthread_create(&cid, 
+                 &attr, 
+                 client_thread,
+                 (Remote_HCA_Info *) &exchange_hca_data);
+
+  printf("Main: created a client  thread \n");
+
+  /* synchronization between server_thread and client_thread */
+  pthread_attr_destroy(&attr);
+
+  rc = pthread_join(sid, (void **) &status);
+  if(rc) {
+    printf("Error: return code from pthread_join() is %d\n", rc);
+    exit(-1);
+  }
+
+  printf("completed join with thread %d status = %d\n", sid, status);
+
+  rc = pthread_join(cid, (void **) &status);
+  if(rc) {
+    printf("Error: return code from pthread_join() is %d\n", rc);
+    exit(-1);
+  }
+  printf("completed join with thread %d status = %d\n", cid, status);
+
+  // update shared memory with remote HCA's data 
+
+  shared_memory->opcode = RECV_QP_INFO;
+  shared_memory->length = remote_hca_data.length;
+  for(i=0; i < 256; i++) {
+    shared_memory->dlid[i]   = remote_hca_data.dlid[i];
+    shared_memory->rqp_num[i]= remote_hca_data.rqp_num[i];
+  }
+  shared_memory->rkey   = remote_hca_data.rkey;
+  shared_memory->vaddr1 = remote_hca_data.vaddr1;
+  shared_memory->vaddr2 = remote_hca_data.vaddr2;
+  shared_memory->size   = remote_hca_data.size;
+
+  sleep(5);
+
+  shared_memory->opcode = RECV_QP_INFO;
+  shared_memory->length = remote_hca_data.length;
+  for(i=0; i < 256; i++) {
+    shared_memory->dlid[i]   = remote_hca_data.dlid[i];
+    shared_memory->rqp_num[i]= remote_hca_data.rqp_num[i];
+  }
+  
+  shared_memory->rkey   = remote_hca_data.rkey;
+  shared_memory->vaddr1 = remote_hca_data.vaddr1;
+  shared_memory->vaddr2 = remote_hca_data.vaddr2;
+  shared_memory->size   = remote_hca_data.size;
+
+  sleep(10);
+  
+//  shmdt(shared_memory);
+   
+  printf("uagent is DONE \n");
+  
+
+  exit(0);
+
+}
+
index aca06a6..8b02d26 100644 (file)
@@ -930,6 +930,7 @@ char *portals_nid2str(int nal, ptl_nid_t nid, char *str)
                 break;
         case QSWNAL:
         case GMNAL:
+        case IBNAL:
         case TOENAL:
         case SCIMACNAL:
                 sprintf(str, "%u:%u", (__u32)(nid >> 32), (__u32)nid);
index d233903..a15ce6a 100644 (file)
@@ -320,6 +320,8 @@ kportal_get_ni (int nal)
                 return  (PORTAL_SYMBOL_GET(ktoenal_ni));
         case GMNAL:
                 return  (PORTAL_SYMBOL_GET(kgmnal_ni));
+        case IBNAL:
+                return  (PORTAL_SYMBOL_GET(kibnal_ni));
         case TCPNAL:
                 /* userspace NAL */
                 return (NULL);
@@ -350,6 +352,9 @@ kportal_put_ni (int nal)
         case GMNAL:
                 PORTAL_SYMBOL_PUT(kgmnal_ni);
                 break;
+        case IBNAL:
+                PORTAL_SYMBOL_PUT(kibnal_ni);
+                break;
         case TCPNAL:
                 /* A lesson to a malicious caller */
                 LBUG ();
index f77a439..020a2a9 100644 (file)
@@ -26,7 +26,7 @@
 #include <portals/api-support.h>
 
 int ptl_init;
-unsigned int portal_subsystem_debug = ~0 - (S_PORTALS | S_QSWNAL | S_SOCKNAL | S_GMNAL);
+unsigned int portal_subsystem_debug = ~0 - (S_PORTALS | S_QSWNAL | S_SOCKNAL | S_GMNAL | S_IBNAL);
 unsigned int portal_debug = ~0;
 unsigned int portal_cerror = 1;
 unsigned int portal_printk;
index af34cba..5309eb4 100644 (file)
@@ -66,6 +66,7 @@ static name2num_t nalnames[] = {
         {"toe",                TOENAL},
         {"elan",       QSWNAL},
         {"gm",         GMNAL},
+        {"ib",         IBNAL},
         {"scimac",      SCIMACNAL},
         {NULL,         -1}
 };