/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * Copyright (C) 2004 Cluster File Systems, Inc.
 *   Author: Eric Barton <eric@bartonsoftware.com>
 *   Author: Frank Zago <fzago@systemfabricworks.com>
 *
 *   This file is part of Lustre, http://www.lustre.org.
 *
 *   Lustre is free software; you can redistribute it and/or
 *   modify it under the terms of version 2 of the GNU General Public
 *   License as published by the Free Software Foundation.
 *
 *   Lustre is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with Lustre; if not, write to the Free Software
 *   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#ifndef EXPORT_SYMTAB
# define EXPORT_SYMTAB
#endif
#include <linux/config.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/stat.h>
#include <linux/errno.h>
#include <linux/smp_lock.h>
#include <linux/unistd.h>
#include <linux/uio.h>

#include <asm/system.h>
#include <asm/uaccess.h>

#include <linux/init.h>
#include <linux/file.h>
#include <linux/list.h>
#include <linux/kmod.h>
#include <linux/sysctl.h>
#define DEBUG_SUBSYSTEM S_IBNAL

#define IBNAL_CHECK_ADVERT
#include <libcfs/kp30.h>
#include <portals/p30.h>
#include <portals/lib-p30.h>
#include <portals/nal.h>
/* Local debug override: route CDEBUG straight to printk, prepending the
 * calling function and line. */
#undef CDEBUG
#define CDEBUG(mask, format, a...) \
        printk(KERN_INFO "%s:%d - " format, __func__, __LINE__, ##a)
#define GCC_VERSION (__GNUC__ * 10000 \
                     + __GNUC_MINOR__ * 100 \
                     + __GNUC_PATCHLEVEL__)

/* Require GCC >= 3.2.3 */
#if GCC_VERSION <= 30202
/* GCC 3.2.2, and presumably several versions before it, will
 * miscompile this driver. See
 * http://gcc.gnu.org/bugzilla/show_bug.cgi?id=9853. */
#error Invalid GCC version. Must use GCC >= 3.2.3
#endif
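
/* For example, GCC 3.2.2 gives GCC_VERSION 3*10000 + 2*100 + 2 == 30202,
 * which the check above rejects; 3.2.3 (30203) is the first version
 * accepted. */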
#define IBNAL_SERVICE_NAME   "vibnal"
#define IBNAL_SERVICE_NUMBER 0x11b9a2           /* TODO */
#if CONFIG_SMP
# define IBNAL_N_SCHED       num_online_cpus()  /* # schedulers */
#else
# define IBNAL_N_SCHED       1                  /* # schedulers */
#endif
#define IBNAL_MIN_RECONNECT_INTERVAL HZ         /* first failed connection retry... */
#define IBNAL_MAX_RECONNECT_INTERVAL (60*HZ)    /* ...exponentially increasing to this */
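
/* i.e. a peer that keeps failing is first retried after HZ, then after
 * progressively longer (typically doubling) intervals, capped at 60*HZ;
 * ibp_reconnect_interval in kib_peer_t below carries the per-peer state. */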
#define IBNAL_MSG_SIZE       (4<<10)            /* max size of queued messages (inc hdr) */

#define IBNAL_MSG_QUEUE_SIZE 8                  /* # messages/RDMAs in-flight */
#define IBNAL_CREDIT_HIGHWATER 7                /* when to eagerly return credits */

/* 7 means infinite retries; Infinicon recommended 5 */
#define IBNAL_RETRY          5                  /* # times to retry */
#define IBNAL_RNR_RETRY      5                  /* # RNR NAK retries */
#define IBNAL_CM_RETRY       5                  /* # times to retry connection */

#define IBNAL_FLOW_CONTROL   1
#define IBNAL_ACK_TIMEOUT    20                 /* 4.096us * 2^20 ~= 4.3 seconds */

#define IBNAL_NTX            64                 /* # tx descs */
/* This had to be dropped so that we only register < 255 pages per
 * region; it will change if we register all memory. */
#define IBNAL_NTX_NBLK       128                /* # reserved tx descs */
#define IBNAL_PEER_HASH_SIZE 101                /* # peer lists */

#define IBNAL_RESCHED        100                /* # scheduler loops before reschedule */

#define IBNAL_CONCURRENT_PEERS 1000             /* # nodes all talking at once to me */

/* default vals for runtime tunables */
#define IBNAL_IO_TIMEOUT     50                 /* default comms timeout (seconds) */
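
/* This seeds kib_tunables.kib_io_timeout (see below); tx deadlines are
 * then computed as jiffies + kib_io_timeout * HZ, as in
 * kibnal_queue_tx_locked() later in this file. */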
/************************/
/* derived constants... */

/* TX messages (shared by all connections) */
#define IBNAL_TX_MSGS       (IBNAL_NTX + IBNAL_NTX_NBLK)
#define IBNAL_TX_MSG_BYTES  (IBNAL_TX_MSGS * IBNAL_MSG_SIZE)
#define IBNAL_TX_MSG_PAGES  ((IBNAL_TX_MSG_BYTES + PAGE_SIZE - 1)/PAGE_SIZE)

#define IBNAL_TX_MAX_SG     (PTL_MD_MAX_IOV + 1)

/* RX messages (per connection) */
#define IBNAL_RX_MSGS       IBNAL_MSG_QUEUE_SIZE
#define IBNAL_RX_MSG_BYTES  (IBNAL_RX_MSGS * IBNAL_MSG_SIZE)
#define IBNAL_RX_MSG_PAGES  ((IBNAL_RX_MSG_BYTES + PAGE_SIZE - 1)/PAGE_SIZE)

/* we may have up to 2 completions per transmit +
   1 completion per receive, per connection */
#define IBNAL_CQ_ENTRIES    ((2*IBNAL_TX_MSGS) + \
                             (IBNAL_RX_MSGS * IBNAL_CONCURRENT_PEERS))
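
/* With the defaults above that is 2*(64 + 128) + (8 * 1000) = 8384
 * entries on the single shared completion queue. */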
#define IBNAL_RDMA_BASE     0x0eeb0000
#define IBNAL_WHOLE_MEM     1
#define IBNAL_CKSUM         0

/* Starting sequence number. */
#define IBNAL_STARTING_PSN  0x465A

/* Timeout for SA requests, in seconds */
#define GSI_TIMEOUT         5
typedef struct
{
        int                      kib_io_timeout; /* comms timeout (seconds) */
        struct ctl_table_header *kib_sysctl;     /* sysctl interface */
} kib_tunables_t;
/* some of these have specific types in the stack that just map back
 * to the uFOO types, like IB_{L,R}_KEY. */

typedef struct
{
        int               ibp_npages;           /* # pages */
        int               ibp_mapped;           /* mapped? */
        __u64             ibp_vaddr;            /* mapped region vaddr */
        __u32             ibp_lkey;             /* mapped region lkey */
        __u32             ibp_rkey;             /* mapped region rkey */
        vv_mem_reg_h_t    ibp_handle;           /* mapped region handle */
        struct page      *ibp_pages[0];
} kib_pages_t;
typedef struct
{
        vv_mem_reg_h_t    md_handle;
        __u32             md_lkey;
        __u32             md_rkey;
        __u64             md_addr;
} kib_md_t __attribute__((packed));
typedef struct
{
        /* initialisation state; fields below are sorted by their
         * initialisation order */
        int               kib_init;             /* initialisation state */
        __u64             kib_incarnation;      /* which one am I */
        int               kib_shutdown;         /* shut down? */
        atomic_t          kib_nthreads;         /* # live threads */

        __u64             kib_service_id;       /* service number I listen on */
        vv_gid_t          kib_port_gid;         /* port GID in HOST ORDER! */
        vv_p_key_t        kib_port_pkey;        /* my pkey */
        ptl_nid_t         kib_nid;              /* my NID */
        struct semaphore  kib_nid_mutex;        /* serialise NID ops */
        cm_cep_handle_t   kib_cep;              /* connection end point */

        rwlock_t          kib_global_lock;      /* stabilize peer/conn ops */

        struct list_head *kib_peers;            /* hash table of all my known peers */
        int               kib_peer_hash_size;   /* size of kib_peers */
        atomic_t          kib_npeers;           /* # peers extant */
        atomic_t          kib_nconns;           /* # connections extant */

        struct list_head  kib_connd_conns;      /* connections to progress */
        struct list_head  kib_connd_peers;      /* peers waiting for a connection */
        wait_queue_head_t kib_connd_waitq;      /* connection daemons sleep here */
        unsigned long     kib_connd_waketime;   /* when connd will wake */
        spinlock_t        kib_connd_lock;       /* serialise */

        wait_queue_head_t kib_sched_waitq;      /* schedulers sleep here */
        struct list_head  kib_sched_txq;        /* tx requiring attention */
        struct list_head  kib_sched_rxq;        /* rx requiring attention */
        spinlock_t        kib_sched_lock;       /* serialise */

        struct kib_tx    *kib_tx_descs;         /* all the tx descriptors */
        kib_pages_t      *kib_tx_pages;         /* premapped tx msg pages */

        struct list_head  kib_idle_txs;         /* idle tx descriptors */
        struct list_head  kib_idle_nblk_txs;    /* idle reserved tx descriptors */
        wait_queue_head_t kib_idle_tx_waitq;    /* block here for tx descriptor */
        __u64             kib_next_tx_cookie;   /* RDMA completion cookie */
        spinlock_t        kib_tx_lock;          /* serialise */

        vv_hca_h_t        kib_hca;              /* The HCA */
        vv_hca_attrib_t   kib_hca_attrs;        /* HCA attributes */

        int               kib_port;             /* port on the device */
        vv_port_attrib_t  kib_port_attr;        /* port attributes */

        vv_pd_h_t         kib_pd;               /* protection domain */
        vv_cq_h_t         kib_cq;               /* completion queue */

        void             *kib_listen_handle;    /* where I listen for connections */

        /* These fields are left untouched, so they can be shared. */
        cm_drequest_data_t dreq_data;           /* disconnect request */
        cm_dreply_data_t   drep_data;           /* disconnect reply */

        /* Send and receive MADs (service records, path records) */
        gsi_class_handle_t      gsi_handle;
        gsi_dtgrm_pool_handle_t gsi_pool_handle;
        struct semaphore        gsi_mutex;      /* protect GSI list - TODO: spinlock instead? */
        struct list_head        gsi_pending;    /* pending GSI datagrams */
} kib_data_t;
/************************************************************************
 * Wire message structs.
 * These are sent in sender's byte order (i.e. receiver flips).
 * CAVEAT EMPTOR: other structs communicated between nodes (e.g. MAD
 * private data and SM service info) are LE on the wire.
 */

/* also kib_md_t above */
typedef struct
{
        __u32             rd_nob;               /* # of bytes */
        __u64             rd_addr;              /* remote io vaddr */
} kib_rdma_desc_t __attribute__((packed));
typedef struct
{
        ptl_hdr_t         ibim_hdr;             /* portals header */
        char              ibim_payload[0];      /* piggy-backed payload */
} kib_immediate_msg_t __attribute__((packed));
/* these arrays serve two purposes during rdma: they are built on the passive
 * side and sent to the active side as remote arguments. On the active side
 * the descs are used as a data structure on the way to local gather items.
 * the different roles result in split local/remote meaning of desc->rd_key */

typedef struct
{
        ptl_hdr_t         ibrm_hdr;             /* portals header */
        __u64             ibrm_cookie;          /* opaque completion cookie */
        __u32             ibrm_num_descs;       /* how many descs */
        __u32             rd_key;               /* remote key */
        kib_rdma_desc_t   ibrm_desc[0];         /* where to suck/blow */
} kib_rdma_msg_t __attribute__((packed));
#define kib_rdma_msg_len(num_descs) \
        offsetof(kib_msg_t, ibm_u.rdma.ibrm_desc[num_descs])
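
/* Illustrative only: with the layout above, an rdma message carrying n
 * fragments occupies the fixed kib_msg_t prefix up to ibrm_desc plus
 * n * sizeof(kib_rdma_desc_t), so the number of bytes to send for
 * n == 2 descriptors is kib_rdma_msg_len(2). */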
typedef struct
{
        __u64             ibcm_cookie;          /* opaque completion cookie */
        __u32             ibcm_status;          /* completion status */
} kib_completion_msg_t __attribute__((packed));
typedef struct
{
        __u32             ibm_magic;            /* I'm an openibnal message */
        __u16             ibm_version;          /* this is my version number */
        __u8              ibm_type;             /* msg type */
        __u8              ibm_credits;          /* returned credits */

        union {
                kib_immediate_msg_t   immediate;
                kib_rdma_msg_t        rdma;
                kib_completion_msg_t  completion;
        } ibm_u __attribute__((packed));
} kib_msg_t __attribute__((packed));
#define IBNAL_MSG_MAGIC     0x0be91b91          /* unique magic */
#define IBNAL_MSG_VERSION   1                   /* current protocol version */

#define IBNAL_MSG_NOOP      0xd0                /* nothing (just credits) */
#define IBNAL_MSG_IMMEDIATE 0xd1                /* portals hdr + payload */
#define IBNAL_MSG_PUT_RDMA  0xd2                /* portals PUT hdr + source rdma desc */
#define IBNAL_MSG_PUT_DONE  0xd3                /* signal PUT rdma completion */
#define IBNAL_MSG_GET_RDMA  0xd4                /* portals GET hdr + sink rdma desc */
#define IBNAL_MSG_GET_DONE  0xd5                /* signal GET rdma completion */
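
/* Illustrative exchange, as implied by the message types above (peer
 * names A and B are hypothetical):
 *
 *      PUT:  A -> B  IBNAL_MSG_PUT_RDMA (PUT hdr + source rdma desc)
 *            B moves the payload by RDMA, then
 *            B -> A  IBNAL_MSG_PUT_DONE (cookie + completion status)
 *
 *      GET:  A -> B  IBNAL_MSG_GET_RDMA (GET hdr + sink rdma desc)
 *            B moves the payload by RDMA, then
 *            B -> A  IBNAL_MSG_GET_DONE (cookie + completion status)
 */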
/***********************************************************************/

typedef struct kib_rx                           /* receive message */
{
        struct list_head          rx_list;      /* queue for attention */
        struct kib_conn          *rx_conn;      /* owning conn */
        int                       rx_rdma;      /* RDMA completion posted? */
        int                       rx_posted;    /* posted? */
        kib_msg_t                *rx_msg;       /* pre-mapped buffer */
        vv_wr_t                   rx_wrq;       /* receive work item... */
        vv_scatgat_t              rx_gl;        /* ...and its memory */
} kib_rx_t;
typedef struct kib_tx                           /* transmit message */
{
        struct list_head          tx_list;      /* queue on idle_txs ibc_tx_queue etc. */
        int                       tx_isnblk;    /* I'm reserved for non-blocking sends */
        struct kib_conn          *tx_conn;      /* owning conn */
        int                       tx_mapped;    /* mapped for RDMA? */
        int                       tx_sending;   /* # tx callbacks outstanding */
        int                       tx_status;    /* completion status */
        unsigned long             tx_deadline;  /* completion deadline */
        int                       tx_passive_rdma; /* peer sucks/blows */
        int                       tx_passive_rdma_wait; /* waiting for peer to complete */
        __u64                     tx_passive_rdma_cookie; /* completion cookie */
        lib_msg_t                *tx_libmsg[2]; /* lib msgs to finalize on completion */
        kib_md_t                  tx_md;        /* RDMA mapping (active/passive) */
        kib_msg_t                *tx_msg;       /* pre-mapped buffer */
        int                       tx_nsp;       /* # send work items */
        vv_wr_t                   tx_wrq[IBNAL_TX_MAX_SG]; /* send work items... */
        vv_scatgat_t              tx_gl[IBNAL_TX_MAX_SG];  /* ...and their memory */
} kib_tx_t;
#define KIB_TX_UNMAPPED   0
#define KIB_TX_MAPPED     1
#define KIB_TX_MAPPED_FMR 2
typedef struct kib_wire_connreq
{
        __u32             wcr_magic;            /* I'm an openibnal connreq */
        __u16             wcr_version;          /* this is my version number */
        __u16             wcr_queue_depth;      /* this is my receive queue size */
        __u64             wcr_nid;              /* peer's NID */
        __u64             wcr_incarnation;      /* peer's incarnation */
} kib_wire_connreq_t;
typedef struct kib_gid
{
        __u64   hi, lo;
} kib_gid_t;
typedef struct kib_connreq
{
        /* connection-in-progress */
        struct kib_conn            *cr_conn;
        kib_wire_connreq_t          cr_wcr;

        //ib_service_record_v2_t    cr_service;
        ib_path_record_v2_t         cr_path;

        cm_request_data_t           cr_cm_req;
        cm_rtu_data_t               cr_cm_rtu;
} kib_connreq_t;
typedef struct kib_conn
{
        struct kib_peer    *ibc_peer;           /* owning peer */
        struct list_head    ibc_list;           /* stash on peer's conn list */
        __u64               ibc_incarnation;    /* which instance of the peer */
        atomic_t            ibc_refcount;       /* # users */
        int                 ibc_state;          /* what's happening */
        atomic_t            ibc_nob;            /* # bytes buffered */
        int                 ibc_nsends_posted;  /* # uncompleted sends */
        int                 ibc_credits;        /* # credits I have */
        int                 ibc_outstanding_credits; /* # credits to return */
        int                 ibc_rcvd_disconnect; /* received discon request */
        int                 ibc_sent_disconnect; /* sent discon request */
        struct list_head    ibc_tx_queue;       /* send queue */
        struct list_head    ibc_active_txs;     /* active tx awaiting completion */
        spinlock_t          ibc_lock;           /* serialise */
        kib_rx_t           *ibc_rxs;            /* the rx descs */
        kib_pages_t        *ibc_rx_pages;       /* premapped rx msg pages */
        vv_qp_h_t           ibc_qp;             /* queue pair */
        cm_cep_handle_t     ibc_cep;            /* connection endpoint */
        vv_qp_attr_t        ibc_qp_attrs;       /* QP attrs */
        kib_connreq_t      *ibc_connreq;        /* connection request state */
} kib_conn_t;
#define IBNAL_CONN_INIT_NOTHING  0              /* initial state */
#define IBNAL_CONN_INIT_QP       1              /* ibc_qp set up */
#define IBNAL_CONN_CONNECTING    2              /* started to connect */
#define IBNAL_CONN_ESTABLISHED   3              /* connection established */
#define IBNAL_CONN_SEND_DREQ     4              /* to send disconnect req */
#define IBNAL_CONN_DREQ          5              /* sent disconnect req */
#define IBNAL_CONN_DREP          6              /* sent disconnect rep */
#define IBNAL_CONN_DISCONNECTED  7              /* no more QP or CM traffic */
#define KIB_ASSERT_CONN_STATE(conn, state) do {                         \
        LASSERTF((conn)->ibc_state == (state), "%d\n",                  \
                 (conn)->ibc_state);                                    \
} while (0)

#define KIB_ASSERT_CONN_STATE_RANGE(conn, low, high) do {               \
        LASSERTF((low) <= (high), "%d %d\n", low, high);                \
        LASSERTF((conn)->ibc_state >= (low) &&                          \
                 (conn)->ibc_state <= (high),                           \
                 "%d\n", (conn)->ibc_state);                            \
} while (0)
typedef struct kib_peer
{
        struct list_head    ibp_list;           /* stash on global peer list */
        struct list_head    ibp_connd_list;     /* schedule on kib_connd_peers */
        ptl_nid_t           ibp_nid;            /* who's on the other end(s) */
        atomic_t            ibp_refcount;       /* # users */
        int                 ibp_persistence;    /* "known" peer refs */
        struct list_head    ibp_conns;          /* all active connections */
        struct list_head    ibp_tx_queue;       /* msgs waiting for a conn */
        int                 ibp_connecting;     /* connecting+accepting */
        unsigned long       ibp_reconnect_time; /* when reconnect may be attempted */
        unsigned long       ibp_reconnect_interval; /* exponential backoff */
} kib_peer_t;
struct sa_request;
typedef void (*sa_request_cb_t)(struct sa_request *request);
struct sa_request {
        /* Link all the pending GSI datagrams together. */
        struct list_head  list;

        int               retry;        /* number of retries left (after a timeout only) */
        int               status;       /* status of the request */
        gsi_dtgrm_t      *dtgrm_req;    /* request */
        gsi_dtgrm_t      *dtgrm_resp;   /* response */
        sa_mad_v2_t      *mad;          /* points inside the datagram */
        void             *context;      /* caller's context */

        struct timer_list timer;        /* to time out the request */

        /* When the request is completed, we either call the callback
         * or post a completion. They are mutually exclusive. */
        struct completion signal;
        sa_request_cb_t   callback;
};
/* The CM callbacks are called at interrupt level. However we cannot do
 * everything we want at that level, so we let keventd run the rest of
 * the work. */
struct cm_off_level {
        struct tq_struct       tq;              /* deferred work, run by keventd */

        cm_cep_handle_t        cep;
        cm_conn_data_t        *info;
};
extern lib_nal_t       kibnal_lib;
extern kib_data_t      kibnal_data;
extern kib_tunables_t  kibnal_tunables;
static inline int wrq_signals_completion(vv_wr_t *wrq)
{
        return wrq->completion_notification != 0;
}
/******************************************************************************/

/* these macros purposely avoid using local vars, so they don't
 * increase stack consumption */

#define kib_peer_addref(peer) do {                                      \
        LASSERTF(atomic_read(&peer->ibp_refcount) > 0, "%d\n",          \
                 atomic_read(&peer->ibp_refcount));                     \
        CDEBUG(D_NET, "++peer[%p] -> "LPX64" (%d)\n",                   \
               peer, peer->ibp_nid, atomic_read(&peer->ibp_refcount));  \
        atomic_inc(&peer->ibp_refcount);                                \
} while (0)

#define kib_peer_decref(peer) do {                                      \
        LASSERTF(atomic_read(&peer->ibp_refcount) > 0, "%d\n",          \
                 atomic_read(&peer->ibp_refcount));                     \
        CDEBUG(D_NET, "--peer[%p] -> "LPX64" (%d)\n",                   \
               peer, peer->ibp_nid, atomic_read(&peer->ibp_refcount));  \
        if (atomic_dec_and_test(&peer->ibp_refcount)) {                 \
                CDEBUG(D_NET, "destroying peer "LPX64" %p\n",           \
                       peer->ibp_nid, peer);                            \
                kibnal_destroy_peer(peer);                              \
        }                                                               \
} while (0)
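
/* Illustrative usage (not part of the driver): anything that caches a
 * peer pointer holds a ref across the caching:
 *
 *      kib_peer_addref(peer);          // e.g. conn->ibc_peer = peer
 *      ...
 *      kib_peer_decref(peer);          // on teardown; may destroy 'peer'
 */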
/******************************************************************************/

static inline struct list_head *
kibnal_nid2peerlist (ptl_nid_t nid)
{
        unsigned int hash = ((unsigned int)nid) % kibnal_data.kib_peer_hash_size;

        return (&kibnal_data.kib_peers[hash]);
}

static inline int
kibnal_peer_active (kib_peer_t *peer)
{
        /* Am I in the peer hash table? */
        return (!list_empty(&peer->ibp_list));
}
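
/* Illustrative lookup (a sketch, not the driver's code): walk the hash
 * chain under kib_global_lock and compare NIDs:
 *
 *      struct list_head *tmp;
 *      list_for_each (tmp, kibnal_nid2peerlist(nid)) {
 *              kib_peer_t *peer = list_entry(tmp, kib_peer_t, ibp_list);
 *              if (peer->ibp_nid == nid)
 *                      return peer;
 *      }
 */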
static inline void
kibnal_queue_tx_locked (kib_tx_t *tx, kib_conn_t *conn)
{
        /* CAVEAT EMPTOR: tx takes caller's ref on conn */

        LASSERT (tx->tx_nsp > 0);               /* work items set up */
        LASSERT (tx->tx_conn == NULL);          /* only set here */

        tx->tx_conn = conn;
        tx->tx_deadline = jiffies + kibnal_tunables.kib_io_timeout * HZ;
        list_add_tail(&tx->tx_list, &conn->ibc_tx_queue);
}
static inline __u64 *
kibnal_service_nid_field(ib_service_record_v2_t *sr)
{
        /* The service key mask must have bytes 0 to 7 set. */
        return (__u64 *)sr->service_data8;
}
static inline void
kibnal_set_service_keys(ib_service_record_v2_t *sr, ptl_nid_t nid)
{
        LASSERT (strlen(IBNAL_SERVICE_NAME) < sizeof(sr->service_name));
        strcpy (sr->service_name, IBNAL_SERVICE_NAME);

        *kibnal_service_nid_field(sr) = cpu_to_le64(nid);
}
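
/* Illustrative use (a sketch): when advertising, zero a service record
 * and stamp it with our NID before handing it to the SA:
 *
 *      ib_service_record_v2_t sr;
 *      memset(&sr, 0, sizeof(sr));
 *      kibnal_set_service_keys(&sr, kibnal_data.kib_nid);
 */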
/* TODO: use vv_va2adverize instead */
#if CONFIG_X86
static inline __u64
kibnal_page2phys (struct page *p)
{
        __u64 page_number = p - mem_map;

        return (page_number << PAGE_SHIFT);
}
#else
# error "no page->phys"
#endif
/* CAVEAT EMPTOR: We rely on tx/rx descriptor alignment to allow us to
 * use the lowest bit of the work request id as a flag to determine if
 * the completion is for a transmit or a receive (the op field is not
 * valid when the wc completes in error). */

static inline vv_wr_id_t
kibnal_ptr2wreqid (void *ptr, int isrx)
{
        unsigned long lptr = (unsigned long)ptr;

        LASSERT ((lptr & 1) == 0);
        return (vv_wr_id_t)(lptr | (isrx ? 1 : 0));
}

static inline void *
kibnal_wreqid2ptr (vv_wr_id_t wreqid)
{
        return (void *)(((unsigned long)wreqid) & ~1UL);
}

static inline int
kibnal_wreqid_is_rx (vv_wr_id_t wreqid)
{
        return (wreqid & 1) != 0;
}
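
/* Illustrative round trip (a sketch; 'tx', 'rx' and 'wc' are
 * hypothetical locals):
 *
 *      tx->tx_wrq[0].wr_id = kibnal_ptr2wreqid(tx, 0);   // posting a send
 *      rx->rx_wrq.wr_id    = kibnal_ptr2wreqid(rx, 1);   // posting a recv
 *
 *      // in the completion handler, valid even on error completions:
 *      if (kibnal_wreqid_is_rx(wc.wr_id))
 *              rx = (kib_rx_t *)kibnal_wreqid2ptr(wc.wr_id);
 *      else
 *              tx = (kib_tx_t *)kibnal_wreqid2ptr(wc.wr_id);
 */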
static inline int
kibnal_whole_mem(void)
{
        return IBNAL_WHOLE_MEM;
}
/* Voltaire stores GIDs in host order.  On a little-endian host,
 * swapping the two 64-bit halves and byte-swapping each reverses all
 * 16 bytes of the GID. */
static inline void gid_swap(vv_gid_t *gid)
{
        __u64 s;

        s = gid->scope.g.subnet;
        gid->scope.g.subnet = cpu_to_be64(gid->scope.g.eui64);
        gid->scope.g.eui64 = cpu_to_be64(s);
}
static void dump_qp(kib_conn_t *conn)
{
        vv_qp_attr_t *qp_attrs;
        void         *qp_context;
        vv_return_t   retval;

        CERROR("QP dumping %p\n", conn);

        retval = vv_qp_query(kibnal_data.kib_hca, conn->ibc_qp, &qp_context, &conn->ibc_qp_attrs);
        if (retval != vv_return_ok) {
                CERROR ("Couldn't query qp attributes: %d\n", retval);
                return;
        }

        qp_attrs = &conn->ibc_qp_attrs;

        CERROR("QP %x dump\n", qp_attrs->query.qp_num);
        CERROR("  vv_qp_attr_mask = %llx\n", qp_attrs->query.vv_qp_attr_mask);
        CERROR("  qp_state = %d\n", qp_attrs->query.qp_state);
        CERROR("  cq_send_h = %p\n", qp_attrs->query.cq_send_h);
        CERROR("  cq_receive_h = %p\n", qp_attrs->query.cq_receive_h);
        CERROR("  send_max_outstand_wr = %d\n", qp_attrs->query.send_max_outstand_wr);
        CERROR("  receive_max_outstand_wr = %d\n", qp_attrs->query.receive_max_outstand_wr);
        CERROR("  max_scatgat_per_send_wr = %d\n", qp_attrs->query.max_scatgat_per_send_wr);
        CERROR("  max_scatgat_per_receive_wr = %d\n", qp_attrs->query.max_scatgat_per_receive_wr);
        CERROR("  send_psn = %x\n", qp_attrs->query.send_psn);
        CERROR("  receve_psn = %x\n", qp_attrs->query.receve_psn);
        CERROR("  access_control = %x\n", qp_attrs->query.access_control);
        CERROR("  phy_port_num = %d\n", qp_attrs->query.phy_port_num);
        CERROR("  primary_p_key_indx = %x\n", qp_attrs->query.primary_p_key_indx);
        CERROR("  q_key = %x\n", qp_attrs->query.q_key);
        CERROR("  destanation_qp = %x\n", qp_attrs->query.destanation_qp);
        CERROR("  rdma_r_atom_outstand_num = %d\n", qp_attrs->query.rdma_r_atom_outstand_num);
        CERROR("  responder_rdma_r_atom_num = %d\n", qp_attrs->query.responder_rdma_r_atom_num);
        CERROR("  min_rnr_nak_timer = %d\n", qp_attrs->query.min_rnr_nak_timer);
        CERROR("  pd_h = %lx\n", qp_attrs->query.pd_h);
        CERROR("  recv_solicited_events = %d\n", qp_attrs->query.recv_solicited_events);
        CERROR("  send_signaled_comp = %d\n", qp_attrs->query.send_signaled_comp);
        CERROR("  flow_control = %d\n", qp_attrs->query.flow_control);
}
static void dump_wqe(vv_wr_t *wr)
{
        CERROR("Dumping send WR %p\n", wr);

        CERROR("  wr_id = %llx\n", wr->wr_id);
        CERROR("  completion_notification = %d\n", wr->completion_notification);
        CERROR("  scatgat_list = %p\n", wr->scatgat_list);
        CERROR("  num_of_data_segments = %d\n", wr->num_of_data_segments);

        if (wr->scatgat_list && wr->num_of_data_segments) {
                CERROR("  scatgat_list[0].v_address = %p\n", wr->scatgat_list[0].v_address);
                CERROR("  scatgat_list[0].length = %d\n", wr->scatgat_list[0].length);
                CERROR("  scatgat_list[0].l_key = %x\n", wr->scatgat_list[0].l_key);
        }

        CERROR("  wr_type = %d\n", wr->wr_type);

        switch(wr->wr_type) {
        case vv_wr_send:
                CERROR("  fance_indicator = %d\n", wr->type.send.send_qp_type.rc_type.fance_indicator);
                break;

        case vv_wr_rdma_write:
        case vv_wr_rdma_read:
                CERROR("  fance_indicator = %d\n", wr->type.send.send_qp_type.rc_type.fance_indicator);
                CERROR("  r_addr = %llx\n", wr->type.send.send_qp_type.rc_type.r_addr);
                CERROR("  r_r_key = %x\n", wr->type.send.send_qp_type.rc_type.r_r_key);
                break;

        default:
                break;
        }
}
static void dump_wc(vv_wc_t *wc)
{
        CERROR("Dumping WC\n");

        CERROR("  wr_id = %llx\n", wc->wr_id);
        CERROR("  operation_type = %d\n", wc->operation_type);
        CERROR("  num_bytes_transfered = %lld\n", wc->num_bytes_transfered);
        CERROR("  completion_status = %d\n", wc->completion_status);
}
#if 0 /* enable for verbose dumps */
static void hexdump(char *string, void *ptr, int len)
{
        unsigned char *c = ptr;
        int i;

        if (len < 0 || len > 2048) {
                printk("hexdump: invalid length %d\n", len);
                return;
        }

        printk("%d bytes of '%s' from 0x%p\n", len, string, ptr);

        for (i = 0; i < len;) {
                printk("%02x", *(c++));
                i++;
                if ((i & 15) == 0)
                        printk("\n");
                else if ((i & 1) == 0)
                        printk(" ");
        }
        printk("\n");
}
#else
#define hexdump(a,b,c)
#endif
/*--------------------------------------------------------------------------*/
extern kib_peer_t *kibnal_create_peer (ptl_nid_t nid);
extern void kibnal_destroy_peer (kib_peer_t *peer);
extern int kibnal_del_peer (ptl_nid_t nid, int single_share);
extern kib_peer_t *kibnal_find_peer_locked (ptl_nid_t nid);
extern void kibnal_unlink_peer_locked (kib_peer_t *peer);
extern int kibnal_close_stale_conns_locked (kib_peer_t *peer,
                                            __u64 incarnation);

extern kib_conn_t *kibnal_create_conn (void);
extern void kibnal_put_conn (kib_conn_t *conn);
extern void kibnal_destroy_conn (kib_conn_t *conn);
extern void kibnal_listen_callback(cm_cep_handle_t cep, cm_conn_data_t *info, void *arg);

extern int kibnal_alloc_pages (kib_pages_t **pp, int npages, int access);
extern void kibnal_free_pages (kib_pages_t *p);

extern void kibnal_check_sends (kib_conn_t *conn);
extern void kibnal_close_conn_locked (kib_conn_t *conn, int error);
extern int kibnal_thread_start (int (*fn)(void *arg), void *arg);
extern int kibnal_scheduler(void *arg);
extern int kibnal_connd (void *arg);
extern void kibnal_init_tx_msg (kib_tx_t *tx, int type, int body_nob);
extern void kibnal_close_conn (kib_conn_t *conn, int why);
extern void kibnal_start_active_rdma (int type, int status,
                                      kib_rx_t *rx, lib_msg_t *libmsg,
                                      unsigned int niov,
                                      struct iovec *iov, ptl_kiov_t *kiov,
                                      size_t offset, size_t nob);

void kibnal_ca_async_callback(vv_event_record_t ev);
void kibnal_ca_callback (unsigned long context);
extern void vibnal_mad_received_cb(gsi_class_handle_t handle, void *context, gsi_dtgrm_t *dtgrm);
extern void vibnal_mad_sent_cb(gsi_class_handle_t handle, void *context, gsi_dtgrm_t *dtgrm);
extern int kibnal_advertize_op(ptl_nid_t nid, int op, sa_request_cb_t callback, void *context);
extern int vibnal_start_sa_request(struct sa_request *request);
extern struct sa_request *alloc_sa_request(void);
extern void free_sa_request(struct sa_request *request);
extern int kibnal_pathrecord_op(struct sa_request *request, vv_gid_t dgid, sa_request_cb_t callback, void *context);