Whamcloud - gitweb
Mass conversion of all copyright messages to Oracle.
[fs/lustre-release.git] / lnet / klnds / iiblnd / iiblnd.h
1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2  * vim:expandtab:shiftwidth=8:tabstop=8:
3  *
4  * GPL HEADER START
5  *
6  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
7  *
8  * This program is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License version 2 only,
10  * as published by the Free Software Foundation.
11  *
12  * This program is distributed in the hope that it will be useful, but
13  * WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * General Public License version 2 for more details (a copy is included
16  * in the LICENSE file that accompanied this code).
17  *
18  * You should have received a copy of the GNU General Public License
19  * version 2 along with this program; If not, see
20  * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
21  *
22  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
23  * CA 95054 USA or visit www.sun.com if you need additional information or
24  * have any questions.
25  *
26  * GPL HEADER END
27  */
28 /*
29  * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
30  * Use is subject to license terms.
31  */
32 /*
33  * This file is part of Lustre, http://www.lustre.org/
34  * Lustre is a trademark of Sun Microsystems, Inc.
35  *
36  * lnet/klnds/iiblnd/iiblnd.h
37  *
38  * Author: Eric Barton <eric@bartonsoftware.com>
39  */
40
41 #ifndef EXPORT_SYMTAB
42 # define EXPORT_SYMTAB
43 #endif
44 #ifndef AUTOCONF_INCLUDED
45 #include <linux/config.h>
46 #endif
47 #include <linux/module.h>
48 #include <linux/kernel.h>
49 #include <linux/mm.h>
50 #include <linux/string.h>
51 #include <linux/stat.h>
52 #include <linux/errno.h>
53 #include <linux/smp_lock.h>
54 #include <linux/unistd.h>
55 #include <linux/uio.h>
56
57 #include <asm/system.h>
58 #include <asm/uaccess.h>
59 #include <asm/io.h>
60
61 #include <linux/init.h>
62 #include <linux/fs.h>
63 #include <linux/file.h>
64 #include <linux/stat.h>
65 #include <linux/list.h>
66 #include <linux/kmod.h>
67 #include <linux/sysctl.h>
68
69 #define DEBUG_SUBSYSTEM S_LND
70
71 #include <libcfs/libcfs.h>
72 #include <lnet/lnet.h>
73 #include <lnet/lib-lnet.h>
74 #include <lnet/lnet-sysctl.h>
75
76 #include <linux/iba/ibt.h>
77
/* Single comparable compiler version number: major*10000 + minor*100 + patch. */
#define GCC_VERSION (__GNUC__ * 10000 \
                + __GNUC_MINOR__ * 100 \
                + __GNUC_PATCHLEVEL__)

/* Test for GCC > 3.2.2 */
#if GCC_VERSION <= 30202
/* GCC 3.2.2, and presumably several versions before it, will
 * miscompile this driver. See
 * http://gcc.gnu.org/bugzilla/show_bug.cgi?id=9853. */
#error Invalid GCC version. Must use GCC >= 3.2.3
#endif
89
#ifdef CONFIG_SMP
# define IBNAL_N_SCHED      num_online_cpus()   /* # schedulers */
#else
# define IBNAL_N_SCHED      1                   /* # schedulers */
#endif

#define IBNAL_USE_FMR                0          /* map on demand v. use whole mem mapping */
#define KIBLND_DETAILED_DEBUG        0

/* tunables fixed at compile time */
#define IBNAL_PEER_HASH_SIZE         101        /* # peer lists */
#define IBNAL_RESCHED                100        /* # scheduler loops before reschedule */
#define IBNAL_MSG_QUEUE_SIZE         8          /* # messages/RDMAs in-flight */
#define IBNAL_CREDIT_HIGHWATER       7          /* when to eagerly return credits */
#define IBNAL_MSG_SIZE              (4<<10)     /* max size of queued messages (inc hdr) */
#define IBNAL_RDMA_BASE              0x0eeb0000
#define IBNAL_STARTING_PSN           1

/* QP tunables */
/* 7 indicates infinite retry attempts, Infinicon recommended 5 */
#define IBNAL_RETRY                  5          /* # times to retry */
#define IBNAL_RNR_RETRY              5          /* # receiver-not-ready retries */
#define IBNAL_CM_RETRY               5          /* # times to retry connection */
#define IBNAL_FLOW_CONTROL           1
#define IBNAL_ACK_TIMEOUT            20         /* supposedly 4 secs */
#define IBNAL_EE_FLOW                1
#define IBNAL_LOCAL_SUB              1
#define IBNAL_FAILOVER_ACCEPTED      0

/************************/
/* derived constants... */

/* TX messages (shared by all connections) */
#define IBNAL_TX_MSGS()       (*kibnal_tunables.kib_ntx)
#define IBNAL_TX_MSG_BYTES()  (IBNAL_TX_MSGS() * IBNAL_MSG_SIZE)
#define IBNAL_TX_MSG_PAGES()  ((IBNAL_TX_MSG_BYTES() + PAGE_SIZE - 1)/PAGE_SIZE)

#if IBNAL_USE_FMR
# define IBNAL_MAX_RDMA_FRAGS 1
# define IBNAL_CONCURRENT_SENDS IBNAL_RX_MSGS
#else
# define IBNAL_MAX_RDMA_FRAGS LNET_MAX_IOV
# define IBNAL_CONCURRENT_SENDS IBNAL_MSG_QUEUE_SIZE
#endif

/* RX messages (per connection) */
#define IBNAL_RX_MSGS         (IBNAL_MSG_QUEUE_SIZE * 2)
#define IBNAL_RX_MSG_BYTES    (IBNAL_RX_MSGS * IBNAL_MSG_SIZE)
#define IBNAL_RX_MSG_PAGES    ((IBNAL_RX_MSG_BYTES + PAGE_SIZE - 1)/PAGE_SIZE)

/* CQ must be able to absorb every possible completion: sends (+RDMA frags)
 * for all TX descriptors plus receives for every peer's RX buffers. */
#define IBNAL_CQ_ENTRIES()  (IBNAL_TX_MSGS() * (1 + IBNAL_MAX_RDMA_FRAGS) +             \
                             (IBNAL_RX_MSGS * *kibnal_tunables.kib_concurrent_peers))
142
/* Module tunables.  All fields are pointers to the module-parameter storage,
 * registered via sysctl/module_param in iiblnd_modparams.c. */
typedef struct
{
        char            **kib_hca_basename;     /* HCA base name */
        char            **kib_ipif_basename;    /* IPoIB interface base name */
        char            **kib_service_name;     /* global service name */
        unsigned int     *kib_service_number;   /* global service number */
        int              *kib_min_reconnect_interval; /* min connect retry seconds... */
        int              *kib_max_reconnect_interval; /* max connect retry seconds */
        int              *kib_concurrent_peers; /* max # peers */
        int              *kib_cksum;            /* checksum kib_msg_t? */
        int              *kib_timeout;          /* comms timeout (seconds) */
        int              *kib_keepalive;        /* keepalive timeout (seconds) */
        int              *kib_ntx;              /* # tx descs */
        int              *kib_credits;          /* # concurrent sends */
        int              *kib_peercredits;      /* # concurrent sends to 1 peer */
        int              *kib_sd_retries;       /* # SD query retries -- NOTE(review): original comment duplicated "# concurrent sends to 1 peer"; presumably SD retries, confirm against modparams */
        int              *kib_concurrent_sends; /* send work queue sizing */
#if defined(CONFIG_SYSCTL) && !CFS_SYSFS_MODULE_PARM
        cfs_sysctl_table_header_t *kib_sysctl;  /* sysctl interface */
#endif
} kib_tunables_t;
164
/* NB The Infinicon stack has specific typedefs for some things
 * (e.g. IB_{L,R}_KEY), that just map back to __u32 etc */

/* A variable-length array of pre-allocated pages (trailing array idiom). */
typedef struct
{
        int               ibp_npages;           /* # pages */
        struct page      *ibp_pages[0];         /* the pages themselves */
} kib_pages_t;
172
173 typedef struct
174 {
175         IB_HANDLE         md_handle;
176         __u32             md_lkey;
177         __u32             md_rkey;
178         __u64             md_addr;
179 } kib_md_t;
180
181 typedef struct
182 {
183         int               kib_init;             /* initialisation state */
184         __u64             kib_incarnation;      /* which one am I */
185         int               kib_shutdown;         /* shut down? */
186         atomic_t          kib_nthreads;         /* # live threads */
187         lnet_ni_t        *kib_ni;               /* _the_ iib instance */
188
189         __u64             kib_port_guid;        /* my GUID (lo 64 of GID)*/
190         __u16             kib_port_pkey;        /* my pkey, whatever that is */
191         struct semaphore  kib_listener_signal;  /* signal completion */
192         IB_HANDLE         kib_listener_cep;     /* connection end point */
193
194         rwlock_t          kib_global_lock;      /* stabilize peer/conn ops */
195         int               kib_ready;            /* CQ callback fired */
196         int               kib_checking_cq;      /* a scheduler is checking the CQ */
197
198         struct list_head *kib_peers;            /* hash table of all my known peers */
199         int               kib_peer_hash_size;   /* size of kib_peers */
200         atomic_t          kib_npeers;           /* # peers extant */
201         atomic_t          kib_nconns;           /* # connections extant */
202
203         struct list_head  kib_connd_zombies;    /* connections to free */
204         struct list_head  kib_connd_conns;      /* connections to progress */
205         struct list_head  kib_connd_peers;      /* peers waiting for a connection */
206         wait_queue_head_t kib_connd_waitq;      /* connection daemon sleep here */
207         spinlock_t        kib_connd_lock;       /* serialise */
208
209         wait_queue_head_t kib_sched_waitq;      /* schedulers sleep here */
210         spinlock_t        kib_sched_lock;       /* serialise */
211
212         struct kib_tx    *kib_tx_descs;         /* all the tx descriptors */
213         kib_pages_t      *kib_tx_pages;         /* premapped tx msg pages */
214
215         struct list_head  kib_idle_txs;         /* idle tx descriptors */
216         __u64             kib_next_tx_cookie;   /* RDMA completion cookie */
217         spinlock_t        kib_tx_lock;          /* serialise */
218
219         IB_HANDLE         kib_hca;              /* The HCA */
220         int               kib_port;             /* port on the device */
221         IB_HANDLE         kib_pd;               /* protection domain */
222         IB_HANDLE         kib_sd;               /* SD handle */
223         IB_HANDLE         kib_cq;               /* completion queue */
224         kib_md_t          kib_whole_mem;        /* whole-mem registration */
225
226         int               kib_hca_idx;          /* my HCA number */
227         uint64            kib_hca_guids[8];     /* all the HCA guids */
228         IB_CA_ATTRIBUTES  kib_hca_attrs;        /* where to get HCA attrs */
229
230         COMMAND_CONTROL_PARAMETERS kib_sdretry; /* control SD query retries */
231 } kib_data_t;
232
/* kib_init state values: how far startup has progressed.  Shutdown tears
 * resources down in reverse order of these states. */
#define IBNAL_INIT_NOTHING         0
#define IBNAL_INIT_DATA            1
#define IBNAL_INIT_LIB             2
#define IBNAL_INIT_HCA             3
#define IBNAL_INIT_PORTATTRS       4
#define IBNAL_INIT_SD              5
#define IBNAL_INIT_PD              6
#define IBNAL_INIT_MD              7
#define IBNAL_INIT_TXD             8
#define IBNAL_INIT_CQ              9
#define IBNAL_INIT_ALL             10
244
245 /************************************************************************
246  * Wire message structs.
247  * These are sent in sender's byte order (i.e. receiver flips).
248  * CAVEAT EMPTOR: other structs communicated between nodes (e.g. MAD
249  * private data and SM service info), is LE on the wire.
250  */
251
252 typedef struct kib_connparams
253 {
254         __u32             ibcp_queue_depth;
255         __u32             ibcp_max_msg_size;
256         __u32             ibcp_max_frags;
257 } WIRE_ATTR kib_connparams_t;
258
259 typedef struct
260 {
261         lnet_hdr_t        ibim_hdr;             /* portals header */
262         char              ibim_payload[0];      /* piggy-backed payload */
263 } WIRE_ATTR kib_immediate_msg_t;
264
265 #if IBNAL_USE_FMR
266 typedef struct
267 {
268         __u64             rd_addr;              /* IO VMA address */
269         __u32             rd_nob;               /* # of bytes */
270         __u32             rd_key;               /* remote key */
271 } WIRE_ATTR kib_rdma_desc_t;
272 #else
273 typedef struct
274 {
275         __u32             rf_nob;               /* # of bytes */
276         __u64             rf_addr;              /* remote io vaddr */
277 } WIRE_ATTR kib_rdma_frag_t;
278
279 typedef struct
280 {
281         __u32             rd_key;               /* local/remote key */
282         __u32             rd_nfrag;             /* # fragments */
283         kib_rdma_frag_t   rd_frags[0];          /* buffer frags */
284 } WIRE_ATTR kib_rdma_desc_t;
285 #endif
286
287 typedef struct
288 {
289         lnet_hdr_t        ibprm_hdr;            /* LNET header */
290         __u64             ibprm_cookie;         /* opaque completion cookie */
291 } WIRE_ATTR kib_putreq_msg_t;
292
293 typedef struct
294 {
295         __u64             ibpam_src_cookie;     /* reflected completion cookie */
296         __u64             ibpam_dst_cookie;     /* opaque completion cookie */
297         kib_rdma_desc_t   ibpam_rd;             /* sender's sink buffer */
298 } WIRE_ATTR kib_putack_msg_t;
299
300 typedef struct
301 {
302         lnet_hdr_t        ibgm_hdr;             /* LNET header */
303         __u64             ibgm_cookie;          /* opaque completion cookie */
304         kib_rdma_desc_t   ibgm_rd;              /* sender's sink buffer */
305 } WIRE_ATTR kib_get_msg_t;
306
307 typedef struct
308 {
309         __u64             ibcm_cookie;          /* opaque completion cookie */
310         __u32             ibcm_status;          /* completion status */
311 } WIRE_ATTR kib_completion_msg_t;
312
313 typedef struct
314 {
315         /* First 2 fields fixed FOR ALL TIME */
316         __u32             ibm_magic;            /* I'm an openibnal message */
317         __u16             ibm_version;          /* this is my version number */
318
319         __u8              ibm_type;             /* msg type */
320         __u8              ibm_credits;          /* returned credits */
321         __u32             ibm_nob;              /* # bytes in whole message */
322         __u32             ibm_cksum;            /* checksum (0 == no checksum) */
323         __u64             ibm_srcnid;           /* sender's NID */
324         __u64             ibm_srcstamp;         /* sender's incarnation */
325         __u64             ibm_dstnid;           /* destination's NID */
326         __u64             ibm_dststamp;         /* destination's incarnation */
327         __u64             ibm_seq;              /* sequence number */
328
329         union {
330                 kib_connparams_t      connparams;
331                 kib_immediate_msg_t   immediate;
332                 kib_putreq_msg_t      putreq;
333                 kib_putack_msg_t      putack;
334                 kib_get_msg_t         get;
335                 kib_completion_msg_t  completion;
336         } WIRE_ATTR ibm_u;
337 } WIRE_ATTR kib_msg_t;
338
#define IBNAL_MSG_MAGIC LNET_PROTO_IIB_MAGIC    /* unique magic */
#define IBNAL_MSG_VERSION              2        /* current protocol version */
#define IBNAL_MSG_VERSION_RDMAREPLYNOTRSRVD 1   /* previous version */

/* Message types (ibm_type).  0xc* = connection handshake, 0xd* = data path. */
#define IBNAL_MSG_CONNREQ           0xc0        /* connection request */
#define IBNAL_MSG_CONNACK           0xc1        /* connection acknowledge */
#define IBNAL_MSG_NOOP              0xd0        /* nothing (just credits) */
#define IBNAL_MSG_IMMEDIATE         0xd1        /* immediate */
#define IBNAL_MSG_PUT_REQ           0xd2        /* putreq (src->sink) */
#define IBNAL_MSG_PUT_NAK           0xd3        /* completion (sink->src) */
#define IBNAL_MSG_PUT_ACK           0xd4        /* putack (sink->src) */
#define IBNAL_MSG_PUT_DONE          0xd5        /* completion (src->sink) */
#define IBNAL_MSG_GET_REQ           0xd6        /* getreq (sink->src) */
#define IBNAL_MSG_GET_DONE          0xd7        /* completion (src->sink: all OK) */

/* connection rejection reasons */
#define IBNAL_REJECT_CONN_RACE       0          /* You lost connection race */
#define IBNAL_REJECT_NO_RESOURCES    1          /* Out of memory/conns etc */
#define IBNAL_REJECT_FATAL           2          /* Anything else */
358
359 /***********************************************************************/
360
361 typedef struct kib_rx                           /* receive message */
362 {
363         struct list_head          rx_list;      /* queue for attention */
364         struct kib_conn          *rx_conn;      /* owning conn */
365         int                       rx_nob;       /* # bytes received (-1 while posted) */
366         __u64                     rx_hca_msg;   /* pre-mapped buffer (hca vaddr) */
367         kib_msg_t                *rx_msg;       /* pre-mapped buffer (host vaddr) */
368         IB_WORK_REQ2              rx_wrq;
369         IB_LOCAL_DATASEGMENT      rx_gl;        /* and its memory */
370 } kib_rx_t;
371
372 typedef struct kib_tx                           /* transmit message */
373 {
374         struct list_head          tx_list;      /* queue on idle_txs ibc_tx_queue etc. */
375         struct kib_conn          *tx_conn;      /* owning conn */
376         int                       tx_mapped;    /* mapped for RDMA? */
377         int                       tx_sending;   /* # tx callbacks outstanding */
378         int                       tx_queued;    /* queued for sending */
379         int                       tx_waiting;   /* waiting for peer */
380         int                       tx_status;    /* completion status */
381         unsigned long             tx_deadline;  /* completion deadline */
382         __u64                     tx_cookie;    /* completion cookie */
383         lnet_msg_t               *tx_lntmsg[2]; /* lnet msgs to finalize on completion */
384         kib_msg_t                *tx_msg;       /* pre-mapped buffer (host vaddr) */
385         __u64                     tx_hca_msg;   /* pre-mapped buffer (HCA vaddr) */
386         int                       tx_nwrq;      /* # send work items */
387 #if IBNAL_USE_FMR
388         IB_WORK_REQ2              tx_wrq[2];    /* send work items... */
389         IB_LOCAL_DATASEGMENT      tx_gl[2];     /* ...and their memory */
390         kib_rdma_desc_t           tx_rd[1];     /* rdma descriptor */
391         kib_md_t                  tx_md;        /* mapping */
392         __u64                    *tx_pages;     /* page phys addrs */
393 #else
394         IB_WORK_REQ2             *tx_wrq;       /* send work items... */
395         IB_LOCAL_DATASEGMENT     *tx_gl;        /* ...and their memory */
396         kib_rdma_desc_t          *tx_rd;        /* rdma descriptor (src buffers) */
397 #endif
398 } kib_tx_t;
399
400 typedef struct
401 {
402         /* scratchpad during connection establishment */
403         IB_QP_ATTRIBUTES_QUERY cv_qpattrs;
404         QUERY                  cv_query;
405         IB_SERVICE_RECORD      cv_svcrec;
406         IB_PATH_RECORD         cv_path;
407         CM_CONN_INFO           cv_cmci;
408 } kib_connvars_t;
409
410 typedef struct kib_conn
411 {
412         struct kib_peer    *ibc_peer;           /* owning peer */
413         struct list_head    ibc_list;           /* stash on peer's conn list */
414         __u64               ibc_incarnation;    /* which instance of the peer */
415         __u64               ibc_txseq;          /* tx sequence number */
416         __u64               ibc_rxseq;          /* rx sequence number */
417         __u32               ibc_version;        /* peer protocol version */
418         atomic_t            ibc_refcount;       /* # users */
419         int                 ibc_state;          /* what's happening */
420         int                 ibc_nsends_posted;  /* # uncompleted sends */
421         int                 ibc_credits;        /* # credits I have */
422         int                 ibc_outstanding_credits; /* # credits to return */
423         int                 ibc_reserved_credits; /* # credits for ACK/DONE msgs */
424         unsigned long       ibc_last_send;      /* time of last send */
425         struct list_head    ibc_early_rxs;      /* rxs completed before ESTABLISHED */
426         struct list_head    ibc_tx_queue_nocred; /* sends that don't need a cred */
427         struct list_head    ibc_tx_queue_rsrvd; /* sends that need a reserved cred */
428         struct list_head    ibc_tx_queue;       /* send queue */
429         struct list_head    ibc_active_txs;     /* active tx awaiting completion */
430         spinlock_t          ibc_lock;           /* serialise */
431         kib_rx_t           *ibc_rxs;            /* the rx descs */
432         kib_pages_t        *ibc_rx_pages;       /* premapped rx msg pages */
433         IB_HANDLE           ibc_qp;             /* queue pair */
434         IB_HANDLE           ibc_cep;            /* CM endpoint */
435         kib_connvars_t     *ibc_cvars;          /* connection scratchpad */
436 } kib_conn_t;
437
/* Connection state machine (ibc_state). */
#define IBNAL_CONN_INIT_NOTHING      0          /* initial state */
#define IBNAL_CONN_INIT_QP           1          /* ibc_qp set up */
#define IBNAL_CONN_CONNECTING        2          /* started to connect */
#define IBNAL_CONN_ESTABLISHED       3          /* connection established */
#define IBNAL_CONN_DISCONNECTING     4          /* to send disconnect req */
#define IBNAL_CONN_DISCONNECTED      5          /* no more QP or CM traffic */

/* types of connection */
#define IBNAL_CONN_ACTIVE            0          /* active connect */
#define IBNAL_CONN_PASSIVE           1          /* passive connect */
#define IBNAL_CONN_WAITING           2          /* waiting for connect */
449
450 typedef struct kib_peer
451 {
452         struct list_head    ibp_list;           /* stash on global peer list */
453         struct list_head    ibp_connd_list;     /* schedule on kib_connd_peers */
454         lnet_nid_t          ibp_nid;            /* who's on the other end(s) */
455         atomic_t            ibp_refcount;       /* # users */
456         int                 ibp_persistence;    /* "known" peer refs */
457         int                 ibp_version;        /* protocol version */
458         struct list_head    ibp_conns;          /* all active connections */
459         struct list_head    ibp_tx_queue;       /* msgs waiting for a conn */
460         int                 ibp_connecting;     /* active connects in progress */
461         int                 ibp_accepting;      /* passive connects in progress */
462         int                 ibp_passivewait;    /* waiting for peer to connect */
463         unsigned long       ibp_passivewait_deadline; /* when passive wait must complete */
464         unsigned long       ibp_reconnect_time; /* when reconnect may be attempted */
465         unsigned long       ibp_reconnect_interval; /* exponential backoff */
466         int                 ibp_error;          /* errno on closing this peer */
467         cfs_time_t          ibp_last_alive;     /* when (in jiffies) I was last alive */
468 } kib_peer_t;
469
470
471 extern kib_data_t      kibnal_data;
472 extern kib_tunables_t  kibnal_tunables;
473
474 /******************************************************************************/
475
/* these are purposely avoiding using local vars so they don't increase
 * stack consumption. */

/* Take a reference on a connection; caller must already hold one. */
#define kibnal_conn_addref(conn)                                \
do {                                                            \
        CDEBUG(D_NET, "conn[%p] (%d)++\n",                      \
               (conn), atomic_read(&(conn)->ibc_refcount));     \
        LASSERT(atomic_read(&(conn)->ibc_refcount) > 0);        \
        atomic_inc(&(conn)->ibc_refcount);                      \
} while (0)
486
/* Drop a connection reference; on the last one, hand the conn to the connd
 * zombie list for teardown (never freed in the caller's context). */
#define kibnal_conn_decref(conn)                                              \
do {                                                                          \
        unsigned long   flags;                                                \
                                                                              \
        CDEBUG(D_NET, "conn[%p] (%d)--\n",                                    \
               (conn), atomic_read(&(conn)->ibc_refcount));                   \
        LASSERT(atomic_read(&(conn)->ibc_refcount) > 0);                      \
        if (atomic_dec_and_test(&(conn)->ibc_refcount)) {                     \
                spin_lock_irqsave(&kibnal_data.kib_connd_lock, flags);        \
                list_add_tail(&(conn)->ibc_list,                              \
                              &kibnal_data.kib_connd_zombies);                \
                wake_up(&kibnal_data.kib_connd_waitq);                        \
                spin_unlock_irqrestore(&kibnal_data.kib_connd_lock, flags);   \
        }                                                                     \
} while (0)
502
/* Take a reference on a peer; caller must already hold one. */
#define kibnal_peer_addref(peer)                                \
do {                                                            \
        CDEBUG(D_NET, "peer[%p] -> %s (%d)++\n",                \
               (peer), libcfs_nid2str((peer)->ibp_nid),         \
               atomic_read (&(peer)->ibp_refcount));            \
        LASSERT(atomic_read(&(peer)->ibp_refcount) > 0);        \
        atomic_inc(&(peer)->ibp_refcount);                      \
} while (0)
511
/* Drop a peer reference; the last one destroys the peer immediately. */
#define kibnal_peer_decref(peer)                                \
do {                                                            \
        CDEBUG(D_NET, "peer[%p] -> %s (%d)--\n",                \
               (peer), libcfs_nid2str((peer)->ibp_nid),         \
               atomic_read (&(peer)->ibp_refcount));            \
        LASSERT(atomic_read(&(peer)->ibp_refcount) > 0);        \
        if (atomic_dec_and_test(&(peer)->ibp_refcount))         \
                kibnal_destroy_peer(peer);                      \
} while (0)
521
522 /******************************************************************************/
523
524 static inline struct list_head *
525 kibnal_nid2peerlist (lnet_nid_t nid)
526 {
527         unsigned int hash = ((unsigned int)nid) % kibnal_data.kib_peer_hash_size;
528
529         return (&kibnal_data.kib_peers [hash]);
530 }
531
532 static inline int
533 kibnal_peer_active(kib_peer_t *peer)
534 {
535         /* Am I in the peer hash table? */
536         return (!list_empty(&peer->ibp_list));
537 }
538
539 static inline int
540 kibnal_peer_connecting(kib_peer_t *peer)
541 {
542         /* Am I expecting a connection to materialise? */
543         return (peer->ibp_connecting != 0 ||
544                 peer->ibp_accepting != 0 ||
545                 peer->ibp_passivewait);
546 }
547
548 static inline void
549 kibnal_queue_tx_locked (kib_tx_t *tx, kib_conn_t *conn)
550 {
551         struct list_head  *q;
552         
553         LASSERT (tx->tx_nwrq > 0);              /* work items set up */
554         LASSERT (!tx->tx_queued);               /* not queued for sending already */
555
556         tx->tx_queued = 1;
557         tx->tx_deadline = jiffies + (*kibnal_tunables.kib_timeout * HZ);
558
559         if (tx->tx_conn == NULL) {
560                 kibnal_conn_addref(conn);
561                 tx->tx_conn = conn;
562                 LASSERT (tx->tx_msg->ibm_type != IBNAL_MSG_PUT_DONE);
563         } else {
564                 LASSERT (tx->tx_conn == conn);
565                 LASSERT (tx->tx_msg->ibm_type == IBNAL_MSG_PUT_DONE);
566         }
567
568         if (conn->ibc_version == IBNAL_MSG_VERSION_RDMAREPLYNOTRSRVD) {
569                 /* All messages have simple credit control */
570                 q = &conn->ibc_tx_queue;
571         } else {
572                 LASSERT (conn->ibc_version == IBNAL_MSG_VERSION);
573                 
574                 switch (tx->tx_msg->ibm_type) {
575                 case IBNAL_MSG_PUT_REQ:
576                 case IBNAL_MSG_GET_REQ:
577                         /* RDMA request: reserve a buffer for the RDMA reply
578                          * before sending */
579                         q = &conn->ibc_tx_queue_rsrvd;
580                         break;
581
582                 case IBNAL_MSG_PUT_NAK:
583                 case IBNAL_MSG_PUT_ACK:
584                 case IBNAL_MSG_PUT_DONE:
585                 case IBNAL_MSG_GET_DONE:
586                         /* RDMA reply/completion: no credits; peer has reserved
587                          * a reply buffer */
588                         q = &conn->ibc_tx_queue_nocred;
589                         break;
590                 
591                 case IBNAL_MSG_NOOP:
592                 case IBNAL_MSG_IMMEDIATE:
593                         /* Otherwise: consume a credit before sending */
594                         q = &conn->ibc_tx_queue;
595                         break;
596                 
597                 default:
598                         LBUG();
599                         q = NULL;
600                 }
601         }
602         
603         list_add_tail(&tx->tx_list, q);
604 }
605
606 static inline int
607 kibnal_send_keepalive(kib_conn_t *conn) 
608 {
609         return (*kibnal_tunables.kib_keepalive > 0) &&
610                 time_after(jiffies, conn->ibc_last_send +
611                            *kibnal_tunables.kib_keepalive*HZ);
612 }
613
/* Component mask for SD service-record queries: match on the service name
 * plus ServiceData8[1..8] (which carry the NID -- see
 * kibnal_service_nid_field/kibnal_set_service_keys below). */
#define KIBNAL_SERVICE_KEY_MASK  (IB_SERVICE_RECORD_COMP_SERVICENAME |          \
                                  IB_SERVICE_RECORD_COMP_SERVICEDATA8_1 |       \
                                  IB_SERVICE_RECORD_COMP_SERVICEDATA8_2 |       \
                                  IB_SERVICE_RECORD_COMP_SERVICEDATA8_3 |       \
                                  IB_SERVICE_RECORD_COMP_SERVICEDATA8_4 |       \
                                  IB_SERVICE_RECORD_COMP_SERVICEDATA8_5 |       \
                                  IB_SERVICE_RECORD_COMP_SERVICEDATA8_6 |       \
                                  IB_SERVICE_RECORD_COMP_SERVICEDATA8_7 |       \
                                  IB_SERVICE_RECORD_COMP_SERVICEDATA8_8)
623
624 static inline __u64*
625 kibnal_service_nid_field(IB_SERVICE_RECORD *srv)
626 {
627         /* must be consistent with KIBNAL_SERVICE_KEY_MASK */
628         return (__u64 *)srv->ServiceData8;
629 }
630
631 static inline void
632 kibnal_set_service_keys(IB_SERVICE_RECORD *srv, lnet_nid_t nid)
633 {
634         char *svc_name = *kibnal_tunables.kib_service_name;
635
636         LASSERT (strlen(svc_name) < sizeof(srv->ServiceName));
637         memset (srv->ServiceName, 0, sizeof(srv->ServiceName));
638         strcpy (srv->ServiceName, svc_name);
639
640         *kibnal_service_nid_field(srv) = cpu_to_le64(nid);
641 }
642
/* CAVEAT EMPTOR: We rely on tx/rx descriptor alignment to allow us to use the
 * lowest 2 bits of the work request id to stash the work item type (the op
 * field is not valid when the wc completes in error). */

#define IBNAL_WID_TX    0                       /* send completion */
#define IBNAL_WID_RX    1                       /* receive completion */
#define IBNAL_WID_RDMA  2                       /* RDMA completion */
#define IBNAL_WID_MASK  3UL                     /* low 2 bits hold the type */
651
652 static inline __u64
653 kibnal_ptr2wreqid (void *ptr, int type)
654 {
655         unsigned long lptr = (unsigned long)ptr;
656
657         LASSERT ((lptr & IBNAL_WID_MASK) == 0);
658         LASSERT ((type & ~IBNAL_WID_MASK) == 0);
659         return (__u64)(lptr | type);
660 }
661
662 static inline void *
663 kibnal_wreqid2ptr (__u64 wreqid)
664 {
665         return (void *)(((unsigned long)wreqid) & ~IBNAL_WID_MASK);
666 }
667
668 static inline int
669 kibnal_wreqid2type (__u64 wreqid)
670 {
671         return (wreqid & IBNAL_WID_MASK);
672 }
673
674 static inline void
675 kibnal_set_conn_state (kib_conn_t *conn, int state)
676 {
677         CDEBUG(D_NET,"%p state %d\n", conn, state);
678         conn->ibc_state = state;
679         mb();
680 }
681
682 #if IBNAL_USE_FMR
683
684 static inline int
685 kibnal_rd_size (kib_rdma_desc_t *rd) 
686 {
687         return rd->rd_nob;
688 }
689
690 #else
691 static inline int
692 kibnal_rd_size (kib_rdma_desc_t *rd)
693 {
694         int   i;
695         int   size;
696         
697         for (i = size = 0; i < rd->rd_nfrag; i++)
698                 size += rd->rd_frags[i].rf_nob;
699         
700         return size;
701 }
702 #endif
703
704 int  kibnal_startup (lnet_ni_t *ni);
705 void kibnal_shutdown (lnet_ni_t *ni);
706 int  kibnal_ctl(lnet_ni_t *ni, unsigned int cmd, void *arg);
707 int  kibnal_send (lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg);
708 int  kibnal_eager_recv (lnet_ni_t *ni, void *private, 
709                         lnet_msg_t *lntmsg, void **new_private);
710 int  kibnal_recv (lnet_ni_t *ni, void *private, lnet_msg_t *msg,
711                   int delayed, unsigned int niov,
712                   struct iovec *iov, lnet_kiov_t *kiov,
713                   unsigned int offset, unsigned int mlen, unsigned int rlen);
714 void kibnal_init_msg(kib_msg_t *msg, int type, int body_nob);
715 void kibnal_pack_msg(kib_msg_t *msg, __u32 version, int credits, 
716                      lnet_nid_t dstnid, __u64 dststamp, __u64 seq);
717 void kibnal_pack_connmsg(kib_msg_t *msg, __u32 version, int nob, int type,
718                          lnet_nid_t dstnid, __u64 dststamp);
719 int  kibnal_unpack_msg(kib_msg_t *msg, __u32 expected_version, int nob);
720 IB_HANDLE kibnal_create_cep(lnet_nid_t nid);
721 int  kibnal_create_peer (kib_peer_t **peerp, lnet_nid_t nid);
722 void kibnal_destroy_peer (kib_peer_t *peer);
723 kib_peer_t *kibnal_find_peer_locked (lnet_nid_t nid);
724 int  kibnal_del_peer (lnet_nid_t nid);
725 void kibnal_peer_alive (kib_peer_t *peer);
726 void kibnal_unlink_peer_locked (kib_peer_t *peer);
727 int  kibnal_add_persistent_peer (lnet_nid_t nid);
728 int  kibnal_close_stale_conns_locked (kib_peer_t *peer,
729                                       __u64 incarnation);
730 int  kibnal_conn_rts(kib_conn_t *conn,
731                      __u32 qpn, __u8 resp_res, __u8 init_depth, __u32 psn);
732 kib_conn_t *kibnal_create_conn (lnet_nid_t nid, int proto_version);
733 void kibnal_destroy_conn (kib_conn_t *conn);
734 void kibnal_listen_callback(IB_HANDLE cep, CM_CONN_INFO *info, void *arg);
735 int  kibnal_alloc_pages (kib_pages_t **pp, int npages);
736 void kibnal_free_pages (kib_pages_t *p);
737 void kibnal_queue_tx (kib_tx_t *tx, kib_conn_t *conn);
738 void kibnal_txlist_done (struct list_head *txlist, int status);
739 int  kibnal_post_receives (kib_conn_t *conn);
740 int  kibnal_init_rdma (kib_tx_t *tx, int type, int nob,
741                        kib_rdma_desc_t *dstrd, __u64 dstcookie);
742 void kibnal_check_sends (kib_conn_t *conn);
743 void kibnal_close_conn_locked (kib_conn_t *conn, int error);
744 int  kibnal_thread_start (int (*fn)(void *arg), void *arg);
745 int  kibnal_scheduler(void *arg);
746 int  kibnal_connd (void *arg);
747 void kibnal_init_tx_msg (kib_tx_t *tx, int type, int body_nob);
748 void kibnal_close_conn (kib_conn_t *conn, int why);
749 void kibnal_start_active_rdma (int type, int status,
750                                kib_rx_t *rx, lnet_msg_t *lntmsg,
751                                unsigned int niov,
752                                struct iovec *iov, lnet_kiov_t *kiov,
753                                unsigned int offset, unsigned int nob);
754 void kibnal_hca_async_callback (void *hca_arg, IB_EVENT_RECORD *ev);
755 void kibnal_hca_callback (void *hca_arg, void *cq_arg);
756 int  kibnal_tunables_init (void);
757 void kibnal_tunables_fini (void);