/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 */
/*
 * Copyright 2008 Sun Microsystems, Inc. All rights reserved
 * Use is subject to license terms.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lnet/klnds/o2iblnd/o2iblnd.h
 *
 * Author: Eric Barton <eric@bartonsoftware.com>
 */

#ifndef EXPORT_SYMTAB
# define EXPORT_SYMTAB
#endif

#ifndef AUTOCONF_INCLUDED
#include <linux/config.h>
#endif
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/stat.h>
#include <linux/errno.h>
#include <linux/smp_lock.h>
#include <linux/unistd.h>
#include <linux/uio.h>
#ifdef HAVE_SCATTERLIST_SETPAGE
# include <linux/scatterlist.h>
#endif

#include <asm/system.h>
#include <asm/uaccess.h>

#include <linux/init.h>
#include <linux/file.h>
#include <linux/stat.h>
#include <linux/list.h>
#include <linux/kmod.h>
#include <linux/sysctl.h>
#include <linux/random.h>
#include <linux/pci.h>

#define DEBUG_SUBSYSTEM S_LND

#include <libcfs/kp30.h>
#include <lnet/lnet.h>
#include <lnet/lib-lnet.h>
#include <lnet/lnet-sysctl.h>

#include <rdma/rdma_cm.h>
#include <rdma/ib_cm.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_fmr_pool.h>

/* tunables fixed at compile time */
#ifdef CONFIG_SMP
# define IBLND_N_SCHED             num_online_cpus()   /* # schedulers */
#else
# define IBLND_N_SCHED             1                   /* # schedulers */
#endif

#define IBLND_PEER_HASH_SIZE       101                 /* # peer lists */
#define IBLND_RESCHED              100                 /* # scheduler loops before reschedule */

typedef struct
{
        unsigned int     *kib_service;          /* IB service number */
        int              *kib_min_reconnect_interval; /* first failed connection retry... */
        int              *kib_max_reconnect_interval; /* ...exponentially increasing to this */
        int              *kib_cksum;            /* checksum kib_msg_t? */
        int              *kib_timeout;          /* comms timeout (seconds) */
        int              *kib_keepalive;        /* keepalive timeout (seconds) */
        int              *kib_ntx;              /* # tx descs */
        int              *kib_credits;          /* # concurrent sends */
        int              *kib_peertxcredits;    /* # concurrent sends to 1 peer */
        int              *kib_peerrtrcredits;   /* # per-peer router buffer credits */
        int              *kib_peercredits_hiw;  /* # when eagerly to return credits */
        int              *kib_peertimeout;      /* seconds to consider peer dead */
        char            **kib_default_ipif;     /* default IPoIB interface */
        int              *kib_retry_count;
        int              *kib_rnr_retry_count;
        int              *kib_concurrent_sends; /* send work queue sizing */
        int              *kib_ib_mtu;           /* IB MTU */
        int              *kib_map_on_demand;    /* map-on-demand if RD has more fragments
                                                 * than this value, 0 disable map-on-demand */
        int              *kib_pmr_pool_size;    /* # physical MR in pool */
        int              *kib_fmr_pool_size;    /* # FMRs in pool */
        int              *kib_fmr_flush_trigger; /* When to trigger FMR flush */
        int              *kib_fmr_cache;        /* enable FMR pool cache? */
#if defined(CONFIG_SYSCTL) && !CFS_SYSFS_MODULE_PARM
        cfs_sysctl_table_header_t *kib_sysctl;  /* sysctl interface */
#endif
} kib_tunables_t;

extern kib_tunables_t  kiblnd_tunables;

#define IBLND_MSG_QUEUE_SIZE_V1      8          /* V1 only : # messages/RDMAs in-flight */
#define IBLND_CREDIT_HIGHWATER_V1    7          /* V1 only : when eagerly to return credits */

#define IBLND_CREDITS_DEFAULT        8          /* default # of peer credits */
#define IBLND_CREDITS_MAX          ((typeof(((kib_msg_t*) 0)->ibm_credits)) - 1)  /* Max # of peer credits */

#define IBLND_MSG_QUEUE_SIZE(v)    ((v) == IBLND_MSG_VERSION_1 ? \
                                     IBLND_MSG_QUEUE_SIZE_V1 :   \
                                     *kiblnd_tunables.kib_peertxcredits) /* # messages/RDMAs in-flight */
#define IBLND_CREDITS_HIGHWATER(v) ((v) == IBLND_MSG_VERSION_1 ? \
                                     IBLND_CREDIT_HIGHWATER_V1 : \
                                     *kiblnd_tunables.kib_peercredits_hiw) /* when eagerly to return credits */
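/* Illustrative reading of the two macros above: a V1 peer is always granted a
 * queue of IBLND_MSG_QUEUE_SIZE_V1 (8) credits and we eagerly return them once
 * 7 are outstanding; a V2 peer takes both values from the kib_peertxcredits /
 * kib_peercredits_hiw tunables. */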

static inline int
kiblnd_concurrent_sends_v1(void)
{
        if (*kiblnd_tunables.kib_concurrent_sends > IBLND_MSG_QUEUE_SIZE_V1 * 2)
                return IBLND_MSG_QUEUE_SIZE_V1 * 2;

        if (*kiblnd_tunables.kib_concurrent_sends < IBLND_MSG_QUEUE_SIZE_V1 / 2)
                return IBLND_MSG_QUEUE_SIZE_V1 / 2;

        return *kiblnd_tunables.kib_concurrent_sends;
}

#define IBLND_CONCURRENT_SENDS(v)  ((v) == IBLND_MSG_VERSION_1 ? \
                                     kiblnd_concurrent_sends_v1() : \
                                     *kiblnd_tunables.kib_concurrent_sends)

/* 2 OOB shall suffice for 1 keepalive and 1 returning credits */
#define IBLND_OOB_CAPABLE(v)       ((v) != IBLND_MSG_VERSION_1)
#define IBLND_OOB_MSGS(v)          (IBLND_OOB_CAPABLE(v) ? 2 : 0)

#define IBLND_MSG_SIZE             (4<<10)              /* max size of queued messages (inc hdr) */
#define IBLND_MAX_RDMA_FRAGS        LNET_MAX_IOV        /* max # of fragments supported */
#define IBLND_CFG_RDMA_FRAGS       (*kiblnd_tunables.kib_map_on_demand != 0 ? \
                                     *kiblnd_tunables.kib_map_on_demand :     \
                                     IBLND_MAX_RDMA_FRAGS)  /* max # of fragments configured by user */
#define IBLND_RDMA_FRAGS(v)        ((v) == IBLND_MSG_VERSION_1 ? \
                                     IBLND_MAX_RDMA_FRAGS : IBLND_CFG_RDMA_FRAGS)
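/* e.g. setting map-on-demand to 32 caps a V2 RDMA descriptor at 32 fragments,
 * while 0 (disabled) and all V1 connections use the full LNET_MAX_IOV
 * (illustrative values only). */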

/************************/
/* derived constants... */

/* TX messages (shared by all connections) */
#define IBLND_TX_MSGS()            (*kiblnd_tunables.kib_ntx)

/* RX messages (per connection) */
#define IBLND_RX_MSGS(v)            (IBLND_MSG_QUEUE_SIZE(v) * 2 + IBLND_OOB_MSGS(v))
#define IBLND_RX_MSG_BYTES(v)       (IBLND_RX_MSGS(v) * IBLND_MSG_SIZE)
#define IBLND_RX_MSG_PAGES(v)      ((IBLND_RX_MSG_BYTES(v) + PAGE_SIZE - 1) / PAGE_SIZE)

/* WRs and CQEs (per connection) */
#define IBLND_RECV_WRS(v)            IBLND_RX_MSGS(v)
#define IBLND_SEND_WRS(v)          ((IBLND_RDMA_FRAGS(v) + 1) * IBLND_CONCURRENT_SENDS(v))
#define IBLND_CQ_ENTRIES(v)         (IBLND_RECV_WRS(v) + IBLND_SEND_WRS(v))
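/* Worked example of the sizing above (V2, assuming 8 peer tx credits and
 * map-on-demand disabled):
 *   IBLND_RX_MSGS    = 8 * 2 + 2 = 18 posted receives per connection
 *   IBLND_RECV_WRS   = 18
 *   IBLND_SEND_WRS   = (LNET_MAX_IOV + 1) * IBLND_CONCURRENT_SENDS(v)
 *   IBLND_CQ_ENTRIES = IBLND_RECV_WRS + IBLND_SEND_WRS
 * The real numbers are taken from the tunables at runtime. */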

typedef struct
{
        struct list_head     ibd_list;          /* chain on kib_devs */
        __u32                ibd_ifip;          /* IPoIB interface IP */
        char                 ibd_ifname[32];    /* IPoIB interface name */
        int                  ibd_nnets;         /* # nets extant */

        struct rdma_cm_id   *ibd_cmid;          /* IB listener (bound to 1 device) */
        struct ib_pd        *ibd_pd;            /* PD for the device */
        int                  ibd_page_shift;    /* page shift of current HCA */
        int                  ibd_page_size;     /* page size of current HCA */
        __u64                ibd_page_mask;     /* page mask of current HCA */
        int                  ibd_mr_shift;      /* bits shift of max MR size */
        __u64                ibd_mr_size;       /* size of MR */

        int                  ibd_nmrs;          /* # of global MRs */
        struct ib_mr       **ibd_mrs;           /* MR for non RDMA I/O */
} kib_dev_t;

#define IBLND_POOL_DEADLINE   300               /* # of seconds to keep pool alive */

typedef struct
{
        struct ib_device    *ibp_device;        /* device for mapping */
        int                  ibp_npages;        /* # pages */
        struct page         *ibp_pages[0];      /* page array */
} kib_pages_t;

typedef struct
{
        struct list_head     pmr_list;          /* chain node */
        struct ib_phys_buf  *pmr_ipb;           /* physical buffer */
        struct ib_mr        *pmr_mr;            /* IB MR */
        struct kib_pmr_pool *pmr_pool;          /* owner of this MR */
        __u64                pmr_iova;          /* Virtual I/O address */
        int                  pmr_refcount;      /* reference count */
} kib_phys_mr_t;

struct kib_pool;
struct kib_poolset;

typedef int  (*kib_ps_pool_create_t)(struct kib_poolset *ps, int inc, struct kib_pool **pp_po);
typedef void (*kib_ps_pool_destroy_t)(struct kib_pool *po);
typedef void (*kib_ps_node_init_t)(struct kib_pool *po, struct list_head *node);
typedef void (*kib_ps_node_fini_t)(struct kib_pool *po, struct list_head *node);

#define IBLND_POOL_NAME_LEN     32

typedef struct kib_poolset
{
        spinlock_t            ps_lock;          /* serialize */
        struct kib_net       *ps_net;           /* network it belongs to */
        char                  ps_name[IBLND_POOL_NAME_LEN]; /* pool set name */
        struct list_head      ps_pool_list;     /* list of pools */
        cfs_time_t            ps_next_retry;    /* time stamp for retry if failed to allocate */
        int                   ps_increasing;    /* is allocating new pool */
        int                   ps_pool_size;     /* new pool size */

        kib_ps_pool_create_t  ps_pool_create;   /* create a new pool */
        kib_ps_pool_destroy_t ps_pool_destroy;  /* destroy a pool */
        kib_ps_node_init_t    ps_node_init;     /* initialize new allocated node */
        kib_ps_node_fini_t    ps_node_fini;     /* finalize node */
} kib_poolset_t;

typedef struct kib_pool
{
        struct list_head      po_list;          /* chain on pool list */
        struct list_head      po_free_list;     /* pre-allocated node */
        kib_poolset_t        *po_owner;         /* pool_set of this pool */
        cfs_time_t            po_deadline;      /* deadline of this pool */
        int                   po_allocated;     /* # of elements in use */
        int                   po_size;          /* # of pre-allocated elements */
} kib_pool_t;

typedef struct {
        kib_poolset_t         tps_poolset;      /* pool-set */
        __u64                 tps_next_tx_cookie; /* cookie of TX */
} kib_tx_poolset_t;

typedef struct {
        kib_pool_t            tpo_pool;         /* pool */
        struct kib_tx        *tpo_tx_descs;     /* all the tx descriptors */
        kib_pages_t          *tpo_tx_pages;     /* premapped tx msg pages */
} kib_tx_pool_t;

typedef struct {
        kib_poolset_t         pps_poolset;      /* pool-set */
} kib_pmr_poolset_t;

typedef struct kib_pmr_pool {
        kib_pool_t            ppo_pool;         /* pool */
} kib_pmr_pool_t;

typedef struct
{
        spinlock_t            fps_lock;         /* serialize */
        struct kib_net       *fps_net;          /* IB network */
        struct list_head      fps_pool_list;    /* FMR pool list */
        __u64                 fps_version;      /* validity stamp */
        int                   fps_increasing;   /* is allocating new pool */
        cfs_time_t            fps_next_retry;   /* time stamp for retry if failed to allocate */
} kib_fmr_poolset_t;

typedef struct
{
        struct list_head      fpo_list;         /* chain on pool list */
        kib_fmr_poolset_t    *fpo_owner;        /* owner of this pool */
        struct ib_fmr_pool   *fpo_fmr_pool;     /* IB FMR pool */
        cfs_time_t            fpo_deadline;     /* deadline of this pool */
        int                   fpo_map_count;    /* # of mapped FMR */
} kib_fmr_pool_t;

typedef struct {
        struct ib_pool_fmr   *fmr_pfmr;         /* IB pool fmr */
        kib_fmr_pool_t       *fmr_pool;         /* pool of FMR */
} kib_fmr_t;

typedef struct kib_net
{
        __u64                ibn_incarnation;   /* my epoch */
        int                  ibn_init;          /* initialisation state */
        int                  ibn_shutdown;      /* shutting down? */
        unsigned int         ibn_with_fmr:1;    /* FMR? */
        unsigned int         ibn_with_pmr:1;    /* PMR? */

        atomic_t             ibn_npeers;        /* # peers extant */
        atomic_t             ibn_nconns;        /* # connections extant */

        kib_tx_poolset_t     ibn_tx_ps;         /* tx pool-set */
        kib_fmr_poolset_t    ibn_fmr_ps;        /* fmr pool-set */
        kib_pmr_poolset_t    ibn_pmr_ps;        /* pmr pool-set */

        kib_dev_t           *ibn_dev;           /* underlying IB device */
} kib_net_t;

typedef struct
{
        int                  kib_init;          /* initialisation state */
        int                  kib_shutdown;      /* shut down? */
        struct list_head     kib_devs;          /* IB devices extant */
        atomic_t             kib_nthreads;      /* # live threads */
        rwlock_t             kib_global_lock;   /* stabilize net/dev/peer/conn ops */

        struct list_head    *kib_peers;         /* hash table of all my known peers */
        int                  kib_peer_hash_size; /* size of kib_peers */

        void                *kib_connd;         /* the connd task (serialisation assertions) */
        struct list_head     kib_connd_conns;   /* connections to setup/teardown */
        struct list_head     kib_connd_zombies; /* connections with zero refcount */
        wait_queue_head_t    kib_connd_waitq;   /* connection daemon sleeps here */
        spinlock_t           kib_connd_lock;    /* serialise */

        wait_queue_head_t    kib_sched_waitq;   /* schedulers sleep here */
        struct list_head     kib_sched_conns;   /* conns to check for rx completions */
        spinlock_t           kib_sched_lock;    /* serialise */

        struct ib_qp_attr    kib_error_qpa;     /* QP->ERROR */
} kib_data_t;

#define IBLND_INIT_NOTHING         0
#define IBLND_INIT_DATA            1
#define IBLND_INIT_ALL             2

/************************************************************************
 * IB Wire message format.
 * These are sent in sender's byte order (i.e. receiver flips).
 */

typedef struct kib_connparams
{
        __u16             ibcp_queue_depth;
        __u16             ibcp_max_frags;
        __u32             ibcp_max_msg_size;
} WIRE_ATTR kib_connparams_t;

typedef struct
{
        lnet_hdr_t        ibim_hdr;             /* portals header */
        char              ibim_payload[0];      /* piggy-backed payload */
} WIRE_ATTR kib_immediate_msg_t;

typedef struct
{
        __u32             rf_nob;               /* # bytes this frag */
        __u64             rf_addr;              /* CAVEAT EMPTOR: misaligned!! */
} WIRE_ATTR kib_rdma_frag_t;

typedef struct
{
        __u32             rd_key;               /* local/remote key */
        __u32             rd_nfrags;            /* # fragments */
        kib_rdma_frag_t   rd_frags[0];          /* buffer frags */
} WIRE_ATTR kib_rdma_desc_t;

typedef struct
{
        lnet_hdr_t        ibprm_hdr;            /* portals header */
        __u64             ibprm_cookie;         /* opaque completion cookie */
} WIRE_ATTR kib_putreq_msg_t;

typedef struct
{
        __u64             ibpam_src_cookie;     /* reflected completion cookie */
        __u64             ibpam_dst_cookie;     /* opaque completion cookie */
        kib_rdma_desc_t   ibpam_rd;             /* sender's sink buffer */
} WIRE_ATTR kib_putack_msg_t;

typedef struct
{
        lnet_hdr_t        ibgm_hdr;             /* portals header */
        __u64             ibgm_cookie;          /* opaque completion cookie */
        kib_rdma_desc_t   ibgm_rd;              /* rdma descriptor */
} WIRE_ATTR kib_get_msg_t;

typedef struct
{
        __u64             ibcm_cookie;          /* opaque completion cookie */
        __s32             ibcm_status;          /* < 0 failure: >= 0 length */
} WIRE_ATTR kib_completion_msg_t;

typedef struct
{
        /* First 2 fields fixed FOR ALL TIME */
        __u32             ibm_magic;            /* I'm an openibnal message */
        __u16             ibm_version;          /* this is my version number */

        __u8              ibm_type;             /* msg type */
        __u8              ibm_credits;          /* returned credits */
        __u32             ibm_nob;              /* # bytes in whole message */
        __u32             ibm_cksum;            /* checksum (0 == no checksum) */
        __u64             ibm_srcnid;           /* sender's NID */
        __u64             ibm_srcstamp;         /* sender's incarnation */
        __u64             ibm_dstnid;           /* destination's NID */
        __u64             ibm_dststamp;         /* destination's incarnation */

        union {
                kib_connparams_t      connparams;
                kib_immediate_msg_t   immediate;
                kib_putreq_msg_t      putreq;
                kib_putack_msg_t      putack;
                kib_get_msg_t         get;
                kib_completion_msg_t  completion;
        } WIRE_ATTR       ibm_u;
} WIRE_ATTR kib_msg_t;

#define IBLND_MSG_MAGIC     LNET_PROTO_IB_MAGIC /* unique magic */

#define IBLND_MSG_VERSION_1         0x11
#define IBLND_MSG_VERSION_2         0x12
#define IBLND_MSG_VERSION           IBLND_MSG_VERSION_2

#define IBLND_MSG_CONNREQ           0xc0        /* connection request */
#define IBLND_MSG_CONNACK           0xc1        /* connection acknowledge */
#define IBLND_MSG_NOOP              0xd0        /* nothing (just credits) */
#define IBLND_MSG_IMMEDIATE         0xd1        /* immediate */
#define IBLND_MSG_PUT_REQ           0xd2        /* putreq (src->sink) */
#define IBLND_MSG_PUT_NAK           0xd3        /* completion (sink->src) */
#define IBLND_MSG_PUT_ACK           0xd4        /* putack (sink->src) */
#define IBLND_MSG_PUT_DONE          0xd5        /* completion (src->sink) */
#define IBLND_MSG_GET_REQ           0xd6        /* getreq (sink->src) */
#define IBLND_MSG_GET_DONE          0xd7        /* completion (src->sink: all OK) */
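/* Exchanges implied by the direction comments above (summary only; the .c
 * implementation is authoritative):
 *   PUT: src sends PUT_REQ; the sink answers PUT_ACK carrying its sink
 *        descriptor (or PUT_NAK); src then RDMAs the payload and finishes
 *        with PUT_DONE.
 *   GET: the sink sends GET_REQ carrying its descriptor; src RDMAs the reply
 *        data and finishes with GET_DONE. */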

typedef struct {
        __u32             ibr_magic;            /* sender's magic */
        __u16             ibr_version;          /* sender's version */
        __u8              ibr_why;              /* reject reason */
        __u8              ibr_padding;          /* padding */
        __u64             ibr_incarnation;      /* incarnation of peer */
        kib_connparams_t  ibr_cp;               /* connection parameters */
} WIRE_ATTR kib_rej_t;

/* connection rejection reasons */
#define IBLND_REJECT_CONN_RACE       1          /* You lost connection race */
#define IBLND_REJECT_NO_RESOURCES    2          /* Out of memory/conns etc */
#define IBLND_REJECT_FATAL           3          /* Anything else */

#define IBLND_REJECT_CONN_UNCOMPAT   4          /* incompatible version peer */
#define IBLND_REJECT_CONN_STALE      5          /* stale peer */

#define IBLND_REJECT_RDMA_FRAGS      6          /* Fatal: peer's rdma frags can't match mine */
#define IBLND_REJECT_MSG_QUEUE_SIZE  7          /* Fatal: peer's msg queue size can't match mine */

/***********************************************************************/

typedef struct kib_rx                           /* receive message */
{
        struct list_head     rx_list;           /* queue for attention */
        struct kib_conn     *rx_conn;           /* owning conn */
        int                  rx_nob;            /* # bytes received (-1 while posted) */
        enum ib_wc_status    rx_status;         /* completion status */
        kib_msg_t           *rx_msg;            /* message buffer (host vaddr) */
        __u64                rx_msgaddr;        /* message buffer (I/O addr) */
        DECLARE_PCI_UNMAP_ADDR (rx_msgunmap);   /* for dma_unmap_single() */
        struct ib_recv_wr    rx_wrq;            /* receive work item... */
        struct ib_sge        rx_sge;            /* ...and its memory */
} kib_rx_t;

#define IBLND_POSTRX_DONT_POST    0             /* don't post */
#define IBLND_POSTRX_NO_CREDIT    1             /* post: no credits */
#define IBLND_POSTRX_PEER_CREDIT  2             /* post: give peer back 1 credit */
#define IBLND_POSTRX_RSRVD_CREDIT 3             /* post: give myself back 1 reserved credit */

typedef struct kib_tx                           /* transmit message */
{
        struct list_head     tx_list;           /* queue on idle_txs ibc_tx_queue etc. */
        kib_tx_pool_t       *tx_pool;           /* pool I'm from */
        struct kib_conn     *tx_conn;           /* owning conn */
        short                tx_sending;        /* # tx callbacks outstanding */
        short                tx_queued;         /* queued for sending */
        short                tx_waiting;        /* waiting for peer */
        int                  tx_status;         /* LNET completion status */
        unsigned long        tx_deadline;       /* completion deadline */
        __u64                tx_cookie;         /* completion cookie */
        lnet_msg_t          *tx_lntmsg[2];      /* lnet msgs to finalize on completion */
        kib_msg_t           *tx_msg;            /* message buffer (host vaddr) */
        __u64                tx_msgaddr;        /* message buffer (I/O addr) */
        DECLARE_PCI_UNMAP_ADDR (tx_msgunmap);   /* for dma_unmap_single() */
        int                  tx_nwrq;           /* # send work items */
        struct ib_send_wr   *tx_wrq;            /* send work items... */
        struct ib_sge       *tx_sge;            /* ...and their memory */
        kib_rdma_desc_t     *tx_rd;             /* rdma descriptor */
        int                  tx_nfrags;         /* # entries in... */
        struct scatterlist  *tx_frags;          /* dma_map_sg descriptor */
        __u64               *tx_pages;          /* rdma phys page addrs */
        union {
                kib_phys_mr_t *pmr;             /* MR for physical buffer */
                kib_fmr_t      fmr;             /* FMR */
        }                    tx_u;
        int                  tx_dmadir;         /* dma direction */
} kib_tx_t;

typedef struct kib_connvars
{
        /* connection-in-progress variables */
        kib_msg_t            cv_msg;
} kib_connvars_t;

typedef struct kib_conn
{
        struct kib_peer     *ibc_peer;          /* owning peer */
        struct list_head     ibc_list;          /* stash on peer's conn list */
        struct list_head     ibc_sched_list;    /* schedule for attention */
        __u16                ibc_version;       /* version of connection */
        __u64                ibc_incarnation;   /* which instance of the peer */
        atomic_t             ibc_refcount;      /* # users */
        int                  ibc_state;         /* what's happening */
        int                  ibc_nsends_posted; /* # uncompleted sends */
        int                  ibc_noops_posted;  /* # uncompleted NOOPs */
        int                  ibc_credits;       /* # credits I have */
        int                  ibc_outstanding_credits; /* # credits to return */
        int                  ibc_reserved_credits; /* # ACK/DONE msg credits */
        int                  ibc_comms_error;   /* set on comms error */
        int                  ibc_nrx:16;        /* receive buffers owned */
        int                  ibc_scheduled:1;   /* scheduled for attention */
        int                  ibc_ready:1;       /* CQ callback fired */
        unsigned long        ibc_last_send;     /* time of last send */
        struct list_head     ibc_early_rxs;     /* rxs completed before ESTABLISHED */
        struct list_head     ibc_tx_noops;      /* IBLND_MSG_NOOPs for IBLND_MSG_VERSION_1 */
        struct list_head     ibc_tx_queue;      /* sends that need a credit */
        struct list_head     ibc_tx_queue_nocred; /* sends that don't need a credit */
        struct list_head     ibc_tx_queue_rsrvd; /* sends that need to reserve an ACK/DONE msg */
        struct list_head     ibc_active_txs;    /* active tx awaiting completion */
        spinlock_t           ibc_lock;          /* serialise */
        kib_rx_t            *ibc_rxs;           /* the rx descs */
        kib_pages_t         *ibc_rx_pages;      /* premapped rx msg pages */

        struct rdma_cm_id   *ibc_cmid;          /* CM id */
        struct ib_cq        *ibc_cq;            /* completion queue */

        kib_connvars_t      *ibc_connvars;      /* in-progress connection state */
} kib_conn_t;

#define IBLND_CONN_INIT               0         /* being initialised */
#define IBLND_CONN_ACTIVE_CONNECT     1         /* active sending req */
#define IBLND_CONN_PASSIVE_WAIT       2         /* passive waiting for rtu */
#define IBLND_CONN_ESTABLISHED        3         /* connection established */
#define IBLND_CONN_CLOSING            4         /* being closed */
#define IBLND_CONN_DISCONNECTED       5         /* disconnected */
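/* Typical lifecycle: INIT -> ACTIVE_CONNECT (we initiated) or PASSIVE_WAIT
 * (peer initiated) -> ESTABLISHED -> CLOSING -> DISCONNECTED. */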

typedef struct kib_peer
{
        struct list_head     ibp_list;          /* stash on global peer list */
        lnet_nid_t           ibp_nid;           /* who's on the other end(s) */
        lnet_ni_t           *ibp_ni;            /* LNet interface */
        atomic_t             ibp_refcount;      /* # users */
        struct list_head     ibp_conns;         /* all active connections */
        struct list_head     ibp_tx_queue;      /* msgs waiting for a conn */
        __u16                ibp_version;       /* version of peer */
        __u64                ibp_incarnation;   /* incarnation of peer */
        int                  ibp_connecting;    /* current active connection attempts */
        int                  ibp_accepting;     /* current passive connection attempts */
        int                  ibp_error;         /* errno on closing this peer */
        cfs_time_t           ibp_last_alive;    /* when (in jiffies) I was last alive */
} kib_peer_t;

extern kib_data_t      kiblnd_data;

#define kiblnd_conn_addref(conn)                                \
do {                                                            \
        CDEBUG(D_NET, "conn[%p] (%d)++\n",                      \
               (conn), atomic_read(&(conn)->ibc_refcount));     \
        LASSERT(atomic_read(&(conn)->ibc_refcount) > 0);        \
        atomic_inc(&(conn)->ibc_refcount);                      \
} while (0)

#define kiblnd_conn_decref(conn)                                              \
do {                                                                          \
        unsigned long   flags;                                                \
                                                                              \
        CDEBUG(D_NET, "conn[%p] (%d)--\n",                                    \
               (conn), atomic_read(&(conn)->ibc_refcount));                   \
        LASSERT(atomic_read(&(conn)->ibc_refcount) > 0);                      \
        if (atomic_dec_and_test(&(conn)->ibc_refcount)) {                     \
                spin_lock_irqsave(&kiblnd_data.kib_connd_lock, flags);        \
                list_add_tail(&(conn)->ibc_list,                              \
                              &kiblnd_data.kib_connd_zombies);                \
                wake_up(&kiblnd_data.kib_connd_waitq);                        \
                spin_unlock_irqrestore(&kiblnd_data.kib_connd_lock, flags);   \
        }                                                                     \
} while (0)

#define kiblnd_peer_addref(peer)                                \
do {                                                            \
        CDEBUG(D_NET, "peer[%p] -> %s (%d)++\n",                \
               (peer), libcfs_nid2str((peer)->ibp_nid),         \
               atomic_read (&(peer)->ibp_refcount));            \
        LASSERT(atomic_read(&(peer)->ibp_refcount) > 0);        \
        atomic_inc(&(peer)->ibp_refcount);                      \
} while (0)

#define kiblnd_peer_decref(peer)                                \
do {                                                            \
        CDEBUG(D_NET, "peer[%p] -> %s (%d)--\n",                \
               (peer), libcfs_nid2str((peer)->ibp_nid),         \
               atomic_read (&(peer)->ibp_refcount));            \
        LASSERT(atomic_read(&(peer)->ibp_refcount) > 0);        \
        if (atomic_dec_and_test(&(peer)->ibp_refcount))         \
                kiblnd_destroy_peer(peer);                      \
} while (0)

static inline struct list_head *
kiblnd_nid2peerlist (lnet_nid_t nid)
{
        unsigned int hash = ((unsigned int)nid) % kiblnd_data.kib_peer_hash_size;

        return (&kiblnd_data.kib_peers [hash]);
}

static inline int
kiblnd_peer_active (kib_peer_t *peer)
{
        /* Am I in the peer hash table? */
        return (!list_empty(&peer->ibp_list));
}

static inline kib_conn_t *
kiblnd_get_conn_locked (kib_peer_t *peer)
{
        LASSERT (!list_empty(&peer->ibp_conns));

        /* just return the first connection */
        return list_entry(peer->ibp_conns.next, kib_conn_t, ibc_list);
}

static inline int
kiblnd_send_keepalive(kib_conn_t *conn)
{
        return (*kiblnd_tunables.kib_keepalive > 0) &&
                time_after(jiffies, conn->ibc_last_send +
                           *kiblnd_tunables.kib_keepalive*HZ);
}
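/* e.g. with a keepalive of 100 (seconds), a NOOP is forced once nothing has
 * been sent on the connection for 100 * HZ jiffies (illustrative value). */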

static inline int
kiblnd_send_noop(kib_conn_t *conn)
{
        LASSERT (conn->ibc_state >= IBLND_CONN_ESTABLISHED);

        if (conn->ibc_outstanding_credits <
            IBLND_CREDITS_HIGHWATER(conn->ibc_version) &&
            !kiblnd_send_keepalive(conn))
                return 0; /* No need to send NOOP */

        if (IBLND_OOB_CAPABLE(conn->ibc_version)) {
                if (!list_empty(&conn->ibc_tx_queue_nocred))
                        return 0; /* NOOP can be piggybacked */

                /* No tx to piggyback NOOP onto or no credit to send a tx */
                return (list_empty(&conn->ibc_tx_queue) || conn->ibc_credits == 0);
        }

        if (!list_empty(&conn->ibc_tx_noops) ||         /* NOOP already queued */
            !list_empty(&conn->ibc_tx_queue_nocred) ||  /* can be piggybacked */
            conn->ibc_credits == 0)                     /* no credit */
                return 0;

        if (conn->ibc_credits == 1 &&                   /* last credit reserved for */
            conn->ibc_outstanding_credits == 0)         /* giving back credits */
                return 0;

        /* No tx to piggyback NOOP onto or no credit to send a tx */
        return (list_empty(&conn->ibc_tx_queue) || conn->ibc_credits == 1);
}
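/* In short: queue a NOOP only when credits must be returned (or a keepalive is
 * due) and there is no normal tx it could piggyback on. */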

static inline void
kiblnd_abort_receives(kib_conn_t *conn)
{
        ib_modify_qp(conn->ibc_cmid->qp,
                     &kiblnd_data.kib_error_qpa, IB_QP_STATE);
}

static inline const char *
kiblnd_queue2str (kib_conn_t *conn, struct list_head *q)
{
        if (q == &conn->ibc_tx_queue)
                return "tx_queue";

        if (q == &conn->ibc_tx_queue_rsrvd)
                return "tx_queue_rsrvd";

        if (q == &conn->ibc_tx_queue_nocred)
                return "tx_queue_nocred";

        if (q == &conn->ibc_active_txs)
                return "active_txs";

        return NULL;
}

/* CAVEAT EMPTOR: We rely on descriptor alignment to allow us to use the
 * lowest bits of the work request id to stash the work item type. */

#define IBLND_WID_TX    0
#define IBLND_WID_RDMA  1
#define IBLND_WID_RX    2
#define IBLND_WID_MASK  3UL

static inline __u64
kiblnd_ptr2wreqid (void *ptr, int type)
{
        unsigned long lptr = (unsigned long)ptr;

        LASSERT ((lptr & IBLND_WID_MASK) == 0);
        LASSERT ((type & ~IBLND_WID_MASK) == 0);
        return (__u64)(lptr | type);
}

static inline void *
kiblnd_wreqid2ptr (__u64 wreqid)
{
        return (void *)(((unsigned long)wreqid) & ~IBLND_WID_MASK);
}

static inline int
kiblnd_wreqid2type (__u64 wreqid)
{
        return (wreqid & IBLND_WID_MASK);
}
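/* Usage sketch (assumed, not prescribed by this header): a tx is posted with
 * wr_id = kiblnd_ptr2wreqid(tx, IBLND_WID_TX); the completion handler then
 * recovers the descriptor with kiblnd_wreqid2ptr(wc.wr_id) and dispatches on
 * kiblnd_wreqid2type(wc.wr_id). */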

static inline void
kiblnd_set_conn_state (kib_conn_t *conn, int state)
{
        conn->ibc_state = state;
        mb();
}

static inline void
kiblnd_init_msg (kib_msg_t *msg, int type, int body_nob)
{
        msg->ibm_type = type;
        msg->ibm_nob  = offsetof(kib_msg_t, ibm_u) + body_nob;
}

static inline int
kiblnd_rd_size (kib_rdma_desc_t *rd)
{
        int   i;
        int   size;

        for (i = size = 0; i < rd->rd_nfrags; i++)
                size += rd->rd_frags[i].rf_nob;

        return size;
}

static inline __u64
kiblnd_rd_frag_addr(kib_rdma_desc_t *rd, int index)
{
        return rd->rd_frags[index].rf_addr;
}

static inline __u32
kiblnd_rd_frag_size(kib_rdma_desc_t *rd, int index)
{
        return rd->rd_frags[index].rf_nob;
}

static inline __u32
kiblnd_rd_frag_key(kib_rdma_desc_t *rd, int index)
{
        return rd->rd_key;
}

static inline int
kiblnd_rd_consume_frag(kib_rdma_desc_t *rd, int index, __u32 nob)
{
        if (nob < rd->rd_frags[index].rf_nob) {
                rd->rd_frags[index].rf_addr += nob;
                rd->rd_frags[index].rf_nob  -= nob;
        } else {
                index++;
        }

        return index;
}

static inline int
kiblnd_rd_msg_size(kib_rdma_desc_t *rd, int msgtype, int n)
{
        LASSERT (msgtype == IBLND_MSG_GET_REQ ||
                 msgtype == IBLND_MSG_PUT_ACK);

        return msgtype == IBLND_MSG_GET_REQ ?
               offsetof(kib_get_msg_t, ibgm_rd.rd_frags[n]) :
               offsetof(kib_putack_msg_t, ibpam_rd.rd_frags[n]);
}
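/* e.g. a GET_REQ whose descriptor holds n fragments occupies
 * offsetof(kib_get_msg_t, ibgm_rd.rd_frags[n]) bytes of message body, which a
 * caller would pass as body_nob when initialising the message (usage sketch). */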

#ifdef HAVE_OFED_IB_DMA_MAP

static inline int
kiblnd_dma_mapping_error(struct ib_device *dev, u64 dma_addr)
{
        return ib_dma_mapping_error(dev, dma_addr);
}

static inline __u64 kiblnd_dma_map_single(struct ib_device *dev,
                                          void *msg, size_t size,
                                          enum dma_data_direction direction)
{
        return ib_dma_map_single(dev, msg, size, direction);
}

static inline void kiblnd_dma_unmap_single(struct ib_device *dev,
                                           __u64 addr, size_t size,
                                           enum dma_data_direction direction)
{
        ib_dma_unmap_single(dev, addr, size, direction);
}

#define KIBLND_UNMAP_ADDR_SET(p, m, a)  do {} while (0)
#define KIBLND_UNMAP_ADDR(p, m, a)      (a)

static inline int kiblnd_dma_map_sg(struct ib_device *dev,
                                    struct scatterlist *sg, int nents,
                                    enum dma_data_direction direction)
{
        return ib_dma_map_sg(dev, sg, nents, direction);
}

static inline void kiblnd_dma_unmap_sg(struct ib_device *dev,
                                       struct scatterlist *sg, int nents,
                                       enum dma_data_direction direction)
{
        ib_dma_unmap_sg(dev, sg, nents, direction);
}

static inline __u64 kiblnd_sg_dma_address(struct ib_device *dev,
                                          struct scatterlist *sg)
{
        return ib_sg_dma_address(dev, sg);
}

static inline unsigned int kiblnd_sg_dma_len(struct ib_device *dev,
                                             struct scatterlist *sg)
{
        return ib_sg_dma_len(dev, sg);
}

/* XXX We use KIBLND_CONN_PARAM(e) as writable buffer, it's not strictly
 * right because OFED1.2 defines it as const, to use it we have to add
 * (void *) cast to overcome "const" */

#define KIBLND_CONN_PARAM(e)            ((e)->param.conn.private_data)
#define KIBLND_CONN_PARAM_LEN(e)        ((e)->param.conn.private_data_len)

#else

static inline int
kiblnd_dma_mapping_error(struct ib_device *dev, dma_addr_t dma_addr)
{
        return dma_mapping_error(dma_addr);
}

static inline dma_addr_t kiblnd_dma_map_single(struct ib_device *dev,
                                               void *msg, size_t size,
                                               enum dma_data_direction direction)
{
        return dma_map_single(dev->dma_device, msg, size, direction);
}

static inline void kiblnd_dma_unmap_single(struct ib_device *dev,
                                           dma_addr_t addr, size_t size,
                                           enum dma_data_direction direction)
{
        dma_unmap_single(dev->dma_device, addr, size, direction);
}

#define KIBLND_UNMAP_ADDR_SET(p, m, a)  pci_unmap_addr_set(p, m, a)
#define KIBLND_UNMAP_ADDR(p, m, a)      pci_unmap_addr(p, m)

static inline int kiblnd_dma_map_sg(struct ib_device *dev,
                                    struct scatterlist *sg, int nents,
                                    enum dma_data_direction direction)
{
        return dma_map_sg(dev->dma_device, sg, nents, direction);
}

static inline void kiblnd_dma_unmap_sg(struct ib_device *dev,
                                       struct scatterlist *sg, int nents,
                                       enum dma_data_direction direction)
{
        dma_unmap_sg(dev->dma_device, sg, nents, direction);
}

static inline dma_addr_t kiblnd_sg_dma_address(struct ib_device *dev,
                                               struct scatterlist *sg)
{
        return sg_dma_address(sg);
}

static inline unsigned int kiblnd_sg_dma_len(struct ib_device *dev,
                                             struct scatterlist *sg)
{
        return sg_dma_len(sg);
}

#define KIBLND_CONN_PARAM(e)            ((e)->private_data)
#define KIBLND_CONN_PARAM_LEN(e)        ((e)->private_data_len)

#endif

struct ib_mr *kiblnd_find_rd_dma_mr(kib_net_t *net,
                                    kib_rdma_desc_t *rd);
struct ib_mr *kiblnd_find_dma_mr(kib_net_t *net,
                                 __u64 addr, __u64 size);
void kiblnd_map_rx_descs(kib_conn_t *conn);
void kiblnd_unmap_rx_descs(kib_conn_t *conn);
int  kiblnd_map_tx(lnet_ni_t *ni, kib_tx_t *tx,
                   kib_rdma_desc_t *rd, int nfrags);
void kiblnd_unmap_tx(lnet_ni_t *ni, kib_tx_t *tx);
void kiblnd_pool_free_node(kib_pool_t *pool, struct list_head *node);
struct list_head *kiblnd_pool_alloc_node(kib_poolset_t *ps);

int  kiblnd_fmr_pool_map(kib_fmr_poolset_t *fps, __u64 *pages,
                         int npages, __u64 iov, kib_fmr_t *fmr);
void kiblnd_fmr_pool_unmap(kib_fmr_t *fmr, int status);

int  kiblnd_pmr_pool_map(kib_pmr_poolset_t *pps, kib_rdma_desc_t *rd,
                         __u64 *iova, kib_phys_mr_t **pp_pmr);
void kiblnd_pmr_pool_unmap(kib_phys_mr_t *pmr);

int  kiblnd_startup (lnet_ni_t *ni);
void kiblnd_shutdown (lnet_ni_t *ni);
int  kiblnd_ctl (lnet_ni_t *ni, unsigned int cmd, void *arg);
void kiblnd_query (struct lnet_ni *ni, lnet_nid_t nid, cfs_time_t *when);

int  kiblnd_tunables_init(void);
void kiblnd_tunables_fini(void);

int  kiblnd_connd (void *arg);
int  kiblnd_scheduler(void *arg);
int  kiblnd_thread_start (int (*fn)(void *arg), void *arg);

int  kiblnd_alloc_pages (kib_pages_t **pp, int npages);
void kiblnd_free_pages (kib_pages_t *p);

int  kiblnd_cm_callback(struct rdma_cm_id *cmid,
                        struct rdma_cm_event *event);
int  kiblnd_translate_mtu(int value);

int  kiblnd_create_peer (lnet_ni_t *ni, kib_peer_t **peerp, lnet_nid_t nid);
void kiblnd_destroy_peer (kib_peer_t *peer);
void kiblnd_destroy_dev (kib_dev_t *dev);
void kiblnd_unlink_peer_locked (kib_peer_t *peer);
void kiblnd_peer_alive (kib_peer_t *peer);
kib_peer_t *kiblnd_find_peer_locked (lnet_nid_t nid);
void kiblnd_peer_connect_failed (kib_peer_t *peer, int active, int error);
int  kiblnd_close_stale_conns_locked (kib_peer_t *peer,
                                      int version, __u64 incarnation);
int  kiblnd_close_peer_conns_locked (kib_peer_t *peer, int why);

void kiblnd_connreq_done(kib_conn_t *conn, int status);
kib_conn_t *kiblnd_create_conn (kib_peer_t *peer, struct rdma_cm_id *cmid,
                                int state, int version);
void kiblnd_destroy_conn (kib_conn_t *conn);
void kiblnd_close_conn (kib_conn_t *conn, int error);
void kiblnd_close_conn_locked (kib_conn_t *conn, int error);

int  kiblnd_init_rdma (kib_conn_t *conn, kib_tx_t *tx, int type,
                       int nob, kib_rdma_desc_t *dstrd, __u64 dstcookie);

void kiblnd_launch_tx (lnet_ni_t *ni, kib_tx_t *tx, lnet_nid_t nid);
void kiblnd_queue_tx_locked (kib_tx_t *tx, kib_conn_t *conn);
void kiblnd_queue_tx (kib_tx_t *tx, kib_conn_t *conn);
void kiblnd_init_tx_msg (lnet_ni_t *ni, kib_tx_t *tx, int type, int body_nob);
void kiblnd_txlist_done (lnet_ni_t *ni, struct list_head *txlist, int status);
void kiblnd_check_sends (kib_conn_t *conn);

void kiblnd_qp_event(struct ib_event *event, void *arg);
void kiblnd_cq_event(struct ib_event *event, void *arg);
void kiblnd_cq_completion(struct ib_cq *cq, void *arg);

void kiblnd_pack_msg (lnet_ni_t *ni, kib_msg_t *msg, int version,
                      int credits, lnet_nid_t dstnid, __u64 dststamp);
int  kiblnd_unpack_msg(kib_msg_t *msg, int nob);
int  kiblnd_post_rx (kib_rx_t *rx, int credit);

int  kiblnd_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg);
int  kiblnd_recv(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg, int delayed,
                 unsigned int niov, struct iovec *iov, lnet_kiov_t *kiov,
                 unsigned int offset, unsigned int mlen, unsigned int rlen);

/* compat macros */
#ifndef HAVE_SCATTERLIST_SETPAGE
static inline void sg_set_page(struct scatterlist *sg, struct page *page,
                               unsigned int len, unsigned int offset)
{
        sg->page = page;
        sg->offset = offset;