/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2016, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lnet/klnds/o2iblnd/o2iblnd.h
 *
 * Author: Eric Barton <eric@bartonsoftware.com>
 */
#ifdef HAVE_COMPAT_RDMA
#include <linux/compat-2.6.h>

#ifdef LINUX_3_17_COMPAT_H
#undef NEED_KTIME_GET_REAL_NS
#endif

#endif
#include <linux/version.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/stat.h>
#include <linux/errno.h>
#include <linux/unistd.h>
#include <linux/uio.h>

#include <asm/uaccess.h>
#include <asm/io.h>

#include <linux/init.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/stat.h>
#include <linux/list.h>
#include <linux/kmod.h>
#include <linux/sysctl.h>
#include <linux/pci.h>
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,32)
#include <linux/pci-dma.h>
#endif

#include <net/sock.h>
#include <linux/in.h>

#include <rdma/rdma_cm.h>
#include <rdma/ib_cm.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_fmr_pool.h>

#define DEBUG_SUBSYSTEM S_LND

#include <libcfs/libcfs.h>
#include <lnet/lnet.h>
#include <lnet/lib-lnet.h>
#define IBLND_PEER_HASH_SIZE		101	/* # peer_ni lists */
/* # scheduler loops before reschedule */
#define IBLND_RESCHED			100

#define IBLND_N_SCHED			2
#define IBLND_N_SCHED_HIGH		4

typedef struct
{
	int		*kib_dev_failover;	/* HCA failover */
	unsigned int	*kib_service;		/* IB service number */
	int		*kib_min_reconnect_interval; /* first failed connection retry... */
	int		*kib_max_reconnect_interval; /* ...exponentially increasing to this */
	int		*kib_cksum;		/* checksum kib_msg_t? */
	int		*kib_timeout;		/* comms timeout (seconds) */
	int		*kib_keepalive;		/* keepalive timeout (seconds) */
	int		*kib_ntx;		/* # tx descs */
	char		**kib_default_ipif;	/* default IPoIB interface */
	int		*kib_retry_count;
	int		*kib_rnr_retry_count;
	int		*kib_ib_mtu;		/* IB MTU */
	int		*kib_require_priv_port;	/* accept only privileged ports */
	int		*kib_use_priv_port;	/* use privileged port for active connect */
	/* # threads on each CPT */
	int		*kib_nscheds;
	int		*kib_wrq_sge;		/* # sg elements per wrq */
} kib_tunables_t;

extern kib_tunables_t kiblnd_tunables;
#define IBLND_MSG_QUEUE_SIZE_V1		8 /* V1 only : # messages/RDMAs in-flight */
#define IBLND_CREDIT_HIGHWATER_V1	7 /* V1 only : when eagerly to return credits */

#define IBLND_CREDITS_DEFAULT		8 /* default # of peer_ni credits */
#define IBLND_CREDITS_MAX	((typeof(((kib_msg_t *) 0)->ibm_credits)) - 1) /* Max # of peer_ni credits */

/* when eagerly to return credits */
#define IBLND_CREDITS_HIGHWATER(t, v)	((v) == IBLND_MSG_VERSION_1 ? \
					 IBLND_CREDIT_HIGHWATER_V1 : \
					 t->lnd_peercredits_hiw)
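
/*
 * Illustration (not part of the original source): on a version-2 connection
 * the high-water mark comes from the per-NI tunable, so with a hypothetical
 * lnd_peercredits_hiw = 4 the macro evaluates to 4; on a version-1
 * connection it is always IBLND_CREDIT_HIGHWATER_V1 (7).
 */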
#ifdef HAVE_RDMA_CREATE_ID_5ARG
# define kiblnd_rdma_create_id(cb, dev, ps, qpt) rdma_create_id(current->nsproxy->net_ns, \
								 cb, dev, \
								 ps, qpt)
#else
# ifdef HAVE_RDMA_CREATE_ID_4ARG
#  define kiblnd_rdma_create_id(cb, dev, ps, qpt) rdma_create_id(cb, dev, \
								  ps, qpt)
# else
#  define kiblnd_rdma_create_id(cb, dev, ps, qpt) rdma_create_id(cb, dev, ps)
# endif
#endif
/* 2 OOB shall suffice for 1 keepalive and 1 returning credits */
#define IBLND_OOB_CAPABLE(v)	((v) != IBLND_MSG_VERSION_1)
#define IBLND_OOB_MSGS(v)	(IBLND_OOB_CAPABLE(v) ? 2 : 0)

#define IBLND_MSG_SIZE		(4<<10)		/* max size of queued messages (inc hdr) */
#define IBLND_MAX_RDMA_FRAGS	LNET_MAX_IOV	/* max # of fragments supported */
/************************/
/* derived constants... */
/* Pools (shared by connections on each CPT) */
/* These pools can grow at runtime, so we don't need to give them a very
 * large value */
#define IBLND_TX_POOL		256
#define IBLND_FMR_POOL		256
#define IBLND_FMR_POOL_FLUSH	192
/* RX messages (per connection) */
#define IBLND_RX_MSGS(c)	\
	((c->ibc_queue_depth) * 2 + IBLND_OOB_MSGS(c->ibc_version))
#define IBLND_RX_MSG_BYTES(c)	(IBLND_RX_MSGS(c) * IBLND_MSG_SIZE)
#define IBLND_RX_MSG_PAGES(c)	\
	((IBLND_RX_MSG_BYTES(c) + PAGE_SIZE - 1) / PAGE_SIZE)
/* WRs and CQEs (per connection) */
#define IBLND_RECV_WRS(c)	IBLND_RX_MSGS(c)
#define IBLND_SEND_WRS(c)	\
	((c->ibc_max_frags + 1) * kiblnd_concurrent_sends(c->ibc_version, \
							  c->ibc_peer->ibp_ni))
#define IBLND_CQ_ENTRIES(c)	(IBLND_RECV_WRS(c) + IBLND_SEND_WRS(c))
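
/*
 * Worked example (illustrative only): a version-2 connection with
 * ibc_queue_depth = 8 posts IBLND_RX_MSGS = 8 * 2 + 2 = 18 receives,
 * i.e. 18 * 4KiB = 72KiB of RX buffers (18 pages with 4KiB pages),
 * and sizes its CQ for those 18 recv WRs plus the send WRs.
 */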
/* o2iblnd can run over aliased interface */
#ifdef IFALIASZ
#define KIB_IFNAME_SIZE	IFALIASZ
#else
#define KIB_IFNAME_SIZE	256
#endif
typedef struct
{
	struct list_head	ibd_list;	/* chain on kib_devs */
	struct list_head	ibd_fail_list;	/* chain on kib_failed_devs */
	__u32			ibd_ifip;	/* IPoIB interface IP */
	/** IPoIB interface name */
	char			ibd_ifname[KIB_IFNAME_SIZE];
	int			ibd_nnets;	/* # nets extant */

	cfs_time_t		ibd_next_failover;
	/* # failover failures */
	int			ibd_failed_failover;
	/* failover in progress */
	unsigned int		ibd_failover;
	/* IPoIB interface is a bonding master */
	unsigned int		ibd_can_failover;
	struct list_head	ibd_nets;
	struct kib_hca_dev	*ibd_hdev;
} kib_dev_t;
typedef struct kib_hca_dev
{
	struct rdma_cm_id	*ibh_cmid;	/* listener cmid */
	struct ib_device	*ibh_ibdev;	/* IB device */
	int			ibh_page_shift;	/* page shift of current HCA */
	int			ibh_page_size;	/* page size of current HCA */
	__u64			ibh_page_mask;	/* page mask of current HCA */
	int			ibh_mr_shift;	/* bits shift of max MR size */
	__u64			ibh_mr_size;	/* size of MR */
#ifdef HAVE_IB_GET_DMA_MR
	struct ib_mr		*ibh_mrs;	/* global MR */
#endif
	struct ib_pd		*ibh_pd;	/* PD */
	kib_dev_t		*ibh_dev;	/* owner */
	atomic_t		ibh_ref;	/* refcount */
} kib_hca_dev_t;
/** # of seconds to keep pool alive */
#define IBLND_POOL_DEADLINE	300
/** # of seconds to retry if allocation failed */
#define IBLND_POOL_RETRY	1

typedef struct
{
	int			ibp_npages;	/* # pages */
	struct page		*ibp_pages[0];	/* page array */
} kib_pages_t;
struct kib_pool;
struct kib_poolset;

typedef int  (*kib_ps_pool_create_t)(struct kib_poolset *ps,
				     int inc, struct kib_pool **pp_po);
typedef void (*kib_ps_pool_destroy_t)(struct kib_pool *po);
typedef void (*kib_ps_node_init_t)(struct kib_pool *po, struct list_head *node);
typedef void (*kib_ps_node_fini_t)(struct kib_pool *po, struct list_head *node);

#define IBLND_POOL_NAME_LEN	32
typedef struct kib_poolset
{
	/* serialize */
	spinlock_t		ps_lock;
	/* network it belongs to */
	struct kib_net		*ps_net;
	/* pool set name */
	char			ps_name[IBLND_POOL_NAME_LEN];
	/* list of pools */
	struct list_head	ps_pool_list;
	/* failed pool list */
	struct list_head	ps_failed_pool_list;
	/* time stamp for retry if failed to allocate */
	cfs_time_t		ps_next_retry;
	/* is allocating new pool */
	int			ps_increasing;
	/* new pool size */
	int			ps_pool_size;
	/* CPT id */
	int			ps_cpt;

	/* create a new pool */
	kib_ps_pool_create_t	ps_pool_create;
	/* destroy a pool */
	kib_ps_pool_destroy_t	ps_pool_destroy;
	/* initialize new allocated node */
	kib_ps_node_init_t	ps_node_init;
	/* finalize node */
	kib_ps_node_fini_t	ps_node_fini;
} kib_poolset_t;
typedef struct kib_pool
{
	/* chain on pool list */
	struct list_head	po_list;
	/* pre-allocated node */
	struct list_head	po_free_list;
	/* pool_set of this pool */
	kib_poolset_t		*po_owner;
	/* deadline of this pool */
	cfs_time_t		po_deadline;
	/* # of elements in use */
	int			po_allocated;
	/* pool is created on failed HCA */
	int			po_failed;
	/* # of pre-allocated elements */
	int			po_size;
} kib_pool_t;
typedef struct {
	kib_poolset_t		tps_poolset;		/* pool-set */
	__u64			tps_next_tx_cookie;	/* cookie of TX */
} kib_tx_poolset_t;

typedef struct {
	kib_pool_t		tpo_pool;		/* pool */
	struct kib_hca_dev	*tpo_hdev;		/* device for this pool */
	struct kib_tx		*tpo_tx_descs;		/* all the tx descriptors */
	kib_pages_t		*tpo_tx_pages;		/* premapped tx msg pages */
} kib_tx_pool_t;
typedef struct
{
	spinlock_t		fps_lock;		/* serialize */
	struct kib_net		*fps_net;		/* IB network */
	struct list_head	fps_pool_list;		/* FMR pool list */
	struct list_head	fps_failed_pool_list;	/* FMR pool list */
	__u64			fps_version;		/* validity stamp */
	int			fps_cpt;		/* CPT id */
	int			fps_pool_size;
	int			fps_flush_trigger;
	int			fps_cache;
	/* is allocating new pool */
	int			fps_increasing;
	/* time stamp for retry if failed to allocate */
	cfs_time_t		fps_next_retry;
} kib_fmr_poolset_t;
#ifndef HAVE_IB_RDMA_WR
struct ib_rdma_wr {
	struct ib_send_wr wr;
};
#endif

struct kib_fast_reg_descriptor { /* For fast registration */
	struct list_head		 frd_list;
	struct ib_rdma_wr		 frd_inv_wr;
#ifdef HAVE_IB_MAP_MR_SG
	struct ib_reg_wr		 frd_fastreg_wr;
#else
	struct ib_rdma_wr		 frd_fastreg_wr;
	struct ib_fast_reg_page_list	*frd_frpl;
#endif
	struct ib_mr			*frd_mr;
	bool				 frd_valid;
};
typedef struct
{
	struct list_head	fpo_list;	/* chain on pool list */
	struct kib_hca_dev	*fpo_hdev;	/* device for this pool */
	kib_fmr_poolset_t	*fpo_owner;	/* owner of this pool */
	union {
		struct {
			struct ib_fmr_pool *fpo_fmr_pool; /* IB FMR pool */
		} fmr;
		struct { /* For fast registration */
			struct list_head    fpo_pool_list;
			int		    fpo_pool_size;
		} fast_reg;
	};
	cfs_time_t		fpo_deadline;	/* deadline of this pool */
	int			fpo_failed;	/* fmr pool is failed */
	int			fpo_map_count;	/* # of mapped FMR */
	int			fpo_is_fmr;
} kib_fmr_pool_t;

typedef struct {
	kib_fmr_pool_t			*fmr_pool;	/* pool of FMR */
	struct ib_pool_fmr		*fmr_pfmr;	/* IB pool fmr */
	struct kib_fast_reg_descriptor	*fmr_frd;
	u32				 fmr_key;
} kib_fmr_t;
typedef struct kib_net
{
	/* chain on kib_dev_t::ibd_nets */
	struct list_head	ibn_list;
	__u64			ibn_incarnation;/* my epoch */
	int			ibn_init;	/* initialisation state */
	int			ibn_shutdown;	/* shutting down? */

	atomic_t		ibn_npeers;	/* # peers extant */
	atomic_t		ibn_nconns;	/* # connections extant */

	kib_tx_poolset_t	**ibn_tx_ps;	/* tx pool-set */
	kib_fmr_poolset_t	**ibn_fmr_ps;	/* fmr pool-set */

	kib_dev_t		*ibn_dev;	/* underlying IB device */
} kib_net_t;
#define KIB_THREAD_SHIFT	16
#define KIB_THREAD_ID(cpt, tid)	((cpt) << KIB_THREAD_SHIFT | (tid))
#define KIB_THREAD_CPT(id)	((id) >> KIB_THREAD_SHIFT)
#define KIB_THREAD_TID(id)	((id) & ((1UL << KIB_THREAD_SHIFT) - 1))
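
/*
 * Illustration: a scheduler thread id packs the CPT in the high bits and
 * the per-CPT thread index in the low 16 bits, so KIB_THREAD_ID(2, 3)
 * yields 0x20003, and KIB_THREAD_CPT()/KIB_THREAD_TID() recover 2 and 3.
 */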
struct kib_sched_info {
	/* serialise */
	spinlock_t		ibs_lock;
	/* schedulers sleep here */
	wait_queue_head_t	ibs_waitq;
	/* conns to check for rx completions */
	struct list_head	ibs_conns;
	/* number of scheduler threads */
	int			ibs_nthreads;
	/* max allowed scheduler threads */
	int			ibs_nthreads_max;
	int			ibs_cpt;	/* CPT id */
};
typedef struct
{
	int			kib_init;	/* initialisation state */
	int			kib_shutdown;	/* shut down? */
	struct list_head	kib_devs;	/* IB devices extant */
	/* list head of failed devices */
	struct list_head	kib_failed_devs;
	/* schedulers sleep here */
	wait_queue_head_t	kib_failover_waitq;
	atomic_t		kib_nthreads;	/* # live threads */
	/* stabilize net/dev/peer_ni/conn ops */
	rwlock_t		kib_global_lock;
	/* hash table of all my known peers */
	struct list_head	*kib_peers;
	/* size of kib_peers */
	int			kib_peer_hash_size;
	/* the connd task (serialisation assertions) */
	void			*kib_connd;
	/* connections to setup/teardown */
	struct list_head	kib_connd_conns;
	/* connections with zero refcount */
	struct list_head	kib_connd_zombies;
	/* connections to reconnect */
	struct list_head	kib_reconn_list;
	/* peers wait for reconnection */
	struct list_head	kib_reconn_wait;
	/*
	 * The second that peers are pulled out from \a kib_reconn_wait
	 * for reconnection.
	 */
	time64_t		kib_reconn_sec;
	/* connection daemon sleeps here */
	wait_queue_head_t	kib_connd_waitq;
	spinlock_t		kib_connd_lock;	/* serialise */
	struct ib_qp_attr	kib_error_qpa;	/* QP->ERROR */
	/* percpt data for schedulers */
	struct kib_sched_info	**kib_scheds;
} kib_data_t;

#define IBLND_INIT_NOTHING	0
#define IBLND_INIT_DATA		1
#define IBLND_INIT_ALL		2
/************************************************************************
 * IB Wire message format.
 * These are sent in sender's byte order (i.e. receiver flips).
 */
typedef struct kib_connparams
{
	__u16		ibcp_queue_depth;
	__u16		ibcp_max_frags;
	__u32		ibcp_max_msg_size;
} WIRE_ATTR kib_connparams_t;

typedef struct
{
	struct lnet_hdr	ibim_hdr;	/* portals header */
	char		ibim_payload[0];/* piggy-backed payload */
} WIRE_ATTR kib_immediate_msg_t;

typedef struct
{
	__u32		rf_nob;		/* # bytes this frag */
	__u64		rf_addr;	/* CAVEAT EMPTOR: misaligned!! */
} WIRE_ATTR kib_rdma_frag_t;

typedef struct
{
	__u32		rd_key;		/* local/remote key */
	__u32		rd_nfrags;	/* # fragments */
	kib_rdma_frag_t	rd_frags[0];	/* buffer frags */
} WIRE_ATTR kib_rdma_desc_t;

typedef struct
{
	struct lnet_hdr	ibprm_hdr;	/* portals header */
	__u64		ibprm_cookie;	/* opaque completion cookie */
} WIRE_ATTR kib_putreq_msg_t;

typedef struct
{
	__u64		ibpam_src_cookie;	/* reflected completion cookie */
	__u64		ibpam_dst_cookie;	/* opaque completion cookie */
	kib_rdma_desc_t	ibpam_rd;		/* sender's sink buffer */
} WIRE_ATTR kib_putack_msg_t;

typedef struct
{
	struct lnet_hdr	ibgm_hdr;	/* portals header */
	__u64		ibgm_cookie;	/* opaque completion cookie */
	kib_rdma_desc_t	ibgm_rd;	/* rdma descriptor */
} WIRE_ATTR kib_get_msg_t;

typedef struct
{
	__u64		ibcm_cookie;	/* opaque completion cookie */
	__s32		ibcm_status;	/* < 0 failure: >= 0 length */
} WIRE_ATTR kib_completion_msg_t;
typedef struct
{
	/* First 2 fields fixed FOR ALL TIME */
	__u32		ibm_magic;	/* I'm an ibnal message */
	__u16		ibm_version;	/* this is my version number */

	__u8		ibm_type;	/* msg type */
	__u8		ibm_credits;	/* returned credits */
	__u32		ibm_nob;	/* # bytes in whole message */
	__u32		ibm_cksum;	/* checksum (0 == no checksum) */
	__u64		ibm_srcnid;	/* sender's NID */
	__u64		ibm_srcstamp;	/* sender's incarnation */
	__u64		ibm_dstnid;	/* destination's NID */
	__u64		ibm_dststamp;	/* destination's incarnation */

	union {
		kib_connparams_t	connparams;
		kib_immediate_msg_t	immediate;
		kib_putreq_msg_t	putreq;
		kib_putack_msg_t	putack;
		kib_get_msg_t		get;
		kib_completion_msg_t	completion;
	} WIRE_ATTR ibm_u;
} WIRE_ATTR kib_msg_t;
#define IBLND_MSG_MAGIC		LNET_PROTO_IB_MAGIC	/* unique magic */

#define IBLND_MSG_VERSION_1	0x11
#define IBLND_MSG_VERSION_2	0x12
#define IBLND_MSG_VERSION	IBLND_MSG_VERSION_2

#define IBLND_MSG_CONNREQ	0xc0	/* connection request */
#define IBLND_MSG_CONNACK	0xc1	/* connection acknowledge */
#define IBLND_MSG_NOOP		0xd0	/* nothing (just credits) */
#define IBLND_MSG_IMMEDIATE	0xd1	/* immediate */
#define IBLND_MSG_PUT_REQ	0xd2	/* putreq (src->sink) */
#define IBLND_MSG_PUT_NAK	0xd3	/* completion (sink->src) */
#define IBLND_MSG_PUT_ACK	0xd4	/* putack (sink->src) */
#define IBLND_MSG_PUT_DONE	0xd5	/* completion (src->sink) */
#define IBLND_MSG_GET_REQ	0xd6	/* getreq (sink->src) */
#define IBLND_MSG_GET_DONE	0xd7	/* completion (src->sink: all OK) */
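
/*
 * Sketch of the exchanges implied by the message types above (derived
 * from the per-type comments):
 *
 *   PUT: src sends PUT_REQ; the sink replies PUT_ACK (or PUT_NAK)
 *        describing its sink buffer; src RDMAs the payload across and
 *        sends PUT_DONE.
 *   GET: the sink sends GET_REQ carrying an rdma descriptor; src RDMAs
 *        the reply data and sends GET_DONE.
 *   Small payloads travel piggy-backed in IMMEDIATE; NOOP carries
 *   nothing but returned credits.
 */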
typedef struct {
	__u32		ibr_magic;	/* sender's magic */
	__u16		ibr_version;	/* sender's version */
	__u8		ibr_why;	/* reject reason */
	__u8		ibr_padding;	/* padding */
	__u64		ibr_incarnation;/* incarnation of peer_ni */
	kib_connparams_t ibr_cp;	/* connection parameters */
} WIRE_ATTR kib_rej_t;
/* connection rejection reasons */
#define IBLND_REJECT_CONN_RACE		1 /* You lost connection race */
#define IBLND_REJECT_NO_RESOURCES	2 /* Out of memory/conns etc */
#define IBLND_REJECT_FATAL		3 /* Anything else */
#define IBLND_REJECT_CONN_UNCOMPAT	4 /* incompatible version peer_ni */
#define IBLND_REJECT_CONN_STALE		5 /* stale peer_ni */
/* peer_ni's rdma frags don't match mine */
#define IBLND_REJECT_RDMA_FRAGS		6
/* peer_ni's msg queue size doesn't match mine */
#define IBLND_REJECT_MSG_QUEUE_SIZE	7
#define IBLND_REJECT_INVALID_SRV_ID	8
/***********************************************************************/

typedef struct kib_rx				/* receive message */
{
	/* queue for attention */
	struct list_head	rx_list;
	/* owning conn */
	struct kib_conn		*rx_conn;
	/* # bytes received (-1 while posted) */
	int			rx_nob;
	/* completion status */
	enum ib_wc_status	rx_status;
	/* message buffer (host vaddr) */
	kib_msg_t		*rx_msg;
	/* message buffer (I/O addr) */
	__u64			rx_msgaddr;
	/* for dma_unmap_single() */
	DECLARE_PCI_UNMAP_ADDR(rx_msgunmap);
	/* receive work item... */
	struct ib_recv_wr	rx_wrq;
	/* ...and its memory */
	struct ib_sge		rx_sge;
} kib_rx_t;
#define IBLND_POSTRX_DONT_POST		0 /* don't post */
#define IBLND_POSTRX_NO_CREDIT		1 /* post: no credits */
#define IBLND_POSTRX_PEER_CREDIT	2 /* post: give peer_ni back 1 credit */
#define IBLND_POSTRX_RSRVD_CREDIT	3 /* post: give myself back 1 reserved credit */
typedef struct kib_tx				/* transmit message */
{
	/* queue on idle_txs ibc_tx_queue etc. */
	struct list_head	tx_list;
	/* pool I'm from */
	kib_tx_pool_t		*tx_pool;
	/* owning conn */
	struct kib_conn		*tx_conn;
	/* # tx callbacks outstanding */
	short			tx_sending;
	/* queued for sending */
	short			tx_queued;
	/* waiting for peer_ni */
	short			tx_waiting;
	/* LNET completion status */
	int			tx_status;
	/* completion deadline */
	unsigned long		tx_deadline;
	/* completion cookie */
	__u64			tx_cookie;
	/* lnet msgs to finalize on completion */
	struct lnet_msg		*tx_lntmsg[2];
	/* message buffer (host vaddr) */
	kib_msg_t		*tx_msg;
	/* message buffer (I/O addr) */
	__u64			tx_msgaddr;
	/* for dma_unmap_single() */
	DECLARE_PCI_UNMAP_ADDR(tx_msgunmap);
	/** sge for tx_msgaddr */
	struct ib_sge		tx_msgsge;
	/* # send work items */
	int			tx_nwrq;
	/* # used scatter/gather elements */
	int			tx_nsge;
	/* send work items... */
	struct ib_rdma_wr	*tx_wrq;
	/* ...and their memory */
	struct ib_sge		*tx_sge;
	/* rdma descriptor */
	kib_rdma_desc_t		*tx_rd;
	/* # entries in... */
	int			tx_nfrags;
	/* dma_map_sg descriptor */
	struct scatterlist	*tx_frags;
	/* rdma phys page addrs */
	__u64			*tx_pages;
	/* FMR */
	kib_fmr_t		fmr;
	/* dma direction */
	int			tx_dmadir;
} kib_tx_t;
typedef struct kib_connvars
{
	/* connection-in-progress variables */
	kib_msg_t		cv_msg;
} kib_connvars_t;
typedef struct kib_conn
{
	/* scheduler information */
	struct kib_sched_info	*ibc_sched;
	/* owning peer_ni */
	struct kib_peer		*ibc_peer;
	/* HCA bound on */
	kib_hca_dev_t		*ibc_hdev;
	/* stash on peer_ni's conn list */
	struct list_head	ibc_list;
	/* schedule for attention */
	struct list_head	ibc_sched_list;
	/* version of connection */
	__u16			ibc_version;
	/* reconnect later */
	__u16			ibc_reconnect:1;
	/* which instance of the peer */
	__u64			ibc_incarnation;
	/* always != 0 */
	atomic_t		ibc_refcount;
	/* what's happening */
	int			ibc_state;
	/* # uncompleted sends */
	int			ibc_nsends_posted;
	/* # uncompleted NOOPs */
	int			ibc_noops_posted;
	/* # credits I have */
	int			ibc_credits;
	/* # credits to return */
	int			ibc_outstanding_credits;
	/* # ACK/DONE msg credits */
	int			ibc_reserved_credits;
	/* set on comms error */
	int			ibc_comms_error;
	/* connections queue depth */
	__u16			ibc_queue_depth;
	/* connections max frags */
	__u16			ibc_max_frags;
	/* receive buffers owned */
	unsigned int		ibc_nrx:16;
	/* scheduled for attention */
	unsigned int		ibc_scheduled:1;
	/* CQ callback fired */
	unsigned int		ibc_ready:1;
	/* time of last send */
	unsigned long		ibc_last_send;
	/** link chain for kiblnd_check_conns only */
	struct list_head	ibc_connd_list;
	/** rxs completed before ESTABLISHED */
	struct list_head	ibc_early_rxs;
	/** IBLND_MSG_NOOPs for IBLND_MSG_VERSION_1 */
	struct list_head	ibc_tx_noops;
	/* sends that need a credit */
	struct list_head	ibc_tx_queue;
	/* sends that don't need a credit */
	struct list_head	ibc_tx_queue_nocred;
	/* sends that need to reserve an ACK/DONE msg */
	struct list_head	ibc_tx_queue_rsrvd;
	/* active tx awaiting completion */
	struct list_head	ibc_active_txs;
	/* serialise */
	spinlock_t		ibc_lock;
	/* the rx descs */
	kib_rx_t		*ibc_rxs;
	/* premapped rx msg pages */
	kib_pages_t		*ibc_rx_pages;

	/* CM id */
	struct rdma_cm_id	*ibc_cmid;
	/* completion queue */
	struct ib_cq		*ibc_cq;

	/* in-progress connection state */
	kib_connvars_t		*ibc_connvars;
} kib_conn_t;
#define IBLND_CONN_INIT			0 /* being initialised */
#define IBLND_CONN_ACTIVE_CONNECT	1 /* active sending req */
#define IBLND_CONN_PASSIVE_WAIT		2 /* passive waiting for rtu */
#define IBLND_CONN_ESTABLISHED		3 /* connection established */
#define IBLND_CONN_CLOSING		4 /* being closed */
#define IBLND_CONN_DISCONNECTED		5 /* disconnected */
typedef struct kib_peer
{
	/* stash on global peer_ni list */
	struct list_head	ibp_list;
	/* who's on the other end(s) */
	lnet_nid_t		ibp_nid;
	/* LNet interface */
	struct lnet_ni		*ibp_ni;
	/* all active connections */
	struct list_head	ibp_conns;
	/* next connection to send on for round robin */
	struct kib_conn		*ibp_next_conn;
	/* msgs waiting for a conn */
	struct list_head	ibp_tx_queue;
	/* incarnation of peer_ni */
	__u64			ibp_incarnation;
	/* when (in jiffies) I was last alive */
	cfs_time_t		ibp_last_alive;
	/* # users */
	atomic_t		ibp_refcount;
	/* version of peer_ni */
	__u16			ibp_version;
	/* current passive connection attempts */
	unsigned short		ibp_accepting;
	/* current active connection attempts */
	unsigned short		ibp_connecting;
	/* reconnect this peer_ni later */
	unsigned char		ibp_reconnecting;
	/* counter of how many times we triggered a conn race */
	unsigned char		ibp_races;
	/* # consecutive reconnection attempts to this peer */
	unsigned int		ibp_reconnected;
	/* errno on closing this peer_ni */
	int			ibp_error;
	/* max map_on_demand */
	__u16			ibp_max_frags;
	/* max_peer_credits */
	__u16			ibp_queue_depth;
} kib_peer_ni_t;
#ifndef HAVE_IB_INC_RKEY
/**
 * ib_inc_rkey - increments the key portion of the given rkey. Can be used
 * for calculating a new rkey for type 2 memory windows.
 * @rkey - the rkey to increment.
 */
static inline u32 ib_inc_rkey(u32 rkey)
{
	const u32 mask = 0x000000ff;
	return ((rkey + 1) & mask) | (rkey & ~mask);
}
#endif /* HAVE_IB_INC_RKEY */
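
/*
 * Illustration: only the low "key" byte is incremented, and it wraps
 * without carrying into the index bits; e.g. ib_inc_rkey(0x123456ff)
 * returns 0x12345600, while ib_inc_rkey(0x12345601) returns 0x12345602.
 */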
extern kib_data_t kiblnd_data;

extern void kiblnd_hdev_destroy(kib_hca_dev_t *hdev);

int kiblnd_msg_queue_size(int version, struct lnet_ni *ni);
/* max # of fragments configured by user */
static inline int
kiblnd_cfg_rdma_frags(struct lnet_ni *ni)
{
	struct lnet_ioctl_config_o2iblnd_tunables *tunables;
	int mod;

	tunables = &ni->ni_lnd_tunables.lnd_tun_u.lnd_o2ib;
	mod = tunables->lnd_map_on_demand;
	return mod != 0 ? mod : IBLND_MAX_RDMA_FRAGS;
}

static inline int
kiblnd_rdma_frags(int version, struct lnet_ni *ni)
{
	return version == IBLND_MSG_VERSION_1 ?
	       IBLND_MAX_RDMA_FRAGS :
	       kiblnd_cfg_rdma_frags(ni);
}
static inline int
kiblnd_concurrent_sends(int version, struct lnet_ni *ni)
{
	struct lnet_ioctl_config_o2iblnd_tunables *tunables;
	int concurrent_sends;

	tunables = &ni->ni_lnd_tunables.lnd_tun_u.lnd_o2ib;
	concurrent_sends = tunables->lnd_concurrent_sends;

	if (version == IBLND_MSG_VERSION_1) {
		if (concurrent_sends > IBLND_MSG_QUEUE_SIZE_V1 * 2)
			return IBLND_MSG_QUEUE_SIZE_V1 * 2;

		if (concurrent_sends < IBLND_MSG_QUEUE_SIZE_V1 / 2)
			return IBLND_MSG_QUEUE_SIZE_V1 / 2;
	}

	return concurrent_sends;
}
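
/*
 * Note: with IBLND_MSG_QUEUE_SIZE_V1 == 8, the clamp above means a V1
 * connection always uses between 4 and 16 concurrent sends, whatever
 * lnd_concurrent_sends is tuned to.
 */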
static inline void
kiblnd_hdev_addref_locked(kib_hca_dev_t *hdev)
{
	LASSERT(atomic_read(&hdev->ibh_ref) > 0);
	atomic_inc(&hdev->ibh_ref);
}

static inline void
kiblnd_hdev_decref(kib_hca_dev_t *hdev)
{
	LASSERT(atomic_read(&hdev->ibh_ref) > 0);
	if (atomic_dec_and_test(&hdev->ibh_ref))
		kiblnd_hdev_destroy(hdev);
}
static inline int
kiblnd_dev_can_failover(kib_dev_t *dev)
{
	if (!list_empty(&dev->ibd_fail_list)) /* already scheduled */
		return 0;

	if (*kiblnd_tunables.kib_dev_failover == 0) /* disabled */
		return 0;

	if (*kiblnd_tunables.kib_dev_failover > 1) /* force failover */
		return 1;

	return dev->ibd_can_failover;
}
#define kiblnd_conn_addref(conn)				\
do {								\
	CDEBUG(D_NET, "conn[%p] (%d)++\n",			\
	       (conn), atomic_read(&(conn)->ibc_refcount));	\
	atomic_inc(&(conn)->ibc_refcount);			\
} while (0)

#define kiblnd_conn_decref(conn)					\
do {									\
	unsigned long flags;						\
									\
	CDEBUG(D_NET, "conn[%p] (%d)--\n",				\
	       (conn), atomic_read(&(conn)->ibc_refcount));		\
	LASSERT_ATOMIC_POS(&(conn)->ibc_refcount);			\
	if (atomic_dec_and_test(&(conn)->ibc_refcount)) {		\
		spin_lock_irqsave(&kiblnd_data.kib_connd_lock, flags);	\
		list_add_tail(&(conn)->ibc_list,			\
			      &kiblnd_data.kib_connd_zombies);		\
		wake_up(&kiblnd_data.kib_connd_waitq);			\
		spin_unlock_irqrestore(&kiblnd_data.kib_connd_lock, flags);\
	}								\
} while (0)

#define kiblnd_peer_addref(peer_ni)				\
do {								\
	CDEBUG(D_NET, "peer_ni[%p] -> %s (%d)++\n",		\
	       (peer_ni), libcfs_nid2str((peer_ni)->ibp_nid),	\
	       atomic_read(&(peer_ni)->ibp_refcount));		\
	atomic_inc(&(peer_ni)->ibp_refcount);			\
} while (0)

#define kiblnd_peer_decref(peer_ni)				\
do {								\
	CDEBUG(D_NET, "peer_ni[%p] -> %s (%d)--\n",		\
	       (peer_ni), libcfs_nid2str((peer_ni)->ibp_nid),	\
	       atomic_read(&(peer_ni)->ibp_refcount));		\
	LASSERT_ATOMIC_POS(&(peer_ni)->ibp_refcount);		\
	if (atomic_dec_and_test(&(peer_ni)->ibp_refcount))	\
		kiblnd_destroy_peer(peer_ni);			\
} while (0)
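
/*
 * Usage note: the last kiblnd_conn_decref() does not free the conn
 * directly; it queues the zombie on kib_connd_zombies and wakes connd,
 * which performs the teardown. By contrast, the last kiblnd_peer_decref()
 * destroys the peer_ni synchronously via kiblnd_destroy_peer().
 */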
static inline int
kiblnd_peer_connecting(kib_peer_ni_t *peer_ni)
{
	return peer_ni->ibp_connecting != 0 ||
	       peer_ni->ibp_reconnecting != 0 ||
	       peer_ni->ibp_accepting != 0;
}

static inline int
kiblnd_peer_idle(kib_peer_ni_t *peer_ni)
{
	return !kiblnd_peer_connecting(peer_ni) &&
	       list_empty(&peer_ni->ibp_conns);
}

static inline struct list_head *
kiblnd_nid2peerlist(lnet_nid_t nid)
{
	unsigned int hash =
		((unsigned int)nid) % kiblnd_data.kib_peer_hash_size;

	return &kiblnd_data.kib_peers[hash];
}

static inline int
kiblnd_peer_active(kib_peer_ni_t *peer_ni)
{
	/* Am I in the peer_ni hash table? */
	return !list_empty(&peer_ni->ibp_list);
}
static inline struct kib_conn *
kiblnd_get_conn_locked(kib_peer_ni_t *peer_ni)
{
	struct list_head *next;

	LASSERT(!list_empty(&peer_ni->ibp_conns));

	/* Advance to next connection, be sure to skip the head node */
	if (!peer_ni->ibp_next_conn ||
	    peer_ni->ibp_next_conn->ibc_list.next == &peer_ni->ibp_conns)
		next = peer_ni->ibp_conns.next;
	else
		next = peer_ni->ibp_next_conn->ibc_list.next;
	peer_ni->ibp_next_conn = list_entry(next, struct kib_conn, ibc_list);

	return peer_ni->ibp_next_conn;
}
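
/*
 * Illustration: with connections A, B, C on ibp_conns, successive calls
 * (made with the lock held, per the _locked suffix) return A, B, C, A, ...
 * -- a simple round robin that wraps past the list head rather than
 * ever returning it.
 */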
static inline int
kiblnd_send_keepalive(kib_conn_t *conn)
{
	return (*kiblnd_tunables.kib_keepalive > 0) &&
		cfs_time_after(jiffies, conn->ibc_last_send +
			       msecs_to_jiffies(*kiblnd_tunables.kib_keepalive *
						MSEC_PER_SEC));
}
static inline int
kiblnd_need_noop(kib_conn_t *conn)
{
	struct lnet_ni *ni = conn->ibc_peer->ibp_ni;
	struct lnet_ioctl_config_o2iblnd_tunables *tunables;

	LASSERT(conn->ibc_state >= IBLND_CONN_ESTABLISHED);
	tunables = &ni->ni_lnd_tunables.lnd_tun_u.lnd_o2ib;

	if (conn->ibc_outstanding_credits <
	    IBLND_CREDITS_HIGHWATER(tunables, conn->ibc_version) &&
	    !kiblnd_send_keepalive(conn))
		return 0; /* No need to send NOOP */

	if (IBLND_OOB_CAPABLE(conn->ibc_version)) {
		if (!list_empty(&conn->ibc_tx_queue_nocred))
			return 0; /* NOOP can be piggybacked */

		/* No tx to piggyback NOOP onto or no credit to send a tx */
		return (list_empty(&conn->ibc_tx_queue) ||
			conn->ibc_credits == 0);
	}

	if (!list_empty(&conn->ibc_tx_noops) ||       /* NOOP already queued */
	    !list_empty(&conn->ibc_tx_queue_nocred) || /* piggyback NOOP */
	    conn->ibc_credits == 0)                    /* no credit */
		return 0;

	if (conn->ibc_credits == 1 &&      /* last credit reserved for */
	    conn->ibc_outstanding_credits == 0) /* giving back credits */
		return 0;

	/* No tx to piggyback NOOP onto or no credit to send a tx */
	return (list_empty(&conn->ibc_tx_queue) || conn->ibc_credits == 1);
}
static inline void
kiblnd_abort_receives(kib_conn_t *conn)
{
	ib_modify_qp(conn->ibc_cmid->qp,
		     &kiblnd_data.kib_error_qpa, IB_QP_STATE);
}

static inline const char *
kiblnd_queue2str(kib_conn_t *conn, struct list_head *q)
{
	if (q == &conn->ibc_tx_queue)
		return "tx_queue";

	if (q == &conn->ibc_tx_queue_rsrvd)
		return "tx_queue_rsrvd";

	if (q == &conn->ibc_tx_queue_nocred)
		return "tx_queue_nocred";

	if (q == &conn->ibc_active_txs)
		return "active_txs";

	return "???";
}
/* CAVEAT EMPTOR: We rely on descriptor alignment to allow us to use the
 * lowest bits of the work request id to stash the work item type. */

#define IBLND_WID_INVAL	0
#define IBLND_WID_TX	1
#define IBLND_WID_RX	2
#define IBLND_WID_RDMA	3
#define IBLND_WID_MR	4
#define IBLND_WID_MASK	7UL
static inline __u64
kiblnd_ptr2wreqid(void *ptr, int type)
{
	unsigned long lptr = (unsigned long)ptr;

	LASSERT((lptr & IBLND_WID_MASK) == 0);
	LASSERT((type & ~IBLND_WID_MASK) == 0);
	return (__u64)(lptr | type);
}

static inline void *
kiblnd_wreqid2ptr(__u64 wreqid)
{
	return (void *)(((unsigned long)wreqid) & ~IBLND_WID_MASK);
}

static inline int
kiblnd_wreqid2type(__u64 wreqid)
{
	return (wreqid & IBLND_WID_MASK);
}
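
/*
 * Illustration: a tx descriptor at (8-byte aligned) address 0x1000 is
 * posted with wr_id = kiblnd_ptr2wreqid(tx, IBLND_WID_TX) == 0x1001; on
 * completion, kiblnd_wreqid2ptr() recovers 0x1000 and
 * kiblnd_wreqid2type() recovers IBLND_WID_TX from the low three bits.
 */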
static inline void
kiblnd_set_conn_state(kib_conn_t *conn, int state)
{
	conn->ibc_state = state;
	smp_mb();
}

static inline void
kiblnd_init_msg(kib_msg_t *msg, int type, int body_nob)
{
	msg->ibm_type = type;
	msg->ibm_nob  = offsetof(kib_msg_t, ibm_u) + body_nob;
}
static inline int
kiblnd_rd_size(kib_rdma_desc_t *rd)
{
	int i;
	int size;

	for (i = size = 0; i < rd->rd_nfrags; i++)
		size += rd->rd_frags[i].rf_nob;

	return size;
}

static inline __u64
kiblnd_rd_frag_addr(kib_rdma_desc_t *rd, int index)
{
	return rd->rd_frags[index].rf_addr;
}

static inline __u32
kiblnd_rd_frag_size(kib_rdma_desc_t *rd, int index)
{
	return rd->rd_frags[index].rf_nob;
}

static inline __u32
kiblnd_rd_frag_key(kib_rdma_desc_t *rd, int index)
{
	return rd->rd_key;
}

static inline int
kiblnd_rd_consume_frag(kib_rdma_desc_t *rd, int index, __u32 nob)
{
	if (nob < rd->rd_frags[index].rf_nob) {
		rd->rd_frags[index].rf_addr += nob;
		rd->rd_frags[index].rf_nob  -= nob;
	} else {
		index++;
	}

	return index;
}

static inline int
kiblnd_rd_msg_size(kib_rdma_desc_t *rd, int msgtype, int n)
{
	LASSERT(msgtype == IBLND_MSG_GET_REQ ||
		msgtype == IBLND_MSG_PUT_ACK);

	return msgtype == IBLND_MSG_GET_REQ ?
	       offsetof(kib_get_msg_t, ibgm_rd.rd_frags[n]) :
	       offsetof(kib_putack_msg_t, ibpam_rd.rd_frags[n]);
}
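
/*
 * Illustration: kib_rdma_desc_t ends in a flexible rd_frags[] array, so a
 * wire message carrying an n-frag descriptor is sized by offsetof() up to
 * rd_frags[n]; e.g. kiblnd_rd_msg_size(rd, IBLND_MSG_GET_REQ, 2) counts
 * the GET header plus exactly two kib_rdma_frag_t entries.
 */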
static inline int
kiblnd_dma_mapping_error(struct ib_device *dev, u64 dma_addr)
{
	return ib_dma_mapping_error(dev, dma_addr);
}

static inline __u64 kiblnd_dma_map_single(struct ib_device *dev,
					  void *msg, size_t size,
					  enum dma_data_direction direction)
{
	return ib_dma_map_single(dev, msg, size, direction);
}

static inline void kiblnd_dma_unmap_single(struct ib_device *dev,
					   __u64 addr, size_t size,
					   enum dma_data_direction direction)
{
	ib_dma_unmap_single(dev, addr, size, direction);
}

#define KIBLND_UNMAP_ADDR_SET(p, m, a)	do {} while (0)
#define KIBLND_UNMAP_ADDR(p, m, a)	(a)

static inline int kiblnd_dma_map_sg(struct ib_device *dev,
				    struct scatterlist *sg, int nents,
				    enum dma_data_direction direction)
{
	return ib_dma_map_sg(dev, sg, nents, direction);
}

static inline void kiblnd_dma_unmap_sg(struct ib_device *dev,
				       struct scatterlist *sg, int nents,
				       enum dma_data_direction direction)
{
	ib_dma_unmap_sg(dev, sg, nents, direction);
}

static inline __u64 kiblnd_sg_dma_address(struct ib_device *dev,
					  struct scatterlist *sg)
{
	return ib_sg_dma_address(dev, sg);
}

static inline unsigned int kiblnd_sg_dma_len(struct ib_device *dev,
					     struct scatterlist *sg)
{
	return ib_sg_dma_len(dev, sg);
}
/* XXX We use KIBLND_CONN_PARAM(e) as a writable buffer; that's not strictly
 * right because OFED 1.2 defines it as const, so to use it we have to add
 * a (void *) cast to overcome the "const" */

#define KIBLND_CONN_PARAM(e)		((e)->param.conn.private_data)
#define KIBLND_CONN_PARAM_LEN(e)	((e)->param.conn.private_data_len)
#ifdef HAVE_IB_GET_DMA_MR
struct ib_mr *kiblnd_find_rd_dma_mr(struct lnet_ni *ni, kib_rdma_desc_t *rd,
				    int negotiated_nfrags);
#endif
void kiblnd_map_rx_descs(kib_conn_t *conn);
void kiblnd_unmap_rx_descs(kib_conn_t *conn);
void kiblnd_pool_free_node(kib_pool_t *pool, struct list_head *node);
struct list_head *kiblnd_pool_alloc_node(kib_poolset_t *ps);

int  kiblnd_fmr_pool_map(kib_fmr_poolset_t *fps, kib_tx_t *tx,
			 kib_rdma_desc_t *rd, __u32 nob, __u64 iov,
			 kib_fmr_t *fmr, bool *is_fastreg);
void kiblnd_fmr_pool_unmap(kib_fmr_t *fmr, int status);

int  kiblnd_tunables_setup(struct lnet_ni *ni);
int  kiblnd_tunables_init(void);
int  kiblnd_connd(void *arg);
int  kiblnd_scheduler(void *arg);
int  kiblnd_thread_start(int (*fn)(void *arg), void *arg, char *name);
int  kiblnd_failover_thread(void *arg);

int  kiblnd_alloc_pages(kib_pages_t **pp, int cpt, int npages);

int  kiblnd_cm_callback(struct rdma_cm_id *cmid,
			struct rdma_cm_event *event);
int  kiblnd_translate_mtu(int value);

int  kiblnd_dev_failover(kib_dev_t *dev);
int  kiblnd_create_peer(struct lnet_ni *ni, kib_peer_ni_t **peerp,
			lnet_nid_t nid);
void kiblnd_destroy_peer(kib_peer_ni_t *peer);
bool kiblnd_reconnect_peer(kib_peer_ni_t *peer);
void kiblnd_destroy_dev(kib_dev_t *dev);
void kiblnd_unlink_peer_locked(kib_peer_ni_t *peer_ni);
kib_peer_ni_t *kiblnd_find_peer_locked(struct lnet_ni *ni, lnet_nid_t nid);
int  kiblnd_close_stale_conns_locked(kib_peer_ni_t *peer_ni,
				     int version, __u64 incarnation);
int  kiblnd_close_peer_conns_locked(kib_peer_ni_t *peer_ni, int why);

kib_conn_t *kiblnd_create_conn(kib_peer_ni_t *peer_ni, struct rdma_cm_id *cmid,
			       int state, int version);
void kiblnd_destroy_conn(kib_conn_t *conn, bool free_conn);
void kiblnd_close_conn(kib_conn_t *conn, int error);
void kiblnd_close_conn_locked(kib_conn_t *conn, int error);

void kiblnd_launch_tx(struct lnet_ni *ni, kib_tx_t *tx, lnet_nid_t nid);
void kiblnd_txlist_done(struct list_head *txlist, int status);

void kiblnd_qp_event(struct ib_event *event, void *arg);
void kiblnd_cq_event(struct ib_event *event, void *arg);
void kiblnd_cq_completion(struct ib_cq *cq, void *arg);

void kiblnd_pack_msg(struct lnet_ni *ni, kib_msg_t *msg, int version,
		     int credits, lnet_nid_t dstnid, __u64 dststamp);
int  kiblnd_unpack_msg(kib_msg_t *msg, int nob);
int  kiblnd_post_rx(kib_rx_t *rx, int credit);

int  kiblnd_send(struct lnet_ni *ni, void *private, struct lnet_msg *lntmsg);
int  kiblnd_recv(struct lnet_ni *ni, void *private, struct lnet_msg *lntmsg,
		 int delayed, unsigned int niov, struct kvec *iov,
		 lnet_kiov_t *kiov, unsigned int offset, unsigned int mlen,
		 unsigned int rlen);