/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2017, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lnet/klnds/o2iblnd/o2iblnd.h
 *
 * Author: Eric Barton <eric@bartonsoftware.com>
 */
#ifdef HAVE_COMPAT_RDMA
#include <linux/compat-2.6.h>
#endif

#ifdef LINUX_3_17_COMPAT_H
#undef NEED_KTIME_GET_REAL_NS
#endif
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/string.h>
#include <linux/stat.h>
#include <linux/errno.h>
#include <linux/unistd.h>
#include <linux/uio.h>

#include <asm/uaccess.h>

#include <linux/init.h>
#include <linux/file.h>
#include <linux/list.h>
#include <linux/kmod.h>
#include <linux/sysctl.h>
#include <linux/pci.h>

#include <rdma/rdma_cm.h>
#include <rdma/ib_cm.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_fmr_pool.h>
#define DEBUG_SUBSYSTEM S_LND

#include <libcfs/libcfs.h>
#include <lnet/lib-lnet.h>
#define IBLND_PEER_HASH_SIZE		101	/* # peer_ni lists */
/* # scheduler loops before reschedule */
#define IBLND_RESCHED			100

#define IBLND_N_SCHED			2
#define IBLND_N_SCHED_HIGH		4
struct kib_tunables {
	int		 *kib_dev_failover;	/* HCA failover */
	unsigned int	 *kib_service;		/* IB service number */
	int		 *kib_cksum;		/* checksum struct kib_msg? */
	int		 *kib_timeout;		/* comms timeout (seconds) */
	int		 *kib_keepalive;	/* keepalive timeout (seconds) */
	char		**kib_default_ipif;	/* default IPoIB interface */
	int		 *kib_retry_count;
	int		 *kib_rnr_retry_count;
	int		 *kib_ib_mtu;		/* IB MTU */
	int		 *kib_require_priv_port;/* accept only privileged ports */
	int		 *kib_use_priv_port;	/* use privileged port for active connect */
	int		 *kib_nscheds;		/* # threads on each CPT */
	int		 *kib_wrq_sge;		/* # sg elements per wrq */
	int		 *kib_use_fastreg_gaps;	/* enable discontiguous fastreg fragment support */
};

extern struct kib_tunables kiblnd_tunables;
#define IBLND_MSG_QUEUE_SIZE_V1		8	/* V1 only : # messages/RDMAs in-flight */
#define IBLND_CREDIT_HIGHWATER_V1	7	/* V1 only : when eagerly to return credits */

#define IBLND_CREDITS_DEFAULT		8	/* default # of peer_ni credits */
/* Max # of peer_ni credits */
#define IBLND_CREDITS_MAX	((typeof(((struct kib_msg *) 0)->ibm_credits)) - 1)

/* when eagerly to return credits */
#define IBLND_CREDITS_HIGHWATER(t, conn) ((conn->ibc_version) == IBLND_MSG_VERSION_1 ? \
					IBLND_CREDIT_HIGHWATER_V1 : \
			min(t->lnd_peercredits_hiw, (__u32)conn->ibc_queue_depth - 1))
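/*
 * Worked example (editor's sketch): on a version-2 connection with
 * ibc_queue_depth = 8 and lnd_peercredits_hiw = 4 the highwater is
 * min(4, 8 - 1) = 4, so a credit-returning NOOP is considered once
 * 4 credits are owed back to the peer_ni; a version-1 connection
 * always uses IBLND_CREDIT_HIGHWATER_V1 = 7.
 */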
#ifdef HAVE_RDMA_CREATE_ID_5ARG
# define kiblnd_rdma_create_id(ns, cb, dev, ps, qpt) \
	 rdma_create_id((ns) ? (ns) : &init_net, cb, dev, ps, qpt)
#else
# ifdef HAVE_RDMA_CREATE_ID_4ARG
#  define kiblnd_rdma_create_id(ns, cb, dev, ps, qpt) \
	  rdma_create_id(cb, dev, ps, qpt)
# else
#  define kiblnd_rdma_create_id(ns, cb, dev, ps, qpt) \
	  rdma_create_id(cb, dev, ps)
# endif
#endif
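/*
 * Editor's note: rdma_create_id() gained a network-namespace argument
 * in Linux 4.4, so the wrapper takes the union of the historical
 * argument lists and drops whatever the running kernel does not
 * accept.  A typical call would look like:
 *
 *	cmid = kiblnd_rdma_create_id(ns, kiblnd_cm_callback, dev,
 *				     RDMA_PS_TCP, IB_QPT_RC);
 */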
/* 2 OOB shall suffice for 1 keepalive and 1 returning credits */
#define IBLND_OOB_CAPABLE(v)	((v) != IBLND_MSG_VERSION_1)
#define IBLND_OOB_MSGS(v)	(IBLND_OOB_CAPABLE(v) ? 2 : 0)

#define IBLND_MSG_SIZE		(4<<10)		/* max size of queued messages (inc hdr) */
#define IBLND_MAX_RDMA_FRAGS	LNET_MAX_IOV	/* max # of fragments supported */
/************************/
/* derived constants... */
/* Pools (shared by connections on each CPT) */
/* These pools can grow at runtime, so they don't need a very large initial value */
#define IBLND_TX_POOL			256
#define IBLND_FMR_POOL			256
#define IBLND_FMR_POOL_FLUSH		192

/* RX messages (per connection) */
#define IBLND_RX_MSGS(c)	\
	((c->ibc_queue_depth) * 2 + IBLND_OOB_MSGS(c->ibc_version))
#define IBLND_RX_MSG_BYTES(c)	(IBLND_RX_MSGS(c) * IBLND_MSG_SIZE)
#define IBLND_RX_MSG_PAGES(c)	\
	((IBLND_RX_MSG_BYTES(c) + PAGE_SIZE - 1) / PAGE_SIZE)

/* WRs and CQEs (per connection) */
#define IBLND_RECV_WRS(c)	IBLND_RX_MSGS(c)

/* 2 = LNet msg + Transfer chain */
#define IBLND_CQ_ENTRIES(c)	(IBLND_RECV_WRS(c) + kiblnd_send_wrs(c))
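/*
 * Worked example (editor's sketch): a version-2 connection negotiated
 * with ibc_queue_depth = 8 posts IBLND_RX_MSGS = 8 * 2 + 2 = 18
 * receive buffers, i.e. 18 * 4KiB = 72KiB of message space, which on
 * a 4KiB-page host is IBLND_RX_MSG_PAGES = 18 pages.
 */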
/* o2iblnd can run over aliased interface */
#ifdef IFALIASZ
#define KIB_IFNAME_SIZE		IFALIASZ
#else
#define KIB_IFNAME_SIZE		256
#endif
enum kib_dev_caps {
	IBLND_DEV_CAPS_FASTREG_ENABLED		= BIT(0),
	IBLND_DEV_CAPS_FASTREG_GAPS_SUPPORT	= BIT(1),
	IBLND_DEV_CAPS_FMR_ENABLED		= BIT(2),
};
struct kib_dev {
	struct list_head	ibd_list;	/* chain on kib_devs */
	struct list_head	ibd_fail_list;	/* chain on kib_failed_devs */
	__u32			ibd_ifip;	/* IPoIB interface IP */
	/** IPoIB interface name */
	char			ibd_ifname[KIB_IFNAME_SIZE];
	int			ibd_nnets;	/* # nets extant */

	time64_t		ibd_next_failover;
	/* # failover failures */
	int			ibd_failed_failover;
	/* failover in progress */
	unsigned int		ibd_failover;
	/* IPoIB interface is a bonding master */
	unsigned int		ibd_can_failover;
	struct list_head	ibd_nets;
	struct kib_hca_dev	*ibd_hdev;
	enum kib_dev_caps	ibd_dev_caps;
};
struct kib_hca_dev {
	struct rdma_cm_id	*ibh_cmid;	/* listener cmid */
	struct ib_device	*ibh_ibdev;	/* IB device */
	int			ibh_page_shift;	/* page shift of current HCA */
	int			ibh_page_size;	/* page size of current HCA */
	__u64			ibh_page_mask;	/* page mask of current HCA */
	__u64			ibh_mr_size;	/* size of MR */
	int			ibh_max_qp_wr;	/* maximum work requests size */
#ifdef HAVE_IB_GET_DMA_MR
	struct ib_mr		*ibh_mrs;	/* global MR */
#endif
	struct ib_pd		*ibh_pd;	/* PD */
	struct kib_dev		*ibh_dev;	/* owner */
	atomic_t		ibh_ref;	/* refcount */
};
/** # of seconds to keep pool alive */
#define IBLND_POOL_DEADLINE	300
/** # of seconds to retry if allocation failed */
#define IBLND_POOL_RETRY	1
struct kib_pages {
	int			ibp_npages;	/* # pages */
	struct page		*ibp_pages[0];	/* page array */
};
struct kib_pool;
struct kib_poolset;

typedef int  (*kib_ps_pool_create_t)(struct kib_poolset *ps,
				     int inc, struct kib_pool **pp_po);
typedef void (*kib_ps_pool_destroy_t)(struct kib_pool *po);
typedef void (*kib_ps_node_init_t)(struct kib_pool *po, struct list_head *node);
typedef void (*kib_ps_node_fini_t)(struct kib_pool *po, struct list_head *node);
#define IBLND_POOL_NAME_LEN	32

struct kib_poolset {
	/* network it belongs to */
	struct kib_net		*ps_net;
	/* pool set name */
	char			ps_name[IBLND_POOL_NAME_LEN];
	/* list of pools */
	struct list_head	ps_pool_list;
	/* failed pool list */
	struct list_head	ps_failed_pool_list;
	/* time stamp for retry if failed to allocate */
	time64_t		ps_next_retry;
	/* is allocating new pool */
	int			ps_increasing;

	/* create a new pool */
	kib_ps_pool_create_t	ps_pool_create;
	/* destroy a pool */
	kib_ps_pool_destroy_t	ps_pool_destroy;
	/* initialize new allocated node */
	kib_ps_node_init_t	ps_node_init;
	/* finalize node */
	kib_ps_node_fini_t	ps_node_fini;
};
struct kib_pool {
	/* chain on pool list */
	struct list_head	po_list;
	/* pre-allocated node */
	struct list_head	po_free_list;
	/* pool_set of this pool */
	struct kib_poolset	*po_owner;
	/* deadline of this pool */
	time64_t		po_deadline;
	/* # of elements in use */
	int			po_allocated;
	/* pool is created on failed HCA */
	int			po_failed;
	/* # of pre-allocated elements */
	int			po_size;
};
struct kib_tx_poolset {
	struct kib_poolset	tps_poolset;		/* pool-set */
	__u64			tps_next_tx_cookie;	/* cookie of TX */
};

struct kib_tx_pool {
	struct kib_pool		tpo_pool;		/* pool */
	struct kib_hca_dev	*tpo_hdev;		/* device for this pool */
	struct kib_tx		*tpo_tx_descs;		/* all the tx descriptors */
	struct kib_pages	*tpo_tx_pages;		/* premapped tx msg pages */
};
struct kib_fmr_poolset {
	spinlock_t		fps_lock;		/* serialize */
	struct kib_net		*fps_net;		/* IB network */
	struct list_head	fps_pool_list;		/* FMR pool list */
	struct list_head	fps_failed_pool_list;	/* failed FMR pool list */
	__u64			fps_version;		/* validity stamp */
	int			fps_cpt;		/* CPT id */
	int			fps_flush_trigger;
	/* is allocating new pool */
	int			fps_increasing;
	/* time stamp for retry if failed to allocate */
	time64_t		fps_next_retry;
};
#ifndef HAVE_IB_RDMA_WR
struct ib_rdma_wr {
	struct ib_send_wr wr;
};
#endif
struct kib_fast_reg_descriptor { /* For fast registration */
	struct list_head		 frd_list;
	struct ib_rdma_wr		 frd_inv_wr;
#ifdef HAVE_IB_MAP_MR_SG
	struct ib_reg_wr		 frd_fastreg_wr;
#else
	struct ib_rdma_wr		 frd_fastreg_wr;
	struct ib_fast_reg_page_list	*frd_frpl;
#endif
	struct ib_mr			*frd_mr;
	bool				 frd_valid;
};
struct kib_fmr_pool {
	struct list_head	fpo_list;	/* chain on pool list */
	struct kib_hca_dev	*fpo_hdev;	/* device for this pool */
	struct kib_fmr_poolset	*fpo_owner;	/* owner of this pool */
	union {
		struct {
			struct ib_fmr_pool *fpo_fmr_pool; /* IB FMR pool */
		} fmr;
		struct { /* For fast registration */
			struct list_head	fpo_pool_list;
		} fast_reg;
	};
	time64_t		fpo_deadline;	/* deadline of this pool */
	int			fpo_failed;	/* fmr pool is failed */
	int			fpo_map_count;	/* # of mapped FMR */
	bool			fpo_is_fmr;	/* True if FMR pools allocated */
};
struct kib_fmr {
	struct kib_fmr_pool		*fmr_pool;	/* pool of FMR */
	struct ib_pool_fmr		*fmr_pfmr;	/* IB pool fmr */
	struct kib_fast_reg_descriptor	*fmr_frd;
};
#ifdef HAVE_ORACLE_OFED_EXTENSIONS
#define kib_fmr_pool_map(pool, pgs, n, iov) \
	ib_fmr_pool_map_phys((pool), (pgs), (n), (iov), NULL)
#else
#define kib_fmr_pool_map(pool, pgs, n, iov) \
	ib_fmr_pool_map_phys((pool), (pgs), (n), (iov))
#endif
struct kib_net {
	/* chain on struct kib_dev::ibd_nets */
	struct list_head	ibn_list;
	__u64			ibn_incarnation;/* my epoch */
	int			ibn_init;	/* initialisation state */
	int			ibn_shutdown;	/* shutting down? */

	atomic_t		ibn_npeers;	/* # peers extant */
	atomic_t		ibn_nconns;	/* # connections extant */

	struct kib_tx_poolset	**ibn_tx_ps;	/* tx pool-set */
	struct kib_fmr_poolset	**ibn_fmr_ps;	/* fmr pool-set */

	struct kib_dev		*ibn_dev;	/* underlying IB device */
};
#define KIB_THREAD_SHIFT		16
#define KIB_THREAD_ID(cpt, tid)		((cpt) << KIB_THREAD_SHIFT | (tid))
#define KIB_THREAD_CPT(id)		((id) >> KIB_THREAD_SHIFT)
#define KIB_THREAD_TID(id)		((id) & ((1UL << KIB_THREAD_SHIFT) - 1))
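/*
 * Example (editor's sketch): the scheduler thread with tid 3 on CPT 2
 * gets id KIB_THREAD_ID(2, 3) = (2 << 16) | 3 = 0x20003, and
 * KIB_THREAD_CPT()/KIB_THREAD_TID() recover the two halves.
 */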
struct kib_sched_info {
	/* serialise */
	spinlock_t		ibs_lock;
	/* schedulers sleep here */
	wait_queue_head_t	ibs_waitq;
	/* conns to check for rx completions */
	struct list_head	ibs_conns;
	/* number of scheduler threads */
	int			ibs_nthreads;
	/* max allowed scheduler threads */
	int			ibs_nthreads_max;
	int			ibs_cpt;	/* CPT id */
};
struct kib_data {
	int			kib_init;	/* initialisation state */
	int			kib_shutdown;	/* shut down? */
	struct list_head	kib_devs;	/* IB devices extant */
	/* list head of failed devices */
	struct list_head	kib_failed_devs;
	/* failover thread sleeps here */
	wait_queue_head_t	kib_failover_waitq;
	atomic_t		kib_nthreads;	/* # live threads */
	/* stabilize net/dev/peer_ni/conn ops */
	rwlock_t		kib_global_lock;
	/* hash table of all my known peers */
	struct list_head	*kib_peers;
	/* size of kib_peers */
	int			kib_peer_hash_size;
	/* the connd task (serialisation assertions) */
	void			*kib_connd;
	/* connections to setup/teardown */
	struct list_head	kib_connd_conns;
	/* connections with zero refcount */
	struct list_head	kib_connd_zombies;
	/* connections to reconnect */
	struct list_head	kib_reconn_list;
	/* peers wait for reconnection */
	struct list_head	kib_reconn_wait;
	/*
	 * The second that peers are pulled out from \a kib_reconn_wait
	 * for reconnection.
	 */
	time64_t		kib_reconn_sec;
	/* connection daemon sleeps here */
	wait_queue_head_t	kib_connd_waitq;
	spinlock_t		kib_connd_lock;	/* serialise */
	struct ib_qp_attr	kib_error_qpa;	/* QP->ERROR */
	/* percpt data for schedulers */
	struct kib_sched_info	**kib_scheds;
};
#define IBLND_INIT_NOTHING	0
#define IBLND_INIT_DATA		1
#define IBLND_INIT_ALL		2
/************************************************************************
 * IB Wire message format.
 * These are sent in sender's byte order (i.e. receiver flips).
 */
struct kib_connparams {
	__u16			ibcp_queue_depth;
	__u16			ibcp_max_frags;
	__u32			ibcp_max_msg_size;
} WIRE_ATTR;

struct kib_immediate_msg {
	struct lnet_hdr		ibim_hdr;	/* portals header */
	char			ibim_payload[0];/* piggy-backed payload */
} WIRE_ATTR;

struct kib_rdma_frag {
	__u32			rf_nob;		/* # bytes this frag */
	__u64			rf_addr;	/* CAVEAT EMPTOR: misaligned!! */
} WIRE_ATTR;

struct kib_rdma_desc {
	__u32			rd_key;		/* local/remote key */
	__u32			rd_nfrags;	/* # fragments */
	struct kib_rdma_frag	rd_frags[0];	/* buffer frags */
} WIRE_ATTR;

struct kib_putreq_msg {
	struct lnet_hdr		ibprm_hdr;	/* portals header */
	__u64			ibprm_cookie;	/* opaque completion cookie */
} WIRE_ATTR;

struct kib_putack_msg {
	__u64			ibpam_src_cookie;/* reflected completion cookie */
	__u64			ibpam_dst_cookie;/* opaque completion cookie */
	struct kib_rdma_desc	ibpam_rd;	/* sender's sink buffer */
} WIRE_ATTR;

struct kib_get_msg {
	struct lnet_hdr		ibgm_hdr;	/* portals header */
	__u64			ibgm_cookie;	/* opaque completion cookie */
	struct kib_rdma_desc	ibgm_rd;	/* rdma descriptor */
} WIRE_ATTR;

struct kib_completion_msg {
	__u64			ibcm_cookie;	/* opaque completion cookie */
	__s32			ibcm_status;	/* < 0 failure: >= 0 length */
} WIRE_ATTR;
struct kib_msg {
	/* First 2 fields fixed FOR ALL TIME */
	__u32			ibm_magic;	/* I'm an ibnal message */
	__u16			ibm_version;	/* this is my version number */

	__u8			ibm_type;	/* msg type */
	__u8			ibm_credits;	/* returned credits */
	__u32			ibm_nob;	/* # bytes in whole message */
	__u32			ibm_cksum;	/* checksum (0 == no checksum) */
	__u64			ibm_srcnid;	/* sender's NID */
	__u64			ibm_srcstamp;	/* sender's incarnation */
	__u64			ibm_dstnid;	/* destination's NID */
	__u64			ibm_dststamp;	/* destination's incarnation */

	union {
		struct kib_connparams		connparams;
		struct kib_immediate_msg	immediate;
		struct kib_putreq_msg		putreq;
		struct kib_putack_msg		putack;
		struct kib_get_msg		get;
		struct kib_completion_msg	completion;
	} WIRE_ATTR ibm_u;
} WIRE_ATTR;
#define IBLND_MSG_MAGIC		LNET_PROTO_IB_MAGIC	/* unique magic */

#define IBLND_MSG_VERSION_1	0x11
#define IBLND_MSG_VERSION_2	0x12
#define IBLND_MSG_VERSION	IBLND_MSG_VERSION_2
#define IBLND_MSG_CONNREQ	0xc0	/* connection request */
#define IBLND_MSG_CONNACK	0xc1	/* connection acknowledge */
#define IBLND_MSG_NOOP		0xd0	/* nothing (just credits) */
#define IBLND_MSG_IMMEDIATE	0xd1	/* immediate */
#define IBLND_MSG_PUT_REQ	0xd2	/* putreq (src->sink) */
#define IBLND_MSG_PUT_NAK	0xd3	/* completion (sink->src) */
#define IBLND_MSG_PUT_ACK	0xd4	/* putack (sink->src) */
#define IBLND_MSG_PUT_DONE	0xd5	/* completion (src->sink) */
#define IBLND_MSG_GET_REQ	0xd6	/* getreq (sink->src) */
#define IBLND_MSG_GET_DONE	0xd7	/* completion (src->sink: all OK) */
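/*
 * Editor's note on the flow these types imply: small payloads travel
 * in a single IBLND_MSG_IMMEDIATE.  A large PUT is PUT_REQ (src->sink),
 * then PUT_ACK carrying the sink's RDMA descriptor (sink->src), an
 * RDMA transfer by the source, and finally PUT_DONE (src->sink); the
 * sink may answer PUT_NAK instead to refuse.  A GET is GET_REQ
 * carrying the sink's descriptor (sink->src), an RDMA transfer by the
 * source, then GET_DONE (src->sink).
 */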
struct kib_rej {
	__u32			ibr_magic;	/* sender's magic */
	__u16			ibr_version;	/* sender's version */
	__u8			ibr_why;	/* reject reason */
	__u8			ibr_padding;	/* padding */
	__u64			ibr_incarnation;/* incarnation of peer_ni */
	struct kib_connparams	ibr_cp;		/* connection parameters */
} WIRE_ATTR;
/* connection rejection reasons */
#define IBLND_REJECT_CONN_RACE		1 /* You lost connection race */
#define IBLND_REJECT_NO_RESOURCES	2 /* Out of memory/conns etc */
#define IBLND_REJECT_FATAL		3 /* Anything else */
#define IBLND_REJECT_CONN_UNCOMPAT	4 /* incompatible version peer_ni */
#define IBLND_REJECT_CONN_STALE		5 /* stale peer_ni */
/* peer_ni's rdma frags don't match mine */
#define IBLND_REJECT_RDMA_FRAGS		6
/* peer_ni's msg queue size doesn't match mine */
#define IBLND_REJECT_MSG_QUEUE_SIZE	7
#define IBLND_REJECT_INVALID_SRV_ID	8
/***********************************************************************/

struct kib_rx {					/* receive message */
	/* queue for attention */
	struct list_head	rx_list;
	/* owning conn */
	struct kib_conn		*rx_conn;
	/* # bytes received (-1 while posted) */
	int			rx_nob;
	/* message buffer (host vaddr) */
	struct kib_msg		*rx_msg;
	/* message buffer (I/O addr) */
	__u64			rx_msgaddr;
	/* for dma_unmap_single() */
	DEFINE_DMA_UNMAP_ADDR(rx_msgunmap);
	/* receive work item... */
	struct ib_recv_wr	rx_wrq;
	/* ...and its memory */
	struct ib_sge		rx_sge;
};
#define IBLND_POSTRX_DONT_POST		0 /* don't post */
#define IBLND_POSTRX_NO_CREDIT		1 /* post: no credits */
#define IBLND_POSTRX_PEER_CREDIT	2 /* post: give peer_ni back 1 credit */
#define IBLND_POSTRX_RSRVD_CREDIT	3 /* post: give myself back 1 reserved credit */
struct kib_tx {					/* transmit message */
	/* queue on idle_txs ibc_tx_queue etc. */
	struct list_head	tx_list;
	/* pool I'm from */
	struct kib_tx_pool	*tx_pool;
	/* owning conn */
	struct kib_conn		*tx_conn;
	/* # tx callbacks outstanding */
	short			tx_sending;
	/* queued for sending */
	short			tx_queued;
	/* waiting for peer_ni */
	short			tx_waiting;
	/* LNET completion status */
	int			tx_status;
	/* health status of the transmit */
	enum lnet_msg_hstatus	tx_hstatus;
	/* completion deadline */
	ktime_t			tx_deadline;
	/* completion cookie */
	__u64			tx_cookie;
	/* lnet msgs to finalize on completion */
	struct lnet_msg		*tx_lntmsg[2];
	/* message buffer (host vaddr) */
	struct kib_msg		*tx_msg;
	/* message buffer (I/O addr) */
	__u64			tx_msgaddr;
	/* for dma_unmap_single() */
	DEFINE_DMA_UNMAP_ADDR(tx_msgunmap);
	/** sge for tx_msgaddr */
	struct ib_sge		tx_msgsge;
	/* # send work items */
	int			tx_nwrq;
	/* # used scatter/gather elements */
	int			tx_nsge;
	/* send work items... */
	struct ib_rdma_wr	*tx_wrq;
	/* ...and their memory */
	struct ib_sge		*tx_sge;
	/* rdma descriptor */
	struct kib_rdma_desc	*tx_rd;
	/* # entries in... */
	int			tx_nfrags;
	/* dma_map_sg descriptor */
	struct scatterlist	*tx_frags;
	/* rdma phys page addrs */
	__u64			*tx_pages;
	/* gaps in fragments */
	bool			tx_gaps;
	/* FMR */
	struct kib_fmr		tx_fmr;
};
struct kib_connvars {
	/* connection-in-progress variables */
	struct kib_msg		cv_msg;
};
struct kib_conn {
	/* scheduler information */
	struct kib_sched_info	*ibc_sched;
	/* owner */
	struct kib_peer_ni	*ibc_peer;
	/* HCA bound on */
	struct kib_hca_dev	*ibc_hdev;
	/* stash on peer_ni's conn list */
	struct list_head	ibc_list;
	/* schedule for attention */
	struct list_head	ibc_sched_list;
	/* version of connection */
	__u16			ibc_version;
	/* reconnect later */
	__u16			ibc_reconnect:1;
	/* which instance of the peer */
	__u64			ibc_incarnation;
	/* # users */
	atomic_t		ibc_refcount;
	/* what's happening */
	int			ibc_state;
	/* # uncompleted sends */
	int			ibc_nsends_posted;
	/* # uncompleted NOOPs */
	int			ibc_noops_posted;
	/* # credits I have */
	int			ibc_credits;
	/* # credits to return */
	int			ibc_outstanding_credits;
	/* # ACK/DONE msg credits */
	int			ibc_reserved_credits;
	/* set on comms error */
	int			ibc_comms_error;
	/* connections queue depth */
	__u16			ibc_queue_depth;
	/* connections max frags */
	__u16			ibc_max_frags;
	/* receive buffers owned */
	unsigned int		ibc_nrx:16;
	/* scheduled for attention */
	unsigned int		ibc_scheduled:1;
	/* CQ callback fired */
	unsigned int		ibc_ready:1;
	/* time of last send */
	ktime_t			ibc_last_send;
	/** link chain for kiblnd_check_conns only */
	struct list_head	ibc_connd_list;
	/** rxs completed before ESTABLISHED */
	struct list_head	ibc_early_rxs;
	/** IBLND_MSG_NOOPs for IBLND_MSG_VERSION_1 */
	struct list_head	ibc_tx_noops;
	/* sends that need a credit */
	struct list_head	ibc_tx_queue;
	/* sends that don't need a credit */
	struct list_head	ibc_tx_queue_nocred;
	/* sends that need to reserve an ACK/DONE msg */
	struct list_head	ibc_tx_queue_rsrvd;
	/* active tx awaiting completion */
	struct list_head	ibc_active_txs;
	/* zombie tx awaiting done */
	struct list_head	ibc_zombie_txs;
	/* serialise */
	spinlock_t		ibc_lock;
	/* the rx descs */
	struct kib_rx		*ibc_rxs;
	/* premapped rx msg pages */
	struct kib_pages	*ibc_rx_pages;

	/* CM id */
	struct rdma_cm_id	*ibc_cmid;
	/* completion queue */
	struct ib_cq		*ibc_cq;

	/* in-progress connection state */
	struct kib_connvars	*ibc_connvars;
};
#define IBLND_CONN_INIT			0	/* being initialised */
#define IBLND_CONN_ACTIVE_CONNECT	1	/* active sending req */
#define IBLND_CONN_PASSIVE_WAIT		2	/* passive waiting for rtu */
#define IBLND_CONN_ESTABLISHED		3	/* connection established */
#define IBLND_CONN_CLOSING		4	/* being closed */
#define IBLND_CONN_DISCONNECTED		5	/* disconnected */
struct kib_peer_ni {
	/* stash on global peer_ni list */
	struct list_head	ibp_list;
	/* who's on the other end(s) */
	lnet_nid_t		ibp_nid;
	/* LNet interface */
	struct lnet_ni		*ibp_ni;
	/* all active connections */
	struct list_head	ibp_conns;
	/* next connection to send on for round robin */
	struct kib_conn		*ibp_next_conn;
	/* msgs waiting for a conn */
	struct list_head	ibp_tx_queue;
	/* incarnation of peer_ni */
	__u64			ibp_incarnation;
	/* when (in seconds) I was last alive */
	time64_t		ibp_last_alive;
	/* # users */
	atomic_t		ibp_refcount;
	/* version of peer_ni */
	__u16			ibp_version;
	/* current passive connection attempts */
	unsigned short		ibp_accepting;
	/* current active connection attempts */
	unsigned short		ibp_connecting;
	/* reconnect this peer_ni later */
	unsigned char		ibp_reconnecting;
	/* counter of how many times we triggered a conn race */
	unsigned char		ibp_races;
	/* # consecutive reconnection attempts to this peer */
	unsigned int		ibp_reconnected;
	/* errno on closing this peer_ni */
	int			ibp_error;
	/* max map_on_demand */
	__u16			ibp_max_frags;
	/* max_peer_credits */
	__u16			ibp_queue_depth;
};
#ifndef HAVE_IB_INC_RKEY
/**
 * ib_inc_rkey - increments the key portion of the given rkey. Can be used
 * for calculating a new rkey for type 2 memory windows.
 * @rkey - the rkey to increment.
 */
static inline u32 ib_inc_rkey(u32 rkey)
{
	const u32 mask = 0x000000ff;

	return ((rkey + 1) & mask) | (rkey & ~mask);
}
#endif
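/*
 * Example (editor's sketch): only the low byte is the key portion, so
 * ib_inc_rkey(0x12345607) == 0x12345608 and ib_inc_rkey(0x123456ff)
 * wraps to 0x12345600; the upper 24 bits of the rkey are preserved.
 */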
extern struct kib_data kiblnd_data;

extern void kiblnd_hdev_destroy(struct kib_hca_dev *hdev);

int kiblnd_msg_queue_size(int version, struct lnet_ni *ni);
static inline int
kiblnd_concurrent_sends(int version, struct lnet_ni *ni)
{
	struct lnet_ioctl_config_o2iblnd_tunables *tunables;
	int concurrent_sends;

	tunables = &ni->ni_lnd_tunables.lnd_tun_u.lnd_o2ib;
	concurrent_sends = tunables->lnd_concurrent_sends;

	if (version == IBLND_MSG_VERSION_1) {
		if (concurrent_sends > IBLND_MSG_QUEUE_SIZE_V1 * 2)
			return IBLND_MSG_QUEUE_SIZE_V1 * 2;

		if (concurrent_sends < IBLND_MSG_QUEUE_SIZE_V1 / 2)
			return IBLND_MSG_QUEUE_SIZE_V1 / 2;
	}

	return concurrent_sends;
}
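/*
 * Worked example (editor's sketch): with lnd_concurrent_sends = 64 a
 * version-1 connection is clamped down to IBLND_MSG_QUEUE_SIZE_V1 * 2
 * = 16, and with lnd_concurrent_sends = 2 it is raised to 8 / 2 = 4;
 * a version-2 connection uses the tunable unmodified.
 */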
static inline void
kiblnd_hdev_addref_locked(struct kib_hca_dev *hdev)
{
	LASSERT(atomic_read(&hdev->ibh_ref) > 0);
	atomic_inc(&hdev->ibh_ref);
}

static inline void
kiblnd_hdev_decref(struct kib_hca_dev *hdev)
{
	LASSERT(atomic_read(&hdev->ibh_ref) > 0);
	if (atomic_dec_and_test(&hdev->ibh_ref))
		kiblnd_hdev_destroy(hdev);
}
static inline int
kiblnd_dev_can_failover(struct kib_dev *dev)
{
	if (!list_empty(&dev->ibd_fail_list)) /* already scheduled */
		return 0;

	if (*kiblnd_tunables.kib_dev_failover == 0) /* disabled */
		return 0;

	if (*kiblnd_tunables.kib_dev_failover > 1) /* force failover */
		return 1;

	return dev->ibd_can_failover;
}
#define kiblnd_conn_addref(conn)				\
do {								\
	CDEBUG(D_NET, "conn[%p] (%d)++\n",			\
	       (conn), atomic_read(&(conn)->ibc_refcount));	\
	atomic_inc(&(conn)->ibc_refcount);			\
} while (0)

#define kiblnd_conn_decref(conn)					\
do {									\
	unsigned long flags;						\
									\
	CDEBUG(D_NET, "conn[%p] (%d)--\n",				\
	       (conn), atomic_read(&(conn)->ibc_refcount));		\
	LASSERT_ATOMIC_POS(&(conn)->ibc_refcount);			\
	if (atomic_dec_and_test(&(conn)->ibc_refcount)) {		\
		spin_lock_irqsave(&kiblnd_data.kib_connd_lock, flags);	\
		list_add_tail(&(conn)->ibc_list,			\
			      &kiblnd_data.kib_connd_zombies);		\
		wake_up(&kiblnd_data.kib_connd_waitq);			\
		spin_unlock_irqrestore(&kiblnd_data.kib_connd_lock, flags);\
	}								\
} while (0)
#define kiblnd_peer_addref(peer_ni)				\
do {								\
	CDEBUG(D_NET, "peer_ni[%p] -> %s (%d)++\n",		\
	       (peer_ni), libcfs_nid2str((peer_ni)->ibp_nid),	\
	       atomic_read(&(peer_ni)->ibp_refcount));		\
	atomic_inc(&(peer_ni)->ibp_refcount);			\
} while (0)

#define kiblnd_peer_decref(peer_ni)				\
do {								\
	CDEBUG(D_NET, "peer_ni[%p] -> %s (%d)--\n",		\
	       (peer_ni), libcfs_nid2str((peer_ni)->ibp_nid),	\
	       atomic_read(&(peer_ni)->ibp_refcount));		\
	LASSERT_ATOMIC_POS(&(peer_ni)->ibp_refcount);		\
	if (atomic_dec_and_test(&(peer_ni)->ibp_refcount))	\
		kiblnd_destroy_peer(peer_ni);			\
} while (0)
static inline bool
kiblnd_peer_connecting(struct kib_peer_ni *peer_ni)
{
	return peer_ni->ibp_connecting != 0 ||
	       peer_ni->ibp_reconnecting != 0 ||
	       peer_ni->ibp_accepting != 0;
}

static inline bool
kiblnd_peer_idle(struct kib_peer_ni *peer_ni)
{
	return !kiblnd_peer_connecting(peer_ni) &&
	       list_empty(&peer_ni->ibp_conns);
}
static inline struct list_head *
kiblnd_nid2peerlist(lnet_nid_t nid)
{
	unsigned int hash =
		((unsigned int)nid) % kiblnd_data.kib_peer_hash_size;

	return &kiblnd_data.kib_peers[hash];
}
static inline int
kiblnd_peer_active(struct kib_peer_ni *peer_ni)
{
	/* Am I in the peer_ni hash table? */
	return !list_empty(&peer_ni->ibp_list);
}
static inline struct kib_conn *
kiblnd_get_conn_locked(struct kib_peer_ni *peer_ni)
{
	struct list_head *next;

	LASSERT(!list_empty(&peer_ni->ibp_conns));

	/* Advance to next connection, be sure to skip the head node */
	if (!peer_ni->ibp_next_conn ||
	    peer_ni->ibp_next_conn->ibc_list.next == &peer_ni->ibp_conns)
		next = peer_ni->ibp_conns.next;
	else
		next = peer_ni->ibp_next_conn->ibc_list.next;
	peer_ni->ibp_next_conn = list_entry(next, struct kib_conn, ibc_list);

	return peer_ni->ibp_next_conn;
}
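/*
 * Editor's note: ibp_conns is treated as a circular list here, so
 * successive calls hand out the peer_ni's connections round-robin;
 * the _locked suffix means the caller is expected to hold
 * kib_global_lock while ibp_next_conn is advanced in place.
 */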
static inline int
kiblnd_send_keepalive(struct kib_conn *conn)
{
	s64 keepalive_ns = *kiblnd_tunables.kib_keepalive * NSEC_PER_SEC;

	return (*kiblnd_tunables.kib_keepalive > 0) &&
		ktime_after(ktime_get(),
			    ktime_add_ns(conn->ibc_last_send, keepalive_ns));
}
static inline int
kiblnd_need_noop(struct kib_conn *conn)
{
	struct lnet_ni *ni = conn->ibc_peer->ibp_ni;
	struct lnet_ioctl_config_o2iblnd_tunables *tunables;

	LASSERT(conn->ibc_state >= IBLND_CONN_ESTABLISHED);
	tunables = &ni->ni_lnd_tunables.lnd_tun_u.lnd_o2ib;

	if (conn->ibc_outstanding_credits <
	    IBLND_CREDITS_HIGHWATER(tunables, conn) &&
	    !kiblnd_send_keepalive(conn))
		return 0; /* No need to send NOOP */

	if (IBLND_OOB_CAPABLE(conn->ibc_version)) {
		if (!list_empty(&conn->ibc_tx_queue_nocred))
			return 0; /* NOOP can be piggybacked */

		/* No tx to piggyback NOOP onto or no credit to send a tx */
		return (list_empty(&conn->ibc_tx_queue) ||
			conn->ibc_credits == 0);
	}

	if (!list_empty(&conn->ibc_tx_noops) ||	/* NOOP already queued */
	    !list_empty(&conn->ibc_tx_queue_nocred) || /* piggyback NOOP */
	    conn->ibc_credits == 0)		/* no credit */
		return 0;

	if (conn->ibc_credits == 1 &&		/* last credit reserved for */
	    conn->ibc_outstanding_credits == 0)	/* giving back credits */
		return 0;

	/* No tx to piggyback NOOP onto or no credit to send a tx */
	return (list_empty(&conn->ibc_tx_queue) || conn->ibc_credits == 1);
}
static inline void
kiblnd_abort_receives(struct kib_conn *conn)
{
	ib_modify_qp(conn->ibc_cmid->qp,
		     &kiblnd_data.kib_error_qpa, IB_QP_STATE);
}
static inline const char *
kiblnd_queue2str(struct kib_conn *conn, struct list_head *q)
{
	if (q == &conn->ibc_tx_queue)
		return "tx_queue";

	if (q == &conn->ibc_tx_queue_rsrvd)
		return "tx_queue_rsrvd";

	if (q == &conn->ibc_tx_queue_nocred)
		return "tx_queue_nocred";

	if (q == &conn->ibc_active_txs)
		return "active_txs";

	return NULL;
}
/* CAVEAT EMPTOR: We rely on descriptor alignment to allow us to use the
 * lowest bits of the work request id to stash the work item type. */

#define IBLND_WID_INVAL	0
#define IBLND_WID_TX	1
#define IBLND_WID_RX	2
#define IBLND_WID_RDMA	3
#define IBLND_WID_MR	4
#define IBLND_WID_MASK	7UL
static inline __u64
kiblnd_ptr2wreqid(void *ptr, int type)
{
	unsigned long lptr = (unsigned long)ptr;

	LASSERT((lptr & IBLND_WID_MASK) == 0);
	LASSERT((type & ~IBLND_WID_MASK) == 0);
	return (__u64)(lptr | type);
}

static inline void *
kiblnd_wreqid2ptr(__u64 wreqid)
{
	return (void *)(((unsigned long)wreqid) & ~IBLND_WID_MASK);
}

static inline int
kiblnd_wreqid2type(__u64 wreqid)
{
	return (wreqid & IBLND_WID_MASK);
}
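/*
 * Round-trip example (editor's sketch): for an 8-byte-aligned tx
 * descriptor at 0x...a0, kiblnd_ptr2wreqid(tx, IBLND_WID_TX) yields
 * 0x...a1; kiblnd_wreqid2ptr() masks the low three bits back off and
 * kiblnd_wreqid2type() recovers IBLND_WID_TX, which is how completion
 * handlers tell tx, rx, RDMA and MR work requests apart.
 */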
static inline void
kiblnd_set_conn_state(struct kib_conn *conn, int state)
{
	conn->ibc_state = state;
}

static inline void
kiblnd_init_msg(struct kib_msg *msg, int type, int body_nob)
{
	msg->ibm_type = type;
	msg->ibm_nob = offsetof(struct kib_msg, ibm_u) + body_nob;
}
static inline int
kiblnd_rd_size(struct kib_rdma_desc *rd)
{
	int size;
	int i;

	for (i = size = 0; i < rd->rd_nfrags; i++)
		size += rd->rd_frags[i].rf_nob;

	return size;
}

static inline __u64
kiblnd_rd_frag_addr(struct kib_rdma_desc *rd, int index)
{
	return rd->rd_frags[index].rf_addr;
}

static inline __u32
kiblnd_rd_frag_size(struct kib_rdma_desc *rd, int index)
{
	return rd->rd_frags[index].rf_nob;
}

static inline __u32
kiblnd_rd_frag_key(struct kib_rdma_desc *rd, int index)
{
	return rd->rd_key;
}
static inline int
kiblnd_rd_consume_frag(struct kib_rdma_desc *rd, int index, __u32 nob)
{
	if (nob < rd->rd_frags[index].rf_nob) {
		rd->rd_frags[index].rf_addr += nob;
		rd->rd_frags[index].rf_nob  -= nob;
	} else {
		index++;
	}

	return index;
}
static inline int
kiblnd_rd_msg_size(struct kib_rdma_desc *rd, int msgtype, int n)
{
	LASSERT(msgtype == IBLND_MSG_GET_REQ ||
		msgtype == IBLND_MSG_PUT_ACK);

	return msgtype == IBLND_MSG_GET_REQ ?
	       offsetof(struct kib_get_msg, ibgm_rd.rd_frags[n]) :
	       offsetof(struct kib_putack_msg, ibpam_rd.rd_frags[n]);
}
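/*
 * Editor's note: rd_frags[] is a flexible array, so the wire size of a
 * GET_REQ or PUT_ACK grows with the fragment count, e.g. n = 2 costs
 * the fixed kib_get_msg header plus two packed 12-byte kib_rdma_frag
 * entries (hence the misalignment caveat on rf_addr above).
 */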
static inline int
kiblnd_dma_mapping_error(struct ib_device *dev, u64 dma_addr)
{
	return ib_dma_mapping_error(dev, dma_addr);
}

static inline __u64 kiblnd_dma_map_single(struct ib_device *dev,
					  void *msg, size_t size,
					  enum dma_data_direction direction)
{
	return ib_dma_map_single(dev, msg, size, direction);
}

static inline void kiblnd_dma_unmap_single(struct ib_device *dev,
					   __u64 addr, size_t size,
					   enum dma_data_direction direction)
{
	ib_dma_unmap_single(dev, addr, size, direction);
}

#define KIBLND_UNMAP_ADDR_SET(p, m, a)	do {} while (0)
#define KIBLND_UNMAP_ADDR(p, m, a)	(a)

static inline int kiblnd_dma_map_sg(struct ib_device *dev,
				    struct scatterlist *sg, int nents,
				    enum dma_data_direction direction)
{
	return ib_dma_map_sg(dev, sg, nents, direction);
}

static inline void kiblnd_dma_unmap_sg(struct ib_device *dev,
				       struct scatterlist *sg, int nents,
				       enum dma_data_direction direction)
{
	ib_dma_unmap_sg(dev, sg, nents, direction);
}
#ifndef HAVE_IB_SG_DMA_ADDRESS
#include <linux/scatterlist.h>
#define ib_sg_dma_address(dev, sg)	sg_dma_address(sg)
#define ib_sg_dma_len(dev, sg)		sg_dma_len(sg)
#endif

static inline __u64 kiblnd_sg_dma_address(struct ib_device *dev,
					  struct scatterlist *sg)
{
	return ib_sg_dma_address(dev, sg);
}

static inline unsigned int kiblnd_sg_dma_len(struct ib_device *dev,
					     struct scatterlist *sg)
{
	return ib_sg_dma_len(dev, sg);
}
/* XXX We use KIBLND_CONN_PARAM(e) as a writable buffer. That is not
 * strictly right because OFED 1.2 declares it const; callers must add
 * a (void *) cast to get around the const qualifier. */

#define KIBLND_CONN_PARAM(e)		((e)->param.conn.private_data)
#define KIBLND_CONN_PARAM_LEN(e)	((e)->param.conn.private_data_len)
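/*
 * Usage sketch (editor's note): on an RDMA_CM_EVENT_CONNECT_REQUEST
 * the peer_ni's connection message arrives as CM private data, e.g.:
 *
 *	struct kib_msg *reqmsg = (void *)KIBLND_CONN_PARAM(event);
 *	int nob = KIBLND_CONN_PARAM_LEN(event);
 *
 *	rc = kiblnd_unpack_msg(reqmsg, nob);
 */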
void kiblnd_abort_txs(struct kib_conn *conn, struct list_head *txs);
void kiblnd_map_rx_descs(struct kib_conn *conn);
void kiblnd_unmap_rx_descs(struct kib_conn *conn);
void kiblnd_pool_free_node(struct kib_pool *pool, struct list_head *node);
struct list_head *kiblnd_pool_alloc_node(struct kib_poolset *ps);

int  kiblnd_fmr_pool_map(struct kib_fmr_poolset *fps, struct kib_tx *tx,
			 struct kib_rdma_desc *rd, u32 nob, u64 iov,
			 struct kib_fmr *fmr);
void kiblnd_fmr_pool_unmap(struct kib_fmr *fmr, int status);

int  kiblnd_tunables_setup(struct lnet_ni *ni);
int  kiblnd_tunables_init(void);
int  kiblnd_connd(void *arg);
int  kiblnd_scheduler(void *arg);
int  kiblnd_thread_start(int (*fn)(void *arg), void *arg, char *name);
int  kiblnd_failover_thread(void *arg);
int  kiblnd_alloc_pages(struct kib_pages **pp, int cpt, int npages);

int  kiblnd_cm_callback(struct rdma_cm_id *cmid,
			struct rdma_cm_event *event);
int  kiblnd_translate_mtu(int value);

int  kiblnd_dev_failover(struct kib_dev *dev, struct net *ns);
int  kiblnd_create_peer(struct lnet_ni *ni, struct kib_peer_ni **peerp,
			lnet_nid_t nid);
void kiblnd_destroy_peer(struct kib_peer_ni *peer);
bool kiblnd_reconnect_peer(struct kib_peer_ni *peer);
void kiblnd_destroy_dev(struct kib_dev *dev);
void kiblnd_unlink_peer_locked(struct kib_peer_ni *peer_ni);
struct kib_peer_ni *kiblnd_find_peer_locked(struct lnet_ni *ni, lnet_nid_t nid);
int  kiblnd_close_stale_conns_locked(struct kib_peer_ni *peer_ni,
				     int version, u64 incarnation);
int  kiblnd_close_peer_conns_locked(struct kib_peer_ni *peer_ni, int why);

struct kib_conn *kiblnd_create_conn(struct kib_peer_ni *peer_ni,
				    struct rdma_cm_id *cmid,
				    int state, int version);
void kiblnd_destroy_conn(struct kib_conn *conn);
void kiblnd_close_conn(struct kib_conn *conn, int error);
void kiblnd_close_conn_locked(struct kib_conn *conn, int error);
void kiblnd_launch_tx(struct lnet_ni *ni, struct kib_tx *tx, lnet_nid_t nid);
void kiblnd_txlist_done(struct list_head *txlist, int status,
			enum lnet_msg_hstatus hstatus);

void kiblnd_qp_event(struct ib_event *event, void *arg);
void kiblnd_cq_event(struct ib_event *event, void *arg);
void kiblnd_cq_completion(struct ib_cq *cq, void *arg);

void kiblnd_pack_msg(struct lnet_ni *ni, struct kib_msg *msg, int version,
		     int credits, lnet_nid_t dstnid, __u64 dststamp);
int  kiblnd_unpack_msg(struct kib_msg *msg, int nob);
int  kiblnd_post_rx(struct kib_rx *rx, int credit);

int  kiblnd_send(struct lnet_ni *ni, void *private, struct lnet_msg *lntmsg);
int  kiblnd_recv(struct lnet_ni *ni, void *private, struct lnet_msg *lntmsg,
		 int delayed, unsigned int niov, struct kvec *iov,
		 lnet_kiov_t *kiov, unsigned int offset, unsigned int mlen,
		 unsigned int rlen);