/*
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2017, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 *
 * lnet/klnds/o2iblnd/o2iblnd.h
 *
 * Author: Eric Barton <eric@bartonsoftware.com>
 */
#include <linux/module.h>
#include <linux/kernel.h>

#if defined(MLNX_OFED_BUILD) && !defined(HAVE_SANE_IB_DMA_MAP_SG)
#undef CONFIG_INFINIBAND_VIRT_DMA
#endif

#if defined(NEED_LOCKDEP_IS_HELD_DISCARD_CONST) \
 && defined(CONFIG_LOCKDEP) \
 && defined(lockdep_is_held)
#undef lockdep_is_held
#define lockdep_is_held(lock) \
	lock_is_held((struct lockdep_map *)&(lock)->dep_map)
#endif

#ifdef HAVE_COMPAT_RDMA
#include <linux/compat-2.6.h>

#ifdef LINUX_3_17_COMPAT_H
#undef NEED_KTIME_GET_REAL_NS
#endif

#define HAVE_NLA_PUT_U64_64BIT 1
#define HAVE_NLA_PARSE_6_PARAMS 1
#define HAVE_NETLINK_EXTACK 1

/* MOFED has its own bitmap_alloc backport */
#define HAVE_BITMAP_ALLOC 1

#endif
#include <linux/kthread.h>
#include <linux/string.h>
#include <linux/stat.h>
#include <linux/errno.h>
#include <linux/unistd.h>
#include <linux/uio.h>

#include <asm/uaccess.h>

#include <linux/init.h>

#include <linux/file.h>
#include <linux/list.h>
#include <linux/kmod.h>
#include <linux/sysctl.h>
#include <linux/pci.h>
#include <rdma/rdma_cm.h>
#include <rdma/ib_cm.h>
#include <rdma/ib_verbs.h>
#ifdef HAVE_FMR_POOL_API
#include <rdma/ib_fmr_pool.h>
#endif

#define DEBUG_SUBSYSTEM S_LND

#include <lnet/lib-lnet.h>
#include <lnet/lnet_rdma.h>
#include "o2iblnd-idl.h"
enum kiblnd_ni_lnd_tunables_attr {
	LNET_NET_O2IBLND_TUNABLES_ATTR_UNSPEC = 0,

	LNET_NET_O2IBLND_TUNABLES_ATTR_HIW_PEER_CREDITS,
	LNET_NET_O2IBLND_TUNABLES_ATTR_CONCURRENT_SENDS,
	LNET_NET_O2IBLND_TUNABLES_ATTR_MAP_ON_DEMAND,
	LNET_NET_O2IBLND_TUNABLES_ATTR_FMR_POOL_SIZE,
	LNET_NET_O2IBLND_TUNABLES_ATTR_FMR_FLUSH_TRIGGER,
	LNET_NET_O2IBLND_TUNABLES_ATTR_FMR_CACHE,
	LNET_NET_O2IBLND_TUNABLES_ATTR_NTX,
	LNET_NET_O2IBLND_TUNABLES_ATTR_CONNS_PER_PEER,
	LNET_NET_O2IBLND_TUNABLES_ATTR_LND_TIMEOUT,
	__LNET_NET_O2IBLND_TUNABLES_ATTR_MAX_PLUS_ONE,
};

#define LNET_NET_O2IBLND_TUNABLES_ATTR_MAX (__LNET_NET_O2IBLND_TUNABLES_ATTR_MAX_PLUS_ONE - 1)
#define IBLND_PEER_HASH_BITS	7	/* log2 of # peer_ni lists */
#define IBLND_N_SCHED		2
#define IBLND_N_SCHED_HIGH	4
struct kib_tunables {
	int		*kib_dev_failover;	/* HCA failover */
	unsigned int	*kib_service;		/* IB service number */
	int		*kib_cksum;		/* checksum struct kib_msg? */
	int		*kib_timeout;		/* comms timeout (seconds) */
	int		*kib_keepalive;		/* keepalive timeout (seconds) */
	char		**kib_default_ipif;	/* default IPoIB interface */
	int		*kib_retry_count;
	int		*kib_rnr_retry_count;
	int		*kib_ib_mtu;		/* IB MTU */
	int		*kib_require_priv_port;	/* accept only privileged ports */
	int		*kib_use_priv_port;	/* use privileged port for active connect */
	/* # threads on each CPT */
	int		*kib_nscheds;
	int		*kib_wrq_sge;		/* # sg elements per wrq */
	int		*kib_use_fastreg_gaps;	/* enable discontiguous fastreg fragment support */
};

extern struct kib_tunables kiblnd_tunables;
#define IBLND_MSG_QUEUE_SIZE_V1		8 /* V1 only : # messages/RDMAs in-flight */
#define IBLND_CREDIT_HIGHWATER_V1	7 /* V1 only : when to eagerly return credits */

#define IBLND_CREDITS_DEFAULT		8 /* default # of peer_ni credits */
#define IBLND_CREDITS_MAX	((typeof(((struct kib_msg *) 0)->ibm_credits)) - 1) /* Max # of peer_ni credits */
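/* Note: the IBLND_CREDITS_MAX expression casts -1 to the (unsigned) type of
 * the ibm_credits field, which yields that type's maximum value; e.g. for an
 * 8-bit credits field it evaluates to 255. */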
#ifdef HAVE_RDMA_CREATE_ID_5ARG
# define kiblnd_rdma_create_id(ns, cb, dev, ps, qpt) \
	 rdma_create_id((ns) ? (ns) : &init_net, cb, dev, ps, qpt)
#else
# ifdef HAVE_RDMA_CREATE_ID_4ARG
#  define kiblnd_rdma_create_id(ns, cb, dev, ps, qpt) \
	  rdma_create_id(cb, dev, ps, qpt)
# else
#  define kiblnd_rdma_create_id(ns, cb, dev, ps, qpt) \
	  rdma_create_id(cb, dev, ps)
# endif
#endif
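/* The wrapper above hides the evolution of the upstream rdma_create_id()
 * prototype: newer kernels additionally take a network namespace and a QP
 * type. Callers always pass all five arguments and the ones an older kernel
 * does not accept are simply dropped at compile time. */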
/* 2 OOB shall suffice for 1 keepalive and 1 returning credits */
#define IBLND_OOB_CAPABLE(v)	((v) != IBLND_MSG_VERSION_1)
#define IBLND_OOB_MSGS(v)	(IBLND_OOB_CAPABLE(v) ? 2 : 0)

/* max size of queued messages (inc hdr) */
#define IBLND_MSG_SIZE		(4<<10)
/* max # of fragments supported; +1 for the unaligned case */
#define IBLND_MAX_RDMA_FRAGS	(LNET_MAX_IOV + 1)
/************************/
/* derived constants... */
/* Pools (shared by connections on each CPT) */
/* These pools can grow at runtime, so there is no need to give them a very
 * large initial value */
#define IBLND_TX_POOL		256
#define IBLND_FMR_POOL		256
#define IBLND_FMR_POOL_FLUSH	192
/* RX messages (per connection) */
#define IBLND_RX_MSGS(c) \
	((c->ibc_queue_depth) * 2 + IBLND_OOB_MSGS(c->ibc_version))
#define IBLND_RX_MSG_BYTES(c)	(IBLND_RX_MSGS(c) * IBLND_MSG_SIZE)
#define IBLND_RX_MSG_PAGES(c) \
	((IBLND_RX_MSG_BYTES(c) + PAGE_SIZE - 1) / PAGE_SIZE)
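/* Worked example (assuming 4 KiB pages): a V2 connection negotiated to a
 * queue depth of 8 needs 8 * 2 + 2 = 18 RX messages, i.e. 18 * 4 KiB = 72 KiB
 * of receive buffers, which rounds up to 18 pages. */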
/* WRs and CQEs (per connection) */
#define IBLND_RECV_WRS(c)	IBLND_RX_MSGS(c)

/* 2 = LNet msg + Transfer chain */
#define IBLND_CQ_ENTRIES(c)	(IBLND_RECV_WRS(c) + kiblnd_send_wrs(c))
/* o2iblnd can run over aliased interface */
#ifdef IFALIASZ
#define KIB_IFNAME_SIZE	IFALIASZ
#else
#define KIB_IFNAME_SIZE	256
#endif
enum kib_dev_caps {
	IBLND_DEV_CAPS_FASTREG_ENABLED		= BIT(0),
	IBLND_DEV_CAPS_FASTREG_GAPS_SUPPORT	= BIT(1),
#ifdef HAVE_FMR_POOL_API
	IBLND_DEV_CAPS_FMR_ENABLED		= BIT(2),
#endif
};

#define IS_FAST_REG_DEV(dev) \
	((dev)->ibd_dev_caps & IBLND_DEV_CAPS_FASTREG_ENABLED)
struct kib_dev {
	struct list_head	ibd_list;	/* chain on kib_devs */
	struct list_head	ibd_fail_list;	/* chain on kib_failed_devs */
	__u32			ibd_ifip;	/* IPoIB interface IP */
	/** IPoIB interface name */
	char			ibd_ifname[KIB_IFNAME_SIZE];
	int			ibd_nnets;	/* # nets extant */

	time64_t		ibd_next_failover;
	/* # failover failures */
	int			ibd_failed_failover;
	/* failover in progress */
	unsigned int		ibd_failover;
	/* IPoIB interface is a bonding master */
	unsigned int		ibd_can_failover;
	struct list_head	ibd_nets;
	struct kib_hca_dev	*ibd_hdev;
	enum kib_dev_caps	ibd_dev_caps;
};
struct kib_hca_dev {
	struct rdma_cm_id	*ibh_cmid;	/* listener cmid */
	struct ib_device	*ibh_ibdev;	/* IB device */
	int			ibh_page_shift;	/* page shift of current HCA */
	int			ibh_page_size;	/* page size of current HCA */
	__u64			ibh_page_mask;	/* page mask of current HCA */
	__u64			ibh_mr_size;	/* size of MR */
	int			ibh_max_qp_wr;	/* maximum work request size */
#ifdef HAVE_IB_GET_DMA_MR
	struct ib_mr		*ibh_mrs;	/* global MR */
#endif
	struct ib_pd		*ibh_pd;	/* PD */
	u8			ibh_port;	/* port number */
	struct ib_event_handler
				ibh_event_handler; /* IB event handler */
	int			ibh_state;	/* device status */
#define IBLND_DEV_PORT_DOWN	0
#define IBLND_DEV_PORT_ACTIVE	1
#define IBLND_DEV_FATAL		2
	struct kib_dev		*ibh_dev;	/* owner */
	atomic_t		ibh_ref;	/* refcount */
};
/** # of seconds to keep pool alive */
#define IBLND_POOL_DEADLINE	300
/** # of seconds to retry if allocation failed */
#define IBLND_POOL_RETRY	1

struct kib_pages {
	int			ibp_npages;	/* # pages */
	struct page		*ibp_pages[0];	/* page array */
};

struct kib_pool;
struct kib_poolset;
typedef int  (*kib_ps_pool_create_t)(struct kib_poolset *ps,
				     int inc, struct kib_pool **pp_po);
typedef void (*kib_ps_pool_destroy_t)(struct kib_pool *po);
typedef void (*kib_ps_node_init_t)(struct kib_pool *po, struct list_head *node);
typedef void (*kib_ps_node_fini_t)(struct kib_pool *po, struct list_head *node);

#define IBLND_POOL_NAME_LEN	32
struct kib_poolset {
	/* serialize */
	spinlock_t		ps_lock;
	/* network it belongs to */
	struct kib_net		*ps_net;
	/* pool set name */
	char			ps_name[IBLND_POOL_NAME_LEN];
	/* list of pools */
	struct list_head	ps_pool_list;
	/* failed pool list */
	struct list_head	ps_failed_pool_list;
	/* time stamp for retry if failed to allocate */
	time64_t		ps_next_retry;
	/* is allocating new pool */
	int			ps_increasing;
	/* new pool size */
	int			ps_pool_size;
	/* CPT id */
	int			ps_cpt;

	/* create a new pool */
	kib_ps_pool_create_t	ps_pool_create;
	/* destroy a pool */
	kib_ps_pool_destroy_t	ps_pool_destroy;
	/* initialize new allocated node */
	kib_ps_node_init_t	ps_node_init;
	/* finalize node */
	kib_ps_node_fini_t	ps_node_fini;
};
struct kib_pool {
	/* chain on pool list */
	struct list_head	po_list;
	/* pre-allocated node */
	struct list_head	po_free_list;
	/* pool_set of this pool */
	struct kib_poolset	*po_owner;
	/* deadline of this pool */
	time64_t		po_deadline;
	/* # of elements in use */
	int			po_allocated;
	/* pool is created on failed HCA */
	int			po_failed;
	/* # of pre-allocated elements */
	int			po_size;
};
struct kib_tx_poolset {
	struct kib_poolset	tps_poolset;		/* pool-set */
	__u64			tps_next_tx_cookie;	/* cookie of TX */
};

struct kib_tx_pool {
	struct kib_pool		tpo_pool;	/* pool */
	struct kib_hca_dev	*tpo_hdev;	/* device for this pool */
	struct kib_tx		*tpo_tx_descs;	/* all the tx descriptors */
	struct kib_pages	*tpo_tx_pages;	/* premapped tx msg pages */
};
struct kib_fmr_poolset {
	spinlock_t		fps_lock;		/* serialize */
	struct kib_net		*fps_net;		/* IB network */
	struct list_head	fps_pool_list;		/* FMR pool list */
	struct list_head	fps_failed_pool_list;	/* FMR pool list */
	__u64			fps_version;		/* validity stamp */
	int			fps_cpt;		/* CPT id */
	int			fps_pool_size;
	int			fps_flush_trigger;
	int			fps_cache;
	/* is allocating new pool */
	int			fps_increasing;
	/* time stamp for retry if failed to allocate */
	time64_t		fps_next_retry;
};
#ifndef HAVE_IB_RDMA_WR
struct ib_rdma_wr {
	struct ib_send_wr	wr;
};
#endif

struct kib_fast_reg_descriptor { /* For fast registration */
	struct list_head		 frd_list;
	struct ib_rdma_wr		 frd_inv_wr;
#ifdef HAVE_IB_MAP_MR_SG
	struct ib_reg_wr		 frd_fastreg_wr;
#else
	struct ib_rdma_wr		 frd_fastreg_wr;
	struct ib_fast_reg_page_list	*frd_frpl;
#endif
	struct ib_mr			*frd_mr;
	bool				 frd_valid;
	bool				 frd_posted;
};
struct kib_fmr_pool {
	struct list_head	fpo_list;	/* chain on pool list */
	struct kib_hca_dev	*fpo_hdev;	/* device for this pool */
	struct kib_fmr_poolset	*fpo_owner;	/* owner of this pool */
#ifdef HAVE_FMR_POOL_API
	union {
		struct {
			struct ib_fmr_pool *fpo_fmr_pool; /* IB FMR pool */
		} fmr;
#endif
		struct { /* For fast registration */
			struct list_head  fpo_pool_list;
			int		  fpo_pool_size;
		} fast_reg;
#ifdef HAVE_FMR_POOL_API
	};
	bool			fpo_is_fmr;	/* True if FMR pools allocated */
#endif
	time64_t		fpo_deadline;	/* deadline of this pool */
	int			fpo_failed;	/* fmr pool is failed */
	int			fpo_map_count;	/* # of mapped FMR */
};
struct kib_fmr {
	struct kib_fmr_pool		*fmr_pool;	/* pool of FMR */
#ifdef HAVE_FMR_POOL_API
	struct ib_pool_fmr		*fmr_pfmr;	/* IB pool fmr */
#endif /* HAVE_FMR_POOL_API */
	struct kib_fast_reg_descriptor	*fmr_frd;
	u32				 fmr_key;
};

#ifdef HAVE_FMR_POOL_API

#ifdef HAVE_ORACLE_OFED_EXTENSIONS
#define kib_fmr_pool_map(pool, pgs, n, iov) \
	ib_fmr_pool_map_phys((pool), (pgs), (n), (iov), NULL)
#else
#define kib_fmr_pool_map(pool, pgs, n, iov) \
	ib_fmr_pool_map_phys((pool), (pgs), (n), (iov))
#endif

#endif /* HAVE_FMR_POOL_API */
struct kib_net {
	/* chain on struct kib_dev::ibd_nets */
	struct list_head	ibn_list;
	__u64			ibn_incarnation;/* my epoch */
	int			ibn_init;	/* initialisation state */
	int			ibn_shutdown;	/* shutting down? */

	atomic_t		ibn_npeers;	/* # peers extant */
	atomic_t		ibn_nconns;	/* # connections extant */

	struct kib_tx_poolset	**ibn_tx_ps;	/* tx pool-set */
	struct kib_fmr_poolset	**ibn_fmr_ps;	/* fmr pool-set */

	struct kib_dev		*ibn_dev;	/* underlying IB device */
	struct lnet_ni		*ibn_ni;	/* LNet interface */
};
#define KIB_THREAD_SHIFT	16
#define KIB_THREAD_ID(cpt, tid)	((cpt) << KIB_THREAD_SHIFT | (tid))
#define KIB_THREAD_CPT(id)	((id) >> KIB_THREAD_SHIFT)
#define KIB_THREAD_TID(id)	((id) & ((1UL << KIB_THREAD_SHIFT) - 1))
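/* For example, KIB_THREAD_ID(1, 3) packs CPT 1 and thread 3 into 0x10003;
 * KIB_THREAD_CPT() and KIB_THREAD_TID() recover 1 and 3 respectively. */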
struct kib_sched_info {
	/* serialise */
	spinlock_t		ibs_lock;
	/* schedulers sleep here */
	wait_queue_head_t	ibs_waitq;
	/* conns to check for rx completions */
	struct list_head	ibs_conns;
	/* number of scheduler threads */
	int			ibs_nthreads;
	/* max allowed scheduler threads */
	int			ibs_nthreads_max;
	int			ibs_cpt;	/* CPT id */
};
struct kib_data {
	int			kib_init;	/* initialisation state */
	int			kib_shutdown;	/* shut down? */
	struct list_head	kib_devs;	/* IB devices extant */
	/* list head of failed devices */
	struct list_head	kib_failed_devs;
	/* schedulers sleep here */
	wait_queue_head_t	kib_failover_waitq;
	atomic_t		kib_nthreads;	/* # live threads */
	/* stabilize net/dev/peer_ni/conn ops */
	rwlock_t		kib_global_lock;
	/* hash table of all my known peers */
	DECLARE_HASHTABLE(kib_peers, IBLND_PEER_HASH_BITS);
	/* the connd task (serialisation assertions) */
	void			*kib_connd;
	/* connections to setup/teardown */
	struct list_head	kib_connd_conns;
	/* connections with zero refcount */
	struct list_head	kib_connd_zombies;
	/* connections to reconnect */
	struct list_head	kib_reconn_list;
	/* peers wait for reconnection */
	struct list_head	kib_reconn_wait;
	/* connections wait for completion */
	struct list_head	kib_connd_waits;
	/*
	 * The second that peers are pulled out from \a kib_reconn_wait
	 * for reconnection.
	 */
	time64_t		kib_reconn_sec;
	/* connection daemon sleeps here */
	wait_queue_head_t	kib_connd_waitq;
	spinlock_t		kib_connd_lock;	/* serialise */
	struct ib_qp_attr	kib_error_qpa;	/* QP->ERROR */
	/* percpt data for schedulers */
	struct kib_sched_info	**kib_scheds;
};
#define IBLND_INIT_NOTHING	0
#define IBLND_INIT_DATA		1
#define IBLND_INIT_ALL		2
struct kib_rx {					/* receive message */
	/* queue for attention */
	struct list_head	rx_list;
	/* owning conn */
	struct kib_conn		*rx_conn;
	/* # bytes received (-1 while posted) */
	int			rx_nob;
	/* message buffer (host vaddr) */
	struct kib_msg		*rx_msg;
	/* message buffer (I/O addr) */
	__u64			rx_msgaddr;
	/* for dma_unmap_single() */
	DEFINE_DMA_UNMAP_ADDR(rx_msgunmap);
	/* receive work item... */
	struct ib_recv_wr	rx_wrq;
	/* ...and its memory */
	struct ib_sge		rx_sge;
};

#define IBLND_POSTRX_DONT_POST		0 /* don't post */
#define IBLND_POSTRX_NO_CREDIT		1 /* post: no credits */
#define IBLND_POSTRX_PEER_CREDIT	2 /* post: give peer_ni back 1 credit */
#define IBLND_POSTRX_RSRVD_CREDIT	3 /* post: give myself back 1 reserved credit */
struct kib_tx {					/* transmit message */
	/* queue on idle_txs ibc_tx_queue etc. */
	struct list_head	tx_list;
	/* pool I'm from */
	struct kib_tx_pool	*tx_pool;
	/* owning conn */
	struct kib_conn		*tx_conn;
	/* # tx callbacks outstanding */
	short			tx_sending;
	/* queued for sending */
	unsigned long		tx_queued:1,
	/* waiting for peer_ni */
				tx_waiting:1,
	/* force RDMA */
				tx_gpu:1;
	/* LNET completion status */
	int			tx_status;
	/* health status of the transmit */
	enum lnet_msg_hstatus	tx_hstatus;
	/* completion deadline */
	ktime_t			tx_deadline;
	/* completion cookie */
	__u64			tx_cookie;
	/* lnet msgs to finalize on completion */
	struct lnet_msg		*tx_lntmsg[2];
	/* message buffer (host vaddr) */
	struct kib_msg		*tx_msg;
	/* message buffer (I/O addr) */
	__u64			tx_msgaddr;
	/* for dma_unmap_single() */
	DEFINE_DMA_UNMAP_ADDR(tx_msgunmap);
	/* # send work items */
	int			tx_nwrq;
	/* # used scatter/gather elements */
	int			tx_nsge;
	/* send work items... */
	struct ib_rdma_wr	*tx_wrq;
	/* ...and their memory */
	struct ib_sge		*tx_sge;
	/* rdma descriptor */
	struct kib_rdma_desc	*tx_rd;
	/* # entries in... */
	int			tx_nfrags;
	/* dma_map_sg descriptor */
	struct scatterlist	*tx_frags;
	/* rdma phys page addrs */
	__u64			*tx_pages;
	/* gaps in fragments */
	bool			tx_gaps;
	/* FMR */
	struct kib_fmr		tx_fmr;
	/* dma direction */
	enum dma_data_direction	tx_dmadir;
};
struct kib_connvars {
	/* connection-in-progress variables */
	struct kib_msg		cv_msg;
};
struct kib_conn {
	/* scheduler information */
	struct kib_sched_info	*ibc_sched;
	/* owning peer_ni */
	struct kib_peer_ni	*ibc_peer;
	/* HCA bound on */
	struct kib_hca_dev	*ibc_hdev;
	/* stash on peer_ni's conn list */
	struct list_head	ibc_list;
	/* schedule for attention */
	struct list_head	ibc_sched_list;
	/* version of connection */
	__u16			ibc_version;
	/* reconnect later */
	__u16			ibc_reconnect:1;
	/* which instance of the peer */
	__u64			ibc_incarnation;
	/* # users */
	atomic_t		ibc_refcount;
	/* what's happening */
	int			ibc_state;
	/* # uncompleted sends */
	int			ibc_nsends_posted;
	/* # uncompleted NOOPs */
	int			ibc_noops_posted;
	/* # credits I have */
	int			ibc_credits;
	/* # credits to return */
	int			ibc_outstanding_credits;
	/* # ACK/DONE msg credits */
	int			ibc_reserved_credits;
	/* set on comms error */
	int			ibc_comms_error;
	/* connections queue depth */
	__u16			ibc_queue_depth;
	/* connections max frags */
	__u16			ibc_max_frags;
	/* count of timeout txs waiting on cq */
	__u16			ibc_waits;
	/* receive buffers owned */
	unsigned int		ibc_nrx:16;
	/* scheduled for attention */
	unsigned int		ibc_scheduled:1;
	/* CQ callback fired */
	unsigned int		ibc_ready:1;
	/* time of last send */
	ktime_t			ibc_last_send;
	/** link chain for kiblnd_check_conns only */
	struct list_head	ibc_connd_list;
	/** rxs completed before ESTABLISHED */
	struct list_head	ibc_early_rxs;
	/** IBLND_MSG_NOOPs for IBLND_MSG_VERSION_1 */
	struct list_head	ibc_tx_noops;
	/* sends that need a credit */
	struct list_head	ibc_tx_queue;
	/* sends that don't need a credit */
	struct list_head	ibc_tx_queue_nocred;
	/* sends that need to reserve an ACK/DONE msg */
	struct list_head	ibc_tx_queue_rsrvd;
	/* active tx awaiting completion */
	struct list_head	ibc_active_txs;
	/* zombie tx awaiting done */
	struct list_head	ibc_zombie_txs;
	/* serialise */
	spinlock_t		ibc_lock;
	/* the rx descs */
	struct kib_rx		*ibc_rxs;
	/* premapped rx msg pages */
	struct kib_pages	*ibc_rx_pages;

	/* CM id */
	struct rdma_cm_id	*ibc_cmid;
	/* completion queue */
	struct ib_cq		*ibc_cq;

	/* in-progress connection state */
	struct kib_connvars	*ibc_connvars;
};
#define IBLND_CONN_INIT			0 /* being initialised */
#define IBLND_CONN_ACTIVE_CONNECT	1 /* active sending req */
#define IBLND_CONN_PASSIVE_WAIT		2 /* passive waiting for rtu */
#define IBLND_CONN_ESTABLISHED		3 /* connection established */
#define IBLND_CONN_CLOSING		4 /* being closed */
#define IBLND_CONN_DISCONNECTED		5 /* disconnected */
struct kib_peer_ni {
	/* on peer_ni hash chain */
	struct hlist_node	ibp_list;
	/* who's on the other end(s) */
	lnet_nid_t		ibp_nid;
	/* LNet interface */
	struct lnet_ni		*ibp_ni;
	/* all active connections */
	struct list_head	ibp_conns;
	/* next connection to send on for round robin */
	struct kib_conn		*ibp_next_conn;
	/* msgs waiting for a conn */
	struct list_head	ibp_tx_queue;
	/* incarnation of peer_ni */
	__u64			ibp_incarnation;
	/* when (in seconds) I was last alive */
	time64_t		ibp_last_alive;
	/* # users */
	struct kref		ibp_kref;
	/* version of peer_ni */
	__u16			ibp_version;
	/* current passive connection attempts */
	unsigned short		ibp_accepting;
	/* current active connection attempts */
	unsigned short		ibp_connecting;
	/* reconnect this peer_ni later */
	unsigned char		ibp_reconnecting;
	/* counter of how many times we triggered a conn race */
	unsigned char		ibp_races;
	/* # consecutive reconnection attempts to this peer */
	unsigned int		ibp_reconnected;
	/* errno on closing this peer_ni */
	int			ibp_error;
	/* max map_on_demand */
	__u16			ibp_max_frags;
	/* max_peer_credits */
	__u16			ibp_queue_depth;
	/* reduced value which allows conn to be created if max fails */
	__u16			ibp_queue_depth_mod;
	/* Number of connections allocated. */
	atomic_t		ibp_nconns;
};
#ifndef HAVE_IB_INC_RKEY
/**
 * ib_inc_rkey - increments the key portion of the given rkey. Can be used
 * for calculating a new rkey for type 2 memory windows.
 * @rkey - the rkey to increment.
 */
static inline u32 ib_inc_rkey(u32 rkey)
{
	const u32 mask = 0x000000ff;

	return ((rkey + 1) & mask) | (rkey & ~mask);
}
#endif
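/* For example, ib_inc_rkey(0x12345601) returns 0x12345602, while
 * ib_inc_rkey(0x123456ff) wraps the low byte and returns 0x12345600;
 * only the 8-bit key portion ever changes. */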
extern struct kib_data kiblnd_data;

extern void kiblnd_hdev_destroy(struct kib_hca_dev *hdev);

int kiblnd_msg_queue_size(int version, struct lnet_ni *ni);
static inline int kiblnd_timeout(void)
{
	return *kiblnd_tunables.kib_timeout ? *kiblnd_tunables.kib_timeout :
		lnet_get_lnd_timeout();
}
static inline int
kiblnd_concurrent_sends(int version, struct lnet_ni *ni)
{
	struct lnet_ioctl_config_o2iblnd_tunables *tunables;
	int concurrent_sends;

	tunables = &ni->ni_lnd_tunables.lnd_tun_u.lnd_o2ib;
	concurrent_sends = tunables->lnd_concurrent_sends;

	if (version == IBLND_MSG_VERSION_1) {
		if (concurrent_sends > IBLND_MSG_QUEUE_SIZE_V1 * 2)
			return IBLND_MSG_QUEUE_SIZE_V1 * 2;

		if (concurrent_sends < IBLND_MSG_QUEUE_SIZE_V1 / 2)
			return IBLND_MSG_QUEUE_SIZE_V1 / 2;
	}

	return concurrent_sends;
}
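/* With IBLND_MSG_QUEUE_SIZE_V1 == 8, a V1 connection therefore clamps the
 * configured lnd_concurrent_sends into the range [4, 16]; V2 connections
 * use the tunable unmodified. */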
static inline void
kiblnd_hdev_addref_locked(struct kib_hca_dev *hdev)
{
	LASSERT(atomic_read(&hdev->ibh_ref) > 0);
	atomic_inc(&hdev->ibh_ref);
}

static inline void
kiblnd_hdev_decref(struct kib_hca_dev *hdev)
{
	LASSERT(atomic_read(&hdev->ibh_ref) > 0);
	if (atomic_dec_and_test(&hdev->ibh_ref))
		kiblnd_hdev_destroy(hdev);
}

static inline int
kiblnd_dev_can_failover(struct kib_dev *dev)
{
	if (!list_empty(&dev->ibd_fail_list)) /* already scheduled */
		return 0;

	if (*kiblnd_tunables.kib_dev_failover == 0) /* disabled */
		return 0;

	if (*kiblnd_tunables.kib_dev_failover > 1) /* force failover */
		return 1;

	return dev->ibd_can_failover;
}
static inline void kiblnd_conn_addref(struct kib_conn *conn)
{
#ifdef O2IBLND_CONN_REFCOUNT_DEBUG
	CDEBUG(D_NET, "conn[%p] (%d)++\n",
	       (conn), atomic_read(&(conn)->ibc_refcount));
#endif
	atomic_inc(&(conn)->ibc_refcount);
}

static inline void kiblnd_conn_decref(struct kib_conn *conn)
{
	unsigned long flags;

#ifdef O2IBLND_CONN_REFCOUNT_DEBUG
	CDEBUG(D_NET, "conn[%p] (%d)--\n",
	       (conn), atomic_read(&(conn)->ibc_refcount));
#endif
	LASSERT(atomic_read(&(conn)->ibc_refcount) > 0);
	if (atomic_dec_and_test(&(conn)->ibc_refcount)) {
		spin_lock_irqsave(&kiblnd_data.kib_connd_lock, flags);
		list_add_tail(&(conn)->ibc_list,
			      &kiblnd_data.kib_connd_zombies);
		wake_up(&kiblnd_data.kib_connd_waitq);
		spin_unlock_irqrestore(&kiblnd_data.kib_connd_lock, flags);
	}
}
void kiblnd_destroy_peer(struct kref *kref);

static inline void kiblnd_peer_addref(struct kib_peer_ni *peer_ni)
{
	CDEBUG(D_NET, "peer_ni[%p] -> %s (%d)++\n",
	       peer_ni, libcfs_nid2str(peer_ni->ibp_nid),
	       kref_read(&peer_ni->ibp_kref));
	kref_get(&(peer_ni)->ibp_kref);
}

static inline void kiblnd_peer_decref(struct kib_peer_ni *peer_ni)
{
	CDEBUG(D_NET, "peer_ni[%p] -> %s (%d)--\n",
	       peer_ni, libcfs_nid2str(peer_ni->ibp_nid),
	       kref_read(&peer_ni->ibp_kref));
	kref_put(&peer_ni->ibp_kref, kiblnd_destroy_peer);
}
static inline bool
kiblnd_peer_connecting(struct kib_peer_ni *peer_ni)
{
	return peer_ni->ibp_connecting != 0 ||
	       peer_ni->ibp_reconnecting != 0 ||
	       peer_ni->ibp_accepting != 0;
}

static inline bool
kiblnd_peer_idle(struct kib_peer_ni *peer_ni)
{
	return !kiblnd_peer_connecting(peer_ni) &&
	       list_empty(&peer_ni->ibp_conns);
}

static inline int
kiblnd_peer_active(struct kib_peer_ni *peer_ni)
{
	/* Am I in the peer_ni hash table? */
	return !hlist_unhashed(&peer_ni->ibp_list);
}
static inline struct kib_conn *
kiblnd_get_conn_locked(struct kib_peer_ni *peer_ni)
{
	struct list_head *next;

	LASSERT(!list_empty(&peer_ni->ibp_conns));

	/* Advance to next connection, be sure to skip the head node */
	if (!peer_ni->ibp_next_conn ||
	    peer_ni->ibp_next_conn->ibc_list.next == &peer_ni->ibp_conns)
		next = peer_ni->ibp_conns.next;
	else
		next = peer_ni->ibp_next_conn->ibc_list.next;
	peer_ni->ibp_next_conn = list_entry(next, struct kib_conn, ibc_list);

	return peer_ni->ibp_next_conn;
}
static inline bool
kiblnd_send_keepalive(struct kib_conn *conn)
{
	s64 keepalive_ns = *kiblnd_tunables.kib_keepalive * NSEC_PER_SEC;

	return (*kiblnd_tunables.kib_keepalive > 0) &&
		ktime_after(ktime_get(),
			    ktime_add_ns(conn->ibc_last_send, keepalive_ns));
}
/* when to return credits eagerly */
static inline int
kiblnd_credits_highwater(struct lnet_ioctl_config_o2iblnd_tunables *t,
			 struct lnet_ioctl_config_lnd_cmn_tunables *nt,
			 struct kib_conn *conn)
{
	int credits_hiw = IBLND_CREDIT_HIGHWATER_V1;

	if ((conn->ibc_version) == IBLND_MSG_VERSION_1)
		return credits_hiw;

	/* if queue depth is negotiated down, calculate hiw proportionally */
	credits_hiw = (conn->ibc_queue_depth * t->lnd_peercredits_hiw) /
		       nt->lct_peer_tx_credits;

	return credits_hiw;
}
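/* For example, with peer_tx_credits == 64 and peercredits_hiw == 32, a
 * connection whose queue depth was negotiated down to 32 returns credits
 * eagerly once (32 * 32) / 64 = 16 credits are outstanding. */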
static inline int
kiblnd_need_noop(struct kib_conn *conn)
{
	struct lnet_ni *ni = conn->ibc_peer->ibp_ni;
	struct lnet_ioctl_config_o2iblnd_tunables *tunables;
	struct lnet_ioctl_config_lnd_cmn_tunables *net_tunables;

	LASSERT(conn->ibc_state >= IBLND_CONN_ESTABLISHED);
	tunables = &ni->ni_lnd_tunables.lnd_tun_u.lnd_o2ib;
	net_tunables = &ni->ni_net->net_tunables;

	if (conn->ibc_outstanding_credits <
	    kiblnd_credits_highwater(tunables, net_tunables, conn) &&
	    !kiblnd_send_keepalive(conn))
		return 0; /* No need to send NOOP */

	if (IBLND_OOB_CAPABLE(conn->ibc_version)) {
		if (!list_empty(&conn->ibc_tx_queue_nocred))
			return 0; /* NOOP can be piggybacked */

		/* No tx to piggyback NOOP onto or no credit to send a tx */
		return (list_empty(&conn->ibc_tx_queue) ||
			conn->ibc_credits == 0);
	}

	if (!list_empty(&conn->ibc_tx_noops) ||	/* NOOP already queued */
	    !list_empty(&conn->ibc_tx_queue_nocred) || /* piggyback NOOP */
	    conn->ibc_credits == 0)		/* no credit */
		return 0;

	if (conn->ibc_credits == 1 &&		/* last credit reserved for */
	    conn->ibc_outstanding_credits == 0)	/* giving back credits */
		return 0;

	/* No tx to piggyback NOOP onto or no credit to send a tx */
	return (list_empty(&conn->ibc_tx_queue) || conn->ibc_credits == 1);
}
static inline void
kiblnd_abort_receives(struct kib_conn *conn)
{
	ib_modify_qp(conn->ibc_cmid->qp,
		     &kiblnd_data.kib_error_qpa, IB_QP_STATE);
}

static inline const char *
kiblnd_queue2str(struct kib_conn *conn, struct list_head *q)
{
	if (q == &conn->ibc_tx_queue)
		return "tx_queue";

	if (q == &conn->ibc_tx_queue_rsrvd)
		return "tx_queue_rsrvd";

	if (q == &conn->ibc_tx_queue_nocred)
		return "tx_queue_nocred";

	if (q == &conn->ibc_active_txs)
		return "active_txs";

	LBUG();
	return NULL;
}
/* CAVEAT EMPTOR: We rely on descriptor alignment to allow us to use the
 * lowest bits of the work request id to stash the work item type.
 */
#define IBLND_WID_INVAL	0
#define IBLND_WID_TX	1
#define IBLND_WID_RX	2
#define IBLND_WID_RDMA	3
#define IBLND_WID_MR	4
#define IBLND_WID_MASK	7UL
static inline __u64
kiblnd_ptr2wreqid(void *ptr, int type)
{
	unsigned long lptr = (unsigned long)ptr;

	LASSERT((lptr & IBLND_WID_MASK) == 0);
	LASSERT((type & ~IBLND_WID_MASK) == 0);
	return (__u64)(lptr | type);
}

static inline void *
kiblnd_wreqid2ptr(__u64 wreqid)
{
	return (void *)(((unsigned long)wreqid) & ~IBLND_WID_MASK);
}

static inline int
kiblnd_wreqid2type(__u64 wreqid)
{
	return (wreqid & IBLND_WID_MASK);
}
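/* Because every descriptor is at least 8-byte aligned, the low three bits of
 * its address are guaranteed to be zero; e.g. a tx at address 0x...40 tagged
 * with IBLND_WID_TX round-trips as kiblnd_wreqid2ptr() == 0x...40 and
 * kiblnd_wreqid2type() == IBLND_WID_TX. */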
static inline void
kiblnd_set_conn_state(struct kib_conn *conn, int state)
{
	conn->ibc_state = state;
	smp_mb();
}

static inline void
kiblnd_init_msg(struct kib_msg *msg, int type, int body_nob)
{
	msg->ibm_type = type;
	msg->ibm_nob = offsetof(struct kib_msg, ibm_u) + body_nob;
}
static inline int
kiblnd_rd_size(struct kib_rdma_desc *rd)
{
	int size;
	int i;

	for (i = size = 0; i < rd->rd_nfrags; i++)
		size += rd->rd_frags[i].rf_nob;

	return size;
}

static inline __u64
kiblnd_rd_frag_addr(struct kib_rdma_desc *rd, int index)
{
	return rd->rd_frags[index].rf_addr;
}

static inline __u32
kiblnd_rd_frag_size(struct kib_rdma_desc *rd, int index)
{
	return rd->rd_frags[index].rf_nob;
}

static inline __u32
kiblnd_rd_frag_key(struct kib_rdma_desc *rd, int index)
{
	return rd->rd_key;
}

static inline int
kiblnd_rd_consume_frag(struct kib_rdma_desc *rd, int index, __u32 nob)
{
	if (nob < rd->rd_frags[index].rf_nob) {
		rd->rd_frags[index].rf_addr += nob;
		rd->rd_frags[index].rf_nob  -= nob;
	} else {
		index++;
	}

	return index;
}
static inline int
kiblnd_rd_msg_size(struct kib_rdma_desc *rd, int msgtype, int n)
{
	LASSERT(msgtype == IBLND_MSG_GET_REQ ||
		msgtype == IBLND_MSG_PUT_ACK);

	return msgtype == IBLND_MSG_GET_REQ ?
	       offsetof(struct kib_get_msg, ibgm_rd.rd_frags[n]) :
	       offsetof(struct kib_putack_msg, ibpam_rd.rd_frags[n]);
}
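/* The offsetof() trick above sizes the wire message to cover exactly n
 * rd_frags[] entries, so a descriptor carrying few fragments is not padded
 * out to the IBLND_MAX_RDMA_FRAGS worst case. */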
static inline int
kiblnd_dma_mapping_error(struct ib_device *dev, u64 dma_addr)
{
	return ib_dma_mapping_error(dev, dma_addr);
}

static inline __u64 kiblnd_dma_map_single(struct ib_device *dev,
					  void *msg, size_t size,
					  enum dma_data_direction direction)
{
	return ib_dma_map_single(dev, msg, size, direction);
}

static inline void kiblnd_dma_unmap_single(struct ib_device *dev,
					   __u64 addr, size_t size,
					   enum dma_data_direction direction)
{
	ib_dma_unmap_single(dev, addr, size, direction);
}

#define KIBLND_UNMAP_ADDR_SET(p, m, a)	do {} while (0)
#define KIBLND_UNMAP_ADDR(p, m, a)	(a)
static inline int kiblnd_dma_map_sg(struct kib_hca_dev *hdev,
				    struct kib_tx *tx)
{
	struct scatterlist *sg = tx->tx_frags;
	int nents = tx->tx_nfrags;
	enum dma_data_direction direction = tx->tx_dmadir;

	if (tx->tx_gpu)
		return lnet_rdma_map_sg_attrs(hdev->ibh_ibdev->dma_device,
					      sg, nents, direction);

	return ib_dma_map_sg(hdev->ibh_ibdev, sg, nents, direction);
}

static inline void kiblnd_dma_unmap_sg(struct kib_hca_dev *hdev,
				       struct kib_tx *tx)
{
	struct scatterlist *sg = tx->tx_frags;
	int nents = tx->tx_nfrags;
	enum dma_data_direction direction = tx->tx_dmadir;

	if (tx->tx_gpu)
		lnet_rdma_unmap_sg(hdev->ibh_ibdev->dma_device,
				   sg, nents, direction);
	else
		ib_dma_unmap_sg(hdev->ibh_ibdev, sg, nents, direction);
}
#ifndef HAVE_IB_SG_DMA_ADDRESS
#include <linux/scatterlist.h>
#define ib_sg_dma_address(dev, sg)	sg_dma_address(sg)
#define ib_sg_dma_len(dev, sg)		sg_dma_len(sg)
#endif

static inline __u64 kiblnd_sg_dma_address(struct ib_device *dev,
					  struct scatterlist *sg)
{
	return ib_sg_dma_address(dev, sg);
}

static inline unsigned int kiblnd_sg_dma_len(struct ib_device *dev,
					     struct scatterlist *sg)
{
	return ib_sg_dma_len(dev, sg);
}

#ifndef HAVE_RDMA_CONNECT_LOCKED
#define rdma_connect_locked(cmid, cpp)	rdma_connect(cmid, cpp)
#endif
/* XXX We use KIBLND_CONN_PARAM(e) as a writable buffer. This is not strictly
 * right because OFED 1.2 defines it as const; to use it we have to add a
 * (void *) cast to overcome the "const".
 */
#define KIBLND_CONN_PARAM(e)		((e)->param.conn.private_data)
#define KIBLND_CONN_PARAM_LEN(e)	((e)->param.conn.private_data_len)
void kiblnd_abort_txs(struct kib_conn *conn, struct list_head *txs);
void kiblnd_map_rx_descs(struct kib_conn *conn);
void kiblnd_unmap_rx_descs(struct kib_conn *conn);
void kiblnd_pool_free_node(struct kib_pool *pool, struct list_head *node);
struct list_head *kiblnd_pool_alloc_node(struct kib_poolset *ps);

int kiblnd_fmr_pool_map(struct kib_fmr_poolset *fps, struct kib_tx *tx,
			struct kib_rdma_desc *rd, u32 nob, u64 iov,
			struct kib_fmr *fmr);
void kiblnd_fmr_pool_unmap(struct kib_fmr *fmr, int status);

int kiblnd_tunables_setup(struct lnet_ni *ni);
int kiblnd_tunables_init(void);

int kiblnd_connd(void *arg);
int kiblnd_scheduler(void *arg);
#define kiblnd_thread_start(fn, data, namefmt, arg...)			\
	({								\
		struct task_struct *__task = kthread_run(fn, data,	\
							 namefmt, ##arg); \
		if (!IS_ERR(__task))					\
			atomic_inc(&kiblnd_data.kib_nthreads);		\
		PTR_ERR_OR_ZERO(__task);				\
	})
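/* Illustrative use (spawning a scheduler bound to a packed thread id):
 *	kiblnd_thread_start(kiblnd_scheduler, (void *)id,
 *			    "kiblnd_sd_%02ld_%02ld",
 *			    KIB_THREAD_CPT(id), KIB_THREAD_TID(id));
 * The statement expression evaluates to 0 on success or the kthread_run()
 * errno; the thread count is only bumped when the thread was created. */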
int kiblnd_failover_thread(void *arg);

int kiblnd_alloc_pages(struct kib_pages **pp, int cpt, int npages);

int kiblnd_cm_callback(struct rdma_cm_id *cmid,
		       struct rdma_cm_event *event);
int kiblnd_translate_mtu(int value);

int kiblnd_dev_failover(struct kib_dev *dev, struct net *ns);
int kiblnd_create_peer(struct lnet_ni *ni, struct kib_peer_ni **peerp,
		       lnet_nid_t nid);
bool kiblnd_reconnect_peer(struct kib_peer_ni *peer);
void kiblnd_destroy_dev(struct kib_dev *dev);
void kiblnd_unlink_peer_locked(struct kib_peer_ni *peer_ni);
struct kib_peer_ni *kiblnd_find_peer_locked(struct lnet_ni *ni, lnet_nid_t nid);
int kiblnd_close_stale_conns_locked(struct kib_peer_ni *peer_ni,
				    int version, u64 incarnation);
int kiblnd_close_peer_conns_locked(struct kib_peer_ni *peer_ni, int why);

struct kib_conn *kiblnd_create_conn(struct kib_peer_ni *peer_ni,
				    struct rdma_cm_id *cmid,
				    int state, int version);
void kiblnd_destroy_conn(struct kib_conn *conn);
void kiblnd_close_conn(struct kib_conn *conn, int error);
void kiblnd_close_conn_locked(struct kib_conn *conn, int error);

void kiblnd_launch_tx(struct lnet_ni *ni, struct kib_tx *tx, lnet_nid_t nid);
void kiblnd_txlist_done(struct list_head *txlist, int status,
			enum lnet_msg_hstatus hstatus);

void kiblnd_qp_event(struct ib_event *event, void *arg);
void kiblnd_cq_event(struct ib_event *event, void *arg);
void kiblnd_cq_completion(struct ib_cq *cq, void *arg);

void kiblnd_pack_msg(struct lnet_ni *ni, struct kib_msg *msg, int version,
		     int credits, lnet_nid_t dstnid, __u64 dststamp);
int kiblnd_unpack_msg(struct kib_msg *msg, int nob);
int kiblnd_post_rx(struct kib_rx *rx, int credit);

int kiblnd_send(struct lnet_ni *ni, void *private, struct lnet_msg *lntmsg);
int kiblnd_recv(struct lnet_ni *ni, void *private, struct lnet_msg *lntmsg,
		int delayed, unsigned int niov,
		struct bio_vec *kiov, unsigned int offset, unsigned int mlen,
		unsigned int rlen);
unsigned int kiblnd_get_dev_prio(struct lnet_ni *ni, unsigned int dev_idx);
#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 11, 0)
#undef netdev_notifier_info_to_dev
#define netdev_notifier_info_to_dev(ndev) ndev
#endif
#define kiblnd_dump_conn_dbg(conn)			\
({							\
	if (conn && conn->ibc_cmid)			\
		CDEBUG(D_NET, "conn %p state %d nposted %d/%d c/o/r %d/%d/%d ce %d : cm_id %p qp_num 0x%x device_name %s\n", \
			conn,				\
			conn->ibc_state,		\
			conn->ibc_noops_posted,		\
			conn->ibc_nsends_posted,	\
			conn->ibc_credits,		\
			conn->ibc_outstanding_credits,	\
			conn->ibc_reserved_credits,	\
			conn->ibc_comms_error,		\
			conn->ibc_cmid,			\
			conn->ibc_cmid->qp ? conn->ibc_cmid->qp->qp_num : 0, \
			conn->ibc_cmid->qp ? (conn->ibc_cmid->qp->device ? dev_name(&conn->ibc_cmid->qp->device->dev) : "NULL") : "NULL"); \
	else if (conn)					\
		CDEBUG(D_NET, "conn %p state %d nposted %d/%d c/o/r %d/%d/%d ce %d : cm_id NULL\n", \
			conn,				\
			conn->ibc_state,		\
			conn->ibc_noops_posted,		\
			conn->ibc_nsends_posted,	\
			conn->ibc_credits,		\
			conn->ibc_outstanding_credits,	\
			conn->ibc_reserved_credits,	\
			conn->ibc_comms_error);		\
})