X-Git-Url: https://git.whamcloud.com/?p=fs%2Flustre-release.git;a=blobdiff_plain;f=lnet%2Fklnds%2Fo2iblnd%2Fo2iblnd.h;h=71c1c1e35a26e8544d22983324d7378a0da24899;hp=a78fa53260303ee087d1a3c09d5fc363e7b9fb92;hb=ddea2d2f982276001562f169a3a3556a64cd58ce;hpb=0f8dca08a4f68cba82c2c822998ecc309d3b7aaf diff --git a/lnet/klnds/o2iblnd/o2iblnd.h b/lnet/klnds/o2iblnd/o2iblnd.h index a78fa53..71c1c1e 100644 --- a/lnet/klnds/o2iblnd/o2iblnd.h +++ b/lnet/klnds/o2iblnd/o2iblnd.h @@ -1,6 +1,4 @@ -/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*- - * vim:expandtab:shiftwidth=8:tabstop=8: - * +/* * GPL HEADER START * * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. @@ -28,6 +26,8 @@ /* * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved. * Use is subject to license terms. + * + * Copyright (c) 2011, 2013, Intel Corporation. */ /* * This file is part of Lustre, http://www.lustre.org/ @@ -38,23 +38,16 @@ * Author: Eric Barton */ -#ifndef EXPORT_SYMTAB -# define EXPORT_SYMTAB -#endif -#ifndef AUTOCONF_INCLUDED -#include -#endif +#include #include #include #include #include #include #include -#include #include #include -#include #include #include @@ -65,66 +58,69 @@ #include #include #include -#include #include +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,32) +#include +#endif + +#ifdef HAVE_COMPAT_RDMA +#include +#endif #include #include +#include +#include +#include +#include + #define DEBUG_SUBSYSTEM S_LND #include #include #include -#include - -#if !HAVE_GFP_T -typedef int gfp_t; -#endif -#include -#include -#include -#include - -/* tunables fixed at compile time */ -#ifdef CONFIG_SMP -# define IBLND_N_SCHED cfs_num_online_cpus() /* # schedulers */ -#else -# define IBLND_N_SCHED 1 /* # schedulers */ -#endif +#define IBLND_PEER_HASH_SIZE 101 /* # peer lists */ +/* # scheduler loops before reschedule */ +#define IBLND_RESCHED 100 -#define IBLND_PEER_HASH_SIZE 101 /* # peer lists */ -#define IBLND_RESCHED 100 /* # scheduler loops before reschedule */ +#define IBLND_N_SCHED 2 +#define IBLND_N_SCHED_HIGH 4 typedef struct { - unsigned int *kib_service; /* IB service number */ - int *kib_min_reconnect_interval; /* first failed connection retry... */ - int *kib_max_reconnect_interval; /* ...exponentially increasing to this */ - int *kib_cksum; /* checksum kib_msg_t? */ - int *kib_timeout; /* comms timeout (seconds) */ - int *kib_keepalive; /* keepalive timeout (seconds) */ - int *kib_ntx; /* # tx descs */ - int *kib_credits; /* # concurrent sends */ - int *kib_peertxcredits; /* # concurrent sends to 1 peer */ - int *kib_peerrtrcredits; /* # per-peer router buffer credits */ - int *kib_peercredits_hiw; /* # when eagerly to return credits */ - int *kib_peertimeout; /* seconds to consider peer dead */ - char **kib_default_ipif; /* default IPoIB interface */ - int *kib_retry_count; - int *kib_rnr_retry_count; - int *kib_concurrent_sends; /* send work queue sizing */ - int *kib_ib_mtu; /* IB MTU */ - int *kib_map_on_demand; /* map-on-demand if RD has more fragments - * than this value, 0 disable map-on-demand */ - int *kib_pmr_pool_size; /* # physical MR in pool */ - int *kib_fmr_pool_size; /* # FMRs in pool */ - int *kib_fmr_flush_trigger; /* When to trigger FMR flush */ - int *kib_fmr_cache; /* enable FMR pool cache? */ + int *kib_dev_failover; /* HCA failover */ + unsigned int *kib_service; /* IB service number */ + int *kib_min_reconnect_interval; /* first failed connection retry... 
*/ + int *kib_max_reconnect_interval; /* ...exponentially increasing to this */ + int *kib_cksum; /* checksum kib_msg_t? */ + int *kib_timeout; /* comms timeout (seconds) */ + int *kib_keepalive; /* keepalive timeout (seconds) */ + int *kib_ntx; /* # tx descs */ + int *kib_credits; /* # concurrent sends */ + int *kib_peertxcredits; /* # concurrent sends to 1 peer */ + int *kib_peerrtrcredits; /* # per-peer router buffer credits */ + int *kib_peercredits_hiw; /* # when eagerly to return credits */ + int *kib_peertimeout; /* seconds to consider peer dead */ + char **kib_default_ipif; /* default IPoIB interface */ + int *kib_retry_count; + int *kib_rnr_retry_count; + int *kib_concurrent_sends; /* send work queue sizing */ + int *kib_ib_mtu; /* IB MTU */ + int *kib_map_on_demand; /* map-on-demand if RD has more fragments + * than this value, 0 disable map-on-demand */ + int *kib_pmr_pool_size; /* # physical MR in pool */ + int *kib_fmr_pool_size; /* # FMRs in pool */ + int *kib_fmr_flush_trigger; /* When to trigger FMR flush */ + int *kib_fmr_cache; /* enable FMR pool cache? */ #if defined(CONFIG_SYSCTL) && !CFS_SYSFS_MODULE_PARM - cfs_sysctl_table_header_t *kib_sysctl; /* sysctl interface */ + struct ctl_table_header *kib_sysctl; /* sysctl interface */ #endif + int *kib_require_priv_port;/* accept only privileged ports */ + int *kib_use_priv_port; /* use privileged port for active connect */ + /* # threads on each CPT */ + int *kib_nscheds; } kib_tunables_t; extern kib_tunables_t kiblnd_tunables; @@ -142,6 +138,12 @@ extern kib_tunables_t kiblnd_tunables; IBLND_CREDIT_HIGHWATER_V1 : \ *kiblnd_tunables.kib_peercredits_hiw) /* when eagerly to return credits */ +#ifdef HAVE_RDMA_CREATE_ID_4ARG +#define kiblnd_rdma_create_id(cb, dev, ps, qpt) rdma_create_id(cb, dev, ps, qpt) +#else +#define kiblnd_rdma_create_id(cb, dev, ps, qpt) rdma_create_id(cb, dev, ps) +#endif + static inline int kiblnd_concurrent_sends_v1(void) { @@ -171,6 +173,12 @@ kiblnd_concurrent_sends_v1(void) /************************/ /* derived constants... 
*/ +/* Pools (shared by connections on each CPT) */ +/* These pools can grow at runtime, so don't need give a very large value */ +#define IBLND_TX_POOL 256 +#define IBLND_PMR_POOL 256 +#define IBLND_FMR_POOL 256 +#define IBLND_FMR_POOL_FLUSH 192 /* TX messages (shared by all connections) */ #define IBLND_TX_MSGS() (*kiblnd_tunables.kib_ntx) @@ -185,30 +193,58 @@ kiblnd_concurrent_sends_v1(void) #define IBLND_SEND_WRS(v) ((IBLND_RDMA_FRAGS(v) + 1) * IBLND_CONCURRENT_SENDS(v)) #define IBLND_CQ_ENTRIES(v) (IBLND_RECV_WRS(v) + IBLND_SEND_WRS(v)) +struct kib_hca_dev; + +/* o2iblnd can run over aliased interface */ +#ifdef IFALIASZ +#define KIB_IFNAME_SIZE IFALIASZ +#else +#define KIB_IFNAME_SIZE 256 +#endif + typedef struct { - cfs_list_t ibd_list; /* chain on kib_devs */ - __u32 ibd_ifip; /* IPoIB interface IP */ - char ibd_ifname[32]; /* IPoIB interface name */ - int ibd_nnets; /* # nets extant */ - - struct rdma_cm_id *ibd_cmid; /* IB listener (bound to 1 device) */ - struct ib_pd *ibd_pd; /* PD for the device */ - int ibd_page_shift; /* page shift of current HCA */ - int ibd_page_size; /* page size of current HCA */ - __u64 ibd_page_mask; /* page mask of current HCA */ - int ibd_mr_shift; /* bits shift of max MR size */ - __u64 ibd_mr_size; /* size of MR */ - - int ibd_nmrs; /* # of global MRs */ - struct ib_mr **ibd_mrs; /* MR for non RDMA I/O */ + struct list_head ibd_list; /* chain on kib_devs */ + struct list_head ibd_fail_list; /* chain on kib_failed_devs */ + __u32 ibd_ifip; /* IPoIB interface IP */ + /** IPoIB interface name */ + char ibd_ifname[KIB_IFNAME_SIZE]; + int ibd_nnets; /* # nets extant */ + + cfs_time_t ibd_next_failover; + /* # failover failures */ + int ibd_failed_failover; + /* failover in progress */ + unsigned int ibd_failover; + /* IPoIB interface is a bonding master */ + unsigned int ibd_can_failover; + struct list_head ibd_nets; + struct kib_hca_dev *ibd_hdev; } kib_dev_t; -#define IBLND_POOL_DEADLINE 300 /* # of seconds to keep pool alive */ +typedef struct kib_hca_dev +{ + struct rdma_cm_id *ibh_cmid; /* listener cmid */ + struct ib_device *ibh_ibdev; /* IB device */ + int ibh_page_shift; /* page shift of current HCA */ + int ibh_page_size; /* page size of current HCA */ + __u64 ibh_page_mask; /* page mask of current HCA */ + int ibh_mr_shift; /* bits shift of max MR size */ + __u64 ibh_mr_size; /* size of MR */ + int ibh_nmrs; /* # of global MRs */ + struct ib_mr **ibh_mrs; /* global MR */ + struct ib_pd *ibh_pd; /* PD */ + kib_dev_t *ibh_dev; /* owner */ + atomic_t ibh_ref; /* refcount */ +} kib_hca_dev_t; + +/** # of seconds to keep pool alive */ +#define IBLND_POOL_DEADLINE 300 +/** # of seconds to retry if allocation failed */ +#define IBLND_POOL_RETRY 1 typedef struct { - struct ib_device *ibp_device; /* device for mapping */ int ibp_npages; /* # pages */ struct page *ibp_pages[0]; /* page array */ } kib_pages_t; @@ -216,23 +252,22 @@ typedef struct struct kib_pmr_pool; typedef struct { - cfs_list_t pmr_list; /* chain node */ - struct ib_phys_buf *pmr_ipb; /* physical buffer */ - struct ib_mr *pmr_mr; /* IB MR */ - struct kib_pmr_pool *pmr_pool; /* owner of this MR */ - __u64 pmr_iova; /* Virtual I/O address */ - int pmr_refcount; /* reference count */ + struct list_head pmr_list; /* chain node */ + struct ib_phys_buf *pmr_ipb; /* physical buffer */ + struct ib_mr *pmr_mr; /* IB MR */ + struct kib_pmr_pool *pmr_pool; /* owner of this MR */ + __u64 pmr_iova; /* Virtual I/O address */ + int pmr_refcount; /* reference count */ } kib_phys_mr_t; struct 
kib_pool; struct kib_poolset; -typedef int (*kib_ps_pool_create_t)(struct kib_poolset *ps, int inc, struct kib_pool **pp_po); +typedef int (*kib_ps_pool_create_t)(struct kib_poolset *ps, + int inc, struct kib_pool **pp_po); typedef void (*kib_ps_pool_destroy_t)(struct kib_pool *po); -typedef void (*kib_ps_node_init_t)(struct kib_pool *po, - cfs_list_t *node); -typedef void (*kib_ps_node_fini_t)(struct kib_pool *po, - cfs_list_t *node); +typedef void (*kib_ps_node_init_t)(struct kib_pool *po, struct list_head *node); +typedef void (*kib_ps_node_fini_t)(struct kib_pool *po, struct list_head *node); struct kib_net; @@ -240,28 +275,51 @@ struct kib_net; typedef struct kib_poolset { - cfs_spinlock_t ps_lock; /* serialize */ - struct kib_net *ps_net; /* network it belongs to */ - char ps_name[IBLND_POOL_NAME_LEN]; /* pool set name */ - cfs_list_t ps_pool_list; /* list of pools */ - cfs_time_t ps_next_retry; /* time stamp for retry if failed to allocate */ - int ps_increasing; /* is allocating new pool */ - int ps_pool_size; /* new pool size */ - - kib_ps_pool_create_t ps_pool_create; /* create a new pool */ - kib_ps_pool_destroy_t ps_pool_destroy; /* destroy a pool */ - kib_ps_node_init_t ps_node_init; /* initialize new allocated node */ - kib_ps_node_fini_t ps_node_fini; /* finalize node */ + /* serialize */ + spinlock_t ps_lock; + /* network it belongs to */ + struct kib_net *ps_net; + /* pool set name */ + char ps_name[IBLND_POOL_NAME_LEN]; + /* list of pools */ + struct list_head ps_pool_list; + /* failed pool list */ + struct list_head ps_failed_pool_list; + /* time stamp for retry if failed to allocate */ + cfs_time_t ps_next_retry; + /* is allocating new pool */ + int ps_increasing; + /* new pool size */ + int ps_pool_size; + /* CPT id */ + int ps_cpt; + + /* create a new pool */ + kib_ps_pool_create_t ps_pool_create; + /* destroy a pool */ + kib_ps_pool_destroy_t ps_pool_destroy; + /* initialize new allocated node */ + kib_ps_node_init_t ps_node_init; + /* finalize node */ + kib_ps_node_fini_t ps_node_fini; } kib_poolset_t; typedef struct kib_pool { - cfs_list_t po_list; /* chain on pool list */ - cfs_list_t po_free_list; /* pre-allocated node */ - kib_poolset_t *po_owner; /* pool_set of this pool */ - cfs_time_t po_deadline; /* deadline of this pool */ - int po_allocated; /* # of elements in use */ - int po_size; /* # of pre-allocated elements */ + /* chain on pool list */ + struct list_head po_list; + /* pre-allocated node */ + struct list_head po_free_list; + /* pool_set of this pool */ + kib_poolset_t *po_owner; + /* deadline of this pool */ + cfs_time_t po_deadline; + /* # of elements in use */ + int po_allocated; + /* pool is created on failed HCA */ + int po_failed; + /* # of pre-allocated elements */ + int po_size; } kib_pool_t; typedef struct { @@ -271,6 +329,7 @@ typedef struct { typedef struct { kib_pool_t tpo_pool; /* pool */ + struct kib_hca_dev *tpo_hdev; /* device for this pool */ struct kib_tx *tpo_tx_descs; /* all the tx descriptors */ kib_pages_t *tpo_tx_pages; /* premapped tx msg pages */ } kib_tx_pool_t; @@ -280,26 +339,35 @@ typedef struct { } kib_pmr_poolset_t; typedef struct kib_pmr_pool { + struct kib_hca_dev *ppo_hdev; /* device for this pool */ kib_pool_t ppo_pool; /* pool */ } kib_pmr_pool_t; typedef struct { - cfs_spinlock_t fps_lock; /* serialize */ - struct kib_net *fps_net; /* IB network */ - cfs_list_t fps_pool_list; /* FMR pool list */ - __u64 fps_version; /* validity stamp */ - int fps_increasing; /* is allocating new pool */ - cfs_time_t fps_next_retry; 
/* time stamp for retry if failed to allocate */ + spinlock_t fps_lock; /* serialize */ + struct kib_net *fps_net; /* IB network */ + struct list_head fps_pool_list; /* FMR pool list */ + struct list_head fps_failed_pool_list; /* FMR pool list */ + __u64 fps_version; /* validity stamp */ + int fps_cpt; /* CPT id */ + int fps_pool_size; + int fps_flush_trigger; + /* is allocating new pool */ + int fps_increasing; + /* time stamp for retry if failed to allocate */ + cfs_time_t fps_next_retry; } kib_fmr_poolset_t; typedef struct { - cfs_list_t fpo_list; /* chain on pool list */ - kib_fmr_poolset_t *fpo_owner; /* owner of this pool */ - struct ib_fmr_pool *fpo_fmr_pool; /* IB FMR pool */ - cfs_time_t fpo_deadline; /* deadline of this pool */ - int fpo_map_count; /* # of mapped FMR */ + struct list_head fpo_list; /* chain on pool list */ + struct kib_hca_dev *fpo_hdev; /* device for this pool */ + kib_fmr_poolset_t *fpo_owner; /* owner of this pool */ + struct ib_fmr_pool *fpo_fmr_pool; /* IB FMR pool */ + cfs_time_t fpo_deadline; /* deadline of this pool */ + int fpo_failed; /* fmr pool is failed */ + int fpo_map_count; /* # of mapped FMR */ } kib_fmr_pool_t; typedef struct { @@ -309,44 +377,69 @@ typedef struct { typedef struct kib_net { - __u64 ibn_incarnation; /* my epoch */ - int ibn_init; /* initialisation state */ - int ibn_shutdown; /* shutting down? */ - unsigned int ibn_with_fmr:1; /* FMR? */ - unsigned int ibn_with_pmr:1; /* PMR? */ + /* chain on kib_dev_t::ibd_nets */ + struct list_head ibn_list; + __u64 ibn_incarnation;/* my epoch */ + int ibn_init; /* initialisation state */ + int ibn_shutdown; /* shutting down? */ - cfs_atomic_t ibn_npeers; /* # peers extant */ - cfs_atomic_t ibn_nconns; /* # connections extant */ + atomic_t ibn_npeers; /* # peers extant */ + atomic_t ibn_nconns; /* # connections extant */ - kib_tx_poolset_t ibn_tx_ps; /* tx pool-set */ - kib_fmr_poolset_t ibn_fmr_ps; /* fmr pool-set */ - kib_pmr_poolset_t ibn_pmr_ps; /* pmr pool-set */ + kib_tx_poolset_t **ibn_tx_ps; /* tx pool-set */ + kib_fmr_poolset_t **ibn_fmr_ps; /* fmr pool-set */ + kib_pmr_poolset_t **ibn_pmr_ps; /* pmr pool-set */ - kib_dev_t *ibn_dev; /* underlying IB device */ + kib_dev_t *ibn_dev; /* underlying IB device */ } kib_net_t; +#define KIB_THREAD_SHIFT 16 +#define KIB_THREAD_ID(cpt, tid) ((cpt) << KIB_THREAD_SHIFT | (tid)) +#define KIB_THREAD_CPT(id) ((id) >> KIB_THREAD_SHIFT) +#define KIB_THREAD_TID(id) ((id) & ((1UL << KIB_THREAD_SHIFT) - 1)) + +struct kib_sched_info { + /* serialise */ + spinlock_t ibs_lock; + /* schedulers sleep here */ + wait_queue_head_t ibs_waitq; + /* conns to check for rx completions */ + struct list_head ibs_conns; + /* number of scheduler threads */ + int ibs_nthreads; + /* max allowed scheduler threads */ + int ibs_nthreads_max; + int ibs_cpt; /* CPT id */ +}; + typedef struct { - int kib_init; /* initialisation state */ - int kib_shutdown; /* shut down? 
*/ - cfs_list_t kib_devs; /* IB devices extant */ - cfs_atomic_t kib_nthreads; /* # live threads */ - cfs_rwlock_t kib_global_lock; /* stabilize net/dev/peer/conn ops */ - - cfs_list_t *kib_peers; /* hash table of all my known peers */ - int kib_peer_hash_size;/* size of kib_peers */ - - void *kib_connd; /* the connd task (serialisation assertions) */ - cfs_list_t kib_connd_conns; /* connections to setup/teardown */ - cfs_list_t kib_connd_zombies;/* connections with zero refcount */ - cfs_waitq_t kib_connd_waitq; /* connection daemon sleeps here */ - cfs_spinlock_t kib_connd_lock; /* serialise */ - - cfs_waitq_t kib_sched_waitq; /* schedulers sleep here */ - cfs_list_t kib_sched_conns; /* conns to check for rx completions */ - cfs_spinlock_t kib_sched_lock; /* serialise */ - - struct ib_qp_attr kib_error_qpa; /* QP->ERROR */ + int kib_init; /* initialisation state */ + int kib_shutdown; /* shut down? */ + struct list_head kib_devs; /* IB devices extant */ + /* list head of failed devices */ + struct list_head kib_failed_devs; + /* schedulers sleep here */ + wait_queue_head_t kib_failover_waitq; + atomic_t kib_nthreads; /* # live threads */ + /* stabilize net/dev/peer/conn ops */ + rwlock_t kib_global_lock; + /* hash table of all my known peers */ + struct list_head *kib_peers; + /* size of kib_peers */ + int kib_peer_hash_size; + /* the connd task (serialisation assertions) */ + void *kib_connd; + /* connections to setup/teardown */ + struct list_head kib_connd_conns; + /* connections with zero refcount */ + struct list_head kib_connd_zombies; + /* connection daemon sleeps here */ + wait_queue_head_t kib_connd_waitq; + spinlock_t kib_connd_lock; /* serialise */ + struct ib_qp_attr kib_error_qpa; /* QP->ERROR */ + /* percpt data for schedulers */ + struct kib_sched_info **kib_scheds; } kib_data_t; #define IBLND_INIT_NOTHING 0 @@ -413,7 +506,7 @@ typedef struct typedef struct { /* First 2 fields fixed FOR ALL TIME */ - __u32 ibm_magic; /* I'm an openibnal message */ + __u32 ibm_magic; /* I'm an ibnal message */ __u16 ibm_version; /* this is my version number */ __u8 ibm_type; /* msg type */ @@ -476,15 +569,24 @@ typedef struct { typedef struct kib_rx /* receive message */ { - cfs_list_t rx_list; /* queue for attention */ - struct kib_conn *rx_conn; /* owning conn */ - int rx_nob; /* # bytes received (-1 while posted) */ - enum ib_wc_status rx_status; /* completion status */ - kib_msg_t *rx_msg; /* message buffer (host vaddr) */ - __u64 rx_msgaddr; /* message buffer (I/O addr) */ - DECLARE_PCI_UNMAP_ADDR (rx_msgunmap); /* for dma_unmap_single() */ - struct ib_recv_wr rx_wrq; /* receive work item... */ - struct ib_sge rx_sge; /* ...and its memory */ + /* queue for attention */ + struct list_head rx_list; + /* owning conn */ + struct kib_conn *rx_conn; + /* # bytes received (-1 while posted) */ + int rx_nob; + /* completion status */ + enum ib_wc_status rx_status; + /* message buffer (host vaddr) */ + kib_msg_t *rx_msg; + /* message buffer (I/O addr) */ + __u64 rx_msgaddr; + /* for dma_unmap_single() */ + DECLARE_PCI_UNMAP_ADDR(rx_msgunmap); + /* receive work item... */ + struct ib_recv_wr rx_wrq; + /* ...and its memory */ + struct ib_sge rx_sge; } kib_rx_t; #define IBLND_POSTRX_DONT_POST 0 /* don't post */ @@ -494,31 +596,54 @@ typedef struct kib_rx /* receive message */ typedef struct kib_tx /* transmit message */ { - cfs_list_t tx_list; /* queue on idle_txs ibc_tx_queue etc. 
*/ - kib_tx_pool_t *tx_pool; /* pool I'm from */ - struct kib_conn *tx_conn; /* owning conn */ - short tx_sending; /* # tx callbacks outstanding */ - short tx_queued; /* queued for sending */ - short tx_waiting; /* waiting for peer */ - int tx_status; /* LNET completion status */ - unsigned long tx_deadline; /* completion deadline */ - __u64 tx_cookie; /* completion cookie */ - lnet_msg_t *tx_lntmsg[2]; /* lnet msgs to finalize on completion */ - kib_msg_t *tx_msg; /* message buffer (host vaddr) */ - __u64 tx_msgaddr; /* message buffer (I/O addr) */ - DECLARE_PCI_UNMAP_ADDR (tx_msgunmap); /* for dma_unmap_single() */ - int tx_nwrq; /* # send work items */ - struct ib_send_wr *tx_wrq; /* send work items... */ - struct ib_sge *tx_sge; /* ...and their memory */ - kib_rdma_desc_t *tx_rd; /* rdma descriptor */ - int tx_nfrags; /* # entries in... */ - struct scatterlist *tx_frags; /* dma_map_sg descriptor */ - __u64 *tx_pages; /* rdma phys page addrs */ - union { - kib_phys_mr_t *pmr; /* MR for physical buffer */ - kib_fmr_t fmr; /* FMR */ - } tx_u; - int tx_dmadir; /* dma direction */ + /* queue on idle_txs ibc_tx_queue etc. */ + struct list_head tx_list; + /* pool I'm from */ + kib_tx_pool_t *tx_pool; + /* owning conn */ + struct kib_conn *tx_conn; + /* # tx callbacks outstanding */ + short tx_sending; + /* queued for sending */ + short tx_queued; + /* waiting for peer */ + short tx_waiting; + /* LNET completion status */ + int tx_status; + /* completion deadline */ + unsigned long tx_deadline; + /* completion cookie */ + __u64 tx_cookie; + /* lnet msgs to finalize on completion */ + lnet_msg_t *tx_lntmsg[2]; + /* message buffer (host vaddr) */ + kib_msg_t *tx_msg; + /* message buffer (I/O addr) */ + __u64 tx_msgaddr; + /* for dma_unmap_single() */ + DECLARE_PCI_UNMAP_ADDR(tx_msgunmap); + /* # send work items */ + int tx_nwrq; + /* send work items... */ + struct ib_send_wr *tx_wrq; + /* ...and their memory */ + struct ib_sge *tx_sge; + /* rdma descriptor */ + kib_rdma_desc_t *tx_rd; + /* # entries in... 
*/ + int tx_nfrags; + /* dma_map_sg descriptor */ + struct scatterlist *tx_frags; + /* rdma phys page addrs */ + __u64 *tx_pages; + union { + /* MR for physical buffer */ + kib_phys_mr_t *pmr; + /* FMR */ + kib_fmr_t fmr; + } tx_u; + /* dma direction */ + int tx_dmadir; } kib_tx_t; typedef struct kib_connvars @@ -529,39 +654,75 @@ typedef struct kib_connvars typedef struct kib_conn { - struct kib_peer *ibc_peer; /* owning peer */ - cfs_list_t ibc_list; /* stash on peer's conn list */ - cfs_list_t ibc_sched_list; /* schedule for attention */ - __u16 ibc_version; /* version of connection */ - __u64 ibc_incarnation; /* which instance of the peer */ - cfs_atomic_t ibc_refcount; /* # users */ - int ibc_state; /* what's happening */ - int ibc_nsends_posted; /* # uncompleted sends */ - int ibc_noops_posted; /* # uncompleted NOOPs */ - int ibc_credits; /* # credits I have */ - int ibc_outstanding_credits; /* # credits to return */ - int ibc_reserved_credits;/* # ACK/DONE msg credits */ - int ibc_comms_error; /* set on comms error */ - int ibc_nrx:16; /* receive buffers owned */ - int ibc_scheduled:1; /* scheduled for attention */ - int ibc_ready:1; /* CQ callback fired */ - unsigned long ibc_last_send; /* time of last send */ - cfs_list_t ibc_early_rxs; /* rxs completed before ESTABLISHED */ - cfs_list_t ibc_tx_queue; /* sends that need a credit */ - cfs_list_t ibc_tx_queue_nocred;/* sends that don't need a credit */ - cfs_list_t ibc_tx_queue_rsrvd; /* sends that need to reserve an ACK/DONE msg */ - cfs_list_t ibc_active_txs; /* active tx awaiting completion */ - cfs_spinlock_t ibc_lock; /* serialise */ - kib_rx_t *ibc_rxs; /* the rx descs */ - kib_pages_t *ibc_rx_pages; /* premapped rx msg pages */ - - struct rdma_cm_id *ibc_cmid; /* CM id */ - struct ib_cq *ibc_cq; /* completion queue */ - - kib_connvars_t *ibc_connvars; /* in-progress connection state */ + /* scheduler information */ + struct kib_sched_info *ibc_sched; + /* owning peer */ + struct kib_peer *ibc_peer; + /* HCA bound on */ + kib_hca_dev_t *ibc_hdev; + /* stash on peer's conn list */ + struct list_head ibc_list; + /* schedule for attention */ + struct list_head ibc_sched_list; + /* version of connection */ + __u16 ibc_version; + /* which instance of the peer */ + __u64 ibc_incarnation; + /* # users */ + atomic_t ibc_refcount; + /* what's happening */ + int ibc_state; + /* # uncompleted sends */ + int ibc_nsends_posted; + /* # uncompleted NOOPs */ + int ibc_noops_posted; + /* # credits I have */ + int ibc_credits; + /* # credits to return */ + int ibc_outstanding_credits; + /* # ACK/DONE msg credits */ + int ibc_reserved_credits; + /* set on comms error */ + int ibc_comms_error; + /* receive buffers owned */ + unsigned int ibc_nrx:16; + /* scheduled for attention */ + unsigned int ibc_scheduled:1; + /* CQ callback fired */ + unsigned int ibc_ready:1; + /* time of last send */ + unsigned long ibc_last_send; + /** link chain for kiblnd_check_conns only */ + struct list_head ibc_connd_list; + /** rxs completed before ESTABLISHED */ + struct list_head ibc_early_rxs; + /** IBLND_MSG_NOOPs for IBLND_MSG_VERSION_1 */ + struct list_head ibc_tx_noops; + /* sends that need a credit */ + struct list_head ibc_tx_queue; + /* sends that don't need a credit */ + struct list_head ibc_tx_queue_nocred; + /* sends that need to reserve an ACK/DONE msg */ + struct list_head ibc_tx_queue_rsrvd; + /* active tx awaiting completion */ + struct list_head ibc_active_txs; + /* serialise */ + spinlock_t ibc_lock; + /* the rx descs */ + kib_rx_t *ibc_rxs; + /* 
premapped rx msg pages */ + kib_pages_t *ibc_rx_pages; + + /* CM id */ + struct rdma_cm_id *ibc_cmid; + /* completion queue */ + struct ib_cq *ibc_cq; + + /* in-progress connection state */ + kib_connvars_t *ibc_connvars; } kib_conn_t; -#define IBLND_CONN_INIT 0 /* being intialised */ +#define IBLND_CONN_INIT 0 /* being initialised */ #define IBLND_CONN_ACTIVE_CONNECT 1 /* active sending req */ #define IBLND_CONN_PASSIVE_WAIT 2 /* passive waiting for rtu */ #define IBLND_CONN_ESTABLISHED 3 /* connection established */ @@ -570,100 +731,142 @@ typedef struct kib_conn typedef struct kib_peer { - cfs_list_t ibp_list; /* stash on global peer list */ - lnet_nid_t ibp_nid; /* who's on the other end(s) */ - lnet_ni_t *ibp_ni; /* LNet interface */ - cfs_atomic_t ibp_refcount; /* # users */ - cfs_list_t ibp_conns; /* all active connections */ - cfs_list_t ibp_tx_queue; /* msgs waiting for a conn */ - __u16 ibp_version; /* version of peer */ - __u64 ibp_incarnation; /* incarnation of peer */ - int ibp_connecting; /* current active connection attempts */ - int ibp_accepting; /* current passive connection attempts */ - int ibp_error; /* errno on closing this peer */ - cfs_time_t ibp_last_alive; /* when (in jiffies) I was last alive */ + /* stash on global peer list */ + struct list_head ibp_list; + /* who's on the other end(s) */ + lnet_nid_t ibp_nid; + /* LNet interface */ + lnet_ni_t *ibp_ni; + /* # users */ + atomic_t ibp_refcount; + /* all active connections */ + struct list_head ibp_conns; + /* msgs waiting for a conn */ + struct list_head ibp_tx_queue; + /* version of peer */ + __u16 ibp_version; + /* incarnation of peer */ + __u64 ibp_incarnation; + /* current active connection attempts */ + int ibp_connecting; + /* current passive connection attempts */ + int ibp_accepting; + /* errno on closing this peer */ + int ibp_error; + /* when (in jiffies) I was last alive */ + cfs_time_t ibp_last_alive; } kib_peer_t; extern kib_data_t kiblnd_data; +extern void kiblnd_hdev_destroy(kib_hca_dev_t *hdev); + +static inline void +kiblnd_hdev_addref_locked(kib_hca_dev_t *hdev) +{ + LASSERT(atomic_read(&hdev->ibh_ref) > 0); + atomic_inc(&hdev->ibh_ref); +} + +static inline void +kiblnd_hdev_decref(kib_hca_dev_t *hdev) +{ + LASSERT(atomic_read(&hdev->ibh_ref) > 0); + if (atomic_dec_and_test(&hdev->ibh_ref)) + kiblnd_hdev_destroy(hdev); +} + +static inline int +kiblnd_dev_can_failover(kib_dev_t *dev) +{ + if (!list_empty(&dev->ibd_fail_list)) /* already scheduled */ + return 0; + + if (*kiblnd_tunables.kib_dev_failover == 0) /* disabled */ + return 0; + + if (*kiblnd_tunables.kib_dev_failover > 1) /* force failover */ + return 1; + + return dev->ibd_can_failover; +} + #define kiblnd_conn_addref(conn) \ do { \ CDEBUG(D_NET, "conn[%p] (%d)++\n", \ - (conn), cfs_atomic_read(&(conn)->ibc_refcount)); \ - LASSERT(cfs_atomic_read(&(conn)->ibc_refcount) > 0); \ - cfs_atomic_inc(&(conn)->ibc_refcount); \ + (conn), atomic_read(&(conn)->ibc_refcount)); \ + atomic_inc(&(conn)->ibc_refcount); \ } while (0) -#define kiblnd_conn_decref(conn) \ -do { \ - unsigned long flags; \ - \ - CDEBUG(D_NET, "conn[%p] (%d)--\n", \ - (conn), cfs_atomic_read(&(conn)->ibc_refcount)); \ - LASSERT(cfs_atomic_read(&(conn)->ibc_refcount) > 0); \ - if (cfs_atomic_dec_and_test(&(conn)->ibc_refcount)) { \ - cfs_spin_lock_irqsave(&kiblnd_data.kib_connd_lock, flags); \ - cfs_list_add_tail(&(conn)->ibc_list, \ - &kiblnd_data.kib_connd_zombies); \ - cfs_waitq_signal(&kiblnd_data.kib_connd_waitq); \ - 
cfs_spin_unlock_irqrestore(&kiblnd_data.kib_connd_lock, flags);\ - } \ +#define kiblnd_conn_decref(conn) \ +do { \ + unsigned long flags; \ + \ + CDEBUG(D_NET, "conn[%p] (%d)--\n", \ + (conn), atomic_read(&(conn)->ibc_refcount)); \ + LASSERT_ATOMIC_POS(&(conn)->ibc_refcount); \ + if (atomic_dec_and_test(&(conn)->ibc_refcount)) { \ + spin_lock_irqsave(&kiblnd_data.kib_connd_lock, flags); \ + list_add_tail(&(conn)->ibc_list, \ + &kiblnd_data.kib_connd_zombies); \ + wake_up(&kiblnd_data.kib_connd_waitq); \ + spin_unlock_irqrestore(&kiblnd_data.kib_connd_lock, flags);\ + } \ } while (0) #define kiblnd_peer_addref(peer) \ do { \ - CDEBUG(D_NET, "peer[%p] -> %s (%d)++\n", \ - (peer), libcfs_nid2str((peer)->ibp_nid), \ - cfs_atomic_read (&(peer)->ibp_refcount)); \ - LASSERT(cfs_atomic_read(&(peer)->ibp_refcount) > 0); \ - cfs_atomic_inc(&(peer)->ibp_refcount); \ + CDEBUG(D_NET, "peer[%p] -> %s (%d)++\n", \ + (peer), libcfs_nid2str((peer)->ibp_nid), \ + atomic_read (&(peer)->ibp_refcount)); \ + atomic_inc(&(peer)->ibp_refcount); \ } while (0) #define kiblnd_peer_decref(peer) \ do { \ - CDEBUG(D_NET, "peer[%p] -> %s (%d)--\n", \ - (peer), libcfs_nid2str((peer)->ibp_nid), \ - cfs_atomic_read (&(peer)->ibp_refcount)); \ - LASSERT(cfs_atomic_read(&(peer)->ibp_refcount) > 0); \ - if (cfs_atomic_dec_and_test(&(peer)->ibp_refcount)) \ - kiblnd_destroy_peer(peer); \ + CDEBUG(D_NET, "peer[%p] -> %s (%d)--\n", \ + (peer), libcfs_nid2str((peer)->ibp_nid), \ + atomic_read (&(peer)->ibp_refcount)); \ + LASSERT_ATOMIC_POS(&(peer)->ibp_refcount); \ + if (atomic_dec_and_test(&(peer)->ibp_refcount)) \ + kiblnd_destroy_peer(peer); \ } while (0) -static inline cfs_list_t * +static inline struct list_head * kiblnd_nid2peerlist (lnet_nid_t nid) { - unsigned int hash = - ((unsigned int)nid) % kiblnd_data.kib_peer_hash_size; + unsigned int hash = + ((unsigned int)nid) % kiblnd_data.kib_peer_hash_size; - return (&kiblnd_data.kib_peers [hash]); + return &kiblnd_data.kib_peers[hash]; } static inline int kiblnd_peer_active (kib_peer_t *peer) { - /* Am I in the peer hash table? */ - return (!cfs_list_empty(&peer->ibp_list)); + /* Am I in the peer hash table? 
*/ + return !list_empty(&peer->ibp_list); } static inline kib_conn_t * kiblnd_get_conn_locked (kib_peer_t *peer) { - LASSERT (!cfs_list_empty(&peer->ibp_conns)); + LASSERT(!list_empty(&peer->ibp_conns)); /* just return the first connection */ - return cfs_list_entry(peer->ibp_conns.next, kib_conn_t, ibc_list); + return list_entry(peer->ibp_conns.next, kib_conn_t, ibc_list); } static inline int kiblnd_send_keepalive(kib_conn_t *conn) { - return (*kiblnd_tunables.kib_keepalive > 0) && - cfs_time_after(jiffies, conn->ibc_last_send + - *kiblnd_tunables.kib_keepalive*CFS_HZ); + return (*kiblnd_tunables.kib_keepalive > 0) && + cfs_time_after(jiffies, conn->ibc_last_send + + *kiblnd_tunables.kib_keepalive*HZ); } static inline int -kiblnd_send_noop(kib_conn_t *conn) +kiblnd_need_noop(kib_conn_t *conn) { LASSERT (conn->ibc_state >= IBLND_CONN_ESTABLISHED); @@ -672,15 +875,26 @@ kiblnd_send_noop(kib_conn_t *conn) !kiblnd_send_keepalive(conn)) return 0; /* No need to send NOOP */ - if (!cfs_list_empty(&conn->ibc_tx_queue_nocred)) - return 0; /* NOOP can be piggybacked */ + if (IBLND_OOB_CAPABLE(conn->ibc_version)) { + if (!list_empty(&conn->ibc_tx_queue_nocred)) + return 0; /* NOOP can be piggybacked */ + + /* No tx to piggyback NOOP onto or no credit to send a tx */ + return (list_empty(&conn->ibc_tx_queue) || + conn->ibc_credits == 0); + } + + if (!list_empty(&conn->ibc_tx_noops) || /* NOOP already queued */ + !list_empty(&conn->ibc_tx_queue_nocred) || /* piggyback NOOP */ + conn->ibc_credits == 0) /* no credit */ + return 0; - if (!IBLND_OOB_CAPABLE(conn->ibc_version)) - /* can't piggyback? */ - return cfs_list_empty(&conn->ibc_tx_queue); + if (conn->ibc_credits == 1 && /* last credit reserved for */ + conn->ibc_outstanding_credits == 0) /* giving back credits */ + return 0; /* No tx to piggyback NOOP onto or no credit to send a tx */ - return (cfs_list_empty(&conn->ibc_tx_queue) || conn->ibc_credits == 0); + return (list_empty(&conn->ibc_tx_queue) || conn->ibc_credits == 1); } static inline void @@ -691,22 +905,22 @@ kiblnd_abort_receives(kib_conn_t *conn) } static inline const char * -kiblnd_queue2str (kib_conn_t *conn, cfs_list_t *q) +kiblnd_queue2str(kib_conn_t *conn, struct list_head *q) { - if (q == &conn->ibc_tx_queue) - return "tx_queue"; + if (q == &conn->ibc_tx_queue) + return "tx_queue"; - if (q == &conn->ibc_tx_queue_rsrvd) - return "tx_queue_rsrvd"; + if (q == &conn->ibc_tx_queue_rsrvd) + return "tx_queue_rsrvd"; - if (q == &conn->ibc_tx_queue_nocred) - return "tx_queue_nocred"; + if (q == &conn->ibc_tx_queue_nocred) + return "tx_queue_nocred"; - if (q == &conn->ibc_active_txs) - return "active_txs"; + if (q == &conn->ibc_active_txs) + return "active_txs"; - LBUG(); - return NULL; + LBUG(); + return NULL; } /* CAVEAT EMPTOR: We rely on descriptor alignment to allow us to use the @@ -742,8 +956,8 @@ kiblnd_wreqid2type (__u64 wreqid) static inline void kiblnd_set_conn_state (kib_conn_t *conn, int state) { - conn->ibc_state = state; - cfs_mb(); + conn->ibc_state = state; + smp_mb(); } static inline void @@ -923,24 +1137,24 @@ static inline unsigned int kiblnd_sg_dma_len(struct ib_device *dev, #endif -struct ib_mr *kiblnd_find_rd_dma_mr(kib_net_t *net, +struct ib_mr *kiblnd_find_rd_dma_mr(kib_hca_dev_t *hdev, kib_rdma_desc_t *rd); -struct ib_mr *kiblnd_find_dma_mr(kib_net_t *net, +struct ib_mr *kiblnd_find_dma_mr(kib_hca_dev_t *hdev, __u64 addr, __u64 size); void kiblnd_map_rx_descs(kib_conn_t *conn); void kiblnd_unmap_rx_descs(kib_conn_t *conn); int kiblnd_map_tx(lnet_ni_t *ni, kib_tx_t 
*tx, kib_rdma_desc_t *rd, int nfrags); void kiblnd_unmap_tx(lnet_ni_t *ni, kib_tx_t *tx); -void kiblnd_pool_free_node(kib_pool_t *pool, cfs_list_t *node); -cfs_list_t *kiblnd_pool_alloc_node(kib_poolset_t *ps); +void kiblnd_pool_free_node(kib_pool_t *pool, struct list_head *node); +struct list_head *kiblnd_pool_alloc_node(kib_poolset_t *ps); int kiblnd_fmr_pool_map(kib_fmr_poolset_t *fps, __u64 *pages, int npages, __u64 iov, kib_fmr_t *fmr); void kiblnd_fmr_pool_unmap(kib_fmr_t *fmr, int status); -int kiblnd_pmr_pool_map(kib_pmr_poolset_t *pps, kib_rdma_desc_t *rd, - __u64 *iova, kib_phys_mr_t **pp_pmr); +int kiblnd_pmr_pool_map(kib_pmr_poolset_t *pps, kib_hca_dev_t *hdev, + kib_rdma_desc_t *rd, __u64 *iova, kib_phys_mr_t **pp_pmr); void kiblnd_pmr_pool_unmap(kib_phys_mr_t *pmr); int kiblnd_startup (lnet_ni_t *ni); @@ -953,15 +1167,17 @@ void kiblnd_tunables_fini(void); int kiblnd_connd (void *arg); int kiblnd_scheduler(void *arg); -int kiblnd_thread_start (int (*fn)(void *arg), void *arg); +int kiblnd_thread_start(int (*fn)(void *arg), void *arg, char *name); +int kiblnd_failover_thread (void *arg); -int kiblnd_alloc_pages (kib_pages_t **pp, int npages); +int kiblnd_alloc_pages(kib_pages_t **pp, int cpt, int npages); void kiblnd_free_pages (kib_pages_t *p); int kiblnd_cm_callback(struct rdma_cm_id *cmid, struct rdma_cm_event *event); int kiblnd_translate_mtu(int value); +int kiblnd_dev_failover(kib_dev_t *dev); int kiblnd_create_peer (lnet_ni_t *ni, kib_peer_t **peerp, lnet_nid_t nid); void kiblnd_destroy_peer (kib_peer_t *peer); void kiblnd_destroy_dev (kib_dev_t *dev); @@ -987,8 +1203,7 @@ void kiblnd_launch_tx (lnet_ni_t *ni, kib_tx_t *tx, lnet_nid_t nid); void kiblnd_queue_tx_locked (kib_tx_t *tx, kib_conn_t *conn); void kiblnd_queue_tx (kib_tx_t *tx, kib_conn_t *conn); void kiblnd_init_tx_msg (lnet_ni_t *ni, kib_tx_t *tx, int type, int body_nob); -void kiblnd_txlist_done (lnet_ni_t *ni, cfs_list_t *txlist, - int status); +void kiblnd_txlist_done(lnet_ni_t *ni, struct list_head *txlist, int status); void kiblnd_check_sends (kib_conn_t *conn); void kiblnd_qp_event(struct ib_event *event, void *arg);
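
--
Illustrative notes on interfaces added by this patch (sketches only; none of
the code below is part of the patch itself).

The kiblnd_rdma_create_id() wrapper papers over the kernel API change that
added a QP-type argument to rdma_create_id(), so call sites can be written
once for both kernels.  A sketch with error handling elided; "dev" stands for
whatever context pointer the caller wants handed back to kiblnd_cm_callback():

	struct rdma_cm_id *cmid;

	/* expands to the 3- or 4-argument rdma_create_id() depending on
	 * HAVE_RDMA_CREATE_ID_4ARG */
	cmid = kiblnd_rdma_create_id(kiblnd_cm_callback, dev, RDMA_PS_TCP,
				     IB_QPT_RC);
	if (IS_ERR(cmid))
		return PTR_ERR(cmid);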
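The KIB_THREAD_* macros pack a CPT id and a per-CPT thread index into a
single long, so one value can name a scheduler thread: KIB_THREAD_ID(2, 3)
is (2 << 16) | 3, and KIB_THREAD_CPT()/KIB_THREAD_TID() recover 2 and 3
from it.  A minimal sketch of a scheduler mapping its id back to its percpt
state, assuming only the macros and kib_data_t::kib_scheds declared above
(kiblnd_id2sched() is a hypothetical helper, not in the patch):

	static struct kib_sched_info *
	kiblnd_id2sched(long id)
	{
		/* high bits select the CPT, low bits the thread on that CPT;
		 * hypothetical helper -- not part of this patch */
		return kiblnd_data.kib_scheds[KIB_THREAD_CPT(id)];
	}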
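The kib_dev_t/kib_hca_dev_t split lets kiblnd_dev_failover() swap the HCA
under a device while existing connections keep the kib_hca_dev_t they were
created on, pinned by the new ibh_ref count.  A sketch of that discipline
under stated assumptions: ibd_hdev is only replaced by failover under
kib_global_lock, and kiblnd_conn_pin_hdev() is a hypothetical name:

	static void
	kiblnd_conn_pin_hdev(kib_conn_t *conn, kib_dev_t *dev)
	{
		unsigned long flags;

		/* ibd_hdev may be swapped by failover, so take the reference
		 * under the global lock that stabilizes net/dev/peer/conn
		 * state (hypothetical helper -- not part of this patch) */
		read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
		conn->ibc_hdev = dev->ibd_hdev;
		kiblnd_hdev_addref_locked(conn->ibc_hdev);
		read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
	}

On teardown the connection drops its pin with
kiblnd_hdev_decref(conn->ibc_hdev); the final reference frees the descriptor
through kiblnd_hdev_destroy().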