*/
/*
* This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
*
* lnet/klnds/o2iblnd/o2iblnd.c
*
}
}
-static int kiblnd_unpack_rd(struct kib_msg *msg, int flip)
+static int kiblnd_unpack_rd(struct kib_msg *msg, bool flip)
{
struct kib_rdma_desc *rd;
- int nob;
- int n;
- int i;
+ int nob;
+ int n;
+ int i;
- LASSERT (msg->ibm_type == IBLND_MSG_GET_REQ ||
- msg->ibm_type == IBLND_MSG_PUT_ACK);
+ LASSERT(msg->ibm_type == IBLND_MSG_GET_REQ ||
+ msg->ibm_type == IBLND_MSG_PUT_ACK);
- rd = msg->ibm_type == IBLND_MSG_GET_REQ ?
- &msg->ibm_u.get.ibgm_rd :
- &msg->ibm_u.putack.ibpam_rd;
+ rd = msg->ibm_type == IBLND_MSG_GET_REQ ?
+ &msg->ibm_u.get.ibgm_rd :
+ &msg->ibm_u.putack.ibpam_rd;
- if (flip) {
- __swab32s(&rd->rd_key);
- __swab32s(&rd->rd_nfrags);
- }
+ if (flip) {
+ __swab32s(&rd->rd_key);
+ __swab32s(&rd->rd_nfrags);
+ }
- n = rd->rd_nfrags;
+ n = rd->rd_nfrags;
- if (n <= 0 || n > IBLND_MAX_RDMA_FRAGS) {
- CERROR("Bad nfrags: %d, should be 0 < n <= %d\n",
- n, IBLND_MAX_RDMA_FRAGS);
- return 1;
- }
+ if (n <= 0 || n > IBLND_MAX_RDMA_FRAGS) {
+ CERROR("Bad nfrags: %d, should be 0 < n <= %d\n",
+ n, IBLND_MAX_RDMA_FRAGS);
+ return 1;
+ }
nob = offsetof(struct kib_msg, ibm_u) +
- kiblnd_rd_msg_size(rd, msg->ibm_type, n);
+ kiblnd_rd_msg_size(rd, msg->ibm_type, n);
- if (msg->ibm_nob < nob) {
- CERROR("Short %s: %d(%d)\n",
- kiblnd_msgtype2str(msg->ibm_type), msg->ibm_nob, nob);
- return 1;
- }
+ if (msg->ibm_nob < nob) {
+ CERROR("Short %s: %d(%d)\n",
+ kiblnd_msgtype2str(msg->ibm_type), msg->ibm_nob, nob);
+ return 1;
+ }
- if (!flip)
- return 0;
+ if (!flip)
+ return 0;
- for (i = 0; i < n; i++) {
- __swab32s(&rd->rd_frags[i].rf_nob);
- __swab64s(&rd->rd_frags[i].rf_addr);
- }
+ for (i = 0; i < n; i++) {
+ __swab32s(&rd->rd_frags[i].rf_nob);
+ __swab64s(&rd->rd_frags[i].rf_addr);
+ }
- return 0;
+ return 0;
}
void kiblnd_pack_msg(struct lnet_ni *ni, struct kib_msg *msg, int version,
int kiblnd_unpack_msg(struct kib_msg *msg, int nob)
{
const int hdr_size = offsetof(struct kib_msg, ibm_u);
- __u32 msg_cksum;
- __u16 version;
- int msg_nob;
- int flip;
-
- /* 6 bytes are enough to have received magic + version */
- if (nob < 6) {
- CERROR("Short message: %d\n", nob);
- return -EPROTO;
- }
+ __u32 msg_cksum;
+ __u16 version;
+ int msg_nob;
+ bool flip;
- if (msg->ibm_magic == IBLND_MSG_MAGIC) {
- flip = 0;
- } else if (msg->ibm_magic == __swab32(IBLND_MSG_MAGIC)) {
- flip = 1;
- } else {
- CERROR("Bad magic: %08x\n", msg->ibm_magic);
- return -EPROTO;
- }
+ /* 6 bytes are enough to have received magic + version */
+ if (nob < 6) {
+ CERROR("Short message: %d\n", nob);
+ return -EPROTO;
+ }
- version = flip ? __swab16(msg->ibm_version) : msg->ibm_version;
- if (version != IBLND_MSG_VERSION &&
- version != IBLND_MSG_VERSION_1) {
- CERROR("Bad version: %x\n", version);
- return -EPROTO;
- }
+ if (msg->ibm_magic == IBLND_MSG_MAGIC) {
+ flip = false;
+ } else if (msg->ibm_magic == __swab32(IBLND_MSG_MAGIC)) {
+ flip = true;
+ } else {
+ CERROR("Bad magic: %08x\n", msg->ibm_magic);
+ return -EPROTO;
+ }
- if (nob < hdr_size) {
- CERROR("Short message: %d\n", nob);
- return -EPROTO;
- }
+ version = flip ? __swab16(msg->ibm_version) : msg->ibm_version;
+ if (version != IBLND_MSG_VERSION &&
+ version != IBLND_MSG_VERSION_1) {
+ CERROR("Bad version: %x\n", version);
+ return -EPROTO;
+ }
- msg_nob = flip ? __swab32(msg->ibm_nob) : msg->ibm_nob;
- if (msg_nob > nob) {
- CERROR("Short message: got %d, wanted %d\n", nob, msg_nob);
- return -EPROTO;
- }
+ if (nob < hdr_size) {
+ CERROR("Short message: %d\n", nob);
+ return -EPROTO;
+ }
- /* checksum must be computed with ibm_cksum zero and BEFORE anything
- * gets flipped */
- msg_cksum = flip ? __swab32(msg->ibm_cksum) : msg->ibm_cksum;
- msg->ibm_cksum = 0;
- if (msg_cksum != 0 &&
- msg_cksum != kiblnd_cksum(msg, msg_nob)) {
- CERROR("Bad checksum\n");
- return -EPROTO;
- }
+ msg_nob = flip ? __swab32(msg->ibm_nob) : msg->ibm_nob;
+ if (msg_nob > nob) {
+ CERROR("Short message: got %d, wanted %d\n", nob, msg_nob);
+ return -EPROTO;
+ }
+
+ /* checksum must be computed with ibm_cksum zero and BEFORE anything
+ * gets flipped
+ */
+ msg_cksum = flip ? __swab32(msg->ibm_cksum) : msg->ibm_cksum;
+ msg->ibm_cksum = 0;
+ if (msg_cksum != 0 &&
+ msg_cksum != kiblnd_cksum(msg, msg_nob)) {
+ CERROR("Bad checksum\n");
+ return -EPROTO;
+ }
- msg->ibm_cksum = msg_cksum;
+ msg->ibm_cksum = msg_cksum;
- if (flip) {
- /* leave magic unflipped as a clue to peer_ni endianness */
- msg->ibm_version = version;
+ if (flip) {
+ /* leave magic unflipped as a clue to peer_ni endianness */
+ msg->ibm_version = version;
BUILD_BUG_ON(sizeof(msg->ibm_type) != 1);
BUILD_BUG_ON(sizeof(msg->ibm_credits) != 1);
- msg->ibm_nob = msg_nob;
- __swab64s(&msg->ibm_srcnid);
- __swab64s(&msg->ibm_srcstamp);
- __swab64s(&msg->ibm_dstnid);
- __swab64s(&msg->ibm_dststamp);
- }
+ msg->ibm_nob = msg_nob;
+ __swab64s(&msg->ibm_srcnid);
+ __swab64s(&msg->ibm_srcstamp);
+ __swab64s(&msg->ibm_dstnid);
+ __swab64s(&msg->ibm_dststamp);
+ }
- if (msg->ibm_srcnid == LNET_NID_ANY) {
- CERROR("Bad src nid: %s\n", libcfs_nid2str(msg->ibm_srcnid));
- return -EPROTO;
- }
+ if (msg->ibm_srcnid == LNET_NID_ANY) {
+ CERROR("Bad src nid: %s\n", libcfs_nid2str(msg->ibm_srcnid));
+ return -EPROTO;
+ }
- if (msg_nob < kiblnd_msgtype2size(msg->ibm_type)) {
- CERROR("Short %s: %d(%d)\n", kiblnd_msgtype2str(msg->ibm_type),
- msg_nob, kiblnd_msgtype2size(msg->ibm_type));
- return -EPROTO;
- }
+ if (msg_nob < kiblnd_msgtype2size(msg->ibm_type)) {
+ CERROR("Short %s: %d(%d)\n", kiblnd_msgtype2str(msg->ibm_type),
+ msg_nob, kiblnd_msgtype2size(msg->ibm_type));
+ return -EPROTO;
+ }
- switch (msg->ibm_type) {
- default:
- CERROR("Unknown message type %x\n", msg->ibm_type);
- return -EPROTO;
+ switch (msg->ibm_type) {
+ default:
+ CERROR("Unknown message type %x\n", msg->ibm_type);
+ return -EPROTO;
- case IBLND_MSG_NOOP:
- case IBLND_MSG_IMMEDIATE:
- case IBLND_MSG_PUT_REQ:
- break;
+ case IBLND_MSG_NOOP:
+ case IBLND_MSG_IMMEDIATE:
+ case IBLND_MSG_PUT_REQ:
+ break;
- case IBLND_MSG_PUT_ACK:
- case IBLND_MSG_GET_REQ:
- if (kiblnd_unpack_rd(msg, flip))
- return -EPROTO;
- break;
+ case IBLND_MSG_PUT_ACK:
+ case IBLND_MSG_GET_REQ:
+ if (kiblnd_unpack_rd(msg, flip))
+ return -EPROTO;
+ break;
- case IBLND_MSG_PUT_NAK:
- case IBLND_MSG_PUT_DONE:
- case IBLND_MSG_GET_DONE:
- if (flip)
- __swab32s(&msg->ibm_u.completion.ibcm_status);
- break;
+ case IBLND_MSG_PUT_NAK:
+ case IBLND_MSG_PUT_DONE:
+ case IBLND_MSG_GET_DONE:
+ if (flip)
+ __swab32s(&msg->ibm_u.completion.ibcm_status);
+ break;
- case IBLND_MSG_CONNREQ:
- case IBLND_MSG_CONNACK:
- if (flip) {
- __swab16s(&msg->ibm_u.connparams.ibcp_queue_depth);
- __swab16s(&msg->ibm_u.connparams.ibcp_max_frags);
- __swab32s(&msg->ibm_u.connparams.ibcp_max_msg_size);
- }
- break;
- }
- return 0;
+ case IBLND_MSG_CONNREQ:
+ case IBLND_MSG_CONNACK:
+ if (flip) {
+ __swab16s(&msg->ibm_u.connparams.ibcp_queue_depth);
+ __swab16s(&msg->ibm_u.connparams.ibcp_max_frags);
+ __swab32s(&msg->ibm_u.connparams.ibcp_max_msg_size);
+ }
+ break;
+ }
+ return 0;
}
int
{
struct kib_peer_ni *peer_ni;
struct kib_conn *conn;
- struct list_head *ctmp;
int i;
unsigned long flags;
if (peer_ni->ibp_ni != ni)
continue;
- list_for_each(ctmp, &peer_ni->ibp_conns) {
+ list_for_each_entry(conn, &peer_ni->ibp_conns,
+ ibc_list) {
if (index-- > 0)
continue;
- conn = list_entry(ctmp, struct kib_conn, ibc_list);
kiblnd_conn_addref(conn);
read_unlock_irqrestore(&kiblnd_data.kib_global_lock,
flags);
static void
kiblnd_fail_fmr_poolset(struct kib_fmr_poolset *fps, struct list_head *zombies)
{
+ struct kib_fmr_pool *fpo;
+
if (fps->fps_net == NULL) /* intialized? */
return;
spin_lock(&fps->fps_lock);
- while (!list_empty(&fps->fps_pool_list)) {
- struct kib_fmr_pool *fpo = list_entry(fps->fps_pool_list.next,
- struct kib_fmr_pool,
- fpo_list);
-
+ while ((fpo = list_first_entry_or_null(&fps->fps_pool_list,
+ struct kib_fmr_pool,
+ fpo_list)) != NULL) {
fpo->fpo_failed = 1;
if (fpo->fpo_map_count == 0)
list_move(&fpo->fpo_list, zombies);
if (frd) {
frd->frd_valid = false;
+ frd->frd_posted = false;
+ fmr->fmr_frd = NULL;
spin_lock(&fps->fps_lock);
list_add_tail(&frd->frd_list, &fpo->fast_reg.fpo_pool_list);
spin_unlock(&fps->fps_lock);
- fmr->fmr_frd = NULL;
}
}
fmr->fmr_pool = NULL;
bool is_rx = (rd != tx->tx_rd);
#ifdef HAVE_FMR_POOL_API
__u64 *pages = tx->tx_pages;
- bool tx_pages_mapped = 0;
+ bool tx_pages_mapped = false;
int npages = 0;
#endif
int rc;
if (!tx_pages_mapped) {
npages = kiblnd_map_tx_pages(tx, rd);
- tx_pages_mapped = 1;
+ tx_pages_mapped = true;
}
pfmr = kib_fmr_pool_map(fpo->fmr.fpo_fmr_pool,
pages, npages, iov);
if (likely(!IS_ERR(pfmr))) {
fmr->fmr_key = is_rx ? pfmr->fmr->rkey
- : pfmr->fmr->lkey;
+ : pfmr->fmr->lkey;
fmr->fmr_frd = NULL;
fmr->fmr_pfmr = pfmr;
fmr->fmr_pool = fpo;
#endif
struct ib_mr *mr;
- frd = list_first_entry(&fpo->fast_reg.fpo_pool_list,
- struct kib_fast_reg_descriptor,
- frd_list);
+ frd = list_first_entry(
+ &fpo->fast_reg.fpo_pool_list,
+ struct kib_fast_reg_descriptor,
+ frd_list);
list_del(&frd->frd_list);
spin_unlock(&fps->fps_lock);
rd->rd_nfrags, PAGE_SIZE);
#endif /* HAVE_IB_MAP_MR_SG_5ARGS */
if (unlikely(n != rd->rd_nfrags)) {
- CERROR("Failed to map mr %d/%d "
- "elements\n", n, rd->rd_nfrags);
+ CERROR("Failed to map mr %d/%d elements\n",
+ n, rd->rd_nfrags);
return n < 0 ? n : -EINVAL;
}
#else /* HAVE_IB_MAP_MR_SG */
if (!tx_pages_mapped) {
npages = kiblnd_map_tx_pages(tx, rd);
- tx_pages_mapped = 1;
+ tx_pages_mapped = true;
}
LASSERT(npages <= frpl->max_page_list_len);
memcpy(frpl->page_list, pages,
- sizeof(*pages) * npages);
+ sizeof(*pages) * npages);
/* Prepare FastReg WR */
wr = &frd->frd_fastreg_wr;
wr->wr.wr.fast_reg.page_shift = PAGE_SHIFT;
wr->wr.wr.fast_reg.length = nob;
wr->wr.wr.fast_reg.rkey =
- is_rx ? mr->rkey : mr->lkey;
+ is_rx ? mr->rkey : mr->lkey;
wr->wr.wr.fast_reg.access_flags =
- (IB_ACCESS_LOCAL_WRITE |
- IB_ACCESS_REMOTE_WRITE);
+ (IB_ACCESS_LOCAL_WRITE |
+ IB_ACCESS_REMOTE_WRITE);
#endif /* HAVE_IB_MAP_MR_SG */
fmr->fmr_key = is_rx ? mr->rkey : mr->lkey;
fmr->fmr_frd = frd;
fmr->fmr_pool = fpo;
+ frd->frd_posted = false;
return 0;
}
spin_unlock(&fps->fps_lock);
{
struct kib_pool *pool;
- while (!list_empty(head)) {
- pool = list_entry(head->next, struct kib_pool, po_list);
+ while ((pool = list_first_entry_or_null(head,
+ struct kib_pool,
+ po_list)) != NULL) {
list_del(&pool->po_list);
LASSERT(pool->po_owner != NULL);
static void
kiblnd_fail_poolset(struct kib_poolset *ps, struct list_head *zombies)
{
+ struct kib_pool *po;
+
if (ps->ps_net == NULL) /* intialized? */
return;
spin_lock(&ps->ps_lock);
- while (!list_empty(&ps->ps_pool_list)) {
- struct kib_pool *po = list_entry(ps->ps_pool_list.next,
- struct kib_pool, po_list);
-
+ while ((po = list_first_entry_or_null(&ps->ps_pool_list,
+ struct kib_pool,
+ po_list)) != NULL) {
po->po_failed = 1;
if (po->po_allocated == 0)
list_move(&po->po_list, zombies);
return 0;
}
+static int kiblnd_get_link_status(struct net_device *dev)
+{
+ int ret = -1;
+
+ LASSERT(dev);
+
+ if (!netif_running(dev))
+ ret = 0;
+	/* Some devices may not provide link settings */
+ else if (dev->ethtool_ops->get_link)
+ ret = dev->ethtool_ops->get_link(dev);
+
+ return ret;
+}
+
static int
kiblnd_dev_need_failover(struct kib_dev *dev, struct net *ns)
{
LIST_HEAD(zombie_tpo);
LIST_HEAD(zombie_ppo);
LIST_HEAD(zombie_fpo);
- struct rdma_cm_id *cmid = NULL;
+ struct rdma_cm_id *cmid = NULL;
struct kib_hca_dev *hdev = NULL;
struct kib_hca_dev *old;
- struct ib_pd *pd;
+ struct ib_pd *pd;
struct kib_net *net;
- struct sockaddr_in addr;
- unsigned long flags;
- int rc = 0;
+ struct sockaddr_in addr;
+ struct net_device *netdev;
+ unsigned long flags;
+ int rc = 0;
int i;
- LASSERT (*kiblnd_tunables.kib_dev_failover > 1 ||
- dev->ibd_can_failover ||
- dev->ibd_hdev == NULL);
+ LASSERT(*kiblnd_tunables.kib_dev_failover > 1 ||
+ dev->ibd_can_failover ||
+ dev->ibd_hdev == NULL);
rc = kiblnd_dev_need_failover(dev, ns);
- if (rc <= 0)
- goto out;
+ if (rc <= 0)
+ goto out;
- if (dev->ibd_hdev != NULL &&
- dev->ibd_hdev->ibh_cmid != NULL) {
- /* XXX it's not good to close old listener at here,
- * because we can fail to create new listener.
- * But we have to close it now, otherwise rdma_bind_addr
- * will return EADDRINUSE... How crap! */
+ if (dev->ibd_hdev != NULL &&
+ dev->ibd_hdev->ibh_cmid != NULL) {
+		/* XXX it's not good to close the old listener here, because
+		 * we can fail to create a new listener. But we have to close
+		 * it now, otherwise rdma_bind_addr will return EADDRINUSE...
+		 */
write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
cmid = dev->ibd_hdev->ibh_cmid;
dev->ibd_hdev->ibh_cmid = NULL;
write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
- rdma_destroy_id(cmid);
- }
+ rdma_destroy_id(cmid);
+ }
cmid = kiblnd_rdma_create_id(ns, kiblnd_cm_callback, dev, RDMA_PS_TCP,
IB_QPT_RC);
- if (IS_ERR(cmid)) {
- rc = PTR_ERR(cmid);
- CERROR("Failed to create cmid for failover: %d\n", rc);
- goto out;
- }
+ if (IS_ERR(cmid)) {
+ rc = PTR_ERR(cmid);
+ CERROR("Failed to create cmid for failover: %d\n", rc);
+ goto out;
+ }
- memset(&addr, 0, sizeof(addr));
- addr.sin_family = AF_INET;
- addr.sin_addr.s_addr = (__force u32)htonl(dev->ibd_ifip);
- addr.sin_port = htons(*kiblnd_tunables.kib_service);
+ memset(&addr, 0, sizeof(addr));
+ addr.sin_family = AF_INET;
+ addr.sin_addr.s_addr = (__force u32)htonl(dev->ibd_ifip);
+ addr.sin_port = htons(*kiblnd_tunables.kib_service);
- /* Bind to failover device or port */
- rc = rdma_bind_addr(cmid, (struct sockaddr *)&addr);
+ /* Bind to failover device or port */
+ rc = rdma_bind_addr(cmid, (struct sockaddr *)&addr);
if (rc != 0 || cmid->device == NULL) {
CERROR("Failed to bind %s:%pI4h to device(%p): %d\n",
dev->ibd_ifname, &dev->ibd_ifip,
cmid->device, rc);
- rdma_destroy_id(cmid);
- goto out;
- }
+ rdma_destroy_id(cmid);
+ goto out;
+ }
LIBCFS_ALLOC(hdev, sizeof(*hdev));
- if (hdev == NULL) {
- CERROR("Failed to allocate kib_hca_dev\n");
- rdma_destroy_id(cmid);
- rc = -ENOMEM;
- goto out;
- }
+ if (hdev == NULL) {
+ CERROR("Failed to allocate kib_hca_dev\n");
+ rdma_destroy_id(cmid);
+ rc = -ENOMEM;
+ goto out;
+ }
- atomic_set(&hdev->ibh_ref, 1);
- hdev->ibh_dev = dev;
- hdev->ibh_cmid = cmid;
- hdev->ibh_ibdev = cmid->device;
+ atomic_set(&hdev->ibh_ref, 1);
+ hdev->ibh_dev = dev;
+ hdev->ibh_cmid = cmid;
+ hdev->ibh_ibdev = cmid->device;
hdev->ibh_port = cmid->port_num;
#ifdef HAVE_IB_ALLOC_PD_2ARGS
goto out;
}
- hdev->ibh_pd = pd;
+ hdev->ibh_pd = pd;
- rc = rdma_listen(cmid, 0);
- if (rc != 0) {
- CERROR("Can't start new listener: %d\n", rc);
- goto out;
- }
+ rc = rdma_listen(cmid, 0);
+ if (rc != 0) {
+ CERROR("Can't start new listener: %d\n", rc);
+ goto out;
+ }
rc = kiblnd_hdev_get_attr(hdev);
if (rc != 0) {
if (hdev != NULL)
kiblnd_hdev_decref(hdev);
- if (rc != 0)
+ if (rc != 0) {
dev->ibd_failed_failover++;
- else
+ } else {
dev->ibd_failed_failover = 0;
+ rcu_read_lock();
+ netdev = dev_get_by_name_rcu(ns, dev->ibd_ifname);
+ if (netdev && (kiblnd_get_link_status(netdev) == 1))
+ kiblnd_set_ni_fatal_on(dev->ibd_hdev, 0);
+ rcu_read_unlock();
+ }
+
return rc;
}
cfs_percpt_for_each(sched, i, kiblnd_data.kib_scheds)
wake_up_all(&sched->ibs_waitq);
- wake_up_all(&kiblnd_data.kib_connd_waitq);
- wake_up_all(&kiblnd_data.kib_failover_waitq);
+ wake_up(&kiblnd_data.kib_connd_waitq);
+ wake_up(&kiblnd_data.kib_failover_waitq);
wait_var_event_warning(&kiblnd_data.kib_nthreads,
!atomic_read(&kiblnd_data.kib_nthreads),
}
for (i = 0; i < nthrs; i++) {
- long id;
- char name[20];
- id = KIB_THREAD_ID(sched->ibs_cpt, sched->ibs_nthreads + i);
- snprintf(name, sizeof(name), "kiblnd_sd_%02ld_%02ld",
- KIB_THREAD_CPT(id), KIB_THREAD_TID(id));
- rc = kiblnd_thread_start(kiblnd_scheduler, (void *)id, name);
+ long id = KIB_THREAD_ID(sched->ibs_cpt, sched->ibs_nthreads + i);
+
+ rc = kiblnd_thread_start(kiblnd_scheduler, (void *)id,
+ "kiblnd_sd_%02ld_%02ld",
+ KIB_THREAD_CPT(id), KIB_THREAD_TID(id));
if (rc == 0)
continue;
kiblnd_tunables_setup(ni);
/*
- * ni_interfaces is only to support legacy pre Multi-Rail
- * tcp bonding for ksocklnd. Multi-Rail wants each secondary
- * IP to be treated as an unique 'struct ni' interfaces instead.
+ * Multi-Rail wants each secondary
+	 * IP to be treated as a unique 'struct ni' interface.
*/
- if (ni->ni_interfaces[0] != NULL) {
+ if (ni->ni_interface != NULL) {
/* Use the IPoIB interface specified in 'networks=' */
- if (ni->ni_interfaces[1] != NULL) {
- CERROR("ko2iblnd: Multiple interfaces not supported\n");
- rc = -EINVAL;
- goto failed;
- }
-
- ifname = ni->ni_interfaces[0];
+ ifname = ni->ni_interface;
} else {
ifname = *kiblnd_tunables.kib_default_ipif;
}
.lnd_ctl = kiblnd_ctl,
.lnd_send = kiblnd_send,
.lnd_recv = kiblnd_recv,
+ .lnd_get_dev_prio = kiblnd_get_dev_prio,
};
static void ko2inlnd_assert_wire_constants(void)