X-Git-Url: https://git.whamcloud.com/?p=fs%2Flustre-release.git;a=blobdiff_plain;f=lnet%2Fklnds%2Fralnd%2Fralnd.c;h=91ff1b147b3f675a5e188e2c07a97e2c6065ec63;hp=bee886d9d109cf6c58deae09c37485464560bef8;hb=59071a8334bbc1a3a6d31565b7474063438d1f43;hpb=bbca181a81d9b4c47deb26cf7a3e466e67f33563 diff --git a/lnet/klnds/ralnd/ralnd.c b/lnet/klnds/ralnd/ralnd.c index bee886d..91ff1b1 100644 --- a/lnet/klnds/ralnd/ralnd.c +++ b/lnet/klnds/ralnd/ralnd.c @@ -1,220 +1,62 @@ -/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*- - * vim:expandtab:shiftwidth=8:tabstop=8: +/* + * GPL HEADER START * - * Copyright (C) 2004 Cluster File Systems, Inc. - * Author: Eric Barton + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * - * This file is part of Lustre, http://www.lustre.org. + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 only, + * as published by the Free Software Foundation. * - * Lustre is free software; you can redistribute it and/or - * modify it under the terms of version 2 of the GNU General Public - * License as published by the Free Software Foundation. + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License version 2 for more details (a copy is included + * in the LICENSE file that accompanied this code). * - * Lustre is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. + * You should have received a copy of the GNU General Public License + * version 2 along with this program; If not, see + * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf * - * You should have received a copy of the GNU General Public License - * along with Lustre; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. 
* + * GPL HEADER END */ -#include "ranal.h" - -static int kranal_devids[] = {RAPK_MAIN_DEVICE_ID, - RAPK_EXPANSION_DEVICE_ID}; - -nal_t kranal_api; -ptl_handle_ni_t kranal_ni; -kra_data_t kranal_data; -kra_tunables_t kranal_tunables; - -#define RANAL_SYSCTL_TIMEOUT 1 -#define RANAL_SYSCTL_LISTENER_TIMEOUT 2 -#define RANAL_SYSCTL_BACKLOG 3 -#define RANAL_SYSCTL_PORT 4 -#define RANAL_SYSCTL_MAX_IMMEDIATE 5 - -#define RANAL_SYSCTL 202 - -static ctl_table kranal_ctl_table[] = { - {RANAL_SYSCTL_TIMEOUT, "timeout", - &kranal_tunables.kra_timeout, sizeof(int), - 0644, NULL, &proc_dointvec}, - {RANAL_SYSCTL_LISTENER_TIMEOUT, "listener_timeout", - &kranal_tunables.kra_listener_timeout, sizeof(int), - 0644, NULL, &proc_dointvec}, - {RANAL_SYSCTL_BACKLOG, "backlog", - &kranal_tunables.kra_backlog, sizeof(int), - 0644, NULL, kranal_listener_procint}, - {RANAL_SYSCTL_PORT, "port", - &kranal_tunables.kra_port, sizeof(int), - 0644, NULL, kranal_listener_procint}, - {RANAL_SYSCTL_MAX_IMMEDIATE, "max_immediate", - &kranal_tunables.kra_max_immediate, sizeof(int), - 0644, NULL, &proc_dointvec}, - { 0 } -}; - -static ctl_table kranal_top_ctl_table[] = { - {RANAL_SYSCTL, "ranal", NULL, 0, 0555, kranal_ctl_table}, - { 0 } +/* + * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved. + * Use is subject to license terms. + * + * Copyright (c) 2012, Intel Corporation. + */ +/* + * This file is part of Lustre, http://www.lustre.org/ + * Lustre is a trademark of Sun Microsystems, Inc. + * + * lnet/klnds/ralnd/ralnd.c + * + * Author: Eric Barton + */ +#include "ralnd.h" + +static int kranal_devids[RANAL_MAXDEVS] = {RAPK_MAIN_DEVICE_ID, + RAPK_EXPANSION_DEVICE_ID}; + +lnd_t the_kralnd = { + .lnd_type = RALND, + .lnd_startup = kranal_startup, + .lnd_shutdown = kranal_shutdown, + .lnd_ctl = kranal_ctl, + .lnd_send = kranal_send, + .lnd_recv = kranal_recv, + .lnd_eager_recv = kranal_eager_recv, + .lnd_accept = kranal_accept, }; -int -kranal_sock_write (struct socket *sock, void *buffer, int nob) -{ - int rc; - mm_segment_t oldmm = get_fs(); - struct iovec iov = { - .iov_base = buffer, - .iov_len = nob - }; - struct msghdr msg = { - .msg_name = NULL, - .msg_namelen = 0, - .msg_iov = &iov, - .msg_iovlen = 1, - .msg_control = NULL, - .msg_controllen = 0, - .msg_flags = MSG_DONTWAIT - }; - - /* We've set up the socket's send buffer to be large enough for - * everything we send, so a single non-blocking send should - * complete without error. 
*/ - - set_fs(KERNEL_DS); - rc = sock_sendmsg(sock, &msg, iov.iov_len); - set_fs(oldmm); - - if (rc == nob) - return 0; - - if (rc >= 0) - return -EAGAIN; - - return rc; -} - -int -kranal_sock_read (struct socket *sock, void *buffer, int nob, int timeout) -{ - int rc; - mm_segment_t oldmm = get_fs(); - long ticks = timeout * HZ; - unsigned long then; - struct timeval tv; - - LASSERT (nob > 0); - LASSERT (ticks > 0); - - for (;;) { - struct iovec iov = { - .iov_base = buffer, - .iov_len = nob - }; - struct msghdr msg = { - .msg_name = NULL, - .msg_namelen = 0, - .msg_iov = &iov, - .msg_iovlen = 1, - .msg_control = NULL, - .msg_controllen = 0, - .msg_flags = 0 - }; - - /* Set receive timeout to remaining time */ - tv = (struct timeval) { - .tv_sec = ticks / HZ, - .tv_usec = ((ticks % HZ) * 1000000) / HZ - }; - set_fs(KERNEL_DS); - rc = sock_setsockopt(sock, SOL_SOCKET, SO_RCVTIMEO, - (char *)&tv, sizeof(tv)); - set_fs(oldmm); - if (rc != 0) { - CERROR("Can't set socket recv timeout %d: %d\n", - timeout, rc); - return rc; - } - - set_fs(KERNEL_DS); - then = jiffies; - rc = sock_recvmsg(sock, &msg, iov.iov_len, 0); - ticks -= jiffies - then; - set_fs(oldmm); - - if (rc < 0) - return rc; - - if (rc == 0) - return -ECONNABORTED; - - buffer = ((char *)buffer) + rc; - nob -= rc; - - if (nob == 0) - return 0; - - if (ticks <= 0) - return -ETIMEDOUT; - } -} - -int -kranal_create_sock(struct socket **sockp) -{ - struct socket *sock; - int rc; - int option; - mm_segment_t oldmm = get_fs(); - - rc = sock_create(PF_INET, SOCK_STREAM, 0, &sock); - if (rc != 0) { - CERROR("Can't create socket: %d\n", rc); - return rc; - } - - /* Ensure sending connection info doesn't block */ - option = 2 * sizeof(kra_connreq_t); - set_fs(KERNEL_DS); - rc = sock_setsockopt(sock, SOL_SOCKET, SO_SNDBUF, - (char *)&option, sizeof(option)); - set_fs(oldmm); - if (rc != 0) { - CERROR("Can't set send buffer %d: %d\n", option, rc); - goto failed; - } - - option = 1; - set_fs(KERNEL_DS); - rc = sock_setsockopt(sock, SOL_SOCKET, SO_REUSEADDR, - (char *)&option, sizeof(option)); - set_fs(oldmm); - if (rc != 0) { - CERROR("Can't set SO_REUSEADDR: %d\n", rc); - goto failed; - } - - *sockp = sock; - return 0; - - failed: - sock_release(sock); - return rc; -} - -void -kranal_pause(int ticks) -{ - set_current_state(TASK_UNINTERRUPTIBLE); - schedule_timeout(ticks); -} +kra_data_t kranal_data; void -kranal_pack_connreq(kra_connreq_t *connreq, kra_conn_t *conn, ptl_nid_t dstnid) +kranal_pack_connreq(kra_connreq_t *connreq, kra_conn_t *conn, lnet_nid_t dstnid) { RAP_RETURN rrc; @@ -222,8 +64,12 @@ kranal_pack_connreq(kra_connreq_t *connreq, kra_conn_t *conn, ptl_nid_t dstnid) connreq->racr_magic = RANAL_MSG_MAGIC; connreq->racr_version = RANAL_MSG_VERSION; + + if (conn == NULL) /* prepping a "stub" reply */ + return; + connreq->racr_devid = conn->rac_device->rad_id; - connreq->racr_srcnid = kranal_lib.libnal_ni.ni_pid.nid; + connreq->racr_srcnid = kranal_data.kra_ni->ni_nid; connreq->racr_dstnid = dstnid; connreq->racr_peerstamp = kranal_data.kra_peerstamp; connreq->racr_connstamp = conn->rac_my_connstamp; @@ -234,22 +80,73 @@ kranal_pack_connreq(kra_connreq_t *connreq, kra_conn_t *conn, ptl_nid_t dstnid) } int -kranal_recv_connreq(struct socket *sock, kra_connreq_t *connreq, int timeout) +kranal_recv_connreq(struct socket *sock, kra_connreq_t *connreq, int active) { + int timeout = active ? 
*kranal_tunables.kra_timeout : + lnet_acceptor_timeout(); + int swab; int rc; - rc = kranal_sock_read(sock, connreq, sizeof(*connreq), timeout); + /* return 0 on success, -ve on error, +ve to tell the peer I'm "old" */ + + rc = libcfs_sock_read(sock, &connreq->racr_magic, + sizeof(connreq->racr_magic), timeout); if (rc != 0) { - CERROR("Read failed: %d\n", rc); - return rc; + CERROR("Read(magic) failed(1): %d\n", rc); + return -EIO; + } + + if (connreq->racr_magic != RANAL_MSG_MAGIC && + connreq->racr_magic != __swab32(RANAL_MSG_MAGIC)) { + /* Unexpected magic! */ + if (!active && + (connreq->racr_magic == LNET_PROTO_MAGIC || + connreq->racr_magic == __swab32(LNET_PROTO_MAGIC))) { + /* future protocol version compatibility! + * When LNET unifies protocols over all LNDs, the first + * thing sent will be a version query. +ve rc means I + * reply with my current magic/version */ + return EPROTO; + } + + CERROR("Unexpected magic %08x (%s)\n", + connreq->racr_magic, active ? "active" : "passive"); + return -EPROTO; + } + + swab = (connreq->racr_magic == __swab32(RANAL_MSG_MAGIC)); + + rc = libcfs_sock_read(sock, &connreq->racr_version, + sizeof(connreq->racr_version), timeout); + if (rc != 0) { + CERROR("Read(version) failed: %d\n", rc); + return -EIO; } - if (connreq->racr_magic != RANAL_MSG_MAGIC) { - if (__swab32(connreq->racr_magic) != RANAL_MSG_MAGIC) { - CERROR("Unexpected magic %08x\n", connreq->racr_magic); + if (swab) + __swab16s(&connreq->racr_version); + + if (connreq->racr_version != RANAL_MSG_VERSION) { + if (active) { + CERROR("Unexpected version %d\n", connreq->racr_version); return -EPROTO; } + /* If this is a future version of the ralnd protocol, and I'm + * passive (accepted the connection), tell my peer I'm "old" + * (+ve rc) */ + return EPROTO; + } + + rc = libcfs_sock_read(sock, &connreq->racr_devid, + sizeof(connreq->racr_version) - + offsetof(kra_connreq_t, racr_devid), + timeout); + if (rc != 0) { + CERROR("Read(body) failed: %d\n", rc); + return -EIO; + } + if (swab) { __swab32s(&connreq->racr_magic); __swab16s(&connreq->racr_version); __swab16s(&connreq->racr_devid); @@ -265,14 +162,9 @@ kranal_recv_connreq(struct socket *sock, kra_connreq_t *connreq, int timeout) __swab32s(&connreq->racr_riparams.CompletionCookie); } - if (connreq->racr_version != RANAL_MSG_VERSION) { - CERROR("Unexpected version %d\n", connreq->racr_version); - return -EPROTO; - } - - if (connreq->racr_srcnid == PTL_NID_ANY || - connreq->racr_dstnid == PTL_NID_ANY) { - CERROR("Received PTL_NID_ANY\n"); + if (connreq->racr_srcnid == LNET_NID_ANY || + connreq->racr_dstnid == LNET_NID_ANY) { + CERROR("Received LNET_NID_ANY\n"); return -EPROTO; } @@ -289,22 +181,23 @@ int kranal_close_stale_conns_locked (kra_peer_t *peer, kra_conn_t *newconn) { kra_conn_t *conn; - struct list_head *ctmp; - struct list_head *cnxt; + cfs_list_t *ctmp; + cfs_list_t *cnxt; int loopback; int count = 0; - loopback = peer->rap_nid == kranal_lib.libnal_ni.ni_pid.nid; + loopback = peer->rap_nid == kranal_data.kra_ni->ni_nid; - list_for_each_safe (ctmp, cnxt, &peer->rap_conns) { - conn = list_entry(ctmp, kra_conn_t, rac_list); + cfs_list_for_each_safe (ctmp, cnxt, &peer->rap_conns) { + conn = cfs_list_entry(ctmp, kra_conn_t, rac_list); if (conn == newconn) continue; if (conn->rac_peerstamp != newconn->rac_peerstamp) { - CDEBUG(D_NET, "Closing stale conn nid:"LPX64 - " peerstamp:"LPX64"("LPX64")\n", peer->rap_nid, + CDEBUG(D_NET, "Closing stale conn nid: %s " + " peerstamp:"LPX64"("LPX64")\n", + libcfs_nid2str(peer->rap_nid), 
conn->rac_peerstamp, newconn->rac_peerstamp); LASSERT (conn->rac_peerstamp < newconn->rac_peerstamp); count++; @@ -322,8 +215,9 @@ kranal_close_stale_conns_locked (kra_peer_t *peer, kra_conn_t *newconn) LASSERT (conn->rac_peer_connstamp < newconn->rac_peer_connstamp); - CDEBUG(D_NET, "Closing stale conn nid:"LPX64 - " connstamp:"LPX64"("LPX64")\n", peer->rap_nid, + CDEBUG(D_NET, "Closing stale conn nid: %s" + " connstamp:"LPX64"("LPX64")\n", + libcfs_nid2str(peer->rap_nid), conn->rac_peer_connstamp, newconn->rac_peer_connstamp); count++; @@ -337,13 +231,13 @@ int kranal_conn_isdup_locked(kra_peer_t *peer, kra_conn_t *newconn) { kra_conn_t *conn; - struct list_head *tmp; + cfs_list_t *tmp; int loopback; - loopback = peer->rap_nid == kranal_lib.libnal_ni.ni_pid.nid; + loopback = peer->rap_nid == kranal_data.kra_ni->ni_nid; - list_for_each(tmp, &peer->rap_conns) { - conn = list_entry(tmp, kra_conn_t, rac_list); + cfs_list_for_each(tmp, &peer->rap_conns) { + conn = cfs_list_entry(tmp, kra_conn_t, rac_list); /* 'newconn' is from an earlier version of 'peer'!!! */ if (newconn->rac_peerstamp < conn->rac_peerstamp) @@ -386,7 +280,7 @@ kranal_set_conn_uniqueness (kra_conn_t *conn) { unsigned long flags; - write_lock_irqsave(&kranal_data.kra_global_lock, flags); + write_lock_irqsave(&kranal_data.kra_global_lock, flags); conn->rac_my_connstamp = kranal_data.kra_connstamp++; @@ -394,7 +288,7 @@ kranal_set_conn_uniqueness (kra_conn_t *conn) conn->rac_cqid = kranal_data.kra_next_cqid++; } while (kranal_cqid2conn_locked(conn->rac_cqid) != NULL); - write_unlock_irqrestore(&kranal_data.kra_global_lock, flags); + write_unlock_irqrestore(&kranal_data.kra_global_lock, flags); } int @@ -403,37 +297,37 @@ kranal_create_conn(kra_conn_t **connp, kra_device_t *dev) kra_conn_t *conn; RAP_RETURN rrc; - LASSERT (!in_interrupt()); - PORTAL_ALLOC(conn, sizeof(*conn)); + LASSERT (!cfs_in_interrupt()); + LIBCFS_ALLOC(conn, sizeof(*conn)); if (conn == NULL) return -ENOMEM; memset(conn, 0, sizeof(*conn)); - atomic_set(&conn->rac_refcount, 1); - INIT_LIST_HEAD(&conn->rac_list); - INIT_LIST_HEAD(&conn->rac_hashlist); - INIT_LIST_HEAD(&conn->rac_schedlist); - INIT_LIST_HEAD(&conn->rac_fmaq); - INIT_LIST_HEAD(&conn->rac_rdmaq); - INIT_LIST_HEAD(&conn->rac_replyq); - spin_lock_init(&conn->rac_lock); + cfs_atomic_set(&conn->rac_refcount, 1); + CFS_INIT_LIST_HEAD(&conn->rac_list); + CFS_INIT_LIST_HEAD(&conn->rac_hashlist); + CFS_INIT_LIST_HEAD(&conn->rac_schedlist); + CFS_INIT_LIST_HEAD(&conn->rac_fmaq); + CFS_INIT_LIST_HEAD(&conn->rac_rdmaq); + CFS_INIT_LIST_HEAD(&conn->rac_replyq); + spin_lock_init(&conn->rac_lock); kranal_set_conn_uniqueness(conn); conn->rac_device = dev; - conn->rac_timeout = MAX(kranal_tunables.kra_timeout, RANAL_MIN_TIMEOUT); + conn->rac_timeout = MAX(*kranal_tunables.kra_timeout, RANAL_MIN_TIMEOUT); kranal_update_reaper_timeout(conn->rac_timeout); rrc = RapkCreateRi(dev->rad_handle, conn->rac_cqid, &conn->rac_rihandle); if (rrc != RAP_SUCCESS) { CERROR("RapkCreateRi failed: %d\n", rrc); - PORTAL_FREE(conn, sizeof(*conn)); + LIBCFS_FREE(conn, sizeof(*conn)); return -ENETDOWN; } - atomic_inc(&kranal_data.kra_nconns); + cfs_atomic_inc(&kranal_data.kra_nconns); *connp = conn; return 0; } @@ -443,15 +337,15 @@ kranal_destroy_conn(kra_conn_t *conn) { RAP_RETURN rrc; - LASSERT (!in_interrupt()); + LASSERT (!cfs_in_interrupt()); LASSERT (!conn->rac_scheduled); - LASSERT (list_empty(&conn->rac_list)); - LASSERT (list_empty(&conn->rac_hashlist)); - LASSERT (list_empty(&conn->rac_schedlist)); - LASSERT 
(atomic_read(&conn->rac_refcount) == 0); - LASSERT (list_empty(&conn->rac_fmaq)); - LASSERT (list_empty(&conn->rac_rdmaq)); - LASSERT (list_empty(&conn->rac_replyq)); + LASSERT (cfs_list_empty(&conn->rac_list)); + LASSERT (cfs_list_empty(&conn->rac_hashlist)); + LASSERT (cfs_list_empty(&conn->rac_schedlist)); + LASSERT (cfs_atomic_read(&conn->rac_refcount) == 0); + LASSERT (cfs_list_empty(&conn->rac_fmaq)); + LASSERT (cfs_list_empty(&conn->rac_rdmaq)); + LASSERT (cfs_list_empty(&conn->rac_replyq)); rrc = RapkDestroyRi(conn->rac_device->rad_handle, conn->rac_rihandle); @@ -460,20 +354,20 @@ kranal_destroy_conn(kra_conn_t *conn) if (conn->rac_peer != NULL) kranal_peer_decref(conn->rac_peer); - PORTAL_FREE(conn, sizeof(*conn)); - atomic_dec(&kranal_data.kra_nconns); + LIBCFS_FREE(conn, sizeof(*conn)); + cfs_atomic_dec(&kranal_data.kra_nconns); } void kranal_terminate_conn_locked (kra_conn_t *conn) { - LASSERT (!in_interrupt()); + LASSERT (!cfs_in_interrupt()); LASSERT (conn->rac_state == RANAL_CONN_CLOSING); - LASSERT (!list_empty(&conn->rac_hashlist)); - LASSERT (list_empty(&conn->rac_list)); + LASSERT (!cfs_list_empty(&conn->rac_hashlist)); + LASSERT (cfs_list_empty(&conn->rac_list)); /* Remove from conn hash table: no new callbacks */ - list_del_init(&conn->rac_hashlist); + cfs_list_del_init(&conn->rac_hashlist); kranal_conn_decref(conn); conn->rac_state = RANAL_CONN_CLOSED; @@ -488,17 +382,18 @@ kranal_close_conn_locked (kra_conn_t *conn, int error) { kra_peer_t *peer = conn->rac_peer; - CDEBUG(error == 0 ? D_NET : D_ERROR, - "closing conn to "LPX64": error %d\n", peer->rap_nid, error); + CDEBUG_LIMIT(error == 0 ? D_NET : D_NETERROR, + "closing conn to %s: error %d\n", + libcfs_nid2str(peer->rap_nid), error); - LASSERT (!in_interrupt()); + LASSERT (!cfs_in_interrupt()); LASSERT (conn->rac_state == RANAL_CONN_ESTABLISHED); - LASSERT (!list_empty(&conn->rac_hashlist)); - LASSERT (!list_empty(&conn->rac_list)); + LASSERT (!cfs_list_empty(&conn->rac_hashlist)); + LASSERT (!cfs_list_empty(&conn->rac_list)); - list_del_init(&conn->rac_list); + cfs_list_del_init(&conn->rac_list); - if (list_empty(&peer->rap_conns) && + if (cfs_list_empty(&peer->rap_conns) && peer->rap_persistence == 0) { /* Non-persistent peer with no more conns... */ kranal_unlink_peer_locked(peer); @@ -509,7 +404,7 @@ kranal_close_conn_locked (kra_conn_t *conn, int error) * RDMA. Otherwise if we wait for the full timeout we can also be sure * all RDMA has stopped. */ conn->rac_last_rx = jiffies; - mb(); + cfs_mb(); conn->rac_state = RANAL_CONN_CLOSING; kranal_schedule_conn(conn); /* schedule sending CLOSE */ @@ -523,56 +418,62 @@ kranal_close_conn (kra_conn_t *conn, int error) unsigned long flags; - write_lock_irqsave(&kranal_data.kra_global_lock, flags); + write_lock_irqsave(&kranal_data.kra_global_lock, flags); if (conn->rac_state == RANAL_CONN_ESTABLISHED) kranal_close_conn_locked(conn, error); - write_unlock_irqrestore(&kranal_data.kra_global_lock, flags); + write_unlock_irqrestore(&kranal_data.kra_global_lock, flags); } int kranal_set_conn_params(kra_conn_t *conn, kra_connreq_t *connreq, __u32 peer_ip, int peer_port) { - kra_device_t *dev = conn->rac_device; - unsigned long flags; - RAP_RETURN rrc; - - /* CAVEAT EMPTOR: we're really overloading rac_last_tx + rac_keepalive - * to do RapkCompleteSync() timekeeping (see kibnal_scheduler). 
*/ - conn->rac_last_tx = jiffies; - conn->rac_keepalive = 0; - - /* Schedule conn on rad_new_conns */ - kranal_conn_addref(conn); - spin_lock_irqsave(&dev->rad_lock, flags); - list_add_tail(&conn->rac_schedlist, &dev->rad_new_conns); - wake_up(&dev->rad_waitq); - spin_unlock_irqrestore(&dev->rad_lock, flags); - - rrc = RapkSetRiParams(conn->rac_rihandle, &connreq->racr_riparams); - if (rrc != RAP_SUCCESS) { - CERROR("Error setting riparams from %u.%u.%u.%u/%d: %d\n", - HIPQUAD(peer_ip), peer_port, rrc); - return -ECONNABORTED; - } - - /* Scheduler doesn't touch conn apart from to deschedule and decref it - * after RapkCompleteSync() return success, so conn is all mine */ - - conn->rac_peerstamp = connreq->racr_peerstamp; - conn->rac_peer_connstamp = connreq->racr_connstamp; - conn->rac_keepalive = RANAL_TIMEOUT2KEEPALIVE(connreq->racr_timeout); - kranal_update_reaper_timeout(conn->rac_keepalive); - return 0; + kra_device_t *dev = conn->rac_device; + unsigned long flags; + RAP_RETURN rrc; + + /* CAVEAT EMPTOR: we're really overloading rac_last_tx + rac_keepalive + * to do RapkCompleteSync() timekeeping (see kibnal_scheduler). */ + conn->rac_last_tx = jiffies; + conn->rac_keepalive = 0; + + rrc = RapkSetRiParams(conn->rac_rihandle, &connreq->racr_riparams); + if (rrc != RAP_SUCCESS) { + CERROR("Error setting riparams from %u.%u.%u.%u/%d: %d\n", + HIPQUAD(peer_ip), peer_port, rrc); + return -ECONNABORTED; + } + + /* Schedule conn on rad_new_conns */ + kranal_conn_addref(conn); + spin_lock_irqsave(&dev->rad_lock, flags); + cfs_list_add_tail(&conn->rac_schedlist, &dev->rad_new_conns); + wake_up(&dev->rad_waitq); + spin_unlock_irqrestore(&dev->rad_lock, flags); + + rrc = RapkWaitToConnect(conn->rac_rihandle); + if (rrc != RAP_SUCCESS) { + CERROR("Error waiting to connect to %u.%u.%u.%u/%d: %d\n", + HIPQUAD(peer_ip), peer_port, rrc); + return -ECONNABORTED; + } + + /* Scheduler doesn't touch conn apart from to deschedule and decref it + * after RapkCompleteSync() return success, so conn is all mine */ + + conn->rac_peerstamp = connreq->racr_peerstamp; + conn->rac_peer_connstamp = connreq->racr_connstamp; + conn->rac_keepalive = RANAL_TIMEOUT2KEEPALIVE(connreq->racr_timeout); + kranal_update_reaper_timeout(conn->rac_keepalive); + return 0; } int -kranal_passive_conn_handshake (struct socket *sock, ptl_nid_t *src_nidp, - ptl_nid_t *dst_nidp, kra_conn_t **connp) +kranal_passive_conn_handshake (struct socket *sock, lnet_nid_t *src_nidp, + lnet_nid_t *dst_nidp, kra_conn_t **connp) { - struct sockaddr_in addr; __u32 peer_ip; unsigned int peer_port; kra_connreq_t rx_connreq; @@ -580,33 +481,36 @@ kranal_passive_conn_handshake (struct socket *sock, ptl_nid_t *src_nidp, kra_conn_t *conn; kra_device_t *dev; int rc; - int len; int i; - len = sizeof(addr); - rc = sock->ops->getname(sock, (struct sockaddr *)&addr, &len, 2); + rc = libcfs_sock_getaddr(sock, 1, &peer_ip, &peer_port); if (rc != 0) { CERROR("Can't get peer's IP: %d\n", rc); return rc; } - peer_ip = ntohl(addr.sin_addr.s_addr); - peer_port = ntohs(addr.sin_port); - - if (peer_port >= 1024) { - CERROR("Refusing unprivileged connection from %u.%u.%u.%u/%d\n", - HIPQUAD(peer_ip), peer_port); - return -ECONNREFUSED; - } + rc = kranal_recv_connreq(sock, &rx_connreq, 0); - rc = kranal_recv_connreq(sock, &rx_connreq, - kranal_tunables.kra_listener_timeout); - if (rc != 0) { + if (rc < 0) { CERROR("Can't rx connreq from %u.%u.%u.%u/%d: %d\n", HIPQUAD(peer_ip), peer_port, rc); return rc; } + if (rc > 0) { + /* Request from "new" peer: send reply with my 
MAGIC/VERSION to + * tell her I'm old... */ + kranal_pack_connreq(&tx_connreq, NULL, LNET_NID_ANY); + + rc = libcfs_sock_write(sock, &tx_connreq, sizeof(tx_connreq), + lnet_acceptor_timeout()); + if (rc != 0) + CERROR("Can't tx stub connreq to %u.%u.%u.%u/%d: %d\n", + HIPQUAD(peer_ip), peer_port, rc); + + return -EPROTO; + } + for (i = 0;;i++) { if (i == kranal_data.kra_ndevs) { CERROR("Can't match dev %d from %u.%u.%u.%u/%d\n", @@ -624,7 +528,8 @@ kranal_passive_conn_handshake (struct socket *sock, ptl_nid_t *src_nidp, kranal_pack_connreq(&tx_connreq, conn, rx_connreq.racr_srcnid); - rc = kranal_sock_write(sock, &tx_connreq, sizeof(tx_connreq)); + rc = libcfs_sock_write(sock, &tx_connreq, sizeof(tx_connreq), + lnet_acceptor_timeout()); if (rc != 0) { CERROR("Can't tx connreq to %u.%u.%u.%u/%d: %d\n", HIPQUAD(peer_ip), peer_port, rc); @@ -645,72 +550,8 @@ kranal_passive_conn_handshake (struct socket *sock, ptl_nid_t *src_nidp, } int -ranal_connect_sock(kra_peer_t *peer, struct socket **sockp) -{ - struct sockaddr_in locaddr; - struct sockaddr_in srvaddr; - struct socket *sock; - unsigned int port; - int rc; - - for (port = 1023; port >= 512; port--) { - - memset(&locaddr, 0, sizeof(locaddr)); - locaddr.sin_family = AF_INET; - locaddr.sin_port = htons(port); - locaddr.sin_addr.s_addr = htonl(INADDR_ANY); - - memset (&srvaddr, 0, sizeof (srvaddr)); - srvaddr.sin_family = AF_INET; - srvaddr.sin_port = htons (peer->rap_port); - srvaddr.sin_addr.s_addr = htonl (peer->rap_ip); - - rc = kranal_create_sock(&sock); - if (rc != 0) - return rc; - - rc = sock->ops->bind(sock, - (struct sockaddr *)&locaddr, sizeof(locaddr)); - if (rc != 0) { - sock_release(sock); - - if (rc == -EADDRINUSE) { - CDEBUG(D_NET, "Port %d already in use\n", port); - continue; - } - - CERROR("Can't bind to reserved port %d: %d\n", port, rc); - return rc; - } - - rc = sock->ops->connect(sock, - (struct sockaddr *)&srvaddr, sizeof(srvaddr), - 0); - if (rc == 0) { - *sockp = sock; - return 0; - } - - sock_release(sock); - - if (rc != -EADDRNOTAVAIL) { - CERROR("Can't connect port %d to %u.%u.%u.%u/%d: %d\n", - port, HIPQUAD(peer->rap_ip), peer->rap_port, rc); - return rc; - } - - CDEBUG(D_NET, "Port %d not available for %u.%u.%u.%u/%d\n", - port, HIPQUAD(peer->rap_ip), peer->rap_port); - } - - /* all ports busy */ - return -EHOSTUNREACH; -} - - -int kranal_active_conn_handshake(kra_peer_t *peer, - ptl_nid_t *dst_nidp, kra_conn_t **connp) + lnet_nid_t *dst_nidp, kra_conn_t **connp) { kra_connreq_t connreq; kra_conn_t *conn; @@ -721,7 +562,7 @@ kranal_active_conn_handshake(kra_peer_t *peer, /* spread connections over all devices using both peer NIDs to ensure * all nids use all devices */ - idx = peer->rap_nid + kranal_lib.libnal_ni.ni_pid.nid; + idx = peer->rap_nid + kranal_data.kra_ni->ni_nid; dev = &kranal_data.kra_devices[idx % kranal_data.kra_ndevs]; rc = kranal_create_conn(&conn, dev); @@ -730,7 +571,22 @@ kranal_active_conn_handshake(kra_peer_t *peer, kranal_pack_connreq(&connreq, conn, peer->rap_nid); - rc = ranal_connect_sock(peer, &sock); + if (the_lnet.ln_testprotocompat != 0) { + /* single-shot proto test */ + LNET_LOCK(); + if ((the_lnet.ln_testprotocompat & 1) != 0) { + connreq.racr_version++; + the_lnet.ln_testprotocompat &= ~1; + } + if ((the_lnet.ln_testprotocompat & 2) != 0) { + connreq.racr_magic = LNET_PROTO_MAGIC; + the_lnet.ln_testprotocompat &= ~2; + } + LNET_UNLOCK(); + } + + rc = lnet_connect(&sock, peer->rap_nid, + 0, peer->rap_ip, peer->rap_port); if (rc != 0) goto failed_0; @@ -738,29 +594,31 @@ 
kranal_active_conn_handshake(kra_peer_t *peer, * immediately after accepting a connection, so we connect and then * send immediately. */ - rc = kranal_sock_write(sock, &connreq, sizeof(connreq)); + rc = libcfs_sock_write(sock, &connreq, sizeof(connreq), + lnet_acceptor_timeout()); if (rc != 0) { CERROR("Can't tx connreq to %u.%u.%u.%u/%d: %d\n", HIPQUAD(peer->rap_ip), peer->rap_port, rc); - goto failed_1; + goto failed_2; } - rc = kranal_recv_connreq(sock, &connreq, kranal_tunables.kra_timeout); + rc = kranal_recv_connreq(sock, &connreq, 1); if (rc != 0) { CERROR("Can't rx connreq from %u.%u.%u.%u/%d: %d\n", HIPQUAD(peer->rap_ip), peer->rap_port, rc); - goto failed_1; + goto failed_2; } - sock_release(sock); + libcfs_sock_release(sock); rc = -EPROTO; if (connreq.racr_srcnid != peer->rap_nid) { CERROR("Unexpected srcnid from %u.%u.%u.%u/%d: " - "received "LPX64" expected "LPX64"\n", + "received %s expected %s\n", HIPQUAD(peer->rap_ip), peer->rap_port, - connreq.racr_srcnid, peer->rap_nid); - goto failed_0; + libcfs_nid2str(connreq.racr_srcnid), + libcfs_nid2str(peer->rap_nid)); + goto failed_1; } if (connreq.racr_devid != dev->rad_id) { @@ -768,20 +626,23 @@ kranal_active_conn_handshake(kra_peer_t *peer, "received %d expected %d\n", HIPQUAD(peer->rap_ip), peer->rap_port, connreq.racr_devid, dev->rad_id); - goto failed_0; + goto failed_1; } rc = kranal_set_conn_params(conn, &connreq, peer->rap_ip, peer->rap_port); if (rc != 0) - goto failed_0; + goto failed_1; *connp = conn; *dst_nidp = connreq.racr_dstnid; return 0; + failed_2: + libcfs_sock_release(sock); failed_1: - sock_release(sock); + lnet_connect_console_error(rc, peer->rap_nid, + peer->rap_ip, peer->rap_port); failed_0: kranal_conn_decref(conn); return rc; @@ -792,8 +653,8 @@ kranal_conn_handshake (struct socket *sock, kra_peer_t *peer) { kra_peer_t *peer2; kra_tx_t *tx; - ptl_nid_t peer_nid; - ptl_nid_t dst_nid; + lnet_nid_t peer_nid; + lnet_nid_t dst_nid; unsigned long flags; kra_conn_t *conn; int rc; @@ -809,12 +670,13 @@ kranal_conn_handshake (struct socket *sock, kra_peer_t *peer) if (rc != 0) return rc; - write_lock_irqsave(&kranal_data.kra_global_lock, flags); + write_lock_irqsave(&kranal_data.kra_global_lock, flags); if (!kranal_peer_active(peer)) { /* raced with peer getting unlinked */ - write_unlock_irqrestore(&kranal_data.kra_global_lock, - flags); + write_unlock_irqrestore(&kranal_data. \ + kra_global_lock, + flags); kranal_conn_decref(conn); return -ESTALE; } @@ -830,14 +692,15 @@ kranal_conn_handshake (struct socket *sock, kra_peer_t *peer) return rc; /* assume this is a new peer */ - peer = kranal_create_peer(peer_nid); - if (peer == NULL) { - CERROR("Can't allocate peer for "LPX64"\n", peer_nid); + rc = kranal_create_peer(&peer, peer_nid); + if (rc != 0) { + CERROR("Can't create conn for %s\n", + libcfs_nid2str(peer_nid)); kranal_conn_decref(conn); return -ENOMEM; } - write_lock_irqsave(&kranal_data.kra_global_lock, flags); + write_lock_irqsave(&kranal_data.kra_global_lock, flags); peer2 = kranal_find_peer_locked(peer_nid); if (peer2 == NULL) { @@ -854,12 +717,13 @@ kranal_conn_handshake (struct socket *sock, kra_peer_t *peer) /* Refuse connection if peer thinks we are a different NID. We check * this while holding the global lock, to synch with connection * destruction on NID change. 
*/ - if (dst_nid != kranal_lib.libnal_ni.ni_pid.nid) { - write_unlock_irqrestore(&kranal_data.kra_global_lock, flags); + if (kranal_data.kra_ni->ni_nid != dst_nid) { + write_unlock_irqrestore(&kranal_data.kra_global_lock, + flags); - CERROR("Stale/bad connection with "LPX64 - ": dst_nid "LPX64", expected "LPX64"\n", - peer_nid, dst_nid, kranal_lib.libnal_ni.ni_pid.nid); + CERROR("Stale/bad connection with %s: dst_nid %s, expected %s\n", + libcfs_nid2str(peer_nid), libcfs_nid2str(dst_nid), + libcfs_nid2str(kranal_data.kra_ni->ni_nid)); rc = -ESTALE; goto failed; } @@ -869,19 +733,20 @@ kranal_conn_handshake (struct socket *sock, kra_peer_t *peer) * _don't_ have any blocked txs to complete with failure. */ rc = kranal_conn_isdup_locked(peer, conn); if (rc != 0) { - LASSERT (!list_empty(&peer->rap_conns)); - LASSERT (list_empty(&peer->rap_tx_queue)); - write_unlock_irqrestore(&kranal_data.kra_global_lock, flags); - CWARN("Not creating duplicate connection to "LPX64": %d\n", - peer_nid, rc); + LASSERT (!cfs_list_empty(&peer->rap_conns)); + LASSERT (cfs_list_empty(&peer->rap_tx_queue)); + write_unlock_irqrestore(&kranal_data.kra_global_lock, + flags); + CWARN("Not creating duplicate connection to %s: %d\n", + libcfs_nid2str(peer_nid), rc); rc = 0; goto failed; } if (new_peer) { /* peer table takes my ref on the new peer */ - list_add_tail(&peer->rap_list, - kranal_nid2peerlist(peer_nid)); + cfs_list_add_tail(&peer->rap_list, + kranal_nid2peerlist(peer_nid)); } /* initialise timestamps before reaper looks at them */ @@ -889,32 +754,34 @@ kranal_conn_handshake (struct socket *sock, kra_peer_t *peer) kranal_peer_addref(peer); /* +1 ref for conn */ conn->rac_peer = peer; - list_add_tail(&conn->rac_list, &peer->rap_conns); + cfs_list_add_tail(&conn->rac_list, &peer->rap_conns); kranal_conn_addref(conn); /* +1 ref for conn table */ - list_add_tail(&conn->rac_hashlist, - kranal_cqid2connlist(conn->rac_cqid)); + cfs_list_add_tail(&conn->rac_hashlist, + kranal_cqid2connlist(conn->rac_cqid)); /* Schedule all packets blocking for a connection */ - while (!list_empty(&peer->rap_tx_queue)) { - tx = list_entry(peer->rap_tx_queue.next, - kra_tx_t, tx_list); + while (!cfs_list_empty(&peer->rap_tx_queue)) { + tx = cfs_list_entry(peer->rap_tx_queue.next, + kra_tx_t, tx_list); - list_del(&tx->tx_list); + cfs_list_del(&tx->tx_list); kranal_post_fma(conn, tx); } nstale = kranal_close_stale_conns_locked(peer, conn); - write_unlock_irqrestore(&kranal_data.kra_global_lock, flags); + write_unlock_irqrestore(&kranal_data.kra_global_lock, flags); /* CAVEAT EMPTOR: passive peer can disappear NOW */ if (nstale != 0) - CWARN("Closed %d stale conns to "LPX64"\n", nstale, peer_nid); + CWARN("Closed %d stale conns to %s\n", nstale, + libcfs_nid2str(peer_nid)); - CDEBUG(D_WARNING, "New connection to "LPX64" on devid[%d] = %d\n", - peer_nid, conn->rac_device->rad_idx, conn->rac_device->rad_id); + CWARN("New connection to %s on devid[%d] = %d\n", + libcfs_nid2str(peer_nid), + conn->rac_device->rad_idx, conn->rac_device->rad_id); /* Ensure conn gets checked. 
Transmits may have been queued and an * FMA event may have happened before it got in the cq hash table */ @@ -933,18 +800,20 @@ kranal_connect (kra_peer_t *peer) { kra_tx_t *tx; unsigned long flags; - struct list_head zombies; + cfs_list_t zombies; int rc; LASSERT (peer->rap_connecting); - CDEBUG(D_NET, "About to handshake "LPX64"\n", peer->rap_nid); + CDEBUG(D_NET, "About to handshake %s\n", + libcfs_nid2str(peer->rap_nid)); rc = kranal_conn_handshake(NULL, peer); - CDEBUG(D_NET, "Done handshake "LPX64":%d \n", peer->rap_nid, rc); + CDEBUG(D_NET, "Done handshake %s:%d \n", + libcfs_nid2str(peer->rap_nid), rc); - write_lock_irqsave(&kranal_data.kra_global_lock, flags); + write_lock_irqsave(&kranal_data.kra_global_lock, flags); LASSERT (peer->rap_connecting); peer->rap_connecting = 0; @@ -952,412 +821,185 @@ kranal_connect (kra_peer_t *peer) if (rc == 0) { /* kranal_conn_handshake() queues blocked txs immediately on * success to avoid messages jumping the queue */ - LASSERT (list_empty(&peer->rap_tx_queue)); + LASSERT (cfs_list_empty(&peer->rap_tx_queue)); - /* reset reconnection timeouts */ - peer->rap_reconnect_interval = RANAL_MIN_RECONNECT_INTERVAL; - peer->rap_reconnect_time = CURRENT_SECONDS; + peer->rap_reconnect_interval = 0; /* OK to reconnect at any time */ - write_unlock_irqrestore(&kranal_data.kra_global_lock, flags); + write_unlock_irqrestore(&kranal_data.kra_global_lock, + flags); return; } - LASSERT (peer->rap_reconnect_interval != 0); - peer->rap_reconnect_time = CURRENT_SECONDS + peer->rap_reconnect_interval; - peer->rap_reconnect_interval = MAX(RANAL_MAX_RECONNECT_INTERVAL, - 1 * peer->rap_reconnect_interval); + peer->rap_reconnect_interval *= 2; + peer->rap_reconnect_interval = + MAX(peer->rap_reconnect_interval, + *kranal_tunables.kra_min_reconnect_interval); + peer->rap_reconnect_interval = + MIN(peer->rap_reconnect_interval, + *kranal_tunables.kra_max_reconnect_interval); + + peer->rap_reconnect_time = jiffies + peer->rap_reconnect_interval * HZ; /* Grab all blocked packets while we have the global lock */ - list_add(&zombies, &peer->rap_tx_queue); - list_del_init(&peer->rap_tx_queue); + cfs_list_add(&zombies, &peer->rap_tx_queue); + cfs_list_del_init(&peer->rap_tx_queue); - write_unlock_irqrestore(&kranal_data.kra_global_lock, flags); + write_unlock_irqrestore(&kranal_data.kra_global_lock, flags); - if (list_empty(&zombies)) + if (cfs_list_empty(&zombies)) return; - CWARN("Dropping packets for "LPX64": connection failed\n", - peer->rap_nid); + CNETERR("Dropping packets for %s: connection failed\n", + libcfs_nid2str(peer->rap_nid)); do { - tx = list_entry(zombies.next, kra_tx_t, tx_list); + tx = cfs_list_entry(zombies.next, kra_tx_t, tx_list); - list_del(&tx->tx_list); + cfs_list_del(&tx->tx_list); kranal_tx_done(tx, -EHOSTUNREACH); - } while (!list_empty(&zombies)); + } while (!cfs_list_empty(&zombies)); } void kranal_free_acceptsock (kra_acceptsock_t *ras) { - sock_release(ras->ras_sock); - PORTAL_FREE(ras, sizeof(*ras)); -} - -int -kranal_listener (void *arg) -{ - struct sockaddr_in addr; - wait_queue_t wait; - struct socket *sock; - kra_acceptsock_t *ras; - int port; - char name[16]; - int rc; - unsigned long flags; - - /* Parent thread holds kra_nid_mutex, and is, or is about to - * block on kra_listener_signal */ - - port = kranal_tunables.kra_port; - snprintf(name, sizeof(name), "kranal_lstn%03d", port); - kportal_daemonize(name); - kportal_blockallsigs(); - - init_waitqueue_entry(&wait, current); - - rc = kranal_create_sock(&sock); - if (rc != 0) - goto out_0; 
- - memset(&addr, 0, sizeof(addr)); - addr.sin_family = AF_INET; - addr.sin_port = htons(port); - addr.sin_addr.s_addr = INADDR_ANY; - - rc = sock->ops->bind(sock, (struct sockaddr *)&addr, sizeof(addr)); - if (rc != 0) { - CERROR("Can't bind to port %d\n", port); - goto out_1; - } - - rc = sock->ops->listen(sock, kranal_tunables.kra_backlog); - if (rc != 0) { - CERROR("Can't set listen backlog %d: %d\n", - kranal_tunables.kra_backlog, rc); - goto out_1; - } - - LASSERT (kranal_data.kra_listener_sock == NULL); - kranal_data.kra_listener_sock = sock; - - /* unblock waiting parent */ - LASSERT (kranal_data.kra_listener_shutdown == 0); - up(&kranal_data.kra_listener_signal); - - /* Wake me any time something happens on my socket */ - add_wait_queue(sock->sk->sk_sleep, &wait); - ras = NULL; - - while (kranal_data.kra_listener_shutdown == 0) { - - if (ras == NULL) { - PORTAL_ALLOC(ras, sizeof(*ras)); - if (ras == NULL) { - CERROR("Out of Memory: pausing...\n"); - kranal_pause(HZ); - continue; - } - ras->ras_sock = NULL; - } - - if (ras->ras_sock == NULL) { - ras->ras_sock = sock_alloc(); - if (ras->ras_sock == NULL) { - CERROR("Can't allocate socket: pausing...\n"); - kranal_pause(HZ); - continue; - } - /* XXX this should add a ref to sock->ops->owner, if - * TCP could be a module */ - ras->ras_sock->type = sock->type; - ras->ras_sock->ops = sock->ops; - } - - set_current_state(TASK_INTERRUPTIBLE); - - rc = sock->ops->accept(sock, ras->ras_sock, O_NONBLOCK); - - /* Sleep for socket activity? */ - if (rc == -EAGAIN && - kranal_data.kra_listener_shutdown == 0) - schedule(); - - set_current_state(TASK_RUNNING); - - if (rc == 0) { - spin_lock_irqsave(&kranal_data.kra_connd_lock, flags); - - list_add_tail(&ras->ras_list, - &kranal_data.kra_connd_acceptq); - - spin_unlock_irqrestore(&kranal_data.kra_connd_lock, flags); - wake_up(&kranal_data.kra_connd_waitq); - - ras = NULL; - continue; - } - - if (rc != -EAGAIN) { - CERROR("Accept failed: %d, pausing...\n", rc); - kranal_pause(HZ); - } - } - - if (ras != NULL) { - if (ras->ras_sock != NULL) - sock_release(ras->ras_sock); - PORTAL_FREE(ras, sizeof(*ras)); - } - - rc = 0; - remove_wait_queue(sock->sk->sk_sleep, &wait); - out_1: - sock_release(sock); - kranal_data.kra_listener_sock = NULL; - out_0: - /* set completion status and unblock thread waiting for me - * (parent on startup failure, executioner on normal shutdown) */ - kranal_data.kra_listener_shutdown = rc; - up(&kranal_data.kra_listener_signal); - - return 0; + libcfs_sock_release(ras->ras_sock); + LIBCFS_FREE(ras, sizeof(*ras)); } int -kranal_start_listener (void) -{ - long pid; - int rc; - - CDEBUG(D_NET, "Starting listener\n"); - - /* Called holding kra_nid_mutex: listener stopped */ - LASSERT (kranal_data.kra_listener_sock == NULL); - - kranal_data.kra_listener_shutdown = 0; - pid = kernel_thread(kranal_listener, NULL, 0); - if (pid < 0) { - CERROR("Can't spawn listener: %ld\n", pid); - return (int)pid; - } - - /* Block until listener has started up. 
*/ - down(&kranal_data.kra_listener_signal); - - rc = kranal_data.kra_listener_shutdown; - LASSERT ((rc != 0) == (kranal_data.kra_listener_sock == NULL)); - - CDEBUG(D_NET, "Listener %ld started OK\n", pid); - return rc; -} - -void -kranal_stop_listener(int clear_acceptq) +kranal_accept (lnet_ni_t *ni, struct socket *sock) { - struct list_head zombie_accepts; - unsigned long flags; - kra_acceptsock_t *ras; - - CDEBUG(D_NET, "Stopping listener\n"); - - /* Called holding kra_nid_mutex: listener running */ - LASSERT (kranal_data.kra_listener_sock != NULL); - - kranal_data.kra_listener_shutdown = 1; - wake_up_all(kranal_data.kra_listener_sock->sk->sk_sleep); - - /* Block until listener has torn down. */ - down(&kranal_data.kra_listener_signal); + kra_acceptsock_t *ras; + int rc; + __u32 peer_ip; + int peer_port; + unsigned long flags; - LASSERT (kranal_data.kra_listener_sock == NULL); - CDEBUG(D_NET, "Listener stopped\n"); + rc = libcfs_sock_getaddr(sock, 1, &peer_ip, &peer_port); + LASSERT (rc == 0); /* we succeeded before */ - if (!clear_acceptq) - return; + LIBCFS_ALLOC(ras, sizeof(*ras)); + if (ras == NULL) { + CERROR("ENOMEM allocating connection request from " + "%u.%u.%u.%u\n", HIPQUAD(peer_ip)); + return -ENOMEM; + } - /* Close any unhandled accepts */ - spin_lock_irqsave(&kranal_data.kra_connd_lock, flags); + ras->ras_sock = sock; - list_add(&zombie_accepts, &kranal_data.kra_connd_acceptq); - list_del_init(&kranal_data.kra_connd_acceptq); + spin_lock_irqsave(&kranal_data.kra_connd_lock, flags); - spin_unlock_irqrestore(&kranal_data.kra_connd_lock, flags); + cfs_list_add_tail(&ras->ras_list, &kranal_data.kra_connd_acceptq); + wake_up(&kranal_data.kra_connd_waitq); - while (!list_empty(&zombie_accepts)) { - ras = list_entry(zombie_accepts.next, - kra_acceptsock_t, ras_list); - list_del(&ras->ras_list); - kranal_free_acceptsock(ras); - } + spin_unlock_irqrestore(&kranal_data.kra_connd_lock, flags); + return 0; } int -kranal_listener_procint(ctl_table *table, int write, struct file *filp, - void *buffer, size_t *lenp) +kranal_create_peer (kra_peer_t **peerp, lnet_nid_t nid) { - int *tunable = (int *)table->data; - int old_val; - int rc; - - /* No race with nal initialisation since the nal is setup all the time - * it's loaded. When that changes, change this! 
*/ - LASSERT (kranal_data.kra_init == RANAL_INIT_ALL); - - down(&kranal_data.kra_nid_mutex); - - LASSERT (tunable == &kranal_tunables.kra_port || - tunable == &kranal_tunables.kra_backlog); - old_val = *tunable; - - rc = proc_dointvec(table, write, filp, buffer, lenp); - - if (write && - (*tunable != old_val || - kranal_data.kra_listener_sock == NULL)) { + kra_peer_t *peer; + unsigned long flags; - if (kranal_data.kra_listener_sock != NULL) - kranal_stop_listener(0); + LASSERT (nid != LNET_NID_ANY); - rc = kranal_start_listener(); + LIBCFS_ALLOC(peer, sizeof(*peer)); + if (peer == NULL) + return -ENOMEM; - if (rc != 0) { - CWARN("Unable to start listener with new tunable:" - " reverting to old value\n"); - *tunable = old_val; - kranal_start_listener(); - } - } + memset(peer, 0, sizeof(*peer)); /* zero flags etc */ - up(&kranal_data.kra_nid_mutex); + peer->rap_nid = nid; + cfs_atomic_set(&peer->rap_refcount, 1); /* 1 ref for caller */ - LASSERT (kranal_data.kra_init == RANAL_INIT_ALL); - return rc; -} + CFS_INIT_LIST_HEAD(&peer->rap_list); + CFS_INIT_LIST_HEAD(&peer->rap_connd_list); + CFS_INIT_LIST_HEAD(&peer->rap_conns); + CFS_INIT_LIST_HEAD(&peer->rap_tx_queue); -int -kranal_set_mynid(ptl_nid_t nid) -{ - unsigned long flags; - lib_ni_t *ni = &kranal_lib.libnal_ni; - int rc = 0; + peer->rap_reconnect_interval = 0; /* OK to connect at any time */ - CDEBUG(D_NET, "setting mynid to "LPX64" (old nid="LPX64")\n", - nid, ni->ni_pid.nid); + write_lock_irqsave(&kranal_data.kra_global_lock, flags); - down(&kranal_data.kra_nid_mutex); + if (kranal_data.kra_nonewpeers) { + /* shutdown has started already */ + write_unlock_irqrestore(&kranal_data.kra_global_lock, + flags); - if (nid == ni->ni_pid.nid) { - /* no change of NID */ - up(&kranal_data.kra_nid_mutex); - return 0; + LIBCFS_FREE(peer, sizeof(*peer)); + CERROR("Can't create peer: network shutdown\n"); + return -ESHUTDOWN; } - if (kranal_data.kra_listener_sock != NULL) - kranal_stop_listener(1); - - write_lock_irqsave(&kranal_data.kra_global_lock, flags); - kranal_data.kra_peerstamp++; - ni->ni_pid.nid = nid; - write_unlock_irqrestore(&kranal_data.kra_global_lock, flags); - - /* Delete all existing peers and their connections after new - * NID/connstamp set to ensure no old connections in our brave - * new world. 
*/ - kranal_del_peer(PTL_NID_ANY, 0); - - if (nid != PTL_NID_ANY) - rc = kranal_start_listener(); + cfs_atomic_inc(&kranal_data.kra_npeers); - up(&kranal_data.kra_nid_mutex); - return rc; -} - -kra_peer_t * -kranal_create_peer (ptl_nid_t nid) -{ - kra_peer_t *peer; - - LASSERT (nid != PTL_NID_ANY); - - PORTAL_ALLOC(peer, sizeof(*peer)); - if (peer == NULL) - return NULL; - - memset(peer, 0, sizeof(*peer)); /* zero flags etc */ + write_unlock_irqrestore(&kranal_data.kra_global_lock, flags); - peer->rap_nid = nid; - atomic_set(&peer->rap_refcount, 1); /* 1 ref for caller */ - - INIT_LIST_HEAD(&peer->rap_list); - INIT_LIST_HEAD(&peer->rap_connd_list); - INIT_LIST_HEAD(&peer->rap_conns); - INIT_LIST_HEAD(&peer->rap_tx_queue); - - peer->rap_reconnect_time = CURRENT_SECONDS; - peer->rap_reconnect_interval = RANAL_MIN_RECONNECT_INTERVAL; - - atomic_inc(&kranal_data.kra_npeers); - return peer; + *peerp = peer; + return 0; } void kranal_destroy_peer (kra_peer_t *peer) { - CDEBUG(D_NET, "peer "LPX64" %p deleted\n", peer->rap_nid, peer); + CDEBUG(D_NET, "peer %s %p deleted\n", + libcfs_nid2str(peer->rap_nid), peer); - LASSERT (atomic_read(&peer->rap_refcount) == 0); + LASSERT (cfs_atomic_read(&peer->rap_refcount) == 0); LASSERT (peer->rap_persistence == 0); LASSERT (!kranal_peer_active(peer)); LASSERT (!peer->rap_connecting); - LASSERT (list_empty(&peer->rap_conns)); - LASSERT (list_empty(&peer->rap_tx_queue)); - LASSERT (list_empty(&peer->rap_connd_list)); + LASSERT (cfs_list_empty(&peer->rap_conns)); + LASSERT (cfs_list_empty(&peer->rap_tx_queue)); + LASSERT (cfs_list_empty(&peer->rap_connd_list)); - PORTAL_FREE(peer, sizeof(*peer)); + LIBCFS_FREE(peer, sizeof(*peer)); /* NB a peer's connections keep a reference on their peer until * they are destroyed, so we can be assured that _all_ state to do * with this peer has been cleaned up when its refcount drops to * zero. */ - atomic_dec(&kranal_data.kra_npeers); + cfs_atomic_dec(&kranal_data.kra_npeers); } kra_peer_t * -kranal_find_peer_locked (ptl_nid_t nid) +kranal_find_peer_locked (lnet_nid_t nid) { - struct list_head *peer_list = kranal_nid2peerlist(nid); - struct list_head *tmp; + cfs_list_t *peer_list = kranal_nid2peerlist(nid); + cfs_list_t *tmp; kra_peer_t *peer; - list_for_each (tmp, peer_list) { + cfs_list_for_each (tmp, peer_list) { - peer = list_entry(tmp, kra_peer_t, rap_list); + peer = cfs_list_entry(tmp, kra_peer_t, rap_list); LASSERT (peer->rap_persistence > 0 || /* persistent peer */ - !list_empty(&peer->rap_conns)); /* active conn */ + !cfs_list_empty(&peer->rap_conns)); /* active conn */ if (peer->rap_nid != nid) continue; - CDEBUG(D_NET, "got peer [%p] -> "LPX64" (%d)\n", - peer, nid, atomic_read(&peer->rap_refcount)); + CDEBUG(D_NET, "got peer [%p] -> %s (%d)\n", + peer, libcfs_nid2str(nid), + cfs_atomic_read(&peer->rap_refcount)); return peer; } return NULL; } kra_peer_t * -kranal_find_peer (ptl_nid_t nid) +kranal_find_peer (lnet_nid_t nid) { kra_peer_t *peer; - read_lock(&kranal_data.kra_global_lock); + read_lock(&kranal_data.kra_global_lock); peer = kranal_find_peer_locked(nid); if (peer != NULL) /* +1 ref for caller? 
*/ kranal_peer_addref(peer); - read_unlock(&kranal_data.kra_global_lock); + read_unlock(&kranal_data.kra_global_lock); return peer; } @@ -1366,32 +1008,32 @@ void kranal_unlink_peer_locked (kra_peer_t *peer) { LASSERT (peer->rap_persistence == 0); - LASSERT (list_empty(&peer->rap_conns)); + LASSERT (cfs_list_empty(&peer->rap_conns)); LASSERT (kranal_peer_active(peer)); - list_del_init(&peer->rap_list); + cfs_list_del_init(&peer->rap_list); /* lose peerlist's ref */ kranal_peer_decref(peer); } int -kranal_get_peer_info (int index, ptl_nid_t *nidp, __u32 *ipp, int *portp, +kranal_get_peer_info (int index, lnet_nid_t *nidp, __u32 *ipp, int *portp, int *persistencep) { kra_peer_t *peer; - struct list_head *ptmp; + cfs_list_t *ptmp; int i; - read_lock(&kranal_data.kra_global_lock); + read_lock(&kranal_data.kra_global_lock); for (i = 0; i < kranal_data.kra_peer_hash_size; i++) { - list_for_each(ptmp, &kranal_data.kra_peers[i]) { + cfs_list_for_each(ptmp, &kranal_data.kra_peers[i]) { - peer = list_entry(ptmp, kra_peer_t, rap_list); + peer = cfs_list_entry(ptmp, kra_peer_t, rap_list); LASSERT (peer->rap_persistence > 0 || - !list_empty(&peer->rap_conns)); + !cfs_list_empty(&peer->rap_conns)); if (index-- > 0) continue; @@ -1401,30 +1043,31 @@ kranal_get_peer_info (int index, ptl_nid_t *nidp, __u32 *ipp, int *portp, *portp = peer->rap_port; *persistencep = peer->rap_persistence; - read_unlock(&kranal_data.kra_global_lock); + read_unlock(&kranal_data.kra_global_lock); return 0; } } - read_unlock(&kranal_data.kra_global_lock); + read_unlock(&kranal_data.kra_global_lock); return -ENOENT; } int -kranal_add_persistent_peer (ptl_nid_t nid, __u32 ip, int port) +kranal_add_persistent_peer (lnet_nid_t nid, __u32 ip, int port) { unsigned long flags; kra_peer_t *peer; kra_peer_t *peer2; + int rc; - if (nid == PTL_NID_ANY) + if (nid == LNET_NID_ANY) return -EINVAL; - peer = kranal_create_peer(nid); - if (peer == NULL) - return -ENOMEM; + rc = kranal_create_peer(&peer, nid); + if (rc != 0) + return rc; - write_lock_irqsave(&kranal_data.kra_global_lock, flags); + write_lock_irqsave(&kranal_data.kra_global_lock, flags); peer2 = kranal_find_peer_locked(nid); if (peer2 != NULL) { @@ -1432,7 +1075,7 @@ kranal_add_persistent_peer (ptl_nid_t nid, __u32 ip, int port) peer = peer2; } else { /* peer table takes existing ref on peer */ - list_add_tail(&peer->rap_list, + cfs_list_add_tail(&peer->rap_list, kranal_nid2peerlist(nid)); } @@ -1440,30 +1083,24 @@ kranal_add_persistent_peer (ptl_nid_t nid, __u32 ip, int port) peer->rap_port = port; peer->rap_persistence++; - write_unlock_irqrestore(&kranal_data.kra_global_lock, flags); + write_unlock_irqrestore(&kranal_data.kra_global_lock, flags); return 0; } void -kranal_del_peer_locked (kra_peer_t *peer, int single_share) +kranal_del_peer_locked (kra_peer_t *peer) { - struct list_head *ctmp; - struct list_head *cnxt; + cfs_list_t *ctmp; + cfs_list_t *cnxt; kra_conn_t *conn; - if (!single_share) - peer->rap_persistence = 0; - else if (peer->rap_persistence > 0) - peer->rap_persistence--; - - if (peer->rap_persistence != 0) - return; + peer->rap_persistence = 0; - if (list_empty(&peer->rap_conns)) { + if (cfs_list_empty(&peer->rap_conns)) { kranal_unlink_peer_locked(peer); } else { - list_for_each_safe(ctmp, cnxt, &peer->rap_conns) { - conn = list_entry(ctmp, kra_conn_t, rac_list); + cfs_list_for_each_safe(ctmp, cnxt, &peer->rap_conns) { + conn = cfs_list_entry(ctmp, kra_conn_t, rac_list); kranal_close_conn_locked(conn, 0); } @@ -1472,20 +1109,20 @@ kranal_del_peer_locked 
(kra_peer_t *peer, int single_share) } int -kranal_del_peer (ptl_nid_t nid, int single_share) +kranal_del_peer (lnet_nid_t nid) { unsigned long flags; - struct list_head *ptmp; - struct list_head *pnxt; + cfs_list_t *ptmp; + cfs_list_t *pnxt; kra_peer_t *peer; int lo; int hi; int i; int rc = -ENOENT; - write_lock_irqsave(&kranal_data.kra_global_lock, flags); + write_lock_irqsave(&kranal_data.kra_global_lock, flags); - if (nid != PTL_NID_ANY) + if (nid != LNET_NID_ANY) lo = hi = kranal_nid2peerlist(nid) - kranal_data.kra_peers; else { lo = 0; @@ -1493,23 +1130,20 @@ kranal_del_peer (ptl_nid_t nid, int single_share) } for (i = lo; i <= hi; i++) { - list_for_each_safe (ptmp, pnxt, &kranal_data.kra_peers[i]) { - peer = list_entry(ptmp, kra_peer_t, rap_list); + cfs_list_for_each_safe (ptmp, pnxt, &kranal_data.kra_peers[i]) { + peer = cfs_list_entry(ptmp, kra_peer_t, rap_list); LASSERT (peer->rap_persistence > 0 || - !list_empty(&peer->rap_conns)); + !cfs_list_empty(&peer->rap_conns)); - if (!(nid == PTL_NID_ANY || peer->rap_nid == nid)) + if (!(nid == LNET_NID_ANY || peer->rap_nid == nid)) continue; - kranal_del_peer_locked(peer, single_share); + kranal_del_peer_locked(peer); rc = 0; /* matched something */ - - if (single_share) - goto out; } } - out: - write_unlock_irqrestore(&kranal_data.kra_global_lock, flags); + + write_unlock_irqrestore(&kranal_data.kra_global_lock, flags); return rc; } @@ -1518,36 +1152,37 @@ kra_conn_t * kranal_get_conn_by_idx (int index) { kra_peer_t *peer; - struct list_head *ptmp; + cfs_list_t *ptmp; kra_conn_t *conn; - struct list_head *ctmp; + cfs_list_t *ctmp; int i; - read_lock (&kranal_data.kra_global_lock); + read_lock(&kranal_data.kra_global_lock); for (i = 0; i < kranal_data.kra_peer_hash_size; i++) { - list_for_each (ptmp, &kranal_data.kra_peers[i]) { + cfs_list_for_each (ptmp, &kranal_data.kra_peers[i]) { - peer = list_entry(ptmp, kra_peer_t, rap_list); + peer = cfs_list_entry(ptmp, kra_peer_t, rap_list); LASSERT (peer->rap_persistence > 0 || - !list_empty(&peer->rap_conns)); + !cfs_list_empty(&peer->rap_conns)); - list_for_each (ctmp, &peer->rap_conns) { + cfs_list_for_each (ctmp, &peer->rap_conns) { if (index-- > 0) continue; - conn = list_entry(ctmp, kra_conn_t, rac_list); - CDEBUG(D_NET, "++conn[%p] -> "LPX64" (%d)\n", - conn, conn->rac_peer->rap_nid, - atomic_read(&conn->rac_refcount)); - atomic_inc(&conn->rac_refcount); - read_unlock(&kranal_data.kra_global_lock); + conn = cfs_list_entry(ctmp, kra_conn_t, + rac_list); + CDEBUG(D_NET, "++conn[%p] -> %s (%d)\n", conn, + libcfs_nid2str(conn->rac_peer->rap_nid), + cfs_atomic_read(&conn->rac_refcount)); + cfs_atomic_inc(&conn->rac_refcount); + read_unlock(&kranal_data.kra_global_lock); return conn; } } } - read_unlock(&kranal_data.kra_global_lock); + read_unlock(&kranal_data.kra_global_lock); return NULL; } @@ -1555,12 +1190,12 @@ int kranal_close_peer_conns_locked (kra_peer_t *peer, int why) { kra_conn_t *conn; - struct list_head *ctmp; - struct list_head *cnxt; + cfs_list_t *ctmp; + cfs_list_t *cnxt; int count = 0; - list_for_each_safe (ctmp, cnxt, &peer->rap_conns) { - conn = list_entry(ctmp, kra_conn_t, rac_list); + cfs_list_for_each_safe (ctmp, cnxt, &peer->rap_conns) { + conn = cfs_list_entry(ctmp, kra_conn_t, rac_list); count++; kranal_close_conn_locked(conn, why); @@ -1570,20 +1205,20 @@ kranal_close_peer_conns_locked (kra_peer_t *peer, int why) } int -kranal_close_matching_conns (ptl_nid_t nid) +kranal_close_matching_conns (lnet_nid_t nid) { unsigned long flags; kra_peer_t *peer; - struct list_head 
*ptmp;
-        struct list_head  *pnxt;
+        cfs_list_t        *ptmp;
+        cfs_list_t        *pnxt;
         int                lo;
         int                hi;
         int                i;
         int                count = 0;
 
-        write_lock_irqsave(&kranal_data.kra_global_lock, flags);
+	write_lock_irqsave(&kranal_data.kra_global_lock, flags);
 
-        if (nid != PTL_NID_ANY)
+        if (nid != LNET_NID_ANY)
                 lo = hi = kranal_nid2peerlist(nid) - kranal_data.kra_peers;
         else {
                 lo = 0;
@@ -1591,88 +1226,88 @@ kranal_close_matching_conns (ptl_nid_t nid)
         }
 
         for (i = lo; i <= hi; i++) {
-                list_for_each_safe (ptmp, pnxt, &kranal_data.kra_peers[i]) {
+                cfs_list_for_each_safe (ptmp, pnxt, &kranal_data.kra_peers[i]) {
 
-                        peer = list_entry(ptmp, kra_peer_t, rap_list);
+                        peer = cfs_list_entry(ptmp, kra_peer_t, rap_list);
                         LASSERT (peer->rap_persistence > 0 ||
-                                 !list_empty(&peer->rap_conns));
+                                 !cfs_list_empty(&peer->rap_conns));
 
-                        if (!(nid == PTL_NID_ANY || nid == peer->rap_nid))
+                        if (!(nid == LNET_NID_ANY || nid == peer->rap_nid))
                                 continue;
 
                         count += kranal_close_peer_conns_locked(peer, 0);
                 }
         }
 
-        write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);
+	write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);
 
         /* wildcards always succeed */
-        if (nid == PTL_NID_ANY)
+        if (nid == LNET_NID_ANY)
                 return 0;
 
         return (count == 0) ? -ENOENT : 0;
 }
 
 int
-kranal_cmd(struct portals_cfg *pcfg, void * private)
+kranal_ctl(lnet_ni_t *ni, unsigned int cmd, void *arg)
 {
-        int rc = -EINVAL;
+        struct libcfs_ioctl_data *data = arg;
+        int                       rc = -EINVAL;
 
-        LASSERT (pcfg != NULL);
+        LASSERT (ni == kranal_data.kra_ni);
 
-        switch(pcfg->pcfg_command) {
-        case NAL_CMD_GET_PEER: {
-                ptl_nid_t   nid = 0;
+        switch(cmd) {
+        case IOC_LIBCFS_GET_PEER: {
+                lnet_nid_t   nid = 0;
                 __u32       ip = 0;
                 int         port = 0;
                 int         share_count = 0;
 
-                rc = kranal_get_peer_info(pcfg->pcfg_count,
+                rc = kranal_get_peer_info(data->ioc_count,
                                           &nid, &ip, &port, &share_count);
-                pcfg->pcfg_nid   = nid;
-                pcfg->pcfg_size  = 0;
-                pcfg->pcfg_id    = ip;
-                pcfg->pcfg_misc  = port;
-                pcfg->pcfg_count = 0;
-                pcfg->pcfg_wait  = share_count;
+                data->ioc_nid    = nid;
+                data->ioc_count  = share_count;
+                data->ioc_u32[0] = ip;
+                data->ioc_u32[1] = port;
                 break;
         }
-        case NAL_CMD_ADD_PEER: {
-                rc = kranal_add_persistent_peer(pcfg->pcfg_nid,
-                                                pcfg->pcfg_id, /* IP */
-                                                pcfg->pcfg_misc); /* port */
+        case IOC_LIBCFS_ADD_PEER: {
+                rc = kranal_add_persistent_peer(data->ioc_nid,
+                                                data->ioc_u32[0], /* IP */
+                                                data->ioc_u32[1]); /* port */
                 break;
         }
-        case NAL_CMD_DEL_PEER: {
-                rc = kranal_del_peer(pcfg->pcfg_nid,
-                                     /* flags == single_share */
-                                     pcfg->pcfg_flags != 0);
+        case IOC_LIBCFS_DEL_PEER: {
+                rc = kranal_del_peer(data->ioc_nid);
                 break;
         }
-        case NAL_CMD_GET_CONN: {
-                kra_conn_t *conn = kranal_get_conn_by_idx(pcfg->pcfg_count);
+        case IOC_LIBCFS_GET_CONN: {
+                kra_conn_t *conn = kranal_get_conn_by_idx(data->ioc_count);
 
                 if (conn == NULL)
                         rc = -ENOENT;
                 else {
                         rc = 0;
-                        pcfg->pcfg_nid   = conn->rac_peer->rap_nid;
-                        pcfg->pcfg_id    = conn->rac_device->rad_id;
-                        pcfg->pcfg_misc  = 0;
-                        pcfg->pcfg_flags = 0;
+                        data->ioc_nid    = conn->rac_peer->rap_nid;
+                        data->ioc_u32[0] = conn->rac_device->rad_id;
                         kranal_conn_decref(conn);
                 }
                 break;
         }
-        case NAL_CMD_CLOSE_CONNECTION: {
-                rc = kranal_close_matching_conns(pcfg->pcfg_nid);
+        case IOC_LIBCFS_CLOSE_CONNECTION: {
+                rc = kranal_close_matching_conns(data->ioc_nid);
                 break;
         }
-        case NAL_CMD_REGISTER_MYNID: {
-                if (pcfg->pcfg_nid == PTL_NID_ANY)
+        case IOC_LIBCFS_REGISTER_MYNID: {
+                /* Ignore if this is a noop */
+                if (data->ioc_nid == ni->ni_nid) {
+                        rc = 0;
+                } else {
+                        CERROR("obsolete IOC_LIBCFS_REGISTER_MYNID: %s(%s)\n",
+                               libcfs_nid2str(data->ioc_nid),
                                libcfs_nid2str(ni->ni_nid));
                         rc = -EINVAL;
-                else
-                        rc = kranal_set_mynid(pcfg->pcfg_nid);
+                }
                 break;
         }
         }
@@ -1681,56 +1316,51 @@ kranal_cmd(struct portals_cfg *pcfg, void * private)
 }
 
 void
-kranal_free_txdescs(struct list_head *freelist)
+kranal_free_txdescs(cfs_list_t *freelist)
 {
         kra_tx_t    *tx;
 
-        while (!list_empty(freelist)) {
-                tx = list_entry(freelist->next, kra_tx_t, tx_list);
+        while (!cfs_list_empty(freelist)) {
+                tx = cfs_list_entry(freelist->next, kra_tx_t, tx_list);
 
-                list_del(&tx->tx_list);
-                PORTAL_FREE(tx->tx_phys, PTL_MD_MAX_IOV * sizeof(*tx->tx_phys));
-                PORTAL_FREE(tx, sizeof(*tx));
+                cfs_list_del(&tx->tx_list);
+                LIBCFS_FREE(tx->tx_phys, LNET_MAX_IOV * sizeof(*tx->tx_phys));
+                LIBCFS_FREE(tx, sizeof(*tx));
         }
 }
 
 int
-kranal_alloc_txdescs(struct list_head *freelist, int n)
+kranal_alloc_txdescs(cfs_list_t *freelist, int n)
 {
-        int          isnblk = (freelist == &kranal_data.kra_idle_nblk_txs);
         int          i;
         kra_tx_t    *tx;
 
-        LASSERT (freelist == &kranal_data.kra_idle_txs ||
-                 freelist == &kranal_data.kra_idle_nblk_txs);
-        LASSERT (list_empty(freelist));
+        LASSERT (freelist == &kranal_data.kra_idle_txs);
+        LASSERT (cfs_list_empty(freelist));
 
         for (i = 0; i < n; i++) {
-                PORTAL_ALLOC(tx, sizeof(*tx));
+                LIBCFS_ALLOC(tx, sizeof(*tx));
                 if (tx == NULL) {
-                        CERROR("Can't allocate %stx[%d]\n",
-                               isnblk ? "nblk " : "", i);
+                        CERROR("Can't allocate tx[%d]\n", i);
                         kranal_free_txdescs(freelist);
                         return -ENOMEM;
                 }
 
-                PORTAL_ALLOC(tx->tx_phys,
-                             PTL_MD_MAX_IOV * sizeof(*tx->tx_phys));
+                LIBCFS_ALLOC(tx->tx_phys,
                             LNET_MAX_IOV * sizeof(*tx->tx_phys));
                 if (tx->tx_phys == NULL) {
-                        CERROR("Can't allocate %stx[%d]->tx_phys\n",
-                               isnblk ? "nblk " : "", i);
+                        CERROR("Can't allocate tx[%d]->tx_phys\n", i);
 
-                        PORTAL_FREE(tx, sizeof(*tx));
+                        LIBCFS_FREE(tx, sizeof(*tx));
                         kranal_free_txdescs(freelist);
                         return -ENOMEM;
                 }
 
-                tx->tx_isnblk = isnblk;
                 tx->tx_buftype = RANAL_BUF_NONE;
                 tx->tx_msg.ram_type = RANAL_MSG_NONE;
 
-                list_add(&tx->tx_list, freelist);
+                cfs_list_add(&tx->tx_list, freelist);
         }
 
         return 0;
@@ -1739,7 +1369,7 @@ kranal_alloc_txdescs(struct list_head *freelist, int n)
 int
 kranal_device_init(int id, kra_device_t *dev)
 {
-        const int         total_ntx = RANAL_NTX + RANAL_NTX_NBLK;
+        int               total_ntx = *kranal_tunables.kra_ntx;
         RAP_RETURN        rrc;
 
         dev->rad_id = id;
@@ -1760,16 +1390,17 @@ kranal_device_init(int id, kra_device_t *dev)
         rrc = RapkCreateCQ(dev->rad_handle, total_ntx, RAP_CQTYPE_SEND,
                            &dev->rad_rdma_cqh);
         if (rrc != RAP_SUCCESS) {
-                CERROR("Can't create rdma cq size %d"
-                       " for device %d: %d\n", total_ntx, id, rrc);
+                CERROR("Can't create rdma cq size %d for device %d: %d\n",
+                       total_ntx, id, rrc);
                 goto failed_1;
         }
 
-        rrc = RapkCreateCQ(dev->rad_handle, RANAL_FMA_CQ_SIZE, RAP_CQTYPE_RECV,
-                           &dev->rad_fma_cqh);
+        rrc = RapkCreateCQ(dev->rad_handle,
+                           *kranal_tunables.kra_fma_cq_size,
+                           RAP_CQTYPE_RECV, &dev->rad_fma_cqh);
         if (rrc != RAP_SUCCESS) {
-                CERROR("Can't create fma cq size %d"
-                       " for device %d: %d\n", RANAL_FMA_CQ_SIZE, id, rrc);
+                CERROR("Can't create fma cq size %d for device %d: %d\n",
+                       *kranal_tunables.kra_fma_cq_size, id, rrc);
                 goto failed_2;
         }
 
@@ -1786,6 +1417,13 @@ kranal_device_init(int id, kra_device_t *dev)
 void
 kranal_device_fini(kra_device_t *dev)
 {
+        LASSERT (cfs_list_empty(&dev->rad_ready_conns));
+        LASSERT (cfs_list_empty(&dev->rad_new_conns));
+        LASSERT (dev->rad_nphysmap == 0);
+        LASSERT (dev->rad_nppphysmap == 0);
+        LASSERT (dev->rad_nvirtmap == 0);
+        LASSERT (dev->rad_nobvirtmap == 0);
+        LASSERT(dev->rad_scheduler == NULL);
         RapkDestroyCQ(dev->rad_handle, dev->rad_fma_cqh);
         RapkDestroyCQ(dev->rad_handle,
                       dev->rad_rdma_cqh);
@@ -1793,21 +1431,16 @@ kranal_device_fini(kra_device_t *dev)
 }
 
 void
-kranal_api_shutdown (nal_t *nal)
+kranal_shutdown (lnet_ni_t *ni)
 {
         int           i;
         unsigned long flags;
 
-        if (nal->nal_refct != 0) {
-                /* This module got the first ref */
-                PORTAL_MODULE_UNUSE;
-                return;
-        }
-
         CDEBUG(D_MALLOC, "before NAL cleanup: kmem %d\n",
-               atomic_read(&portal_kmemory));
+               cfs_atomic_read(&libcfs_kmemory));
 
-        LASSERT (nal == &kranal_api);
+        LASSERT (ni == kranal_data.kra_ni);
+        LASSERT (ni->ni_data == &kranal_data);
 
         switch (kranal_data.kra_init) {
         default:
@@ -1815,90 +1448,100 @@ kranal_api_shutdown (nal_t *nal)
                 LBUG();
 
         case RANAL_INIT_ALL:
-                /* stop calls to nal_cmd */
-                libcfs_nal_cmd_unregister(RANAL);
-                /* No new persistent peers */
+                /* Prevent new peers from being created */
+                write_lock_irqsave(&kranal_data.kra_global_lock, flags);
+                kranal_data.kra_nonewpeers = 1;
+                write_unlock_irqrestore(&kranal_data.kra_global_lock,
                                         flags);
 
-                /* resetting my NID to unadvertises me, removes my
-                 * listener and nukes all current peers */
-                kranal_set_mynid(PTL_NID_ANY);
-                /* no new peers or conns */
+                /* Remove all existing peers from the peer table */
+                kranal_del_peer(LNET_NID_ANY);
 
-                /* Wait for all peer/conn state to clean up */
+                /* Wait for pending conn reqs to be handled */
                 i = 2;
-                while (atomic_read(&kranal_data.kra_nconns) != 0 ||
-                       atomic_read(&kranal_data.kra_npeers) != 0) {
+                spin_lock_irqsave(&kranal_data.kra_connd_lock, flags);
+                while (!cfs_list_empty(&kranal_data.kra_connd_acceptq)) {
+                        spin_unlock_irqrestore(&kranal_data.kra_connd_lock,
                                                flags);
                         i++;
-                        CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* power of 2? */
-                               "waiting for %d peers and %d conns to close down\n",
-                               atomic_read(&kranal_data.kra_npeers),
-                               atomic_read(&kranal_data.kra_nconns));
-                        kranal_pause(HZ);
+                        CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* 2**n */
                               "waiting for conn reqs to clean up\n");
+                        cfs_pause(cfs_time_seconds(1));
+
+                        spin_lock_irqsave(&kranal_data.kra_connd_lock,
                                          flags);
                 }
-                /* fall through */
+                spin_unlock_irqrestore(&kranal_data.kra_connd_lock, flags);
 
-        case RANAL_INIT_LIB:
-                lib_fini(&kranal_lib);
+                /* Wait for all peers to be freed */
+                i = 2;
+                while (cfs_atomic_read(&kranal_data.kra_npeers) != 0) {
+                        i++;
+                        CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* 2**n */
+                               "waiting for %d peers to close down\n",
+                               cfs_atomic_read(&kranal_data.kra_npeers));
+                        cfs_pause(cfs_time_seconds(1));
+                }
                 /* fall through */
 
         case RANAL_INIT_DATA:
                 break;
         }
 
-        /* Conn/Peer state all cleaned up BEFORE setting shutdown, so threads
-         * don't have to worry about shutdown races */
-        LASSERT (atomic_read(&kranal_data.kra_nconns) == 0);
-        LASSERT (atomic_read(&kranal_data.kra_npeers) == 0);
+        /* Peer state all cleaned up BEFORE setting shutdown, so threads don't
+         * have to worry about shutdown races. NB connections may be created
+         * while there are still active connds, but these will be temporary
+         * since peer creation always fails after the listener has started to
+         * shut down. */
+        LASSERT (cfs_atomic_read(&kranal_data.kra_npeers) == 0);
 
-        /* flag threads to terminate; wake and wait for them to die */
+        /* Flag threads to terminate */
         kranal_data.kra_shutdown = 1;
 
-        for (i = 0; i < kranal_data.kra_ndevs; i++) {
-                kra_device_t *dev = &kranal_data.kra_devices[i];
-
-                LASSERT (list_empty(&dev->rad_ready_conns));
-                LASSERT (list_empty(&dev->rad_new_conns));
+	for (i = 0; i < kranal_data.kra_ndevs; i++) {
+		kra_device_t *dev = &kranal_data.kra_devices[i];
 
-                spin_lock_irqsave(&dev->rad_lock, flags);
-                wake_up(&dev->rad_waitq);
-                spin_unlock_irqrestore(&dev->rad_lock, flags);
-        }
+		spin_lock_irqsave(&dev->rad_lock, flags);
		wake_up(&dev->rad_waitq);
+		spin_unlock_irqrestore(&dev->rad_lock, flags);
+	}
 
-        spin_lock_irqsave(&kranal_data.kra_reaper_lock, flags);
-        wake_up_all(&kranal_data.kra_reaper_waitq);
-        spin_unlock_irqrestore(&kranal_data.kra_reaper_lock, flags);
+	spin_lock_irqsave(&kranal_data.kra_reaper_lock, flags);
+	wake_up_all(&kranal_data.kra_reaper_waitq);
+	spin_unlock_irqrestore(&kranal_data.kra_reaper_lock, flags);
 
-        LASSERT (list_empty(&kranal_data.kra_connd_peers));
-        spin_lock_irqsave(&kranal_data.kra_connd_lock, flags);
-        wake_up_all(&kranal_data.kra_connd_waitq);
-        spin_unlock_irqrestore(&kranal_data.kra_connd_lock, flags);
+	LASSERT (cfs_list_empty(&kranal_data.kra_connd_peers));
+	spin_lock_irqsave(&kranal_data.kra_connd_lock, flags);
+	wake_up_all(&kranal_data.kra_connd_waitq);
+	spin_unlock_irqrestore(&kranal_data.kra_connd_lock, flags);
 
+        /* Wait for threads to exit */
         i = 2;
-        while (atomic_read(&kranal_data.kra_nthreads) != 0) {
+        while (cfs_atomic_read(&kranal_data.kra_nthreads) != 0) {
                 i++;
                 CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* power of 2? */
                        "Waiting for %d threads to terminate\n",
-                       atomic_read(&kranal_data.kra_nthreads));
-                kranal_pause(HZ);
+                       cfs_atomic_read(&kranal_data.kra_nthreads));
+                cfs_pause(cfs_time_seconds(1));
         }
 
-        LASSERT (atomic_read(&kranal_data.kra_npeers) == 0);
+        LASSERT (cfs_atomic_read(&kranal_data.kra_npeers) == 0);
         if (kranal_data.kra_peers != NULL) {
                 for (i = 0; i < kranal_data.kra_peer_hash_size; i++)
-                        LASSERT (list_empty(&kranal_data.kra_peers[i]));
+                        LASSERT (cfs_list_empty(&kranal_data.kra_peers[i]));
 
-                PORTAL_FREE(kranal_data.kra_peers,
-                            sizeof (struct list_head) *
+                LIBCFS_FREE(kranal_data.kra_peers,
+                            sizeof (cfs_list_t) *
                            kranal_data.kra_peer_hash_size);
         }
 
-        LASSERT (atomic_read(&kranal_data.kra_nconns) == 0);
+        LASSERT (cfs_atomic_read(&kranal_data.kra_nconns) == 0);
         if (kranal_data.kra_conns != NULL) {
                 for (i = 0; i < kranal_data.kra_conn_hash_size; i++)
-                        LASSERT (list_empty(&kranal_data.kra_conns[i]));
+                        LASSERT (cfs_list_empty(&kranal_data.kra_conns[i]));
 
-                PORTAL_FREE(kranal_data.kra_conns,
-                            sizeof (struct list_head) *
+                LIBCFS_FREE(kranal_data.kra_conns,
+                            sizeof (cfs_list_t) *
                            kranal_data.kra_conn_hash_size);
         }
 
@@ -1906,132 +1549,125 @@ kranal_api_shutdown (nal_t *nal)
                 kranal_device_fini(&kranal_data.kra_devices[i]);
 
         kranal_free_txdescs(&kranal_data.kra_idle_txs);
-        kranal_free_txdescs(&kranal_data.kra_idle_nblk_txs);
 
         CDEBUG(D_MALLOC, "after NAL cleanup: kmem %d\n",
-               atomic_read(&portal_kmemory));
-        printk(KERN_INFO "Lustre: RapidArray NAL unloaded (final mem %d)\n",
-               atomic_read(&portal_kmemory));
+               cfs_atomic_read(&libcfs_kmemory));
 
-        kranal_data.kra_init = RANAL_INIT_NOTHING;
+	kranal_data.kra_init = RANAL_INIT_NOTHING;
+	module_put(THIS_MODULE);
 }
 
 int
-kranal_api_startup (nal_t *nal, ptl_pid_t requested_pid,
-                    ptl_ni_limits_t *requested_limits,
-                    ptl_ni_limits_t *actual_limits)
+kranal_startup (lnet_ni_t *ni)
 {
         struct timeval    tv;
-        ptl_process_id_t  process_id;
-        int               pkmem = atomic_read(&portal_kmemory);
+        int               pkmem = cfs_atomic_read(&libcfs_kmemory);
         int               rc;
         int               i;
         kra_device_t     *dev;
+        char              name[16];
 
-        LASSERT (nal == &kranal_api);
+        LASSERT (ni->ni_lnd == &the_kralnd);
 
-        if (nal->nal_refct != 0) {
-                if (actual_limits != NULL)
-                        *actual_limits = kranal_lib.libnal_ni.ni_actual_limits;
-                /* This module got the first ref */
-                PORTAL_MODULE_USE;
-                return PTL_OK;
+        /* Only 1 instance supported */
+        if (kranal_data.kra_init != RANAL_INIT_NOTHING) {
+                CERROR ("Only 1 instance supported\n");
+                return -EPERM;
         }
 
-        LASSERT (kranal_data.kra_init == RANAL_INIT_NOTHING);
+        if (lnet_set_ip_niaddr(ni) != 0) {
+                CERROR ("Can't determine my NID\n");
+                return -EPERM;
+        }
+
+        if (*kranal_tunables.kra_credits > *kranal_tunables.kra_ntx) {
+                CERROR ("Can't set credits(%d) > ntx(%d)\n",
+                        *kranal_tunables.kra_credits,
+                        *kranal_tunables.kra_ntx);
+                return -EINVAL;
+        }
+
         memset(&kranal_data, 0, sizeof(kranal_data)); /* zero pointers, flags etc */
 
-        /* CAVEAT EMPTOR: Every 'Fma' message includes the sender's NID and
-         * a unique (for all time) connstamp so we can uniquely identify
-         * the sender.  The connstamp is an incrementing counter
-         * initialised with seconds + microseconds at startup time.  So we
-         * rely on NOT creating connections more frequently on average than
-         * 1MHz to ensure we don't use old connstamps when we reboot. */
-        do_gettimeofday(&tv);
-        kranal_data.kra_connstamp =
-        kranal_data.kra_peerstamp = (((__u64)tv.tv_sec) * 1000000) + tv.tv_usec;
+        ni->ni_maxtxcredits = *kranal_tunables.kra_credits;
+        ni->ni_peertxcredits = *kranal_tunables.kra_peercredits;
 
-        init_MUTEX(&kranal_data.kra_nid_mutex);
-        init_MUTEX_LOCKED(&kranal_data.kra_listener_signal);
+        ni->ni_data = &kranal_data;
+        kranal_data.kra_ni = ni;
 
-        rwlock_init(&kranal_data.kra_global_lock);
+        /* CAVEAT EMPTOR: Every 'Fma' message includes the sender's NID and
+         * a unique (for all time) connstamp so we can uniquely identify
+         * the sender. The connstamp is an incrementing counter
+         * initialised with seconds + microseconds at startup time. So we
+         * rely on NOT creating connections more frequently on average than
+         * 1MHz to ensure we don't use old connstamps when we reboot. */
+        do_gettimeofday(&tv);
+        kranal_data.kra_connstamp =
+        kranal_data.kra_peerstamp = (((__u64)tv.tv_sec) * 1000000) + tv.tv_usec;
 
-        for (i = 0; i < RANAL_MAXDEVS; i++ ) {
-                kra_device_t  *dev = &kranal_data.kra_devices[i];
+	rwlock_init(&kranal_data.kra_global_lock);
 
-                dev->rad_idx = i;
-                INIT_LIST_HEAD(&dev->rad_ready_conns);
-                INIT_LIST_HEAD(&dev->rad_new_conns);
-                init_waitqueue_head(&dev->rad_waitq);
-                spin_lock_init(&dev->rad_lock);
-        }
+	for (i = 0; i < RANAL_MAXDEVS; i++ ) {
+		kra_device_t  *dev = &kranal_data.kra_devices[i];
+
+		dev->rad_idx = i;
+		CFS_INIT_LIST_HEAD(&dev->rad_ready_conns);
+		CFS_INIT_LIST_HEAD(&dev->rad_new_conns);
+		init_waitqueue_head(&dev->rad_waitq);
+		spin_lock_init(&dev->rad_lock);
+	}
 
-        kranal_data.kra_new_min_timeout = MAX_SCHEDULE_TIMEOUT;
-        init_waitqueue_head(&kranal_data.kra_reaper_waitq);
-        spin_lock_init(&kranal_data.kra_reaper_lock);
+	kranal_data.kra_new_min_timeout = MAX_SCHEDULE_TIMEOUT;
+	init_waitqueue_head(&kranal_data.kra_reaper_waitq);
+	spin_lock_init(&kranal_data.kra_reaper_lock);
 
-        INIT_LIST_HEAD(&kranal_data.kra_connd_acceptq);
-        INIT_LIST_HEAD(&kranal_data.kra_connd_peers);
-        init_waitqueue_head(&kranal_data.kra_connd_waitq);
-        spin_lock_init(&kranal_data.kra_connd_lock);
+	CFS_INIT_LIST_HEAD(&kranal_data.kra_connd_acceptq);
+	CFS_INIT_LIST_HEAD(&kranal_data.kra_connd_peers);
+	init_waitqueue_head(&kranal_data.kra_connd_waitq);
+	spin_lock_init(&kranal_data.kra_connd_lock);
 
-        INIT_LIST_HEAD(&kranal_data.kra_idle_txs);
-        INIT_LIST_HEAD(&kranal_data.kra_idle_nblk_txs);
-        init_waitqueue_head(&kranal_data.kra_idle_tx_waitq);
-        spin_lock_init(&kranal_data.kra_tx_lock);
+	CFS_INIT_LIST_HEAD(&kranal_data.kra_idle_txs);
+	spin_lock_init(&kranal_data.kra_tx_lock);
 
-        /* OK to call kranal_api_shutdown() to cleanup now */
-        kranal_data.kra_init = RANAL_INIT_DATA;
+	/* OK to call kranal_api_shutdown() to cleanup now */
+	kranal_data.kra_init = RANAL_INIT_DATA;
+	try_module_get(THIS_MODULE);
 
         kranal_data.kra_peer_hash_size = RANAL_PEER_HASH_SIZE;
-        PORTAL_ALLOC(kranal_data.kra_peers,
-                     sizeof(struct list_head) * kranal_data.kra_peer_hash_size);
+        LIBCFS_ALLOC(kranal_data.kra_peers,
+                     sizeof(cfs_list_t) *
+                     kranal_data.kra_peer_hash_size);
         if (kranal_data.kra_peers == NULL)
                 goto failed;
 
         for (i = 0; i < kranal_data.kra_peer_hash_size; i++)
-                INIT_LIST_HEAD(&kranal_data.kra_peers[i]);
+                CFS_INIT_LIST_HEAD(&kranal_data.kra_peers[i]);
 
         kranal_data.kra_conn_hash_size = RANAL_PEER_HASH_SIZE;
-        PORTAL_ALLOC(kranal_data.kra_conns,
-                     sizeof(struct list_head) * kranal_data.kra_conn_hash_size);
+        LIBCFS_ALLOC(kranal_data.kra_conns,
                     sizeof(cfs_list_t) *
                     kranal_data.kra_conn_hash_size);
         if (kranal_data.kra_conns == NULL)
                 goto failed;
 
         for (i = 0; i < kranal_data.kra_conn_hash_size; i++)
-                INIT_LIST_HEAD(&kranal_data.kra_conns[i]);
-
-        rc = kranal_alloc_txdescs(&kranal_data.kra_idle_txs, RANAL_NTX);
-        if (rc != 0)
-                goto failed;
+                CFS_INIT_LIST_HEAD(&kranal_data.kra_conns[i]);
 
-        rc = kranal_alloc_txdescs(&kranal_data.kra_idle_nblk_txs,RANAL_NTX_NBLK);
+        rc = kranal_alloc_txdescs(&kranal_data.kra_idle_txs,
                                  *kranal_tunables.kra_ntx);
         if (rc != 0)
                 goto failed;
 
-        process_id.pid = requested_pid;
-        process_id.nid = PTL_NID_ANY;           /* don't know my NID yet */
-
-        rc = lib_init(&kranal_lib, nal, process_id,
-                      requested_limits, actual_limits);
-        if (rc != PTL_OK) {
-                CERROR("lib_init failed: error %d\n", rc);
-                goto failed;
-        }
-
-        /* lib interface initialised */
-        kranal_data.kra_init = RANAL_INIT_LIB;
-        /*****************************************************/
-
-        rc = kranal_thread_start(kranal_reaper, NULL);
+        rc = kranal_thread_start(kranal_reaper, NULL, "kranal_reaper");
         if (rc != 0) {
                 CERROR("Can't spawn ranal reaper: %d\n", rc);
                 goto failed;
         }
 
-        for (i = 0; i < RANAL_N_CONND; i++) {
-                rc = kranal_thread_start(kranal_connd, (void *)(unsigned long)i);
+        for (i = 0; i < *kranal_tunables.kra_n_connd; i++) {
+                snprintf(name, sizeof(name), "kranal_connd_%02d", i);
+                rc = kranal_thread_start(kranal_connd,
+                                         (void *)(unsigned long)i, name);
                 if (rc != 0) {
                         CERROR("Can't spawn ranal connd[%d]: %d\n",
                                i, rc);
@@ -2041,16 +1677,24 @@ kranal_api_startup (nal_t *nal, ptl_pid_t requested_pid,
 
         LASSERT (kranal_data.kra_ndevs == 0);
 
-        for (i = 0; i < sizeof(kranal_devids)/sizeof(kranal_devids[0]); i++) {
-                LASSERT (i < RANAL_MAXDEVS);
-
+        /* Use all available RapidArray devices */
+        for (i = 0; i < RANAL_MAXDEVS; i++) {
                 dev = &kranal_data.kra_devices[kranal_data.kra_ndevs];
 
                 rc = kranal_device_init(kranal_devids[i], dev);
                 if (rc == 0)
                         kranal_data.kra_ndevs++;
+        }
 
-                rc = kranal_thread_start(kranal_scheduler, dev);
+        if (kranal_data.kra_ndevs == 0) {
+                CERROR("Can't initialise any RapidArray devices\n");
+                goto failed;
+        }
+
+        for (i = 0; i < kranal_data.kra_ndevs; i++) {
+                dev = &kranal_data.kra_devices[i];
+                snprintf(name, sizeof(name), "kranal_sd_%02d", dev->rad_idx);
+                rc = kranal_thread_start(kranal_scheduler, dev, name);
                 if (rc != 0) {
                         CERROR("Can't spawn ranal scheduler[%d]: %d\n",
                                i, rc);
@@ -2058,39 +1702,23 @@ kranal_api_startup (nal_t *nal, ptl_pid_t requested_pid,
                 }
         }
 
-        if (kranal_data.kra_ndevs == 0)
-                goto failed;
-
-        rc = libcfs_nal_cmd_register(RANAL, &kranal_cmd, NULL);
-        if (rc != 0) {
-                CERROR("Can't initialise command interface (rc = %d)\n", rc);
-                goto failed;
-        }
-
         /* flag everything initialised */
         kranal_data.kra_init = RANAL_INIT_ALL;
         /*****************************************************/
 
-        CDEBUG(D_MALLOC, "initial kmem %d\n", atomic_read(&portal_kmemory));
-        printk(KERN_INFO "Lustre: RapidArray NAL loaded "
-               "(initial mem %d)\n", pkmem);
-
-        return PTL_OK;
+        CDEBUG(D_MALLOC, "initial kmem %d\n", pkmem);
+        return 0;
 
  failed:
-        kranal_api_shutdown(&kranal_api);
-        return PTL_FAIL;
+        kranal_shutdown(ni);
+        return -ENETDOWN;
 }
 
 void __exit
 kranal_module_fini (void)
 {
-        if (kranal_tunables.kra_sysctl != NULL)
-                unregister_sysctl_table(kranal_tunables.kra_sysctl);
-
-        PtlNIFini(kranal_ni);
-
-        ptl_unregister_nal(RANAL);
+        lnet_unregister_lnd(&the_kralnd);
+        kranal_tunables_fini();
 }
 
 int __init
@@ -2098,51 +1726,17 @@ kranal_module_init (void)
 {
         int    rc;
 
-        /* the following must be sizeof(int) for
-         * proc_dointvec/kranal_listener_procint() */
-        LASSERT (sizeof(kranal_tunables.kra_timeout) == sizeof(int));
-        LASSERT (sizeof(kranal_tunables.kra_listener_timeout) == sizeof(int));
-        LASSERT (sizeof(kranal_tunables.kra_backlog) == sizeof(int));
-        LASSERT (sizeof(kranal_tunables.kra_port) == sizeof(int));
-        LASSERT (sizeof(kranal_tunables.kra_max_immediate) == sizeof(int));
-
-        kranal_api.nal_ni_init = kranal_api_startup;
-        kranal_api.nal_ni_fini = kranal_api_shutdown;
-
-        /* Initialise dynamic tunables to defaults once only */
-        kranal_tunables.kra_timeout = RANAL_TIMEOUT;
-        kranal_tunables.kra_listener_timeout = RANAL_LISTENER_TIMEOUT;
-        kranal_tunables.kra_backlog = RANAL_BACKLOG;
-        kranal_tunables.kra_port = RANAL_PORT;
-        kranal_tunables.kra_max_immediate = RANAL_MAX_IMMEDIATE;
-
-        rc = ptl_register_nal(RANAL, &kranal_api);
-        if (rc != PTL_OK) {
-                CERROR("Can't register RANAL: %d\n", rc);
-                return -ENOMEM; /* or something... */
-        }
-
-        /* Pure gateways want the NAL started up at module load time... */
-        rc = PtlNIInit(RANAL, LUSTRE_SRV_PTL_PID, NULL, NULL, &kranal_ni);
-        if (rc != PTL_OK && rc != PTL_IFACE_DUP) {
-                ptl_unregister_nal(RANAL);
-                return -ENODEV;
-        }
-
-        kranal_tunables.kra_sysctl =
-                register_sysctl_table(kranal_top_ctl_table, 0);
-        if (kranal_tunables.kra_sysctl == NULL) {
-                CERROR("Can't register sysctl table\n");
-                PtlNIFini(kranal_ni);
-                ptl_unregister_nal(RANAL);
-                return -ENOMEM;
-        }
+        rc = kranal_tunables_init();
+        if (rc != 0)
+                return rc;
+
+        lnet_register_lnd(&the_kralnd);
 
         return 0;
 }
 
-MODULE_AUTHOR("Cluster File Systems, Inc. <info@clusterfs.com>");
-MODULE_DESCRIPTION("Kernel RapidArray NAL v0.01");
+MODULE_AUTHOR("Sun Microsystems, Inc. <http://www.lustre.org/>");
+MODULE_DESCRIPTION("Kernel RapidArray LND v0.01");
 MODULE_LICENSE("GPL");
 
 module_init(kranal_module_init);
 module_exit(kranal_module_fini);
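
Editor's note on the teardown pattern in this patch: kranal_startup() records how far initialisation got in kranal_data.kra_init (RANAL_INIT_NOTHING -> RANAL_INIT_DATA -> RANAL_INIT_ALL; the intermediate RANAL_INIT_LIB stage is removed here), and kranal_shutdown() switches on that value with deliberate fall-through, so the "failed:" path can bail out with a single call that only undoes the stages that actually completed. The throttled CDEBUG test "(i & (-i)) == i" is true exactly when i is a power of two (i & -i isolates the lowest set bit), so the "waiting for ..." warning fires on iterations 2, 4, 8, ... rather than once per second.

Below is a minimal user-space sketch of the same staged-init / fall-through-teardown idiom. It is an illustration only: the names (sketch_startup, sketch_shutdown, INIT_*) are hypothetical and nothing in it is part of this patch or of the LNet API.

/* sketch.c - staged startup with fall-through teardown, after the
 * pattern used by kranal_startup()/kranal_shutdown() above. */
#include <stdio.h>
#include <stdlib.h>

enum init_state {                /* mirrors RANAL_INIT_NOTHING/DATA/ALL */
        INIT_NOTHING,            /* nothing set up yet */
        INIT_DATA,               /* core data structures allocated */
        INIT_ALL                 /* fully up */
};

static enum init_state init_state = INIT_NOTHING;
static int *peer_table;

static void
sketch_shutdown(void)
{
        switch (init_state) {
        default:                 /* corrupt state: analogous to LBUG() */
                abort();

        case INIT_ALL:
                printf("stopping worker threads\n");
                /* fall through */

        case INIT_DATA:
                free(peer_table);
                peer_table = NULL;
                /* fall through */

        case INIT_NOTHING:
                break;
        }

        init_state = INIT_NOTHING;
}

static int
sketch_startup(void)
{
        peer_table = calloc(128, sizeof(*peer_table));
        if (peer_table == NULL)
                goto failed;
        /* OK to call sketch_shutdown() to clean up from here on */
        init_state = INIT_DATA;

        printf("starting worker threads\n");
        init_state = INIT_ALL;
        return 0;

 failed:
        sketch_shutdown();
        return -1;
}

int
main(void)
{
        if (sketch_startup() != 0)
                return 1;
        sketch_shutdown();
        return 0;
}

Build and run with, e.g., "gcc -Wall -o sketch sketch.c && ./sketch". The property the driver relies on at its "failed:" label is exactly the one shown here: teardown is safe to call from any recorded stage, because each case only releases what the preceding stages actually set up.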