X-Git-Url: https://git.whamcloud.com/?a=blobdiff_plain;f=lnet%2Fklnds%2Fmxlnd%2Fmxlnd_cb.c;h=f2a05681c3f448eb3f08e4ff1d8cf26821135e74;hb=3315344dfa4c25006e213afbd8872b5cc5c9a02c;hp=ddc3bed4b30568e7eec0ff74c32e69ae556c01f3;hpb=c2d26c52d413e7525fcc419dafbb3e381d4b1505;p=fs%2Flustre-release.git diff --git a/lnet/klnds/mxlnd/mxlnd_cb.c b/lnet/klnds/mxlnd/mxlnd_cb.c index ddc3bed..f2a0568 100644 --- a/lnet/klnds/mxlnd/mxlnd_cb.c +++ b/lnet/klnds/mxlnd/mxlnd_cb.c @@ -1,6 +1,4 @@ -/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*- - * vim:expandtab:shiftwidth=8:tabstop=8: - * +/* * GPL HEADER START * * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. @@ -26,9 +24,11 @@ * GPL HEADER END */ /* - * Copyright 2008 Sun Microsystems, Inc. All rights reserved + * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved. * Use is subject to license terms. * + * Copyright (c) 2012, Intel Corporation. + * * Copyright (C) 2006 Myricom, Inc. */ /* @@ -166,36 +166,36 @@ mxlnd_parse_match(u64 match, u8 *msg_type, u8 *error, u64 *cookie) kmx_ctx_t * mxlnd_get_idle_rx(kmx_conn_t *conn) { - struct list_head *rxs = NULL; + cfs_list_t *rxs = NULL; kmx_ctx_t *rx = NULL; LASSERT(conn != NULL); rxs = &conn->mxk_rx_idle; - spin_lock(&conn->mxk_lock); + spin_lock(&conn->mxk_lock); - if (list_empty (rxs)) { - spin_unlock(&conn->mxk_lock); - return NULL; - } + if (cfs_list_empty (rxs)) { + spin_unlock(&conn->mxk_lock); + return NULL; + } - rx = list_entry (rxs->next, kmx_ctx_t, mxc_list); - list_del_init(&rx->mxc_list); - spin_unlock(&conn->mxk_lock); + rx = cfs_list_entry (rxs->next, kmx_ctx_t, mxc_list); + cfs_list_del_init(&rx->mxc_list); + spin_unlock(&conn->mxk_lock); #if MXLND_DEBUG if (rx->mxc_get != rx->mxc_put) { - CDEBUG(D_NETERROR, "*** RX get (%llu) != put (%llu) ***\n", rx->mxc_get, rx->mxc_put); - CDEBUG(D_NETERROR, "*** incarnation= %lld ***\n", rx->mxc_incarnation); - CDEBUG(D_NETERROR, "*** deadline= %ld ***\n", rx->mxc_deadline); - CDEBUG(D_NETERROR, "*** state= %s ***\n", mxlnd_ctxstate_to_str(rx->mxc_state)); - CDEBUG(D_NETERROR, "*** listed?= %d ***\n", !list_empty(&rx->mxc_list)); - CDEBUG(D_NETERROR, "*** nid= 0x%llx ***\n", rx->mxc_nid); - CDEBUG(D_NETERROR, "*** peer= 0x%p ***\n", rx->mxc_peer); - CDEBUG(D_NETERROR, "*** msg_type= %s ***\n", mxlnd_msgtype_to_str(rx->mxc_msg_type)); - CDEBUG(D_NETERROR, "*** cookie= 0x%llx ***\n", rx->mxc_cookie); - CDEBUG(D_NETERROR, "*** nob= %d ***\n", rx->mxc_nob); + CNETERR("*** RX get (%llu) != put (%llu) ***\n", rx->mxc_get, rx->mxc_put); + CNETERR("*** incarnation= %lld ***\n", rx->mxc_incarnation); + CNETERR("*** deadline= %ld ***\n", rx->mxc_deadline); + CNETERR("*** state= %s ***\n", mxlnd_ctxstate_to_str(rx->mxc_state)); + CNETERR("*** listed?= %d ***\n", !cfs_list_empty(&rx->mxc_list)); + CNETERR("*** nid= 0x%llx ***\n", rx->mxc_nid); + CNETERR("*** peer= 0x%p ***\n", rx->mxc_peer); + CNETERR("*** msg_type= %s ***\n", mxlnd_msgtype_to_str(rx->mxc_msg_type)); + CNETERR("*** cookie= 0x%llx ***\n", rx->mxc_cookie); + CNETERR("*** nob= %d ***\n", rx->mxc_nob); } #endif LASSERT (rx->mxc_get == rx->mxc_put); @@ -213,7 +213,7 @@ int mxlnd_put_idle_rx(kmx_ctx_t *rx) { kmx_conn_t *conn = rx->mxc_conn; - struct list_head *rxs = &conn->mxk_rx_idle; + cfs_list_t *rxs = &conn->mxk_rx_idle; LASSERT(rx->mxc_type == MXLND_REQ_RX); @@ -222,29 +222,29 @@ mxlnd_put_idle_rx(kmx_ctx_t *rx) rx->mxc_put++; LASSERT(rx->mxc_get == rx->mxc_put); - spin_lock(&conn->mxk_lock); - list_add(&rx->mxc_list, rxs); - 
spin_unlock(&conn->mxk_lock); - return 0; + spin_lock(&conn->mxk_lock); + cfs_list_add(&rx->mxc_list, rxs); + spin_unlock(&conn->mxk_lock); + return 0; } kmx_ctx_t * mxlnd_get_idle_tx(void) { - struct list_head *tmp = &kmxlnd_data.kmx_tx_idle; - kmx_ctx_t *tx = NULL; + cfs_list_t *tmp = &kmxlnd_data.kmx_tx_idle; + kmx_ctx_t *tx = NULL; - spin_lock(&kmxlnd_data.kmx_tx_idle_lock); + spin_lock(&kmxlnd_data.kmx_tx_idle_lock); - if (list_empty (&kmxlnd_data.kmx_tx_idle)) { - CDEBUG(D_NETERROR, "%d txs in use\n", kmxlnd_data.kmx_tx_used); - spin_unlock(&kmxlnd_data.kmx_tx_idle_lock); - return NULL; - } + if (cfs_list_empty (&kmxlnd_data.kmx_tx_idle)) { + CNETERR("%d txs in use\n", kmxlnd_data.kmx_tx_used); + spin_unlock(&kmxlnd_data.kmx_tx_idle_lock); + return NULL; + } tmp = &kmxlnd_data.kmx_tx_idle; - tx = list_entry (tmp->next, kmx_ctx_t, mxc_list); - list_del_init(&tx->mxc_list); + tx = cfs_list_entry (tmp->next, kmx_ctx_t, mxc_list); + cfs_list_del_init(&tx->mxc_list); /* Allocate a new completion cookie. It might not be needed, * but we've got a lock right now and we're unlikely to @@ -254,7 +254,7 @@ mxlnd_get_idle_tx(void) kmxlnd_data.kmx_tx_next_cookie = 1; } kmxlnd_data.kmx_tx_used++; - spin_unlock(&kmxlnd_data.kmx_tx_idle_lock); + spin_unlock(&kmxlnd_data.kmx_tx_idle_lock); LASSERT (tx->mxc_get == tx->mxc_put); @@ -298,21 +298,23 @@ mxlnd_put_idle_tx(kmx_ctx_t *tx) tx->mxc_put++; LASSERT(tx->mxc_get == tx->mxc_put); - spin_lock(&kmxlnd_data.kmx_tx_idle_lock); - list_add_tail(&tx->mxc_list, &kmxlnd_data.kmx_tx_idle); - kmxlnd_data.kmx_tx_used--; - spin_unlock(&kmxlnd_data.kmx_tx_idle_lock); + spin_lock(&kmxlnd_data.kmx_tx_idle_lock); + cfs_list_add_tail(&tx->mxc_list, &kmxlnd_data.kmx_tx_idle); + kmxlnd_data.kmx_tx_used--; + spin_unlock(&kmxlnd_data.kmx_tx_idle_lock); - if (lntmsg[0] != NULL) lnet_finalize(kmxlnd_data.kmx_ni, lntmsg[0], result); - if (lntmsg[1] != NULL) lnet_finalize(kmxlnd_data.kmx_ni, lntmsg[1], result); - return 0; + if (lntmsg[0] != NULL) + lnet_finalize(kmxlnd_data.kmx_ni, lntmsg[0], result); + if (lntmsg[1] != NULL) + lnet_finalize(kmxlnd_data.kmx_ni, lntmsg[1], result); + return 0; } void mxlnd_connparams_free(kmx_connparams_t *cp) { - LASSERT(list_empty(&cp->mxr_list)); + LASSERT(cfs_list_empty(&cp->mxr_list)); MXLND_FREE(cp, sizeof(*cp)); return; } @@ -327,7 +329,7 @@ mxlnd_connparams_alloc(kmx_connparams_t **cp, void *context, MXLND_ALLOC(c, sizeof(*c)); if (!c) return -ENOMEM; - INIT_LIST_HEAD(&c->mxr_list); + CFS_INIT_LIST_HEAD(&c->mxr_list); c->mxr_context = context; c->mxr_epa = epa; c->mxr_match = match; @@ -343,8 +345,8 @@ mxlnd_connparams_alloc(kmx_connparams_t **cp, void *context, static inline void mxlnd_set_conn_status(kmx_conn_t *conn, int status) { - conn->mxk_status = status; - mb(); + conn->mxk_status = status; + smp_mb(); } /** @@ -361,11 +363,11 @@ mxlnd_conn_free_locked(kmx_conn_t *conn) kmx_peer_t *peer = conn->mxk_peer; CDEBUG(D_NET, "freeing conn 0x%p *****\n", conn); - LASSERT (list_empty (&conn->mxk_tx_credit_queue) && - list_empty (&conn->mxk_tx_free_queue) && - list_empty (&conn->mxk_pending)); - if (!list_empty(&conn->mxk_list)) { - list_del_init(&conn->mxk_list); + LASSERT (cfs_list_empty (&conn->mxk_tx_credit_queue) && + cfs_list_empty (&conn->mxk_tx_free_queue) && + cfs_list_empty (&conn->mxk_pending)); + if (!cfs_list_empty(&conn->mxk_list)) { + cfs_list_del_init(&conn->mxk_list); if (peer->mxp_conn == conn) { peer->mxp_conn = NULL; if (valid) { @@ -379,7 +381,7 @@ mxlnd_conn_free_locked(kmx_conn_t *conn) } } /* unlink from global 
list and drop its ref */ - list_del_init(&peer->mxp_list); + cfs_list_del_init(&peer->mxp_list); mxlnd_peer_decref(peer); } } @@ -421,73 +423,73 @@ mxlnd_conn_cancel_pending_rxs(kmx_conn_t *conn) do { found = 0; - spin_lock(&conn->mxk_lock); - list_for_each_entry_safe(ctx, next, &conn->mxk_pending, mxc_list) { - list_del_init(&ctx->mxc_list); + spin_lock(&conn->mxk_lock); + cfs_list_for_each_entry_safe(ctx, next, &conn->mxk_pending, + mxc_list) { + cfs_list_del_init(&ctx->mxc_list); if (ctx->mxc_type == MXLND_REQ_RX) { found = 1; mxret = mx_cancel(kmxlnd_data.kmx_endpt, &ctx->mxc_mxreq, &result); if (mxret != MX_SUCCESS) { - CDEBUG(D_NETERROR, "mx_cancel() returned %s (%d)\n", mx_strerror(mxret), mxret); + CNETERR("mx_cancel() returned %s (%d)\n", mx_strerror(mxret), mxret); } if (result == 1) { ctx->mxc_errno = -ECONNABORTED; ctx->mxc_state = MXLND_CTX_CANCELED; - spin_unlock(&conn->mxk_lock); - spin_lock(&kmxlnd_data.kmx_conn_lock); + spin_unlock(&conn->mxk_lock); + spin_lock(&kmxlnd_data.kmx_conn_lock); /* we may be holding the global lock, * move to orphan list so that it can free it */ - list_add_tail(&ctx->mxc_list, - &kmxlnd_data.kmx_orphan_msgs); + cfs_list_add_tail(&ctx->mxc_list, + &kmxlnd_data.kmx_orphan_msgs); count++; - spin_unlock(&kmxlnd_data.kmx_conn_lock); - spin_lock(&conn->mxk_lock); - } - break; - } - } - spin_unlock(&conn->mxk_lock); - } - while (found); + spin_unlock(&kmxlnd_data.kmx_conn_lock); + spin_lock(&conn->mxk_lock); + } + break; + } + } + spin_unlock(&conn->mxk_lock); + } while (found); - return count; + return count; } int mxlnd_cancel_queued_txs(kmx_conn_t *conn) { - int count = 0; - struct list_head *tmp = NULL; + int count = 0; + cfs_list_t *tmp = NULL; - spin_lock(&conn->mxk_lock); - while (!list_empty(&conn->mxk_tx_free_queue) || - !list_empty(&conn->mxk_tx_credit_queue)) { + spin_lock(&conn->mxk_lock); + while (!cfs_list_empty(&conn->mxk_tx_free_queue) || + !cfs_list_empty(&conn->mxk_tx_credit_queue)) { kmx_ctx_t *tx = NULL; - if (!list_empty(&conn->mxk_tx_free_queue)) { + if (!cfs_list_empty(&conn->mxk_tx_free_queue)) { tmp = &conn->mxk_tx_free_queue; } else { tmp = &conn->mxk_tx_credit_queue; } - tx = list_entry(tmp->next, kmx_ctx_t, mxc_list); - list_del_init(&tx->mxc_list); - spin_unlock(&conn->mxk_lock); - tx->mxc_errno = -ECONNABORTED; - tx->mxc_state = MXLND_CTX_CANCELED; - /* move to orphan list and then abort */ - spin_lock(&kmxlnd_data.kmx_conn_lock); - list_add_tail(&tx->mxc_list, &kmxlnd_data.kmx_orphan_msgs); - spin_unlock(&kmxlnd_data.kmx_conn_lock); - count++; - spin_lock(&conn->mxk_lock); - } - spin_unlock(&conn->mxk_lock); + tx = cfs_list_entry(tmp->next, kmx_ctx_t, mxc_list); + cfs_list_del_init(&tx->mxc_list); + spin_unlock(&conn->mxk_lock); + tx->mxc_errno = -ECONNABORTED; + tx->mxc_state = MXLND_CTX_CANCELED; + /* move to orphan list and then abort */ + spin_lock(&kmxlnd_data.kmx_conn_lock); + cfs_list_add_tail(&tx->mxc_list, &kmxlnd_data.kmx_orphan_msgs); + spin_unlock(&kmxlnd_data.kmx_conn_lock); + count++; + spin_lock(&conn->mxk_lock); + } + spin_unlock(&conn->mxk_lock); - return count; + return count; } void @@ -514,24 +516,24 @@ mxlnd_send_message(mx_endpoint_addr_t epa, u8 msg_type, int error, u64 cookie) void mxlnd_conn_disconnect(kmx_conn_t *conn, int mx_dis, int send_bye) { - mx_endpoint_addr_t epa = conn->mxk_epa; - int valid = !mxlnd_endpoint_addr_null(epa); - int count = 0; + mx_endpoint_addr_t epa = conn->mxk_epa; + int valid = !mxlnd_endpoint_addr_null(epa); + int count = 0; - spin_lock(&conn->mxk_lock); - if 
(conn->mxk_status == MXLND_CONN_DISCONNECT) { - spin_unlock(&conn->mxk_lock); - return; - } - mxlnd_set_conn_status(conn, MXLND_CONN_DISCONNECT); - conn->mxk_timeout = 0; - spin_unlock(&conn->mxk_lock); + spin_lock(&conn->mxk_lock); + if (conn->mxk_status == MXLND_CONN_DISCONNECT) { + spin_unlock(&conn->mxk_lock); + return; + } + mxlnd_set_conn_status(conn, MXLND_CONN_DISCONNECT); + conn->mxk_timeout = 0; + spin_unlock(&conn->mxk_lock); - count = mxlnd_cancel_queued_txs(conn); - count += mxlnd_conn_cancel_pending_rxs(conn); + count = mxlnd_cancel_queued_txs(conn); + count += mxlnd_conn_cancel_pending_rxs(conn); - if (count) - up(&kmxlnd_data.kmx_conn_sem); /* let connd call kmxlnd_abort_msgs() */ + if (count) /* let connd call kmxlnd_abort_msgs() */ + up(&kmxlnd_data.kmx_conn_sem); if (send_bye && valid && conn->mxk_peer->mxp_nid != kmxlnd_data.kmx_ni->ni_nid) { @@ -543,11 +545,11 @@ mxlnd_conn_disconnect(kmx_conn_t *conn, int mx_dis, int send_bye) mxlnd_sleep(msecs_to_jiffies(20)); } - if (atomic_read(&kmxlnd_data.kmx_shutdown) != 1) { - unsigned long last_msg = 0; + if (atomic_read(&kmxlnd_data.kmx_shutdown) != 1) { + unsigned long last_msg = 0; /* notify LNET that we are giving up on this peer */ - if (time_after(conn->mxk_last_rx, conn->mxk_last_tx)) + if (cfs_time_after(conn->mxk_last_rx, conn->mxk_last_tx)) last_msg = conn->mxk_last_rx; else last_msg = conn->mxk_last_tx; @@ -587,7 +589,7 @@ mxlnd_conn_alloc_locked(kmx_conn_t **connp, kmx_peer_t *peer) MXLND_ALLOC(conn, sizeof (*conn)); if (conn == NULL) { - CDEBUG(D_NETERROR, "Cannot allocate conn\n"); + CNETERR("Cannot allocate conn\n"); return -ENOMEM; } CDEBUG(D_NET, "allocated conn 0x%p for peer 0x%p\n", conn, peer); @@ -612,13 +614,13 @@ mxlnd_conn_alloc_locked(kmx_conn_t **connp, kmx_peer_t *peer) memset(conn->mxk_rxs, 0, MXLND_RX_MSGS() * sizeof(kmx_ctx_t)); - conn->mxk_peer = peer; - INIT_LIST_HEAD(&conn->mxk_list); - INIT_LIST_HEAD(&conn->mxk_zombie); - atomic_set(&conn->mxk_refcount, 2); /* ref for owning peer - and one for the caller */ - if (peer->mxp_nid == kmxlnd_data.kmx_ni->ni_nid) { - u64 nic_id = 0ULL; + conn->mxk_peer = peer; + CFS_INIT_LIST_HEAD(&conn->mxk_list); + CFS_INIT_LIST_HEAD(&conn->mxk_zombie); + atomic_set(&conn->mxk_refcount, 2); /* ref for owning peer + and one for the caller */ + if (peer->mxp_nid == kmxlnd_data.kmx_ni->ni_nid) { + u64 nic_id = 0ULL; u32 ep_id = 0; /* this is localhost, set the epa and status as up */ @@ -637,28 +639,28 @@ mxlnd_conn_alloc_locked(kmx_conn_t **connp, kmx_peer_t *peer) mxlnd_set_conn_status(conn, MXLND_CONN_INIT); /* mxk_epa - to be set after mx_iconnect() */ } - spin_lock_init(&conn->mxk_lock); + spin_lock_init(&conn->mxk_lock); /* conn->mxk_timeout = 0 */ /* conn->mxk_last_tx = 0 */ /* conn->mxk_last_rx = 0 */ - INIT_LIST_HEAD(&conn->mxk_rx_idle); + CFS_INIT_LIST_HEAD(&conn->mxk_rx_idle); conn->mxk_credits = *kmxlnd_tunables.kmx_peercredits; /* mxk_outstanding = 0 */ - INIT_LIST_HEAD(&conn->mxk_tx_credit_queue); - INIT_LIST_HEAD(&conn->mxk_tx_free_queue); + CFS_INIT_LIST_HEAD(&conn->mxk_tx_credit_queue); + CFS_INIT_LIST_HEAD(&conn->mxk_tx_free_queue); /* conn->mxk_ntx_msgs = 0 */ /* conn->mxk_ntx_data = 0 */ /* conn->mxk_ntx_posted = 0 */ /* conn->mxk_data_posted = 0 */ - INIT_LIST_HEAD(&conn->mxk_pending); + CFS_INIT_LIST_HEAD(&conn->mxk_pending); for (i = 0; i < MXLND_RX_MSGS(); i++) { rx = &conn->mxk_rxs[i]; rx->mxc_type = MXLND_REQ_RX; - INIT_LIST_HEAD(&rx->mxc_list); + CFS_INIT_LIST_HEAD(&rx->mxc_list); /* map mxc_msg to page */ page = pages->mxg_pages[ipage]; @@ 
-682,7 +684,7 @@ mxlnd_conn_alloc_locked(kmx_conn_t **connp, kmx_peer_t *peer) LASSERT (ipage <= MXLND_TX_MSG_PAGES()); } - list_add_tail(&rx->mxc_list, &conn->mxk_rx_idle); + cfs_list_add_tail(&rx->mxc_list, &conn->mxk_rx_idle); } *connp = conn; @@ -690,7 +692,7 @@ mxlnd_conn_alloc_locked(kmx_conn_t **connp, kmx_peer_t *peer) mxlnd_peer_addref(peer); /* add a ref for this conn */ /* add to front of peer's conns list */ - list_add(&conn->mxk_list, &peer->mxp_conns); + cfs_list_add(&conn->mxk_list, &peer->mxp_conns); peer->mxp_conn = conn; return 0; } @@ -699,25 +701,25 @@ int mxlnd_conn_alloc(kmx_conn_t **connp, kmx_peer_t *peer) { int ret = 0; - rwlock_t *g_lock = &kmxlnd_data.kmx_global_lock; + rwlock_t *g_lock = &kmxlnd_data.kmx_global_lock; - write_lock(g_lock); + write_lock(g_lock); ret = mxlnd_conn_alloc_locked(connp, peer); - write_unlock(g_lock); + write_unlock(g_lock); return ret; } int mxlnd_q_pending_ctx(kmx_ctx_t *ctx) { - int ret = 0; - kmx_conn_t *conn = ctx->mxc_conn; + int ret = 0; + kmx_conn_t *conn = ctx->mxc_conn; - ctx->mxc_state = MXLND_CTX_PENDING; - if (conn != NULL) { - spin_lock(&conn->mxk_lock); + ctx->mxc_state = MXLND_CTX_PENDING; + if (conn != NULL) { + spin_lock(&conn->mxk_lock); if (conn->mxk_status >= MXLND_CONN_INIT) { - list_add_tail(&ctx->mxc_list, &conn->mxk_pending); + cfs_list_add_tail(&ctx->mxc_list, &conn->mxk_pending); if (conn->mxk_timeout == 0 || ctx->mxc_deadline < conn->mxk_timeout) { conn->mxk_timeout = ctx->mxc_deadline; } @@ -725,9 +727,9 @@ mxlnd_q_pending_ctx(kmx_ctx_t *ctx) ctx->mxc_state = MXLND_CTX_COMPLETED; ret = -1; } - spin_unlock(&conn->mxk_lock); - } - return ret; + spin_unlock(&conn->mxk_lock); + } + return ret; } int @@ -737,25 +739,26 @@ mxlnd_deq_pending_ctx(kmx_ctx_t *ctx) ctx->mxc_state == MXLND_CTX_COMPLETED); if (ctx->mxc_state != MXLND_CTX_PENDING && ctx->mxc_state != MXLND_CTX_COMPLETED) { - CDEBUG(D_NETERROR, "deq ctx->mxc_state = %s\n", - mxlnd_ctxstate_to_str(ctx->mxc_state)); + CNETERR("deq ctx->mxc_state = %s\n", + mxlnd_ctxstate_to_str(ctx->mxc_state)); } ctx->mxc_state = MXLND_CTX_COMPLETED; - if (!list_empty(&ctx->mxc_list)) { + if (!cfs_list_empty(&ctx->mxc_list)) { kmx_conn_t *conn = ctx->mxc_conn; kmx_ctx_t *next = NULL; LASSERT(conn != NULL); - spin_lock(&conn->mxk_lock); - list_del_init(&ctx->mxc_list); + spin_lock(&conn->mxk_lock); + cfs_list_del_init(&ctx->mxc_list); conn->mxk_timeout = 0; - if (!list_empty(&conn->mxk_pending)) { - next = list_entry(conn->mxk_pending.next, kmx_ctx_t, mxc_list); + if (!cfs_list_empty(&conn->mxk_pending)) { + next = cfs_list_entry(conn->mxk_pending.next, + kmx_ctx_t, mxc_list); conn->mxk_timeout = next->mxc_deadline; } - spin_unlock(&conn->mxk_lock); - } - return 0; + spin_unlock(&conn->mxk_lock); + } + return 0; } /** @@ -768,18 +771,18 @@ mxlnd_deq_pending_ctx(kmx_ctx_t *ctx) void mxlnd_peer_free(kmx_peer_t *peer) { - CDEBUG(D_NET, "freeing peer 0x%p %s\n", peer, libcfs_nid2str(peer->mxp_nid)); + CDEBUG(D_NET, "freeing peer 0x%p %s\n", peer, libcfs_nid2str(peer->mxp_nid)); - LASSERT (atomic_read(&peer->mxp_refcount) == 0); + LASSERT (atomic_read(&peer->mxp_refcount) == 0); - if (!list_empty(&peer->mxp_list)) { - /* assume we are locked */ - list_del_init(&peer->mxp_list); - } + if (!cfs_list_empty(&peer->mxp_list)) { + /* assume we are locked */ + cfs_list_del_init(&peer->mxp_list); + } - MXLND_FREE(peer, sizeof (*peer)); - atomic_dec(&kmxlnd_data.kmx_npeers); - return; + MXLND_FREE(peer, sizeof (*peer)); + atomic_dec(&kmxlnd_data.kmx_npeers); + return; } static int @@ 
-840,11 +843,12 @@ mxlnd_ip2nic_id(u32 ip, u64 *nic_id, int tries) if (tmp_id != 0ULL) ret = 0; break; - } else if (ret == -EHOSTUNREACH && try < tries) { - /* add a little backoff */ - CDEBUG(D_NET, "sleeping for %d jiffies\n", HZ/4); - mxlnd_sleep(HZ/4); - } + } else if (ret == -EHOSTUNREACH && try < tries) { + /* add a little backoff */ + CDEBUG(D_NET, "sleeping for %d jiffies\n", + HZ/4); + mxlnd_sleep(HZ/4); + } } } while (try++ < tries); CDEBUG(D_NET, "done trying. ret = %d\n", ret); @@ -877,29 +881,30 @@ mxlnd_peer_alloc(kmx_peer_t **peerp, lnet_nid_t nid, u32 board, u32 ep_id, u64 n MXLND_ALLOC(peer, sizeof (*peer)); if (peer == NULL) { - CDEBUG(D_NETERROR, "Cannot allocate peer for NID 0x%llx\n", nid); + CNETERR("Cannot allocate peer for NID 0x%llx\n", + nid); return -ENOMEM; } CDEBUG(D_NET, "allocated peer 0x%p for NID 0x%llx\n", peer, nid); memset(peer, 0, sizeof(*peer)); - INIT_LIST_HEAD(&peer->mxp_list); - peer->mxp_nid = nid; - /* peer->mxp_ni unused - may be used for multi-rail */ - atomic_set(&peer->mxp_refcount, 1); /* ref for kmx_peers list */ + CFS_INIT_LIST_HEAD(&peer->mxp_list); + peer->mxp_nid = nid; + /* peer->mxp_ni unused - may be used for multi-rail */ + atomic_set(&peer->mxp_refcount, 1); /* ref for kmx_peers list */ peer->mxp_board = board; peer->mxp_ep_id = ep_id; peer->mxp_nic_id = nic_id; - INIT_LIST_HEAD(&peer->mxp_conns); + CFS_INIT_LIST_HEAD(&peer->mxp_conns); ret = mxlnd_conn_alloc(&peer->mxp_conn, peer); /* adds 2nd conn ref here... */ if (ret != 0) { mxlnd_peer_decref(peer); return ret; } - INIT_LIST_HEAD(&peer->mxp_tx_queue); + CFS_INIT_LIST_HEAD(&peer->mxp_tx_queue); if (peer->mxp_nic_id != 0ULL) nic_id = peer->mxp_nic_id; @@ -930,7 +935,7 @@ mxlnd_find_peer_by_nid_locked(lnet_nid_t nid) hash = mxlnd_nid_to_hash(nid); - list_for_each_entry(peer, &kmxlnd_data.kmx_peers[hash], mxp_list) { + cfs_list_for_each_entry(peer, &kmxlnd_data.kmx_peers[hash], mxp_list) { if (peer->mxp_nid == nid) { found = 1; mxlnd_peer_addref(peer); @@ -947,37 +952,37 @@ mxlnd_find_peer_by_nid(lnet_nid_t nid, int create) int hash = 0; kmx_peer_t *peer = NULL; kmx_peer_t *old = NULL; - rwlock_t *g_lock = &kmxlnd_data.kmx_global_lock; + rwlock_t *g_lock = &kmxlnd_data.kmx_global_lock; - read_lock(g_lock); + read_lock(g_lock); peer = mxlnd_find_peer_by_nid_locked(nid); /* adds peer ref */ if ((peer && peer->mxp_conn) || /* found peer with conn or */ (!peer && !create)) { /* did not find peer and do not create one */ - read_unlock(g_lock); + read_unlock(g_lock); return peer; } - read_unlock(g_lock); + read_unlock(g_lock); /* if peer but _not_ conn */ if (peer && !peer->mxp_conn) { if (create) { - write_lock(g_lock); + write_lock(g_lock); if (!peer->mxp_conn) { /* check again */ /* create the conn */ ret = mxlnd_conn_alloc_locked(&peer->mxp_conn, peer); if (ret != 0) { /* we tried, return the peer only. 
* the caller needs to see if the conn exists */ - CDEBUG(D_NETERROR, "%s: %s could not alloc conn\n", + CNETERR("%s: %s could not alloc conn\n", __func__, libcfs_nid2str(peer->mxp_nid)); } else { /* drop extra conn ref */ mxlnd_conn_decref(peer->mxp_conn); } } - write_unlock(g_lock); + write_unlock(g_lock); } return peer; } @@ -992,7 +997,7 @@ mxlnd_find_peer_by_nid(lnet_nid_t nid, int create) if (ret != 0) /* no memory, peer is NULL */ return NULL; - write_lock(g_lock); + write_lock(g_lock); /* look again */ old = mxlnd_find_peer_by_nid_locked(nid); @@ -1003,14 +1008,15 @@ mxlnd_find_peer_by_nid(lnet_nid_t nid, int create) mxlnd_peer_decref(peer); peer = old; } else { - /* no other peer, use this one */ - list_add_tail(&peer->mxp_list, &kmxlnd_data.kmx_peers[hash]); - atomic_inc(&kmxlnd_data.kmx_npeers); - mxlnd_peer_addref(peer); - mxlnd_conn_decref(peer->mxp_conn); /* drop ref from peer_alloc */ + /* no other peer, use this one */ + cfs_list_add_tail(&peer->mxp_list, + &kmxlnd_data.kmx_peers[hash]); + atomic_inc(&kmxlnd_data.kmx_npeers); + mxlnd_peer_addref(peer); + mxlnd_conn_decref(peer->mxp_conn); /* drop ref from peer_alloc */ } - write_unlock(g_lock); + write_unlock(g_lock); return peer; } @@ -1119,7 +1125,7 @@ mxlnd_unpack_msg(kmx_msg_t *msg, int nob) /* 6 bytes are enough to have received magic + version */ if (nob < 6) { - CDEBUG(D_NETERROR, "not enough bytes for magic + hdr: %d\n", nob); + CNETERR("not enough bytes for magic + hdr: %d\n", nob); return -EPROTO; } @@ -1128,24 +1134,24 @@ mxlnd_unpack_msg(kmx_msg_t *msg, int nob) } else if (msg->mxm_magic == __swab32(MXLND_MSG_MAGIC)) { flip = 1; } else { - CDEBUG(D_NETERROR, "Bad magic: %08x\n", msg->mxm_magic); + CNETERR("Bad magic: %08x\n", msg->mxm_magic); return -EPROTO; } if (msg->mxm_version != (flip ? __swab16(MXLND_MSG_VERSION) : MXLND_MSG_VERSION)) { - CDEBUG(D_NETERROR, "Bad version: %d\n", msg->mxm_version); + CNETERR("Bad version: %d\n", msg->mxm_version); return -EPROTO; } if (nob < hdr_size) { - CDEBUG(D_NETERROR, "not enough for a header: %d\n", nob); + CNETERR("not enough for a header: %d\n", nob); return -EPROTO; } msg_nob = flip ? __swab32(msg->mxm_nob) : msg->mxm_nob; if (msg_nob > nob) { - CDEBUG(D_NETERROR, "Short message: got %d, wanted %d\n", nob, msg_nob); + CNETERR("Short message: got %d, wanted %d\n", nob, msg_nob); return -EPROTO; } @@ -1154,7 +1160,7 @@ mxlnd_unpack_msg(kmx_msg_t *msg, int nob) msg_cksum = flip ? 
__swab32(msg->mxm_cksum) : msg->mxm_cksum; msg->mxm_cksum = 0; if (msg_cksum != 0 && msg_cksum != mxlnd_cksum(msg, msg_nob)) { - CDEBUG(D_NETERROR, "Bad checksum\n"); + CNETERR("Bad checksum\n"); return -EPROTO; } msg->mxm_cksum = msg_cksum; @@ -1172,13 +1178,13 @@ mxlnd_unpack_msg(kmx_msg_t *msg, int nob) } if (msg->mxm_srcnid == LNET_NID_ANY) { - CDEBUG(D_NETERROR, "Bad src nid: %s\n", libcfs_nid2str(msg->mxm_srcnid)); + CNETERR("Bad src nid: %s\n", libcfs_nid2str(msg->mxm_srcnid)); return -EPROTO; } switch (msg->mxm_type) { default: - CDEBUG(D_NETERROR, "Unknown message type %x\n", msg->mxm_type); + CNETERR("Unknown message type %x\n", msg->mxm_type); return -EPROTO; case MXLND_MSG_NOOP: @@ -1186,7 +1192,7 @@ mxlnd_unpack_msg(kmx_msg_t *msg, int nob) case MXLND_MSG_EAGER: if (msg_nob < offsetof(kmx_msg_t, mxm_u.eager.mxem_payload[0])) { - CDEBUG(D_NETERROR, "Short EAGER: %d(%d)\n", msg_nob, + CNETERR("Short EAGER: %d(%d)\n", msg_nob, (int)offsetof(kmx_msg_t, mxm_u.eager.mxem_payload[0])); return -EPROTO; } @@ -1194,7 +1200,7 @@ mxlnd_unpack_msg(kmx_msg_t *msg, int nob) case MXLND_MSG_PUT_REQ: if (msg_nob < hdr_size + sizeof(msg->mxm_u.put_req)) { - CDEBUG(D_NETERROR, "Short PUT_REQ: %d(%d)\n", msg_nob, + CNETERR("Short PUT_REQ: %d(%d)\n", msg_nob, (int)(hdr_size + sizeof(msg->mxm_u.put_req))); return -EPROTO; } @@ -1204,7 +1210,7 @@ mxlnd_unpack_msg(kmx_msg_t *msg, int nob) case MXLND_MSG_PUT_ACK: if (msg_nob < hdr_size + sizeof(msg->mxm_u.put_ack)) { - CDEBUG(D_NETERROR, "Short PUT_ACK: %d(%d)\n", msg_nob, + CNETERR("Short PUT_ACK: %d(%d)\n", msg_nob, (int)(hdr_size + sizeof(msg->mxm_u.put_ack))); return -EPROTO; } @@ -1216,7 +1222,7 @@ mxlnd_unpack_msg(kmx_msg_t *msg, int nob) case MXLND_MSG_GET_REQ: if (msg_nob < hdr_size + sizeof(msg->mxm_u.get_req)) { - CDEBUG(D_NETERROR, "Short GET_REQ: %d(%d)\n", msg_nob, + CNETERR("Short GET_REQ: %d(%d)\n", msg_nob, (int)(hdr_size + sizeof(msg->mxm_u.get_req))); return -EPROTO; } @@ -1228,7 +1234,7 @@ mxlnd_unpack_msg(kmx_msg_t *msg, int nob) case MXLND_MSG_CONN_REQ: case MXLND_MSG_CONN_ACK: if (msg_nob < hdr_size + sizeof(msg->mxm_u.conn_req)) { - CDEBUG(D_NETERROR, "Short connreq/ack: %d(%d)\n", msg_nob, + CNETERR("Short connreq/ack: %d(%d)\n", msg_nob, (int)(hdr_size + sizeof(msg->mxm_u.conn_req))); return -EPROTO; } @@ -1277,8 +1283,8 @@ mxlnd_recv_msg(lnet_msg_t *lntmsg, kmx_ctx_t *rx, u8 msg_type, u64 cookie, u32 l cookie, mask, (void *) rx, &rx->mxc_mxreq); if (mxret != MX_SUCCESS) { mxlnd_deq_pending_ctx(rx); - CDEBUG(D_NETERROR, "mx_kirecv() failed with %s (%d)\n", - mx_strerror(mxret), (int) mxret); + CNETERR("mx_kirecv() failed with %s (%d)\n", + mx_strerror(mxret), (int) mxret); return -1; } return 0; @@ -1323,7 +1329,7 @@ mxlnd_unexpected_recv(void *context, mx_endpoint_addr_t source, /* TODO this will change to the net struct */ if (context != NULL) { - CDEBUG(D_NETERROR, "non-NULL context\n"); + CNETERR("non-NULL context\n"); } #if MXLND_DEBUG @@ -1332,13 +1338,13 @@ mxlnd_unexpected_recv(void *context, mx_endpoint_addr_t source, mx_decompose_endpoint_addr2(source, &nic_id, &ep_id, &sid); mxlnd_parse_match(match_value, &msg_type, &error, &cookie); - read_lock(&kmxlnd_data.kmx_global_lock); + read_lock(&kmxlnd_data.kmx_global_lock); mx_get_endpoint_addr_context(source, (void **) &conn); if (conn) { mxlnd_conn_addref(conn); /* add ref for this function */ peer = conn->mxk_peer; } - read_unlock(&kmxlnd_data.kmx_global_lock); + read_unlock(&kmxlnd_data.kmx_global_lock); if (msg_type == MXLND_MSG_BYE) { if (conn) { @@ -1357,8 +1363,8 
@@ mxlnd_unexpected_recv(void *context, mx_endpoint_addr_t source, if (conn) mxlnd_conn_decref(conn); /* drop ref taken above */ if (unlikely(length != expected || !data_if_available)) { - CDEBUG(D_NETERROR, "received invalid CONN_REQ from %llx " - "length=%d (expected %d)\n", nic_id, length, expected); + CNETERR("received invalid CONN_REQ from %llx " + "length=%d (expected %d)\n", nic_id, length, expected); mxlnd_send_message(source, MXLND_MSG_CONN_ACK, EPROTO, 0); return MX_RECV_FINISHED; } @@ -1366,17 +1372,17 @@ mxlnd_unexpected_recv(void *context, mx_endpoint_addr_t source, ret = mxlnd_connparams_alloc(&cp, context, source, match_value, length, conn, peer, data_if_available); if (unlikely(ret != 0)) { - CDEBUG(D_NETERROR, "unable to alloc CONN_REQ from %llx:%d\n", - nic_id, ep_id); + CNETERR("unable to alloc CONN_REQ from %llx:%d\n", + nic_id, ep_id); mxlnd_send_message(source, MXLND_MSG_CONN_ACK, ENOMEM, 0); return MX_RECV_FINISHED; } - spin_lock(&kmxlnd_data.kmx_conn_lock); - list_add_tail(&cp->mxr_list, &kmxlnd_data.kmx_conn_reqs); - spin_unlock(&kmxlnd_data.kmx_conn_lock); - up(&kmxlnd_data.kmx_conn_sem); - return MX_RECV_FINISHED; - } + spin_lock(&kmxlnd_data.kmx_conn_lock); + cfs_list_add_tail(&cp->mxr_list, &kmxlnd_data.kmx_conn_reqs); + spin_unlock(&kmxlnd_data.kmx_conn_lock); + up(&kmxlnd_data.kmx_conn_sem); + return MX_RECV_FINISHED; + } if (msg_type == MXLND_MSG_CONN_ACK) { kmx_connparams_t *cp = NULL; const int expected = offsetof(kmx_msg_t, mxm_u) + @@ -1384,12 +1390,11 @@ mxlnd_unexpected_recv(void *context, mx_endpoint_addr_t source, LASSERT(conn); if (unlikely(error != 0)) { - CDEBUG(D_NETERROR, "received CONN_ACK from %s " - "with error -%d\n", + CNETERR("received CONN_ACK from %s with error -%d\n", libcfs_nid2str(peer->mxp_nid), (int) error); mxlnd_conn_disconnect(conn, 1, 0); } else if (unlikely(length != expected || !data_if_available)) { - CDEBUG(D_NETERROR, "received %s CONN_ACK from %s " + CNETERR("received %s CONN_ACK from %s " "length=%d (expected %d)\n", data_if_available ? 
"short" : "missing", libcfs_nid2str(peer->mxp_nid), length, expected); @@ -1399,15 +1404,16 @@ mxlnd_unexpected_recv(void *context, mx_endpoint_addr_t source, ret = mxlnd_connparams_alloc(&cp, context, source, match_value, length, conn, peer, data_if_available); if (unlikely(ret != 0)) { - CDEBUG(D_NETERROR, "unable to alloc kmx_connparams_t" + CNETERR("unable to alloc kmx_connparams_t" " from %llx:%d\n", nic_id, ep_id); mxlnd_conn_disconnect(conn, 1, 1); - } else { - spin_lock(&kmxlnd_data.kmx_conn_lock); - list_add_tail(&cp->mxr_list, &kmxlnd_data.kmx_conn_reqs); - spin_unlock(&kmxlnd_data.kmx_conn_lock); - up(&kmxlnd_data.kmx_conn_sem); - } + } else { + spin_lock(&kmxlnd_data.kmx_conn_lock); + cfs_list_add_tail(&cp->mxr_list, + &kmxlnd_data.kmx_conn_reqs); + spin_unlock(&kmxlnd_data.kmx_conn_lock); + up(&kmxlnd_data.kmx_conn_sem); + } } mxlnd_conn_decref(conn); /* drop ref taken above */ @@ -1423,9 +1429,9 @@ mxlnd_unexpected_recv(void *context, mx_endpoint_addr_t source, if (length <= MXLND_MSG_SIZE) { ret = mxlnd_recv_msg(NULL, rx, msg_type, match_value, length); } else { - CDEBUG(D_NETERROR, "unexpected large receive with " - "match_value=0x%llx length=%d\n", - match_value, length); + CNETERR("unexpected large receive with " + "match_value=0x%llx length=%d\n", + match_value, length); ret = mxlnd_recv_msg(NULL, rx, msg_type, match_value, 0); } @@ -1435,7 +1441,7 @@ mxlnd_unexpected_recv(void *context, mx_endpoint_addr_t source, rx->mxc_peer = peer; rx->mxc_nid = peer->mxp_nid; } else { - CDEBUG(D_NETERROR, "could not post receive\n"); + CNETERR("could not post receive\n"); mxlnd_put_idle_rx(rx); } } @@ -1447,12 +1453,12 @@ mxlnd_unexpected_recv(void *context, mx_endpoint_addr_t source, if (rx == NULL || ret != 0) { mxlnd_conn_decref(conn); /* drop ref taken above */ if (rx == NULL) { - CDEBUG(D_NETERROR, "no idle rxs available - dropping rx" - " 0x%llx from %s\n", match_value, - libcfs_nid2str(peer->mxp_nid)); + CNETERR("no idle rxs available - dropping rx" + " 0x%llx from %s\n", match_value, + libcfs_nid2str(peer->mxp_nid)); } else { /* ret != 0 */ - CDEBUG(D_NETERROR, "disconnected peer - dropping rx\n"); + CNETERR("disconnected peer - dropping rx\n"); } seg.segment_ptr = 0ULL; seg.segment_length = 0; @@ -1471,18 +1477,19 @@ mxlnd_get_peer_info(int index, lnet_nid_t *nidp, int *count) int ret = -ENOENT; kmx_peer_t *peer = NULL; - read_lock(&kmxlnd_data.kmx_global_lock); + read_lock(&kmxlnd_data.kmx_global_lock); for (i = 0; i < MXLND_HASH_SIZE; i++) { - list_for_each_entry(peer, &kmxlnd_data.kmx_peers[i], mxp_list) { - if (index-- == 0) { - *nidp = peer->mxp_nid; - *count = atomic_read(&peer->mxp_refcount); - ret = 0; - break; - } + cfs_list_for_each_entry(peer, &kmxlnd_data.kmx_peers[i], + mxp_list) { + if (index-- == 0) { + *nidp = peer->mxp_nid; + *count = atomic_read(&peer->mxp_refcount); + ret = 0; + break; + } } } - read_unlock(&kmxlnd_data.kmx_global_lock); + read_unlock(&kmxlnd_data.kmx_global_lock); return ret; } @@ -1493,7 +1500,7 @@ mxlnd_del_peer_locked(kmx_peer_t *peer) if (peer->mxp_conn) { mxlnd_conn_disconnect(peer->mxp_conn, 1, 1); } else { - list_del_init(&peer->mxp_list); /* remove from the global list */ + cfs_list_del_init(&peer->mxp_list); /* remove from the global list */ mxlnd_peer_decref(peer); /* drop global list ref */ } return; @@ -1510,7 +1517,7 @@ mxlnd_del_peer(lnet_nid_t nid) if (nid != LNET_NID_ANY) { peer = mxlnd_find_peer_by_nid(nid, 0); /* adds peer ref */ } - write_lock(&kmxlnd_data.kmx_global_lock); + write_lock(&kmxlnd_data.kmx_global_lock); if 
(nid != LNET_NID_ANY) { if (peer == NULL) { ret = -ENOENT; @@ -1520,13 +1527,14 @@ mxlnd_del_peer(lnet_nid_t nid) } } else { /* LNET_NID_ANY */ for (i = 0; i < MXLND_HASH_SIZE; i++) { - list_for_each_entry_safe(peer, next, - &kmxlnd_data.kmx_peers[i], mxp_list) { + cfs_list_for_each_entry_safe(peer, next, + &kmxlnd_data.kmx_peers[i], + mxp_list) { mxlnd_del_peer_locked(peer); } } } - write_unlock(&kmxlnd_data.kmx_global_lock); + write_unlock(&kmxlnd_data.kmx_global_lock); return ret; } @@ -1538,21 +1546,23 @@ mxlnd_get_conn_by_idx(int index) kmx_peer_t *peer = NULL; kmx_conn_t *conn = NULL; - read_lock(&kmxlnd_data.kmx_global_lock); + read_lock(&kmxlnd_data.kmx_global_lock); for (i = 0; i < MXLND_HASH_SIZE; i++) { - list_for_each_entry(peer, &kmxlnd_data.kmx_peers[i], mxp_list) { - list_for_each_entry(conn, &peer->mxp_conns, mxk_list) { + cfs_list_for_each_entry(peer, &kmxlnd_data.kmx_peers[i], + mxp_list) { + cfs_list_for_each_entry(conn, &peer->mxp_conns, + mxk_list) { if (index-- > 0) { continue; } mxlnd_conn_addref(conn); /* add ref here, dec in ctl() */ - read_unlock(&kmxlnd_data.kmx_global_lock); + read_unlock(&kmxlnd_data.kmx_global_lock); return conn; } } } - read_unlock(&kmxlnd_data.kmx_global_lock); + read_unlock(&kmxlnd_data.kmx_global_lock); return NULL; } @@ -1563,7 +1573,7 @@ mxlnd_close_matching_conns_locked(kmx_peer_t *peer) kmx_conn_t *conn = NULL; kmx_conn_t *next = NULL; - list_for_each_entry_safe(conn, next, &peer->mxp_conns, mxk_list) + cfs_list_for_each_entry_safe(conn, next, &peer->mxp_conns, mxk_list) mxlnd_conn_disconnect(conn, 0, 1); return; @@ -1576,7 +1586,7 @@ mxlnd_close_matching_conns(lnet_nid_t nid) int ret = 0; kmx_peer_t *peer = NULL; - write_lock(&kmxlnd_data.kmx_global_lock); + write_lock(&kmxlnd_data.kmx_global_lock); if (nid != LNET_NID_ANY) { peer = mxlnd_find_peer_by_nid_locked(nid); /* adds peer ref */ if (peer == NULL) { @@ -1587,11 +1597,11 @@ mxlnd_close_matching_conns(lnet_nid_t nid) } } else { /* LNET_NID_ANY */ for (i = 0; i < MXLND_HASH_SIZE; i++) { - list_for_each_entry(peer, &kmxlnd_data.kmx_peers[i], mxp_list) + cfs_list_for_each_entry(peer, &kmxlnd_data.kmx_peers[i], mxp_list) mxlnd_close_matching_conns_locked(peer); } } - write_unlock(&kmxlnd_data.kmx_global_lock); + write_unlock(&kmxlnd_data.kmx_global_lock); return ret; } @@ -1642,7 +1652,7 @@ mxlnd_ctl(lnet_ni_t *ni, unsigned int cmd, void *arg) break; } default: - CDEBUG(D_NETERROR, "unknown ctl(%d)\n", cmd); + CNETERR("unknown ctl(%d)\n", cmd); break; } @@ -1672,20 +1682,22 @@ mxlnd_peer_queue_tx_locked(kmx_ctx_t *tx) msg_type != MXLND_MSG_GET_DATA) { /* msg style tx */ if (mxlnd_tx_requires_credit(tx)) { - list_add_tail(&tx->mxc_list, &conn->mxk_tx_credit_queue); + cfs_list_add_tail(&tx->mxc_list, + &conn->mxk_tx_credit_queue); conn->mxk_ntx_msgs++; } else if (msg_type == MXLND_MSG_CONN_REQ || msg_type == MXLND_MSG_CONN_ACK) { /* put conn msgs at the front of the queue */ - list_add(&tx->mxc_list, &conn->mxk_tx_free_queue); + cfs_list_add(&tx->mxc_list, &conn->mxk_tx_free_queue); } else { /* PUT_ACK, PUT_NAK */ - list_add_tail(&tx->mxc_list, &conn->mxk_tx_free_queue); + cfs_list_add_tail(&tx->mxc_list, + &conn->mxk_tx_free_queue); conn->mxk_ntx_msgs++; } } else { /* data style tx */ - list_add_tail(&tx->mxc_list, &conn->mxk_tx_free_queue); + cfs_list_add_tail(&tx->mxc_list, &conn->mxk_tx_free_queue); conn->mxk_ntx_data++; } @@ -1701,13 +1713,13 @@ mxlnd_peer_queue_tx_locked(kmx_ctx_t *tx) static inline void mxlnd_peer_queue_tx(kmx_ctx_t *tx) { - LASSERT(tx->mxc_peer != NULL); - 
LASSERT(tx->mxc_conn != NULL); - spin_lock(&tx->mxc_conn->mxk_lock); - mxlnd_peer_queue_tx_locked(tx); - spin_unlock(&tx->mxc_conn->mxk_lock); + LASSERT(tx->mxc_peer != NULL); + LASSERT(tx->mxc_conn != NULL); + spin_lock(&tx->mxc_conn->mxk_lock); + mxlnd_peer_queue_tx_locked(tx); + spin_unlock(&tx->mxc_conn->mxk_lock); - return; + return; } /** @@ -1748,13 +1760,13 @@ mxlnd_queue_tx(kmx_ctx_t *tx) mxlnd_peer_queue_tx(tx); mxlnd_check_sends(peer); } else { - spin_lock(&kmxlnd_data.kmx_tx_queue_lock); - list_add_tail(&tx->mxc_list, &kmxlnd_data.kmx_tx_queue); - spin_unlock(&kmxlnd_data.kmx_tx_queue_lock); - up(&kmxlnd_data.kmx_tx_queue_sem); - } + spin_lock(&kmxlnd_data.kmx_tx_queue_lock); + cfs_list_add_tail(&tx->mxc_list, &kmxlnd_data.kmx_tx_queue); + spin_unlock(&kmxlnd_data.kmx_tx_queue_lock); + up(&kmxlnd_data.kmx_tx_queue_sem); + } done: - return; + return; } int @@ -1797,7 +1809,7 @@ mxlnd_setup_iov(kmx_ctx_t *ctx, u32 niov, struct iovec *iov, u32 offset, u32 nob MXLND_ALLOC(seg, nseg * sizeof(*seg)); if (seg == NULL) { - CDEBUG(D_NETERROR, "MXLND_ALLOC() failed\n"); + CNETERR("MXLND_ALLOC() failed\n"); return -1; } memset(seg, 0, nseg * sizeof(*seg)); @@ -1866,7 +1878,7 @@ mxlnd_setup_kiov(kmx_ctx_t *ctx, u32 niov, lnet_kiov_t *kiov, u32 offset, u32 no MXLND_ALLOC(seg, nseg * sizeof(*seg)); if (seg == NULL) { - CDEBUG(D_NETERROR, "MXLND_ALLOC() failed\n"); + CNETERR("MXLND_ALLOC() failed\n"); return -1; } memset(seg, 0, niov * sizeof(*seg)); @@ -1940,7 +1952,7 @@ mxlnd_send_data(lnet_ni_t *ni, lnet_msg_t *lntmsg, kmx_peer_t *peer, u8 msg_type tx = mxlnd_get_idle_tx(); if (tx == NULL) { - CDEBUG(D_NETERROR, "Can't allocate %s tx for %s\n", + CNETERR("Can't allocate %s tx for %s\n", msg_type == MXLND_MSG_PUT_DATA ? "PUT_DATA" : "GET_DATA", libcfs_nid2str(target.nid)); goto failed_0; @@ -1958,8 +1970,8 @@ mxlnd_send_data(lnet_ni_t *ni, lnet_msg_t *lntmsg, kmx_peer_t *peer, u8 msg_type /* This setups up the mx_ksegment_t to send the DATA payload */ if (nob == 0) { /* do not setup the segments */ - CDEBUG(D_NETERROR, "nob = 0; why didn't we use an EAGER reply " - "to %s?\n", libcfs_nid2str(target.nid)); + CNETERR("nob = 0; why didn't we use an EAGER reply " + "to %s?\n", libcfs_nid2str(target.nid)); ret = 0; } else if (kiov == NULL) { ret = mxlnd_setup_iov(tx, niov, iov, offset, nob); @@ -1967,8 +1979,8 @@ mxlnd_send_data(lnet_ni_t *ni, lnet_msg_t *lntmsg, kmx_peer_t *peer, u8 msg_type ret = mxlnd_setup_kiov(tx, niov, kiov, offset, nob); } if (ret != 0) { - CDEBUG(D_NETERROR, "Can't setup send DATA for %s\n", - libcfs_nid2str(target.nid)); + CNETERR("Can't setup send DATA for %s\n", + libcfs_nid2str(target.nid)); tx->mxc_errno = -EIO; goto failed_1; } @@ -1981,7 +1993,7 @@ failed_1: return; failed_0: - CDEBUG(D_NETERROR, "no tx avail\n"); + CNETERR("no tx avail\n"); lnet_finalize(ni, lntmsg, -EIO); return; } @@ -2044,13 +2056,13 @@ mxlnd_recv_data(lnet_ni_t *ni, lnet_msg_t *lntmsg, kmx_ctx_t *rx, u8 msg_type, u if (msg_type == MXLND_MSG_GET_DATA) { rx->mxc_lntmsg[1] = lnet_create_reply_msg(kmxlnd_data.kmx_ni, lntmsg); if (rx->mxc_lntmsg[1] == NULL) { - CDEBUG(D_NETERROR, "Can't create reply for GET -> %s\n", - libcfs_nid2str(target.nid)); + CNETERR("Can't create reply for GET -> %s\n", + libcfs_nid2str(target.nid)); ret = -1; } } if (ret != 0) { - CDEBUG(D_NETERROR, "Can't setup %s rx for %s\n", + CNETERR("Can't setup %s rx for %s\n", msg_type == MXLND_MSG_PUT_DATA ? 
"PUT_DATA" : "GET_DATA", libcfs_nid2str(target.nid)); return -1; @@ -2069,8 +2081,8 @@ mxlnd_recv_data(lnet_ni_t *ni, lnet_msg_t *lntmsg, kmx_ctx_t *rx, u8 msg_type, u if (rx->mxc_conn != NULL) { mxlnd_deq_pending_ctx(rx); } - CDEBUG(D_NETERROR, "mx_kirecv() failed with %d for %s\n", - (int) mxret, libcfs_nid2str(target.nid)); + CNETERR("mx_kirecv() failed with %d for %s\n", + (int) mxret, libcfs_nid2str(target.nid)); return -1; } @@ -2110,7 +2122,7 @@ mxlnd_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg) int nob = 0; uint32_t length = 0; kmx_peer_t *peer = NULL; - rwlock_t *g_lock = &kmxlnd_data.kmx_global_lock; + rwlock_t *g_lock =&kmxlnd_data.kmx_global_lock; CDEBUG(D_NET, "sending %d bytes in %d frags to %s\n", payload_nob, payload_niov, libcfs_id2str(target)); @@ -2142,14 +2154,13 @@ mxlnd_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg) if (unlikely(peer->mxp_incompatible)) { mxlnd_peer_decref(peer); /* drop ref taken above */ } else { - read_lock(g_lock); - conn = peer->mxp_conn; - if (conn && conn->mxk_status != MXLND_CONN_DISCONNECT) { - mxlnd_conn_addref(conn); - } else { - conn = NULL; - } - read_unlock(g_lock); + read_lock(g_lock); + conn = peer->mxp_conn; + if (conn && conn->mxk_status != MXLND_CONN_DISCONNECT) + mxlnd_conn_addref(conn); + else + conn = NULL; + read_unlock(g_lock); mxlnd_peer_decref(peer); /* drop peer ref taken above */ if (!conn) return -ENOTCONN; @@ -2173,9 +2184,9 @@ mxlnd_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg) tx = mxlnd_get_idle_tx(); if (unlikely(tx == NULL)) { - CDEBUG(D_NETERROR, "Can't allocate %s tx for %s\n", - type == LNET_MSG_PUT ? "PUT" : "REPLY", - libcfs_nid2str(nid)); + CNETERR("Can't allocate %s tx for %s\n", + type == LNET_MSG_PUT ? "PUT" : "REPLY", + libcfs_nid2str(nid)); if (conn) mxlnd_conn_decref(conn); return -ENOMEM; } @@ -2195,8 +2206,8 @@ mxlnd_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg) rx = mxlnd_get_idle_rx(conn); if (unlikely(rx == NULL)) { - CDEBUG(D_NETERROR, "Can't allocate rx for PUT_ACK for %s\n", - libcfs_nid2str(nid)); + CNETERR("Can't allocate rx for PUT_ACK for %s\n", + libcfs_nid2str(nid)); mxlnd_put_idle_tx(tx); if (conn) mxlnd_conn_decref(conn); /* for the ref taken above */ return -ENOMEM; @@ -2212,7 +2223,7 @@ mxlnd_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg) length = offsetof(kmx_msg_t, mxm_u) + sizeof(kmx_putack_msg_t); ret = mxlnd_recv_msg(lntmsg, rx, MXLND_MSG_PUT_ACK, rx->mxc_match, length); if (unlikely(ret != 0)) { - CDEBUG(D_NETERROR, "recv_msg() failed for PUT_ACK for %s\n", + CNETERR("recv_msg() failed for PUT_ACK for %s\n", libcfs_nid2str(nid)); rx->mxc_lntmsg[0] = NULL; mxlnd_put_idle_rx(rx); @@ -2238,15 +2249,15 @@ mxlnd_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg) * then post GET_REQ tx */ tx = mxlnd_get_idle_tx(); if (unlikely(tx == NULL)) { - CDEBUG(D_NETERROR, "Can't allocate GET tx for %s\n", - libcfs_nid2str(nid)); + CNETERR("Can't allocate GET tx for %s\n", + libcfs_nid2str(nid)); mxlnd_conn_decref(conn); /* for the ref taken above */ return -ENOMEM; } rx_data = mxlnd_get_idle_rx(conn); if (unlikely(rx_data == NULL)) { - CDEBUG(D_NETERROR, "Can't allocate DATA rx for %s\n", - libcfs_nid2str(nid)); + CNETERR("Can't allocate DATA rx for %s\n", + libcfs_nid2str(nid)); mxlnd_put_idle_tx(tx); mxlnd_conn_decref(conn); /* for the ref taken above */ return -ENOMEM; @@ -2259,8 +2270,8 @@ mxlnd_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg) ret = mxlnd_recv_data(ni, lntmsg, rx_data, MXLND_MSG_GET_DATA, tx->mxc_cookie); if (unlikely(ret 
!= 0)) { - CDEBUG(D_NETERROR, "Can't setup GET sink for %s\n", - libcfs_nid2str(nid)); + CNETERR("Can't setup GET sink for %s\n", + libcfs_nid2str(nid)); mxlnd_put_idle_rx(rx_data); mxlnd_put_idle_tx(tx); mxlnd_conn_decref(conn); /* for the rx_data... */ @@ -2293,8 +2304,8 @@ mxlnd_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg) tx = mxlnd_get_idle_tx(); if (unlikely(tx == NULL)) { - CDEBUG(D_NETERROR, "Can't send %s to %s: tx descs exhausted\n", - mxlnd_lnetmsg_to_str(type), libcfs_nid2str(nid)); + CNETERR("Can't send %s to %s: tx descs exhausted\n", + mxlnd_lnetmsg_to_str(type), libcfs_nid2str(nid)); mxlnd_conn_decref(conn); /* drop ref taken above */ return -ENOMEM; } @@ -2370,8 +2381,8 @@ mxlnd_recv (lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg, int delayed, nob = offsetof(kmx_msg_t, mxm_u.eager.mxem_payload[rlen]); len = rx->mxc_status.xfer_length; if (unlikely(nob > len)) { - CDEBUG(D_NETERROR, "Eager message from %s too big: %d(%d)\n", - libcfs_nid2str(nid), nob, len); + CNETERR("Eager message from %s too big: %d(%d)\n", + libcfs_nid2str(nid), nob, len); ret = -EPROTO; break; } @@ -2398,7 +2409,7 @@ mxlnd_recv (lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg, int delayed, tx = mxlnd_get_idle_tx(); if (unlikely(tx == NULL)) { - CDEBUG(D_NETERROR, "Can't allocate tx for %s\n", libcfs_nid2str(nid)); + CNETERR("Can't allocate tx for %s\n", libcfs_nid2str(nid)); /* Not replying will break the connection */ ret = -ENOMEM; break; @@ -2435,8 +2446,8 @@ mxlnd_recv (lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg, int delayed, if (unlikely(ret != 0)) { /* Notify peer that it's over */ - CDEBUG(D_NETERROR, "Can't setup PUT_DATA rx for %s: %d\n", - libcfs_nid2str(nid), ret); + CNETERR("Can't setup PUT_DATA rx for %s: %d\n", + libcfs_nid2str(nid), ret); mxlnd_ctx_init(tx); tx->mxc_state = MXLND_CTX_PREP; tx->mxc_peer = peer; @@ -2468,8 +2479,8 @@ mxlnd_recv (lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg, int delayed, * Send the error in bits 52-59 and the cookie in bits 0-51 */ tx = mxlnd_get_idle_tx(); if (unlikely(tx == NULL)) { - CDEBUG(D_NETERROR, "Can't get tx for GET NAK for %s\n", - libcfs_nid2str(nid)); + CNETERR("Can't get tx for GET NAK for %s\n", + libcfs_nid2str(nid)); /* we can't get a tx, notify the peer that the GET failed */ mxlnd_send_message(conn->mxk_epa, MXLND_MSG_GET_DATA, ENODATA, cookie); @@ -2497,11 +2508,11 @@ mxlnd_recv (lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg, int delayed, if (repost) { /* we received a message, increment peer's outstanding credits */ - if (credit == 1) { - spin_lock(&conn->mxk_lock); - conn->mxk_outstanding++; - spin_unlock(&conn->mxk_lock); - } + if (credit == 1) { + spin_lock(&conn->mxk_lock); + conn->mxk_outstanding++; + spin_unlock(&conn->mxk_lock); + } /* we are done with the rx */ mxlnd_put_idle_rx(rx); mxlnd_conn_decref(conn); @@ -2518,9 +2529,9 @@ mxlnd_recv (lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg, int delayed, void mxlnd_sleep(unsigned long timeout) { - set_current_state(TASK_INTERRUPTIBLE); - schedule_timeout(timeout); - return; + set_current_state(TASK_INTERRUPTIBLE); + schedule_timeout(timeout); + return; } /** @@ -2539,47 +2550,46 @@ mxlnd_tx_queued(void *arg) int found = 0; kmx_ctx_t *tx = NULL; kmx_peer_t *peer = NULL; - struct list_head *queue = &kmxlnd_data.kmx_tx_queue; - spinlock_t *tx_q_lock = &kmxlnd_data.kmx_tx_queue_lock; - rwlock_t *g_lock = &kmxlnd_data.kmx_global_lock; - - cfs_daemonize("mxlnd_tx_queued"); - - while (!(atomic_read(&kmxlnd_data.kmx_shutdown))) { - ret = 
down_interruptible(&kmxlnd_data.kmx_tx_queue_sem); - if (atomic_read(&kmxlnd_data.kmx_shutdown)) - break; - if (ret != 0) // Should we check for -EINTR? - continue; - spin_lock(tx_q_lock); - if (list_empty (&kmxlnd_data.kmx_tx_queue)) { - spin_unlock(tx_q_lock); - continue; - } - tx = list_entry (queue->next, kmx_ctx_t, mxc_list); - list_del_init(&tx->mxc_list); - spin_unlock(tx_q_lock); - - found = 0; - peer = mxlnd_find_peer_by_nid(tx->mxc_nid, 0); /* adds peer ref */ - if (peer != NULL) { - tx->mxc_peer = peer; - write_lock(g_lock); - if (peer->mxp_conn == NULL) { - ret = mxlnd_conn_alloc_locked(&peer->mxp_conn, peer); - if (ret != 0) { - /* out of memory, give up and fail tx */ - tx->mxc_errno = -ENOMEM; - mxlnd_peer_decref(peer); - write_unlock(g_lock); - mxlnd_put_idle_tx(tx); - continue; - } - } - tx->mxc_conn = peer->mxp_conn; - mxlnd_conn_addref(tx->mxc_conn); /* for this tx */ - mxlnd_peer_decref(peer); /* drop peer ref taken above */ - write_unlock(g_lock); + cfs_list_t *queue = &kmxlnd_data.kmx_tx_queue; + spinlock_t *tx_q_lock = &kmxlnd_data.kmx_tx_queue_lock; + rwlock_t *g_lock = &kmxlnd_data.kmx_global_lock; + + while (!(atomic_read(&kmxlnd_data.kmx_shutdown))) { + ret = down_interruptible(&kmxlnd_data.kmx_tx_queue_sem); + if (atomic_read(&kmxlnd_data.kmx_shutdown)) + break; + if (ret != 0) /* Should we check for -EINTR? */ + continue; + spin_lock(tx_q_lock); + if (cfs_list_empty(&kmxlnd_data.kmx_tx_queue)) { + spin_unlock(tx_q_lock); + continue; + } + tx = cfs_list_entry(queue->next, kmx_ctx_t, mxc_list); + cfs_list_del_init(&tx->mxc_list); + spin_unlock(tx_q_lock); + + found = 0; + peer = mxlnd_find_peer_by_nid(tx->mxc_nid, 0); /* adds ref*/ + if (peer != NULL) { + tx->mxc_peer = peer; + write_lock(g_lock); + if (peer->mxp_conn == NULL) { + ret = mxlnd_conn_alloc_locked(&peer->mxp_conn, + peer); + if (ret != 0) { + /* out of memory: give up, fail tx */ + tx->mxc_errno = -ENOMEM; + mxlnd_peer_decref(peer); + write_unlock(g_lock); + mxlnd_put_idle_tx(tx); + continue; + } + } + tx->mxc_conn = peer->mxp_conn; + mxlnd_conn_addref(tx->mxc_conn); /* for this tx */ + mxlnd_peer_decref(peer); /* drop peer ref taken above */ + write_unlock(g_lock); mxlnd_queue_tx(tx); found = 1; } @@ -2610,7 +2620,7 @@ mxlnd_tx_queued(void *arg) /* add peer to global peer list, but look to see * if someone already created it after we released * the read lock */ - write_lock(g_lock); + write_lock(g_lock); old = mxlnd_find_peer_by_nid_locked(peer->mxp_nid); if (old) { /* we have a peer ref on old */ @@ -2625,10 +2635,11 @@ mxlnd_tx_queued(void *arg) } } - if (found == 0) { - list_add_tail(&peer->mxp_list, &kmxlnd_data.kmx_peers[hash]); - atomic_inc(&kmxlnd_data.kmx_npeers); - } else { + if (found == 0) { + cfs_list_add_tail(&peer->mxp_list, + &kmxlnd_data.kmx_peers[hash]); + atomic_inc(&kmxlnd_data.kmx_npeers); + } else { tx->mxc_peer = old; tx->mxc_conn = old->mxp_conn; LASSERT(old->mxp_conn != NULL); @@ -2637,7 +2648,7 @@ mxlnd_tx_queued(void *arg) mxlnd_conn_decref(peer->mxp_conn); /* drop peer's ref */ mxlnd_peer_decref(peer); } - write_unlock(g_lock); + write_unlock(g_lock); mxlnd_queue_tx(tx); } @@ -2674,14 +2685,15 @@ mxlnd_iconnect(kmx_peer_t *peer, u8 msg_type) mx_nic_id_to_board_number(peer->mxp_nic_id, &peer->mxp_board); } if (peer->mxp_nic_id == 0ULL && conn->mxk_status == MXLND_CONN_WAIT) { - /* not mapped yet, return */ - spin_lock(&conn->mxk_lock); - mxlnd_set_conn_status(conn, MXLND_CONN_INIT); - spin_unlock(&conn->mxk_lock); + /* not mapped yet, return */ + spin_lock(&conn->mxk_lock); 
+ mxlnd_set_conn_status(conn, MXLND_CONN_INIT); + spin_unlock(&conn->mxk_lock); } } - if (time_after(jiffies, peer->mxp_reconnect_time + MXLND_CONNECT_TIMEOUT) && + if (cfs_time_after(jiffies, + peer->mxp_reconnect_time + MXLND_CONNECT_TIMEOUT) && conn->mxk_status != MXLND_CONN_DISCONNECT) { /* give up and notify LNET */ CDEBUG(D_NET, "timeout trying to connect to %s\n", @@ -2695,15 +2707,16 @@ mxlnd_iconnect(kmx_peer_t *peer, u8 msg_type) peer->mxp_ep_id, MXLND_MSG_MAGIC, match, (void *) peer, &request); if (unlikely(mxret != MX_SUCCESS)) { - spin_lock(&conn->mxk_lock); - mxlnd_set_conn_status(conn, MXLND_CONN_FAIL); - spin_unlock(&conn->mxk_lock); - CDEBUG(D_NETERROR, "mx_iconnect() failed with %s (%d) to %s\n", + spin_lock(&conn->mxk_lock); + mxlnd_set_conn_status(conn, MXLND_CONN_FAIL); + spin_unlock(&conn->mxk_lock); + CNETERR("mx_iconnect() failed with %s (%d) to %s\n", mx_strerror(mxret), mxret, libcfs_nid2str(peer->mxp_nid)); mxlnd_conn_decref(conn); } - mx_set_request_timeout(kmxlnd_data.kmx_endpt, request, MXLND_CONNECT_TIMEOUT/HZ*1000); - return; + mx_set_request_timeout(kmxlnd_data.kmx_endpt, request, + MXLND_CONNECT_TIMEOUT/HZ*1000); + return; } #define MXLND_STATS 0 @@ -2729,39 +2742,39 @@ mxlnd_check_sends(kmx_peer_t *peer) LASSERT(peer != NULL); return -1; } - write_lock(&kmxlnd_data.kmx_global_lock); - conn = peer->mxp_conn; - /* NOTE take a ref for the duration of this function since it is called - * when there might not be any queued txs for this peer */ - if (conn) { - if (conn->mxk_status == MXLND_CONN_DISCONNECT) { - write_unlock(&kmxlnd_data.kmx_global_lock); - return -1; - } - mxlnd_conn_addref(conn); /* for duration of this function */ - } - write_unlock(&kmxlnd_data.kmx_global_lock); + write_lock(&kmxlnd_data.kmx_global_lock); + conn = peer->mxp_conn; + /* NOTE take a ref for the duration of this function since it is + * called when there might not be any queued txs for this peer */ + if (conn) { + if (conn->mxk_status == MXLND_CONN_DISCONNECT) { + write_unlock(&kmxlnd_data.kmx_global_lock); + return -1; + } + mxlnd_conn_addref(conn); /* for duration of this function */ + } + write_unlock(&kmxlnd_data.kmx_global_lock); /* do not add another ref for this tx */ if (conn == NULL) { /* we do not have any conns */ - CDEBUG(D_NETERROR, "peer %s has no conn\n", libcfs_nid2str(peer->mxp_nid)); + CNETERR("peer %s has no conn\n", libcfs_nid2str(peer->mxp_nid)); return -1; } #if MXLND_STATS - if (time_after(jiffies, last)) { - last = jiffies + HZ; - CDEBUG(D_NET, "status= %s credits= %d outstanding= %d ntx_msgs= %d " - "ntx_posted= %d ntx_data= %d data_posted= %d\n", - mxlnd_connstatus_to_str(conn->mxk_status), conn->mxk_credits, - conn->mxk_outstanding, conn->mxk_ntx_msgs, conn->mxk_ntx_posted, - conn->mxk_ntx_data, conn->mxk_data_posted); - } + if (cfs_time_after(jiffies, last)) { + last = jiffies + HZ; + CDEBUG(D_NET, "status= %s credits= %d outstanding= %d ntx_msgs= %d " + "ntx_posted= %d ntx_data= %d data_posted= %d\n", + mxlnd_connstatus_to_str(conn->mxk_status), conn->mxk_credits, + conn->mxk_outstanding, conn->mxk_ntx_msgs, conn->mxk_ntx_posted, + conn->mxk_ntx_data, conn->mxk_data_posted); + } #endif - spin_lock(&conn->mxk_lock); + spin_lock(&conn->mxk_lock); ntx_posted = conn->mxk_ntx_posted; credits = conn->mxk_credits; @@ -2774,7 +2787,8 @@ mxlnd_check_sends(kmx_peer_t *peer) /* check number of queued msgs, ignore data */ if (conn->mxk_outstanding >= MXLND_CREDIT_HIGHWATER()) { /* check if any txs queued that could return credits... 
*/ - if (list_empty(&conn->mxk_tx_credit_queue) || conn->mxk_ntx_msgs == 0) { + if (cfs_list_empty(&conn->mxk_tx_credit_queue) || + conn->mxk_ntx_msgs == 0) { /* if not, send a NOOP */ tx = mxlnd_get_idle_tx(); if (likely(tx != NULL)) { @@ -2795,24 +2809,24 @@ mxlnd_check_sends(kmx_peer_t *peer) conn->mxk_status == MXLND_CONN_FAIL)) { CDEBUG(D_NET, "status=%s\n", mxlnd_connstatus_to_str(conn->mxk_status)); mxlnd_set_conn_status(conn, MXLND_CONN_WAIT); - spin_unlock(&conn->mxk_lock); + spin_unlock(&conn->mxk_lock); mxlnd_iconnect(peer, (u8) MXLND_MSG_ICON_REQ); goto done; } - while (!list_empty(&conn->mxk_tx_free_queue) || - !list_empty(&conn->mxk_tx_credit_queue)) { + while (!cfs_list_empty(&conn->mxk_tx_free_queue) || + !cfs_list_empty(&conn->mxk_tx_credit_queue)) { /* We have something to send. If we have a queued tx that does not * require a credit (free), choose it since its completion will * return a credit (here or at the peer), complete a DATA or * CONN_REQ or CONN_ACK. */ - struct list_head *tmp_tx = NULL; - if (!list_empty(&conn->mxk_tx_free_queue)) { + cfs_list_t *tmp_tx = NULL; + if (!cfs_list_empty(&conn->mxk_tx_free_queue)) { tmp_tx = &conn->mxk_tx_free_queue; } else { tmp_tx = &conn->mxk_tx_credit_queue; } - tx = list_entry(tmp_tx->next, kmx_ctx_t, mxc_list); + tx = cfs_list_entry(tmp_tx->next, kmx_ctx_t, mxc_list); msg_type = tx->mxc_msg_type; @@ -2863,10 +2877,10 @@ mxlnd_check_sends(kmx_peer_t *peer) tx->mxc_cookie, mxlnd_msgtype_to_str(tx->mxc_msg_type)); if (conn->mxk_status == MXLND_CONN_DISCONNECT || - time_after_eq(jiffies, tx->mxc_deadline)) { - list_del_init(&tx->mxc_list); + cfs_time_aftereq(jiffies, tx->mxc_deadline)) { + cfs_list_del_init(&tx->mxc_list); tx->mxc_errno = -ECONNABORTED; - spin_unlock(&conn->mxk_lock); + spin_unlock(&conn->mxk_lock); mxlnd_put_idle_tx(tx); mxlnd_conn_decref(conn); goto done; @@ -2875,7 +2889,7 @@ mxlnd_check_sends(kmx_peer_t *peer) } } - list_del_init(&tx->mxc_list); + cfs_list_del_init(&tx->mxc_list); /* handle credits, etc now while we have the lock to avoid races */ if (credit) { @@ -2902,7 +2916,7 @@ mxlnd_check_sends(kmx_peer_t *peer) (conn->mxk_ntx_msgs >= 1)) { conn->mxk_credits++; conn->mxk_ntx_posted--; - spin_unlock(&conn->mxk_lock); + spin_unlock(&conn->mxk_lock); /* redundant NOOP */ mxlnd_put_idle_tx(tx); mxlnd_conn_decref(conn); @@ -2922,7 +2936,7 @@ mxlnd_check_sends(kmx_peer_t *peer) mxret = MX_SUCCESS; status = conn->mxk_status; - spin_unlock(&conn->mxk_lock); + spin_unlock(&conn->mxk_lock); if (likely((status == MXLND_CONN_READY) || (msg_type == MXLND_MSG_CONN_REQ) || @@ -2956,10 +2970,10 @@ mxlnd_check_sends(kmx_peer_t *peer) &tx->mxc_mxreq); } else { /* send a DATA tx */ - spin_lock(&conn->mxk_lock); - conn->mxk_ntx_data--; - conn->mxk_data_posted++; - spin_unlock(&conn->mxk_lock); + spin_lock(&conn->mxk_lock); + conn->mxk_ntx_data--; + conn->mxk_data_posted++; + spin_unlock(&conn->mxk_lock); CDEBUG(D_NET, "sending %s 0x%llx\n", mxlnd_msgtype_to_str(msg_type), tx->mxc_cookie); @@ -2979,8 +2993,8 @@ mxlnd_check_sends(kmx_peer_t *peer) if (likely(mxret == MX_SUCCESS)) { ret = 0; } else { - CDEBUG(D_NETERROR, "mx_kisend() failed with %s (%d) " - "sending to %s\n", mx_strerror(mxret), (int) mxret, + CNETERR("mx_kisend() failed with %s (%d) " + "sending to %s\n", mx_strerror(mxret), (int) mxret, libcfs_nid2str(peer->mxp_nid)); /* NOTE mx_kisend() only fails if there are not enough * resources. Do not change the connection status. 
*/ @@ -2990,23 +3004,24 @@ mxlnd_check_sends(kmx_peer_t *peer) tx->mxc_errno = -ECONNABORTED; } if (credit) { - spin_lock(&conn->mxk_lock); - conn->mxk_ntx_posted--; - conn->mxk_credits++; - spin_unlock(&conn->mxk_lock); - } else if (msg_type == MXLND_MSG_PUT_DATA || - msg_type == MXLND_MSG_GET_DATA) { - spin_lock(&conn->mxk_lock); - conn->mxk_data_posted--; - spin_unlock(&conn->mxk_lock); - } - if (msg_type != MXLND_MSG_PUT_DATA && - msg_type != MXLND_MSG_GET_DATA && - msg_type != MXLND_MSG_CONN_REQ && - msg_type != MXLND_MSG_CONN_ACK) { - spin_lock(&conn->mxk_lock); - conn->mxk_outstanding += tx->mxc_msg->mxm_credits; - spin_unlock(&conn->mxk_lock); + spin_lock(&conn->mxk_lock); + conn->mxk_ntx_posted--; + conn->mxk_credits++; + spin_unlock(&conn->mxk_lock); + } else if (msg_type == MXLND_MSG_PUT_DATA || + msg_type == MXLND_MSG_GET_DATA) { + spin_lock(&conn->mxk_lock); + conn->mxk_data_posted--; + spin_unlock(&conn->mxk_lock); + } + if (msg_type != MXLND_MSG_PUT_DATA && + msg_type != MXLND_MSG_GET_DATA && + msg_type != MXLND_MSG_CONN_REQ && + msg_type != MXLND_MSG_CONN_ACK) { + spin_lock(&conn->mxk_lock); + conn->mxk_outstanding += + tx->mxc_msg->mxm_credits; + spin_unlock(&conn->mxk_lock); } if (msg_type != MXLND_MSG_CONN_REQ && msg_type != MXLND_MSG_CONN_ACK) { @@ -3017,13 +3032,13 @@ mxlnd_check_sends(kmx_peer_t *peer) mxlnd_conn_decref(conn); } } - spin_lock(&conn->mxk_lock); - } + spin_lock(&conn->mxk_lock); + } done_locked: - spin_unlock(&conn->mxk_lock); + spin_unlock(&conn->mxk_lock); done: - mxlnd_conn_decref(conn); /* drop ref taken at start of function */ - return found; + mxlnd_conn_decref(conn); /* drop ref taken at start of function */ + return found; } @@ -3060,29 +3075,29 @@ mxlnd_handle_tx_completion(kmx_ctx_t *tx) if (failed) { if (tx->mxc_errno == 0) tx->mxc_errno = -EIO; } else { - spin_lock(&conn->mxk_lock); - conn->mxk_last_tx = cfs_time_current(); /* jiffies */ - spin_unlock(&conn->mxk_lock); - } - - switch (type) { - - case MXLND_MSG_GET_DATA: - spin_lock(&conn->mxk_lock); - if (conn->mxk_incarnation == tx->mxc_incarnation) { - conn->mxk_outstanding++; - conn->mxk_data_posted--; - } - spin_unlock(&conn->mxk_lock); - break; - - case MXLND_MSG_PUT_DATA: - spin_lock(&conn->mxk_lock); - if (conn->mxk_incarnation == tx->mxc_incarnation) { - conn->mxk_data_posted--; - } - spin_unlock(&conn->mxk_lock); - break; + spin_lock(&conn->mxk_lock); + conn->mxk_last_tx = cfs_time_current(); /* jiffies */ + spin_unlock(&conn->mxk_lock); + } + + switch (type) { + + case MXLND_MSG_GET_DATA: + spin_lock(&conn->mxk_lock); + if (conn->mxk_incarnation == tx->mxc_incarnation) { + conn->mxk_outstanding++; + conn->mxk_data_posted--; + } + spin_unlock(&conn->mxk_lock); + break; + + case MXLND_MSG_PUT_DATA: + spin_lock(&conn->mxk_lock); + if (conn->mxk_incarnation == tx->mxc_incarnation) { + conn->mxk_data_posted--; + } + spin_unlock(&conn->mxk_lock); + break; case MXLND_MSG_NOOP: case MXLND_MSG_PUT_REQ: @@ -3098,33 +3113,34 @@ mxlnd_handle_tx_completion(kmx_ctx_t *tx) } case MXLND_MSG_CONN_REQ: if (failed) { - CDEBUG(D_NETERROR, "%s failed with %s (%d) (errno = %d)" - " to %s\n", + CNETERR("%s failed with %s (%d) (errno = %d) to %s\n", type == MXLND_MSG_CONN_REQ ? 
"CONN_REQ" : "CONN_ACK", mx_strstatus(code), code, tx->mxc_errno, libcfs_nid2str(tx->mxc_nid)); if (!peer->mxp_incompatible) { - spin_lock(&conn->mxk_lock); - if (code == MX_STATUS_BAD_SESSION) - mxlnd_set_conn_status(conn, MXLND_CONN_INIT); - else - mxlnd_set_conn_status(conn, MXLND_CONN_FAIL); - spin_unlock(&conn->mxk_lock); + spin_lock(&conn->mxk_lock); + if (code == MX_STATUS_BAD_SESSION) + mxlnd_set_conn_status(conn, + MXLND_CONN_INIT); + else + mxlnd_set_conn_status(conn, + MXLND_CONN_FAIL); + spin_unlock(&conn->mxk_lock); } } break; default: - CDEBUG(D_NETERROR, "Unknown msg type of %d\n", type); + CNETERR("Unknown msg type of %d\n", type); LBUG(); } if (credit) { - spin_lock(&conn->mxk_lock); - if (conn->mxk_incarnation == tx->mxc_incarnation) { - conn->mxk_ntx_posted--; - } - spin_unlock(&conn->mxk_lock); + spin_lock(&conn->mxk_lock); + if (conn->mxk_incarnation == tx->mxc_incarnation) { + conn->mxk_ntx_posted--; + } + spin_unlock(&conn->mxk_lock); } mxlnd_put_idle_tx(tx); @@ -3175,7 +3191,7 @@ mxlnd_handle_rx_completion(kmx_ctx_t *rx) } /* else peer and conn == NULL */ if (conn == NULL && peer != NULL) { - write_lock(&kmxlnd_data.kmx_global_lock); + write_lock(&kmxlnd_data.kmx_global_lock); conn = peer->mxp_conn; if (conn) { mxlnd_conn_addref(conn); /* conn takes ref... */ @@ -3183,7 +3199,7 @@ mxlnd_handle_rx_completion(kmx_ctx_t *rx) conn_ref = 1; peer_ref = 0; } - write_unlock(&kmxlnd_data.kmx_global_lock); + write_unlock(&kmxlnd_data.kmx_global_lock); rx->mxc_conn = conn; } @@ -3196,10 +3212,10 @@ mxlnd_handle_rx_completion(kmx_ctx_t *rx) if (rx->mxc_status.code != MX_STATUS_SUCCESS && rx->mxc_status.code != MX_STATUS_TRUNCATED) { - CDEBUG(D_NETERROR, "rx from %s failed with %s (%d)\n", - libcfs_nid2str(rx->mxc_nid), - mx_strstatus(rx->mxc_status.code), - rx->mxc_status.code); + CNETERR("rx from %s failed with %s (%d)\n", + libcfs_nid2str(rx->mxc_nid), + mx_strstatus(rx->mxc_status.code), + rx->mxc_status.code); credit = 0; goto cleanup; } @@ -3214,8 +3230,8 @@ mxlnd_handle_rx_completion(kmx_ctx_t *rx) goto cleanup; } else { /* we had a rx complete with 0 bytes (no hdr, nothing) */ - CDEBUG(D_NETERROR, "rx from %s returned with 0 bytes\n", - libcfs_nid2str(rx->mxc_nid)); + CNETERR("rx from %s returned with 0 bytes\n", + libcfs_nid2str(rx->mxc_nid)); goto cleanup; } } @@ -3234,8 +3250,8 @@ mxlnd_handle_rx_completion(kmx_ctx_t *rx) ret = mxlnd_unpack_msg(msg, nob); if (ret != 0) { - CDEBUG(D_NETERROR, "Error %d unpacking rx from %s\n", - ret, libcfs_nid2str(rx->mxc_nid)); + CNETERR("Error %d unpacking rx from %s\n", + ret, libcfs_nid2str(rx->mxc_nid)); goto cleanup; } rx->mxc_nob = nob; @@ -3243,7 +3259,7 @@ mxlnd_handle_rx_completion(kmx_ctx_t *rx) if (rx->mxc_nid != msg->mxm_srcnid || kmxlnd_data.kmx_ni->ni_nid != msg->mxm_dstnid) { - CDEBUG(D_NETERROR, "rx with mismatched NID (type %s) (my nid is " + CNETERR("rx with mismatched NID (type %s) (my nid is " "0x%llx and rx msg dst is 0x%llx)\n", mxlnd_msgtype_to_str(type), kmxlnd_data.kmx_ni->ni_nid, msg->mxm_dstnid); @@ -3252,7 +3268,7 @@ mxlnd_handle_rx_completion(kmx_ctx_t *rx) if ((conn != NULL && msg->mxm_srcstamp != conn->mxk_incarnation) || msg->mxm_dststamp != kmxlnd_data.kmx_incarnation) { - CDEBUG(D_NETERROR, "Stale rx from %s with type %s " + CNETERR("Stale rx from %s with type %s " "(mxm_srcstamp (%lld) != mxk_incarnation (%lld) " "|| mxm_dststamp (%lld) != kmx_incarnation (%lld))\n", libcfs_nid2str(rx->mxc_nid), mxlnd_msgtype_to_str(type), @@ -3267,18 +3283,18 @@ mxlnd_handle_rx_completion(kmx_ctx_t *rx) 
LASSERT(peer != NULL && conn != NULL); if (msg->mxm_credits != 0) { - spin_lock(&conn->mxk_lock); + spin_lock(&conn->mxk_lock); if (msg->mxm_srcstamp == conn->mxk_incarnation) { if ((conn->mxk_credits + msg->mxm_credits) > *kmxlnd_tunables.kmx_peercredits) { - CDEBUG(D_NETERROR, "mxk_credits %d mxm_credits %d\n", - conn->mxk_credits, msg->mxm_credits); + CNETERR("mxk_credits %d mxm_credits %d\n", + conn->mxk_credits, msg->mxm_credits); } conn->mxk_credits += msg->mxm_credits; LASSERT(conn->mxk_credits >= 0); LASSERT(conn->mxk_credits <= *kmxlnd_tunables.kmx_peercredits); } - spin_unlock(&conn->mxk_lock); + spin_unlock(&conn->mxk_lock); } CDEBUG(D_NET, "switch %s for rx (0x%llx)\n", mxlnd_msgtype_to_str(type), seq); @@ -3301,8 +3317,8 @@ mxlnd_handle_rx_completion(kmx_ctx_t *rx) case MXLND_MSG_PUT_ACK: { u64 cookie = (u64) msg->mxm_u.put_ack.mxpam_dst_cookie; if (cookie > MXLND_MAX_COOKIE) { - CDEBUG(D_NETERROR, "NAK for msg_type %d from %s\n", rx->mxc_msg_type, - libcfs_nid2str(rx->mxc_nid)); + CNETERR("NAK for msg_type %d from %s\n", rx->mxc_msg_type, + libcfs_nid2str(rx->mxc_nid)); result = -((u32) MXLND_ERROR_VAL(cookie)); lntmsg[0] = rx->mxc_lntmsg[0]; } else { @@ -3320,24 +3336,24 @@ mxlnd_handle_rx_completion(kmx_ctx_t *rx) break; default: - CDEBUG(D_NETERROR, "Bad MXLND message type %x from %s\n", msg->mxm_type, - libcfs_nid2str(rx->mxc_nid)); + CNETERR("Bad MXLND message type %x from %s\n", msg->mxm_type, + libcfs_nid2str(rx->mxc_nid)); ret = -EPROTO; break; } if (ret < 0) { CDEBUG(D_NET, "setting PEER_CONN_FAILED\n"); - spin_lock(&conn->mxk_lock); - mxlnd_set_conn_status(conn, MXLND_CONN_FAIL); - spin_unlock(&conn->mxk_lock); - } + spin_lock(&conn->mxk_lock); + mxlnd_set_conn_status(conn, MXLND_CONN_FAIL); + spin_unlock(&conn->mxk_lock); + } cleanup: - if (conn != NULL) { - spin_lock(&conn->mxk_lock); - conn->mxk_last_rx = cfs_time_current(); /* jiffies */ - spin_unlock(&conn->mxk_lock); + if (conn != NULL) { + spin_lock(&conn->mxk_lock); + conn->mxk_last_rx = cfs_time_current(); /* jiffies */ + spin_unlock(&conn->mxk_lock); } if (repost) { @@ -3348,9 +3364,9 @@ cleanup: type == MXLND_MSG_EAGER || type == MXLND_MSG_PUT_REQ || type == MXLND_MSG_NOOP) { - spin_lock(&conn->mxk_lock); - conn->mxk_outstanding++; - spin_unlock(&conn->mxk_lock); + spin_lock(&conn->mxk_lock); + conn->mxk_outstanding++; + spin_unlock(&conn->mxk_lock); } } if (conn_ref) mxlnd_conn_decref(conn); @@ -3390,20 +3406,21 @@ mxlnd_handle_connect_msg(kmx_peer_t *peer, u8 msg_type, mx_status_t status) if (status.code != MX_STATUS_SUCCESS) { int send_bye = (msg_type == MXLND_MSG_ICON_REQ ? 
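/*
 * Sketch of the credit-return handling shown above: credits piggy-backed on
 * an incoming message are applied only if the sender's stamp matches the
 * connection's incarnation, and the result is expected to stay within the
 * negotiated limit. peer_credit_limit stands in for
 * *kmxlnd_tunables.kmx_peercredits; this is not the patch's code.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <pthread.h>

struct credit_conn {
        pthread_mutex_t lock;
        uint64_t incarnation;
        int      credits;
};

void apply_returned_credits(struct credit_conn *c, uint64_t src_stamp,
                            int returned, int peer_credit_limit)
{
        if (returned == 0)
                return;
        pthread_mutex_lock(&c->lock);
        if (src_stamp == c->incarnation) {
                if (c->credits + returned > peer_credit_limit)
                        fprintf(stderr, "credits %d + returned %d exceed limit %d\n",
                                c->credits, returned, peer_credit_limit);
                c->credits += returned;
                assert(c->credits >= 0);
                assert(c->credits <= peer_credit_limit);
        }
        pthread_mutex_unlock(&c->lock);
}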
0 : 1); - CDEBUG(D_NETERROR, "mx_iconnect() failed for %s with %s (%d) " - "to %s mxp_nid = 0x%llx mxp_nic_id = 0x%0llx mxp_ep_id = %d\n", + CNETERR("mx_iconnect() failed for %s with %s (%d) " + "to %s mxp_nid = 0x%llx mxp_nic_id = 0x%0llx mxp_ep_id = %d\n", mxlnd_msgtype_to_str(msg_type), mx_strstatus(status.code), status.code, libcfs_nid2str(peer->mxp_nid), peer->mxp_nid, peer->mxp_nic_id, peer->mxp_ep_id); - spin_lock(&conn->mxk_lock); - mxlnd_set_conn_status(conn, MXLND_CONN_FAIL); - spin_unlock(&conn->mxk_lock); + spin_lock(&conn->mxk_lock); + mxlnd_set_conn_status(conn, MXLND_CONN_FAIL); + spin_unlock(&conn->mxk_lock); - if (time_after(jiffies, peer->mxp_reconnect_time + MXLND_CONNECT_TIMEOUT)) { - CDEBUG(D_NETERROR, "timeout, calling conn_disconnect()\n"); + if (cfs_time_after(jiffies, peer->mxp_reconnect_time + + MXLND_CONNECT_TIMEOUT)) { + CNETERR("timeout, calling conn_disconnect()\n"); mxlnd_conn_disconnect(conn, 0, send_bye); } @@ -3411,32 +3428,32 @@ mxlnd_handle_connect_msg(kmx_peer_t *peer, u8 msg_type, mx_status_t status) return; } mx_decompose_endpoint_addr2(status.source, &nic_id, &ep_id, &sid); - write_lock(&kmxlnd_data.kmx_global_lock); - spin_lock(&conn->mxk_lock); - conn->mxk_epa = status.source; - mx_set_endpoint_addr_context(conn->mxk_epa, (void *) conn); - if (msg_type == MXLND_MSG_ICON_ACK && likely(!peer->mxp_incompatible)) { - mxlnd_set_conn_status(conn, MXLND_CONN_READY); - } - spin_unlock(&conn->mxk_lock); - write_unlock(&kmxlnd_data.kmx_global_lock); - - /* mx_iconnect() succeeded, reset delay to 0 */ - write_lock(&kmxlnd_data.kmx_global_lock); - peer->mxp_reconnect_time = 0; - peer->mxp_conn->mxk_sid = sid; - write_unlock(&kmxlnd_data.kmx_global_lock); + write_lock(&kmxlnd_data.kmx_global_lock); + spin_lock(&conn->mxk_lock); + conn->mxk_epa = status.source; + mx_set_endpoint_addr_context(conn->mxk_epa, (void *) conn); + if (msg_type == MXLND_MSG_ICON_ACK && likely(!peer->mxp_incompatible)) { + mxlnd_set_conn_status(conn, MXLND_CONN_READY); + } + spin_unlock(&conn->mxk_lock); + write_unlock(&kmxlnd_data.kmx_global_lock); + + /* mx_iconnect() succeeded, reset delay to 0 */ + write_lock(&kmxlnd_data.kmx_global_lock); + peer->mxp_reconnect_time = 0; + peer->mxp_conn->mxk_sid = sid; + write_unlock(&kmxlnd_data.kmx_global_lock); /* marshal CONN_REQ or CONN_ACK msg */ /* we are still using the conn ref from iconnect() - do not take another */ tx = mxlnd_get_idle_tx(); if (tx == NULL) { - CDEBUG(D_NETERROR, "Can't obtain %s tx for %s\n", + CNETERR("Can't obtain %s tx for %s\n", mxlnd_msgtype_to_str(type), libcfs_nid2str(peer->mxp_nid)); - spin_lock(&conn->mxk_lock); - mxlnd_set_conn_status(conn, MXLND_CONN_FAIL); - spin_unlock(&conn->mxk_lock); + spin_lock(&conn->mxk_lock); + mxlnd_set_conn_status(conn, MXLND_CONN_FAIL); + spin_unlock(&conn->mxk_lock); mxlnd_conn_decref(conn); return; } @@ -3466,7 +3483,6 @@ int mxlnd_request_waitd(void *arg) { long id = (long) arg; - char name[24]; __u32 result = 0; mx_return_t mxret = MX_SUCCESS; mx_status_t status; @@ -3478,16 +3494,12 @@ mxlnd_request_waitd(void *arg) int count = 0; #endif - memset(name, 0, sizeof(name)); - snprintf(name, sizeof(name), "mxlnd_request_waitd_%02ld", id); - cfs_daemonize(name); - memset(&status, 0, sizeof(status)); CDEBUG(D_NET, "%s starting\n", name); - while (!(atomic_read(&kmxlnd_data.kmx_shutdown))) { - u8 msg_type = 0; + while (!(atomic_read(&kmxlnd_data.kmx_shutdown))) { + u8 msg_type = 0; mxret = MX_SUCCESS; result = 0; @@ -3504,8 +3516,8 @@ mxlnd_request_waitd(void *arg) mxret = 
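/*
 * Sketch of the wrap-safe tick comparison behind cfs_time_after() in the
 * reconnect-timeout check above: jiffies-style counters wrap, so deadlines
 * are compared with signed subtraction rather than a plain '>'. The macro
 * mirrors the kernel's time_after() idiom; the surrounding names are
 * hypothetical.
 */
#include <stdbool.h>

#define TICK_AFTER(a, b)  ((long)((b) - (a)) < 0)

/* Decide whether a connect attempt has been failing long enough that the
 * connection should be torn down. */
bool connect_attempt_expired(unsigned long now_ticks,
                             unsigned long reconnect_start,
                             unsigned long connect_timeout_ticks)
{
        return TICK_AFTER(now_ticks, reconnect_start + connect_timeout_ticks);
}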
mx_wait_any(kmxlnd_data.kmx_endpt, MXLND_WAIT_TIMEOUT, 0ULL, 0ULL, &status, &result); #endif - if (unlikely(atomic_read(&kmxlnd_data.kmx_shutdown))) - break; + if (unlikely(atomic_read(&kmxlnd_data.kmx_shutdown))) + break; if (result != 1) { /* nothing completed... */ @@ -3518,10 +3530,10 @@ mxlnd_request_waitd(void *arg) (u64) status.match_info, status.msg_length); if (status.code != MX_STATUS_SUCCESS) { - CDEBUG(D_NETERROR, "wait_any() failed with %s (%d) with " - "match_info 0x%llx and length %d\n", - mx_strstatus(status.code), status.code, - (u64) status.match_info, status.msg_length); + CNETERR("wait_any() failed with %s (%d) with " + "match_info 0x%llx and length %d\n", + mx_strstatus(status.code), status.code, + (u64) status.match_info, status.msg_length); } msg_type = MXLND_MSG_TYPE(status.match_info); @@ -3558,7 +3570,7 @@ mxlnd_request_waitd(void *arg) mxlnd_handle_rx_completion(ctx); break; default: - CDEBUG(D_NETERROR, "Unknown ctx type %d\n", req_type); + CNETERR("Unknown ctx type %d\n", req_type); LBUG(); break; } @@ -3585,16 +3597,17 @@ mxlnd_check_timeouts(unsigned long now) unsigned long next = 0; /* jiffies */ kmx_peer_t *peer = NULL; kmx_conn_t *conn = NULL; - rwlock_t *g_lock = &kmxlnd_data.kmx_global_lock; + rwlock_t *g_lock = &kmxlnd_data.kmx_global_lock; - read_lock(g_lock); - for (i = 0; i < MXLND_HASH_SIZE; i++) { - list_for_each_entry(peer, &kmxlnd_data.kmx_peers[i], mxp_list) { + read_lock(g_lock); + for (i = 0; i < MXLND_HASH_SIZE; i++) { + cfs_list_for_each_entry(peer, &kmxlnd_data.kmx_peers[i], + mxp_list) { - if (unlikely(atomic_read(&kmxlnd_data.kmx_shutdown))) { - read_unlock(g_lock); - return next; - } + if (unlikely(atomic_read(&kmxlnd_data.kmx_shutdown))) { + read_unlock(g_lock); + return next; + } conn = peer->mxp_conn; if (conn) { @@ -3603,14 +3616,14 @@ mxlnd_check_timeouts(unsigned long now) continue; } - spin_lock(&conn->mxk_lock); + spin_lock(&conn->mxk_lock); - /* if nothing pending (timeout == 0) or - * if conn is already disconnected, - * skip this conn */ - if (conn->mxk_timeout == 0 || - conn->mxk_status == MXLND_CONN_DISCONNECT) { - spin_unlock(&conn->mxk_lock); + /* if nothing pending (timeout == 0) or + * if conn is already disconnected, + * skip this conn */ + if (conn->mxk_timeout == 0 || + conn->mxk_status == MXLND_CONN_DISCONNECT) { + spin_unlock(&conn->mxk_lock); mxlnd_conn_decref(conn); continue; } @@ -3619,27 +3632,27 @@ mxlnd_check_timeouts(unsigned long now) * if it is in the future, we will sleep until then. * if it is in the past, then we will sleep one * second and repeat the process. 
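/*
 * Sketch of the match-bits demultiplexing used by the wait thread above:
 * every MX request carries a 64-bit match value, and the completion path
 * recovers the message type from it (MXLND_MSG_TYPE(status.match_info))
 * before choosing a handler. The 8-bit-type / 52-bit-cookie layout below is
 * an assumption for illustration only, not MXLND's actual encoding.
 */
#include <stdint.h>

#define MATCH_TYPE_SHIFT  52
#define MATCH_COOKIE_MASK ((UINT64_C(1) << MATCH_TYPE_SHIFT) - 1)

uint64_t match_pack(uint8_t msg_type, uint64_t cookie)
{
        return ((uint64_t)msg_type << MATCH_TYPE_SHIFT) |
               (cookie & MATCH_COOKIE_MASK);
}

void match_unpack(uint64_t match, uint8_t *msg_type, uint64_t *cookie)
{
        *msg_type = (uint8_t)((match >> MATCH_TYPE_SHIFT) & 0xff);
        *cookie   = match & MATCH_COOKIE_MASK;
}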
*/ - if ((next == 0) || (time_before(conn->mxk_timeout, next))) { + if ((next == 0) || + (cfs_time_before(conn->mxk_timeout, next))) { next = conn->mxk_timeout; } disconnect = 0; - if (time_after_eq(now, conn->mxk_timeout)) { - disconnect = 1; - } - spin_unlock(&conn->mxk_lock); + if (cfs_time_aftereq(now, conn->mxk_timeout)) + disconnect = 1; + spin_unlock(&conn->mxk_lock); - if (disconnect) { - mxlnd_conn_disconnect(conn, 1, 1); - } - mxlnd_conn_decref(conn); - } - } - read_unlock(g_lock); - if (next == 0) next = now + MXLND_COMM_TIMEOUT; + if (disconnect) + mxlnd_conn_disconnect(conn, 1, 1); + mxlnd_conn_decref(conn); + } + } + read_unlock(g_lock); + if (next == 0) + next = now + MXLND_COMM_TIMEOUT; - return next; + return next; } void @@ -3654,29 +3667,29 @@ mxlnd_passive_connect(kmx_connparams_t *cp) kmx_msg_t *msg = &cp->mxr_msg; kmx_peer_t *peer = cp->mxr_peer; kmx_conn_t *conn = NULL; - rwlock_t *g_lock = &kmxlnd_data.kmx_global_lock; + rwlock_t *g_lock = &kmxlnd_data.kmx_global_lock; mx_decompose_endpoint_addr2(cp->mxr_epa, &nic_id, &ep_id, &sid); ret = mxlnd_unpack_msg(msg, cp->mxr_nob); if (ret != 0) { if (peer) { - CDEBUG(D_NETERROR, "Error %d unpacking CONN_REQ from %s\n", + CNETERR("Error %d unpacking CONN_REQ from %s\n", ret, libcfs_nid2str(peer->mxp_nid)); } else { - CDEBUG(D_NETERROR, "Error %d unpacking CONN_REQ from " + CNETERR("Error %d unpacking CONN_REQ from " "unknown host with nic_id 0x%llx\n", ret, nic_id); } goto cleanup; } if (kmxlnd_data.kmx_ni->ni_nid != msg->mxm_dstnid) { - CDEBUG(D_NETERROR, "Can't accept %s: bad dst nid %s\n", + CNETERR("Can't accept %s: bad dst nid %s\n", libcfs_nid2str(msg->mxm_srcnid), libcfs_nid2str(msg->mxm_dstnid)); goto cleanup; } if (msg->mxm_u.conn_req.mxcrm_queue_depth != *kmxlnd_tunables.kmx_peercredits) { - CDEBUG(D_NETERROR, "Can't accept %s: incompatible queue depth " + CNETERR("Can't accept %s: incompatible queue depth " "%d (%d wanted)\n", libcfs_nid2str(msg->mxm_srcnid), msg->mxm_u.conn_req.mxcrm_queue_depth, @@ -3684,7 +3697,7 @@ mxlnd_passive_connect(kmx_connparams_t *cp) incompatible = 1; } if (msg->mxm_u.conn_req.mxcrm_eager_size != MXLND_MSG_SIZE) { - CDEBUG(D_NETERROR, "Can't accept %s: incompatible EAGER size " + CNETERR("Can't accept %s: incompatible EAGER size " "%d (%d wanted)\n", libcfs_nid2str(msg->mxm_srcnid), msg->mxm_u.conn_req.mxcrm_eager_size, @@ -3711,7 +3724,7 @@ mxlnd_passive_connect(kmx_connparams_t *cp) } peer->mxp_conn->mxk_sid = sid; LASSERT(peer->mxp_ep_id == ep_id); - write_lock(g_lock); + write_lock(g_lock); existing_peer = mxlnd_find_peer_by_nid_locked(msg->mxm_srcnid); if (existing_peer) { mxlnd_conn_decref(peer->mxp_conn); @@ -3719,19 +3732,19 @@ mxlnd_passive_connect(kmx_connparams_t *cp) peer = existing_peer; mxlnd_conn_addref(peer->mxp_conn); conn = peer->mxp_conn; - } else { - list_add_tail(&peer->mxp_list, - &kmxlnd_data.kmx_peers[hash]); - atomic_inc(&kmxlnd_data.kmx_npeers); - } - write_unlock(g_lock); + } else { + cfs_list_add_tail(&peer->mxp_list, + &kmxlnd_data.kmx_peers[hash]); + atomic_inc(&kmxlnd_data.kmx_npeers); + } + write_unlock(g_lock); } else { ret = mxlnd_conn_alloc(&conn, peer); /* adds 2nd ref */ - write_lock(g_lock); + write_lock(g_lock); mxlnd_peer_decref(peer); /* drop ref taken above */ - write_unlock(g_lock); + write_unlock(g_lock); if (ret != 0) { - CDEBUG(D_NETERROR, "Cannot allocate mxp_conn\n"); + CNETERR("Cannot allocate mxp_conn\n"); goto cleanup; } } @@ -3750,7 +3763,7 @@ mxlnd_passive_connect(kmx_connparams_t *cp) * conn_decref() which will eventually free it. 
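/*
 * Sketch of the scan in mxlnd_check_timeouts() above: walk the active
 * deadlines, remember the earliest one so the caller knows when to wake up
 * next, and flag anything already expired. Wrap-safe comparisons as in the
 * earlier sketch; the plain-array layout is only for illustration.
 */
#include <stddef.h>
#include <stdbool.h>

#define TICK_BEFORE(a, b)    ((long)((a) - (b)) < 0)
#define TICK_AFTER_EQ(a, b)  ((long)((a) - (b)) >= 0)

unsigned long earliest_deadline(const unsigned long *deadlines, size_t n,
                                unsigned long now, unsigned long default_delay,
                                bool *expired /* n entries, set when overdue */)
{
        unsigned long next = 0;
        size_t i;

        for (i = 0; i < n; i++) {
                if (deadlines[i] == 0)          /* nothing pending on this conn */
                        continue;
                if (next == 0 || TICK_BEFORE(deadlines[i], next))
                        next = deadlines[i];
                expired[i] = TICK_AFTER_EQ(now, deadlines[i]);
        }
        return next != 0 ? next : now + default_delay;
}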
*/ ret = mxlnd_conn_alloc(&conn, peer); if (ret != 0) { - CDEBUG(D_NETERROR, "Cannot allocate peer->mxp_conn\n"); + CNETERR("Cannot allocate peer->mxp_conn\n"); goto cleanup; } /* conn_alloc() adds one ref for the peer and one @@ -3763,13 +3776,13 @@ mxlnd_passive_connect(kmx_connparams_t *cp) conn = peer->mxp_conn; } } - write_lock(g_lock); - peer->mxp_incompatible = incompatible; - write_unlock(g_lock); - spin_lock(&conn->mxk_lock); - conn->mxk_incarnation = msg->mxm_srcstamp; - mxlnd_set_conn_status(conn, MXLND_CONN_WAIT); - spin_unlock(&conn->mxk_lock); + write_lock(g_lock); + peer->mxp_incompatible = incompatible; + write_unlock(g_lock); + spin_lock(&conn->mxk_lock); + conn->mxk_incarnation = msg->mxm_srcstamp; + mxlnd_set_conn_status(conn, MXLND_CONN_WAIT); + spin_unlock(&conn->mxk_lock); /* handle_conn_ack() will create the CONN_ACK msg */ mxlnd_iconnect(peer, (u8) MXLND_MSG_ICON_ACK); @@ -3798,10 +3811,10 @@ mxlnd_check_conn_ack(kmx_connparams_t *cp) ret = mxlnd_unpack_msg(msg, cp->mxr_nob); if (ret != 0) { if (peer) { - CDEBUG(D_NETERROR, "Error %d unpacking CONN_ACK from %s\n", + CNETERR("Error %d unpacking CONN_ACK from %s\n", ret, libcfs_nid2str(peer->mxp_nid)); } else { - CDEBUG(D_NETERROR, "Error %d unpacking CONN_ACK from " + CNETERR("Error %d unpacking CONN_ACK from " "unknown host with nic_id 0x%llx\n", ret, nic_id); } ret = -1; @@ -3809,14 +3822,14 @@ mxlnd_check_conn_ack(kmx_connparams_t *cp) goto failed; } if (kmxlnd_data.kmx_ni->ni_nid != msg->mxm_dstnid) { - CDEBUG(D_NETERROR, "Can't accept CONN_ACK from %s: " + CNETERR("Can't accept CONN_ACK from %s: " "bad dst nid %s\n", libcfs_nid2str(msg->mxm_srcnid), libcfs_nid2str(msg->mxm_dstnid)); ret = -1; goto failed; } if (msg->mxm_u.conn_req.mxcrm_queue_depth != *kmxlnd_tunables.kmx_peercredits) { - CDEBUG(D_NETERROR, "Can't accept CONN_ACK from %s: " + CNETERR("Can't accept CONN_ACK from %s: " "incompatible queue depth %d (%d wanted)\n", libcfs_nid2str(msg->mxm_srcnid), msg->mxm_u.conn_req.mxcrm_queue_depth, @@ -3826,8 +3839,8 @@ mxlnd_check_conn_ack(kmx_connparams_t *cp) goto failed; } if (msg->mxm_u.conn_req.mxcrm_eager_size != MXLND_MSG_SIZE) { - CDEBUG(D_NETERROR, "Can't accept CONN_ACK from %s: " - "incompatible EAGER size %d (%d wanted)\n", + CNETERR("Can't accept CONN_ACK from %s: " + "incompatible EAGER size %d (%d wanted)\n", libcfs_nid2str(msg->mxm_srcnid), msg->mxm_u.conn_req.mxcrm_eager_size, (int) MXLND_MSG_SIZE); @@ -3835,10 +3848,10 @@ mxlnd_check_conn_ack(kmx_connparams_t *cp) ret = -1; goto failed; } - write_lock(&kmxlnd_data.kmx_global_lock); - peer->mxp_incompatible = incompatible; - write_unlock(&kmxlnd_data.kmx_global_lock); - spin_lock(&conn->mxk_lock); + write_lock(&kmxlnd_data.kmx_global_lock); + peer->mxp_incompatible = incompatible; + write_unlock(&kmxlnd_data.kmx_global_lock); + spin_lock(&conn->mxk_lock); conn->mxk_credits = *kmxlnd_tunables.kmx_peercredits; conn->mxk_outstanding = 0; conn->mxk_incarnation = msg->mxm_srcstamp; @@ -3848,40 +3861,40 @@ mxlnd_check_conn_ack(kmx_connparams_t *cp) libcfs_nid2str(msg->mxm_srcnid)); mxlnd_set_conn_status(conn, MXLND_CONN_READY); } - spin_unlock(&conn->mxk_lock); + spin_unlock(&conn->mxk_lock); - if (!incompatible) - mxlnd_check_sends(peer); + if (!incompatible) + mxlnd_check_sends(peer); failed: - if (ret < 0) { - spin_lock(&conn->mxk_lock); - mxlnd_set_conn_status(conn, MXLND_CONN_FAIL); - spin_unlock(&conn->mxk_lock); - } + if (ret < 0) { + spin_lock(&conn->mxk_lock); + mxlnd_set_conn_status(conn, MXLND_CONN_FAIL); + spin_unlock(&conn->mxk_lock); + 
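/*
 * Sketch of the connection-parameter check performed on CONN_REQ/CONN_ACK in
 * the hunks above: both sides must agree on the credit count (queue depth)
 * and on the eager message size, otherwise the connection is marked
 * incompatible and torn down once the handshake completes. The names here
 * are illustrative stand-ins for the mxcrm_* fields.
 */
#include <stdbool.h>
#include <stdio.h>

struct conn_params {
        int queue_depth;   /* peer's advertised credit count        */
        int eager_size;    /* peer's eager (inline) message size    */
};

bool conn_params_compatible(const struct conn_params *theirs,
                            int my_credits, int my_eager_size)
{
        bool ok = true;

        if (theirs->queue_depth != my_credits) {
                fprintf(stderr, "incompatible queue depth %d (%d wanted)\n",
                        theirs->queue_depth, my_credits);
                ok = false;
        }
        if (theirs->eager_size != my_eager_size) {
                fprintf(stderr, "incompatible EAGER size %d (%d wanted)\n",
                        theirs->eager_size, my_eager_size);
                ok = false;
        }
        return ok;
}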
} - if (incompatible) mxlnd_conn_disconnect(conn, 0, 0); + if (incompatible) mxlnd_conn_disconnect(conn, 0, 0); - mxlnd_connparams_free(cp); - return; + mxlnd_connparams_free(cp); + return; } int mxlnd_abort_msgs(void) { - int count = 0; - struct list_head *orphans = &kmxlnd_data.kmx_orphan_msgs; - spinlock_t *g_conn_lock = &kmxlnd_data.kmx_conn_lock; + int count = 0; + cfs_list_t *orphans = &kmxlnd_data.kmx_orphan_msgs; + spinlock_t *g_conn_lock = &kmxlnd_data.kmx_conn_lock; - /* abort orphans */ - spin_lock(g_conn_lock); - while (!list_empty(orphans)) { - kmx_ctx_t *ctx = NULL; - kmx_conn_t *conn = NULL; + /* abort orphans */ + spin_lock(g_conn_lock); + while (!cfs_list_empty(orphans)) { + kmx_ctx_t *ctx = NULL; + kmx_conn_t *conn = NULL; - ctx = list_entry(orphans->next, kmx_ctx_t, mxc_list); - list_del_init(&ctx->mxc_list); - spin_unlock(g_conn_lock); + ctx = cfs_list_entry(orphans->next, kmx_ctx_t, mxc_list); + cfs_list_del_init(&ctx->mxc_list); + spin_unlock(g_conn_lock); ctx->mxc_errno = -ECONNABORTED; conn = ctx->mxc_conn; @@ -3898,40 +3911,40 @@ mxlnd_abort_msgs(void) } count++; - spin_lock(g_conn_lock); - } - spin_unlock(g_conn_lock); + spin_lock(g_conn_lock); + } + spin_unlock(g_conn_lock); - return count; + return count; } int mxlnd_free_conn_zombies(void) { - int count = 0; - struct list_head *zombies = &kmxlnd_data.kmx_conn_zombies; - spinlock_t *g_conn_lock = &kmxlnd_data.kmx_conn_lock; - rwlock_t *g_lock = &kmxlnd_data.kmx_global_lock; + int count = 0; + cfs_list_t *zombies = &kmxlnd_data.kmx_conn_zombies; + spinlock_t *g_conn_lock = &kmxlnd_data.kmx_conn_lock; + rwlock_t *g_lock = &kmxlnd_data.kmx_global_lock; - /* cleanup any zombies */ - spin_lock(g_conn_lock); - while (!list_empty(zombies)) { - kmx_conn_t *conn = NULL; + /* cleanup any zombies */ + spin_lock(g_conn_lock); + while (!cfs_list_empty(zombies)) { + kmx_conn_t *conn = NULL; - conn = list_entry(zombies->next, kmx_conn_t, mxk_zombie); - list_del_init(&conn->mxk_zombie); - spin_unlock(g_conn_lock); + conn = cfs_list_entry(zombies->next, kmx_conn_t, mxk_zombie); + cfs_list_del_init(&conn->mxk_zombie); + spin_unlock(g_conn_lock); - write_lock(g_lock); - mxlnd_conn_free_locked(conn); - write_unlock(g_lock); + write_lock(g_lock); + mxlnd_conn_free_locked(conn); + write_unlock(g_lock); - count++; - spin_lock(g_conn_lock); - } - spin_unlock(g_conn_lock); - CDEBUG(D_NET, "%s: freed %d zombies\n", __func__, count); - return count; + count++; + spin_lock(g_conn_lock); + } + spin_unlock(g_conn_lock); + CDEBUG(D_NET, "%s: freed %d zombies\n", __func__, count); + return count; } /** @@ -3943,22 +3956,20 @@ mxlnd_free_conn_zombies(void) int mxlnd_connd(void *arg) { - long id = (long) arg; - - cfs_daemonize("mxlnd_connd"); + long id = (long) arg; - CDEBUG(D_NET, "connd starting\n"); + CDEBUG(D_NET, "connd starting\n"); - while (!(atomic_read(&kmxlnd_data.kmx_shutdown))) { - int ret = 0; - kmx_connparams_t *cp = NULL; - spinlock_t *g_conn_lock = &kmxlnd_data.kmx_conn_lock; - struct list_head *conn_reqs = &kmxlnd_data.kmx_conn_reqs; + while (!(atomic_read(&kmxlnd_data.kmx_shutdown))) { + int ret = 0; + kmx_connparams_t *cp = NULL; + spinlock_t *g_conn_lock = &kmxlnd_data.kmx_conn_lock; + cfs_list_t *conn_reqs = &kmxlnd_data.kmx_conn_reqs; - ret = down_interruptible(&kmxlnd_data.kmx_conn_sem); + ret = down_interruptible(&kmxlnd_data.kmx_conn_sem); - if (atomic_read(&kmxlnd_data.kmx_shutdown)) - break; + if (atomic_read(&kmxlnd_data.kmx_shutdown)) + break; if (ret != 0) continue; @@ -3966,17 +3977,18 @@ mxlnd_connd(void *arg) ret 
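/*
 * Sketch of the drain pattern used by mxlnd_abort_msgs() and
 * mxlnd_free_conn_zombies() above: the global lock is held only long enough
 * to unlink one entry, the entry is processed unlocked, and the lock is
 * retaken before testing the list again. A pthread mutex and a minimal
 * singly linked list stand in for the kernel primitives.
 */
#include <pthread.h>
#include <stddef.h>

struct work_item {
        struct work_item *next;
};

struct work_list {
        pthread_mutex_t   lock;
        struct work_item *head;
};

int drain_work_list(struct work_list *wl, void (*process)(struct work_item *))
{
        int count = 0;

        pthread_mutex_lock(&wl->lock);
        while (wl->head != NULL) {
                struct work_item *item = wl->head;

                wl->head = item->next;          /* unlink while holding the lock */
                item->next = NULL;
                pthread_mutex_unlock(&wl->lock);

                process(item);                  /* may sleep or take other locks */
                count++;

                pthread_mutex_lock(&wl->lock);  /* retake before the next test */
        }
        pthread_mutex_unlock(&wl->lock);
        return count;
}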
= mxlnd_abort_msgs(); ret += mxlnd_free_conn_zombies(); - spin_lock(g_conn_lock); - if (list_empty(conn_reqs)) { - if (ret == 0) - CDEBUG(D_NETERROR, "connd woke up but did not " - "find a kmx_connparams_t or zombie conn\n"); - spin_unlock(g_conn_lock); - continue; - } - cp = list_entry(conn_reqs->next, kmx_connparams_t, mxr_list); - list_del_init(&cp->mxr_list); - spin_unlock(g_conn_lock); + spin_lock(g_conn_lock); + if (cfs_list_empty(conn_reqs)) { + if (ret == 0) + CNETERR("connd woke up but did not find a " + "kmx_connparams_t or zombie conn\n"); + spin_unlock(g_conn_lock); + continue; + } + cp = cfs_list_entry(conn_reqs->next, kmx_connparams_t, + mxr_list); + cfs_list_del_init(&cp->mxr_list); + spin_unlock(g_conn_lock); switch (MXLND_MSG_TYPE(cp->mxr_match)) { case MXLND_MSG_CONN_REQ: @@ -4009,37 +4021,36 @@ mxlnd_connd(void *arg) int mxlnd_timeoutd(void *arg) { - int i = 0; - long id = (long) arg; - unsigned long now = 0; - unsigned long next = 0; - unsigned long delay = HZ; - kmx_peer_t *peer = NULL; - kmx_peer_t *temp = NULL; - kmx_conn_t *conn = NULL; - rwlock_t *g_lock = &kmxlnd_data.kmx_global_lock; - - cfs_daemonize("mxlnd_timeoutd"); + int i = 0; + long id = (long) arg; + unsigned long now = 0; + unsigned long next = 0; + unsigned long delay = HZ; + kmx_peer_t *peer = NULL; + kmx_peer_t *temp = NULL; + kmx_conn_t *conn = NULL; + rwlock_t *g_lock = &kmxlnd_data.kmx_global_lock; CDEBUG(D_NET, "timeoutd starting\n"); - while (!(atomic_read(&kmxlnd_data.kmx_shutdown))) { + while (!(atomic_read(&kmxlnd_data.kmx_shutdown))) { now = jiffies; /* if the next timeout has not arrived, go back to sleep */ - if (time_after(now, next)) { + if (cfs_time_after(now, next)) { next = mxlnd_check_timeouts(now); } /* try to progress peers' txs */ - write_lock(g_lock); + write_lock(g_lock); for (i = 0; i < MXLND_HASH_SIZE; i++) { - struct list_head *peers = &kmxlnd_data.kmx_peers[i]; + cfs_list_t *peers = &kmxlnd_data.kmx_peers[i]; /* NOTE we are safe against the removal of peer, but * not against the removal of temp */ - list_for_each_entry_safe(peer, temp, peers, mxp_list) { - if (atomic_read(&kmxlnd_data.kmx_shutdown)) + cfs_list_for_each_entry_safe(peer, temp, peers, + mxp_list) { + if (atomic_read(&kmxlnd_data.kmx_shutdown)) break; mxlnd_peer_addref(peer); /* add ref... */ conn = peer->mxp_conn; @@ -4052,18 +4063,20 @@ mxlnd_timeoutd(void *arg) continue; } - if ((conn->mxk_status == MXLND_CONN_READY || - conn->mxk_status == MXLND_CONN_FAIL) && - time_after(now, conn->mxk_last_tx + HZ)) { - write_unlock(g_lock); - mxlnd_check_sends(peer); - write_lock(g_lock); - } - mxlnd_conn_decref(conn); /* until here */ - mxlnd_peer_decref(peer); /* ...to here */ - } - } - write_unlock(g_lock); + if ((conn->mxk_status == MXLND_CONN_READY || + conn->mxk_status == MXLND_CONN_FAIL) && + cfs_time_after(now, + conn->mxk_last_tx + + HZ)) { + write_unlock(g_lock); + mxlnd_check_sends(peer); + write_lock(g_lock); + } + mxlnd_conn_decref(conn); /* until here */ + mxlnd_peer_decref(peer); /* ...to here */ + } + } + write_unlock(g_lock); mxlnd_sleep(delay); }
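/*
 * Sketch of the timeoutd main loop above: sleep a fixed delay, recompute the
 * next deadline only once the previously computed one has passed, and nudge
 * peers that have pending sends. This user-space version takes the helpers
 * as function pointers so it stays self-contained; check_timeouts and
 * nudge_idle_peers are hypothetical stand-ins for the kernel thread's work.
 */
#include <time.h>
#include <stdbool.h>

#define TICK_AFTER(a, b)  ((long)((b) - (a)) < 0)

void timeoutd_loop(volatile bool *stop,
                   unsigned long (*now_ticks)(void),
                   unsigned long (*check_timeouts)(unsigned long now),
                   void (*nudge_idle_peers)(unsigned long now),
                   const struct timespec *delay)
{
        unsigned long next = 0;

        while (!*stop) {
                unsigned long now = now_ticks();

                if (TICK_AFTER(now, next))      /* previous deadline has passed */
                        next = check_timeouts(now);

                nudge_idle_peers(now);          /* progress peers' pending txs */
                nanosleep(delay, NULL);
        }
}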