LU-6142 lnet: convert kiblnd/ksocknal_thread_start to vararg
[fs/lustre-release.git] lnet/klnds/socklnd/socklnd_cb.c
index 502d127..ec3f952 100644
@@ -39,9 +39,9 @@ ksocknal_alloc_tx(int type, int size)
                /* searching for a noop tx in free list */
                spin_lock(&ksocknal_data.ksnd_tx_lock);
 
-               if (!list_empty(&ksocknal_data.ksnd_idle_noop_txs)) {
-                       tx = list_entry(ksocknal_data.ksnd_idle_noop_txs.next,
-                                       struct ksock_tx, tx_list);
+               tx = list_first_entry_or_null(&ksocknal_data.ksnd_idle_noop_txs,
+                                             struct ksock_tx, tx_list);
+               if (tx) {
                        LASSERT(tx->tx_desc_size == size);
                        list_del(&tx->tx_list);
                }
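
The hunk above shows the pattern this patch applies throughout the file: an open-coded list_empty() check followed by list_entry() on ->next collapses into a single list_first_entry_or_null() call. A minimal sketch of the idiom, using a placeholder struct foo and free list rather than the socklnd structures:

    #include <linux/list.h>
    #include <linux/spinlock.h>

    /* Placeholder type and list for illustration only; not socklnd structures. */
    struct foo {
            struct list_head f_list;
    };

    static LIST_HEAD(foo_free_list);
    static DEFINE_SPINLOCK(foo_lock);

    /* Pop the first free entry, or return NULL when the list is empty. */
    static struct foo *foo_get_first(void)
    {
            struct foo *f;

            spin_lock(&foo_lock);
            f = list_first_entry_or_null(&foo_free_list, struct foo, f_list);
            if (f)
                    list_del(&f->f_list);
            spin_unlock(&foo_lock);

            return f;
    }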
@@ -423,9 +423,8 @@ ksocknal_txlist_done(struct lnet_ni *ni, struct list_head *txlist, int error)
 {
        struct ksock_tx *tx;
 
-       while (!list_empty(txlist)) {
-               tx = list_entry(txlist->next, struct ksock_tx, tx_list);
-
+       while ((tx = list_first_entry_or_null(txlist, struct ksock_tx,
+                                             tx_list)) != NULL) {
                if (error && tx->tx_lnetmsg != NULL) {
                        CNETERR("Deleting packet type %d len %d %s->%s\n",
                                le32_to_cpu(tx->tx_lnetmsg->msg_hdr.type),
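
The same helper also replaces the "while (!list_empty(...))" drain loops, as in ksocknal_txlist_done() here and in the reaper further down. A hedged sketch of that loop shape, again with the placeholder struct foo standing in for the socklnd types:

    #include <linux/list.h>
    #include <linux/slab.h>

    struct foo {                            /* same placeholder as above */
            struct list_head f_list;
    };

    /* Detach and release every entry; the loop ends once
     * list_first_entry_or_null() returns NULL on the emptied list. */
    static void foo_drain(struct list_head *head)
    {
            struct foo *f;

            while ((f = list_first_entry_or_null(head, struct foo,
                                                 f_list)) != NULL) {
                    list_del(&f->f_list);
                    kfree(f);               /* or whatever per-entry cleanup applies */
            }
    }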
@@ -675,16 +674,14 @@ ksocknal_launch_all_connections_locked(struct ksock_peer_ni *peer_ni)
 struct ksock_conn *
 ksocknal_find_conn_locked(struct ksock_peer_ni *peer_ni, struct ksock_tx *tx, int nonblk)
 {
-       struct list_head *tmp;
+       struct ksock_conn *c;
        struct ksock_conn *conn;
        struct ksock_conn *typed = NULL;
        struct ksock_conn *fallback = NULL;
        int tnob = 0;
        int fnob = 0;
 
-       list_for_each(tmp, &peer_ni->ksnp_conns) {
-               struct ksock_conn *c = list_entry(tmp, struct ksock_conn,
-                                                 ksnc_list);
+       list_for_each_entry(c, &peer_ni->ksnp_conns, ksnc_list) {
                int nob = atomic_read(&c->ksnc_tx_nob) +
                          c->ksnc_sock->sk->sk_wmem_queued;
                int rc;
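
This hunk drops the bare list_for_each()/list_entry() pair in favour of list_for_each_entry(), which performs the container_of() step itself. A small sketch of the two equivalent forms, on the same placeholder type:

    #include <linux/list.h>

    struct foo {                            /* placeholder, as above */
            struct list_head f_list;
            int f_nob;
    };

    /* Old shape: a bare cursor plus an explicit list_entry() each iteration. */
    static int foo_total_old(struct list_head *head)
    {
            struct list_head *tmp;
            int total = 0;

            list_for_each(tmp, head) {
                    struct foo *f = list_entry(tmp, struct foo, f_list);

                    total += f->f_nob;
            }
            return total;
    }

    /* New shape: the iterator variable is the containing structure itself. */
    static int foo_total_new(struct list_head *head)
    {
            struct foo *f;
            int total = 0;

            list_for_each_entry(f, head, f_list)
                    total += f->f_nob;
            return total;
    }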
@@ -1047,18 +1044,6 @@ ksocknal_send(struct lnet_ni *ni, void *private, struct lnet_msg *lntmsg)
         return (-EIO);
 }
 
-int
-ksocknal_thread_start(int (*fn)(void *arg), void *arg, char *name)
-{
-       struct task_struct *task = kthread_run(fn, arg, "%s", name);
-
-       if (IS_ERR(task))
-               return PTR_ERR(task);
-
-       atomic_inc(&ksocknal_data.ksnd_nthreads);
-       return 0;
-}
-
 void
 ksocknal_thread_fini (void)
 {
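
The fixed-name ksocknal_thread_start() is removed from this file; per the commit subject it is converted to a printf-style vararg helper defined elsewhere in the series. The following is only a sketch of what that variant could look like, assuming it formats into a TASK_COMM_LEN buffer and otherwise keeps the old semantics (kthread_run() plus the ksnd_nthreads count); the parameter name namefmt is likewise assumed:

    #include <linux/err.h>
    #include <linux/kthread.h>
    #include <linux/sched.h>        /* TASK_COMM_LEN; plus socklnd headers and stdarg */

    int
    ksocknal_thread_start(int (*fn)(void *arg), void *arg, const char *namefmt, ...)
    {
            struct task_struct *task;
            char name[TASK_COMM_LEN];
            va_list args;

            /* Build the thread name from the caller's format and arguments. */
            va_start(args, namefmt);
            vsnprintf(name, sizeof(name), namefmt, args);
            va_end(args);

            task = kthread_run(fn, arg, "%s", name);
            if (IS_ERR(task))
                    return PTR_ERR(task);

            atomic_inc(&ksocknal_data.ksnd_nthreads);
            return 0;
    }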
@@ -1466,10 +1451,10 @@ int ksocknal_scheduler(void *arg)
                bool did_something = false;
 
                /* Ensure I progress everything semi-fairly */
-
-               if (!list_empty(&sched->kss_rx_conns)) {
-                       conn = list_entry(sched->kss_rx_conns.next,
-                                         struct ksock_conn, ksnc_rx_list);
+               conn = list_first_entry_or_null(&sched->kss_rx_conns,
+                                               struct ksock_conn,
+                                               ksnc_rx_list);
+               if (conn) {
                        list_del(&conn->ksnc_rx_list);
 
                        LASSERT(conn->ksnc_rx_scheduled);
@@ -1517,16 +1502,17 @@ int ksocknal_scheduler(void *arg)
 
                        list_splice_init(&sched->kss_zombie_noop_txs, &zlist);
 
-                       conn = list_entry(sched->kss_tx_conns.next,
-                                         struct ksock_conn, ksnc_tx_list);
+                       conn = list_first_entry(&sched->kss_tx_conns,
+                                               struct ksock_conn,
+                                               ksnc_tx_list);
                        list_del(&conn->ksnc_tx_list);
 
                        LASSERT(conn->ksnc_tx_scheduled);
                        LASSERT(conn->ksnc_tx_ready);
                        LASSERT(!list_empty(&conn->ksnc_tx_queue));
 
-                       tx = list_entry(conn->ksnc_tx_queue.next,
-                                       struct ksock_tx, tx_list);
+                       tx = list_first_entry(&conn->ksnc_tx_queue,
+                                             struct ksock_tx, tx_list);
 
                        if (conn->ksnc_tx_carrier == tx)
                                ksocknal_next_tx_carrier(conn);
@@ -1943,7 +1929,8 @@ ksocknal_connect(struct ksock_conn_cb *conn_cb)
                        type = SOCKLND_CONN_ANY;
                } else if ((wanted & BIT(SOCKLND_CONN_CONTROL)) != 0) {
                        type = SOCKLND_CONN_CONTROL;
-               } else if ((wanted & BIT(SOCKLND_CONN_BULK_IN)) != 0) {
+               } else if ((wanted & BIT(SOCKLND_CONN_BULK_IN)) != 0 &&
+                          conn_cb->ksnr_blki_conn_count <= conn_cb->ksnr_blko_conn_count) {
                        type = SOCKLND_CONN_BULK_IN;
                } else {
                        LASSERT ((wanted & BIT(SOCKLND_CONN_BULK_OUT)) != 0);
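
The added condition in this hunk takes SOCKLND_CONN_BULK_IN only while it has not pulled ahead of SOCKLND_CONN_BULK_OUT, apparently to keep the two bulk connection counts balanced. A hypothetical helper distilling just that decision, with blki and blko standing in for conn_cb->ksnr_blki_conn_count and conn_cb->ksnr_blko_conn_count:

    #include <linux/bits.h>

    /* Illustrative only; the real code runs inline in ksocknal_connect(). */
    static int choose_bulk_type(unsigned int wanted, int blki, int blko)
    {
            /* Prefer BULK_IN only while it is not ahead of BULK_OUT;
             * otherwise fall through to BULK_OUT, which the original
             * code LASSERTs is still wanted at that point. */
            if ((wanted & BIT(SOCKLND_CONN_BULK_IN)) != 0 && blki <= blko)
                    return SOCKLND_CONN_BULK_IN;

            return SOCKLND_CONN_BULK_OUT;
    }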
@@ -2042,11 +2029,10 @@ ksocknal_connect(struct ksock_conn_cb *conn_cb)
                /* ksnp_tx_queue is queued on a conn on successful
                 * connection for V1.x and V2.x
                 */
-               if (!list_empty(&peer_ni->ksnp_conns)) {
-                       conn = list_entry(peer_ni->ksnp_conns.next,
-                                         struct ksock_conn, ksnc_list);
+               conn = list_first_entry_or_null(&peer_ni->ksnp_conns,
+                                               struct ksock_conn, ksnc_list);
+               if (conn)
                        LASSERT(conn->ksnc_proto == &ksocknal_protocol_v3x);
-               }
 
                /* take all the blocked packets while I've got the lock and
                 * complete below...
@@ -2070,7 +2056,6 @@ ksocknal_connect(struct ksock_conn_cb *conn_cb)
 static int
 ksocknal_connd_check_start(time64_t sec, long *timeout)
 {
-       char name[16];
         int rc;
         int total = ksocknal_data.ksnd_connd_starting +
                     ksocknal_data.ksnd_connd_running;
@@ -2108,8 +2093,8 @@ ksocknal_connd_check_start(time64_t sec, long *timeout)
        spin_unlock_bh(&ksocknal_data.ksnd_connd_lock);
 
        /* NB: total is the next id */
-       snprintf(name, sizeof(name), "socknal_cd%02d", total);
-       rc = ksocknal_thread_start(ksocknal_connd, NULL, name);
+       rc = ksocknal_thread_start(ksocknal_connd, NULL,
+                                  "socknal_cd%02d", total);
 
        spin_lock_bh(&ksocknal_data.ksnd_connd_lock);
         if (rc == 0)
@@ -2224,11 +2209,10 @@ ksocknal_connd(void *arg)
                        dropped_lock = true;
                }
 
-               if (!list_empty(&ksocknal_data.ksnd_connd_connreqs)) {
+               cr = list_first_entry_or_null(&ksocknal_data.ksnd_connd_connreqs,
+                                             struct ksock_connreq, ksncr_list);
+               if (cr) {
                        /* Connection accepted by the listener */
-                       cr = list_entry(ksocknal_data.ksnd_connd_connreqs.next,
-                                       struct ksock_connreq, ksncr_list);
-
                        list_del(&cr->ksncr_list);
                        spin_unlock_bh(connd_lock);
                        dropped_lock = true;
@@ -2304,14 +2288,11 @@ ksocknal_find_timed_out_conn(struct ksock_peer_ni *peer_ni)
 {
         /* We're called with a shared lock on ksnd_global_lock */
        struct ksock_conn *conn;
-       struct list_head *ctmp;
        struct ksock_tx *tx;
 
-       list_for_each(ctmp, &peer_ni->ksnp_conns) {
+       list_for_each_entry(conn, &peer_ni->ksnp_conns, ksnc_list) {
                int error;
 
-               conn = list_entry(ctmp, struct ksock_conn, ksnc_list);
-
                 /* Don't need the {get,put}connsock dance to deref ksnc_sock */
                 LASSERT (!conn->ksnc_closing);
 
@@ -2383,10 +2364,9 @@ ksocknal_flush_stale_txs(struct ksock_peer_ni *peer_ni)
 
        write_lock_bh(&ksocknal_data.ksnd_global_lock);
 
-       while (!list_empty(&peer_ni->ksnp_tx_queue)) {
-               tx = list_entry(peer_ni->ksnp_tx_queue.next,
-                               struct ksock_tx, tx_list);
-
+       while ((tx = list_first_entry_or_null(&peer_ni->ksnp_tx_queue,
+                                             struct ksock_tx,
+                                             tx_list)) != NULL) {
                if (ktime_get_seconds() < tx->tx_deadline)
                        break;
 
@@ -2506,20 +2486,16 @@ ksocknal_check_peer_timeouts(int idx)
                /* we can't process stale txs right here because we're
                 * holding only shared lock
                 */
-               if (!list_empty(&peer_ni->ksnp_tx_queue)) {
-                       struct ksock_tx *tx;
-
-                       tx = list_entry(peer_ni->ksnp_tx_queue.next,
-                                       struct ksock_tx, tx_list);
-                       if (ktime_get_seconds() >= tx->tx_deadline) {
-                               ksocknal_peer_addref(peer_ni);
-                               read_unlock(&ksocknal_data.ksnd_global_lock);
+               tx = list_first_entry_or_null(&peer_ni->ksnp_tx_queue,
+                                             struct ksock_tx, tx_list);
+               if (tx && ktime_get_seconds() >= tx->tx_deadline) {
+                       ksocknal_peer_addref(peer_ni);
+                       read_unlock(&ksocknal_data.ksnd_global_lock);
 
-                               ksocknal_flush_stale_txs(peer_ni);
+                       ksocknal_flush_stale_txs(peer_ni);
 
-                               ksocknal_peer_decref(peer_ni);
-                               goto again;
-                       }
+                       ksocknal_peer_decref(peer_ni);
+                       goto again;
                }
 
                if (list_empty(&peer_ni->ksnp_zc_req_list))
@@ -2583,9 +2559,9 @@ int ksocknal_reaper(void *arg)
        spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
 
         while (!ksocknal_data.ksnd_shuttingdown) {
-               if (!list_empty(&ksocknal_data.ksnd_deathrow_conns)) {
-                       conn = list_entry(ksocknal_data.ksnd_deathrow_conns.next,
-                                         struct ksock_conn, ksnc_list);
+               conn = list_first_entry_or_null(&ksocknal_data.ksnd_deathrow_conns,
+                                               struct ksock_conn, ksnc_list);
+               if (conn) {
                        list_del(&conn->ksnc_list);
 
                        spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);
@@ -2597,9 +2573,9 @@ int ksocknal_reaper(void *arg)
                         continue;
                 }
 
-               if (!list_empty(&ksocknal_data.ksnd_zombie_conns)) {
-                       conn = list_entry(ksocknal_data.ksnd_zombie_conns.next,
-                                         struct ksock_conn, ksnc_list);
+               conn = list_first_entry_or_null(&ksocknal_data.ksnd_zombie_conns,
+                                               struct ksock_conn, ksnc_list);
+               if (conn) {
                        list_del(&conn->ksnc_list);
 
                        spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);
@@ -2617,9 +2593,9 @@ int ksocknal_reaper(void *arg)
 
                 /* reschedule all the connections that stalled with ENOMEM... */
                 nenomem_conns = 0;
-               while (!list_empty(&enomem_conns)) {
-                       conn = list_entry(enomem_conns.next,
-                                         struct ksock_conn, ksnc_tx_list);
+               while ((conn = list_first_entry_or_null(&enomem_conns,
+                                                       struct ksock_conn,
+                                                       ksnc_tx_list)) != NULL) {
                        list_del(&conn->ksnc_tx_list);
 
                         sched = conn->ksnc_scheduler;