LU-1346 libcfs: cleanup waitq related primitives
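
This patch drops the libcfs wait-queue and scheduler wrappers in the o2iblnd LND in favour of the native Linux primitives: cfs_waitq_init() becomes init_waitqueue_head(), cfs_waitq_signal() becomes wake_up(), cfs_waitq_broadcast() becomes wake_up_all(), and cfs_schedule()/cfs_schedule_timeout() become schedule()/schedule_timeout(). The touched blocks are reindented with tabs at the same time; no functional change appears to be intended.
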
diff --git a/lnet/klnds/o2iblnd/o2iblnd.c b/lnet/klnds/o2iblnd/o2iblnd.c
index 461eaaf..2da50e2 100644
@@ -790,19 +790,19 @@ kiblnd_create_conn(kib_peer_t *peer, struct rdma_cm_id *cmid,
                goto failed_2;
        }
 
-        if (dev->ibd_hdev->ibh_ibdev != cmid->device) {
-                /* wakeup failover thread and teardown connection */
-                if (kiblnd_dev_can_failover(dev)) {
-                        cfs_list_add_tail(&dev->ibd_fail_list,
-                                      &kiblnd_data.kib_failed_devs);
-                        cfs_waitq_signal(&kiblnd_data.kib_failover_waitq);
-                }
+       if (dev->ibd_hdev->ibh_ibdev != cmid->device) {
+               /* wakeup failover thread and teardown connection */
+               if (kiblnd_dev_can_failover(dev)) {
+                       cfs_list_add_tail(&dev->ibd_fail_list,
+                                     &kiblnd_data.kib_failed_devs);
+                       wake_up(&kiblnd_data.kib_failover_waitq);
+               }
 
                write_unlock_irqrestore(glock, flags);
-                CERROR("cmid HCA(%s), kib_dev(%s) need failover\n",
-                       cmid->device->name, dev->ibd_ifname);
-                goto failed_2;
-        }
+               CERROR("cmid HCA(%s), kib_dev(%s) need failover\n",
+                      cmid->device->name, dev->ibd_ifname);
+               goto failed_2;
+       }
 
         kiblnd_hdev_addref_locked(dev->ibd_hdev);
         conn->ibc_hdev = dev->ibd_hdev;
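
For context, the hunk above queues the failed device under the global lock and then pokes the failover thread directly with wake_up(). A minimal standalone sketch of that signalling pattern, with hypothetical names standing in for the kiblnd structures:

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/wait.h>

/* hypothetical stand-ins for kib_failed_devs / kib_failover_waitq */
static LIST_HEAD(failed_devs);
static DEFINE_SPINLOCK(failed_lock);
static DECLARE_WAIT_QUEUE_HEAD(failover_waitq);

struct fail_entry {
	struct list_head fe_link;
};

/* producer side: record a failed device and wake the failover thread */
static void report_failure(struct fail_entry *fe)
{
	unsigned long flags;

	spin_lock_irqsave(&failed_lock, flags);
	list_add_tail(&fe->fe_link, &failed_devs);
	spin_unlock_irqrestore(&failed_lock, flags);

	wake_up(&failover_waitq);		/* was cfs_waitq_signal() */
}

/* consumer side: sleep until there is work on the list */
static void wait_for_work(void)
{
	wait_event(failover_waitq, !list_empty(&failed_devs));
}
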
@@ -1325,7 +1325,7 @@ kiblnd_current_hdev(kib_dev_t *dev)
                if (i++ % 50 == 0)
                        CDEBUG(D_NET, "%s: Wait for failover\n",
                               dev->ibd_ifname);
-               cfs_schedule_timeout(cfs_time_seconds(1) / 100);
+               schedule_timeout(cfs_time_seconds(1) / 100);
 
                read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
        }
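
The loop above drops the global lock, naps for roughly a hundredth of a second, and rechecks dev->ibd_failover. A sketch of the same poll-and-sleep pattern with hypothetical names; note that a bare schedule_timeout() only sleeps if the task state was changed first, which is why the sketch uses schedule_timeout_interruptible():

#include <linux/jiffies.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/types.h>

static DEFINE_RWLOCK(glock);			/* stands in for kib_global_lock */
static bool failover_in_progress;		/* stands in for dev->ibd_failover */

static void wait_for_failover_done(void)
{
	unsigned long flags;
	int i = 0;

	read_lock_irqsave(&glock, flags);
	while (failover_in_progress) {
		read_unlock_irqrestore(&glock, flags);

		if (i++ % 50 == 0)
			pr_debug("waiting for failover\n");
		/* sleep for about a hundredth of a second, then recheck */
		schedule_timeout_interruptible(HZ / 100);

		read_lock_irqsave(&glock, flags);
	}
	read_unlock_irqrestore(&glock, flags);
}
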
@@ -1672,7 +1672,7 @@ kiblnd_fmr_pool_map(kib_fmr_poolset_t *fps, __u64 *pages, int npages,
                spin_unlock(&fps->fps_lock);
                CDEBUG(D_NET, "Another thread is allocating new "
                       "FMR pool, waiting for her to complete\n");
-               cfs_schedule();
+               schedule();
                goto again;
 
        }
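
This hunk and the kiblnd_pool_alloc_node() hunk below make the same substitution: when another thread is already growing the pool, drop the lock, call schedule() to yield the CPU, and retry. A condensed sketch with hypothetical names:

#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/types.h>

static DEFINE_SPINLOCK(pool_lock);		/* stands in for fps_lock / ps_lock */
static bool pool_has_free;
static bool pool_being_built;			/* another thread is allocating */

static int pool_alloc_node(void)
{
again:
	spin_lock(&pool_lock);
	if (pool_has_free) {
		/* ... take a node off the pool ... */
		spin_unlock(&pool_lock);
		return 0;
	}
	if (pool_being_built) {
		/* someone else is growing the pool: yield and retry */
		spin_unlock(&pool_lock);
		schedule();			/* was cfs_schedule() */
		goto again;
	}
	pool_being_built = true;
	spin_unlock(&pool_lock);
	/* ... allocate a new pool, clear pool_being_built, goto again ... */
	return 0;
}
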
@@ -1875,7 +1875,7 @@ kiblnd_pool_alloc_node(kib_poolset_t *ps)
                 CDEBUG(D_NET, "Another thread is allocating new "
                        "%s pool, waiting for her to complete\n",
                        ps->ps_name);
-                cfs_schedule();
+               schedule();
                 goto again;
         }
 
@@ -2831,20 +2831,20 @@ kiblnd_base_shutdown(void)
                 LASSERT (cfs_list_empty(&kiblnd_data.kib_connd_zombies));
                 LASSERT (cfs_list_empty(&kiblnd_data.kib_connd_conns));
 
-                /* flag threads to terminate; wake and wait for them to die */
-                kiblnd_data.kib_shutdown = 1;
+               /* flag threads to terminate; wake and wait for them to die */
+               kiblnd_data.kib_shutdown = 1;
 
                /* NB: we really want to stop scheduler threads net by net
                 * instead of the whole module, this should be improved
                 * with dynamic configuration LNet */
                cfs_percpt_for_each(sched, i, kiblnd_data.kib_scheds)
-                       cfs_waitq_broadcast(&sched->ibs_waitq);
+                       wake_up_all(&sched->ibs_waitq);
 
-                cfs_waitq_broadcast(&kiblnd_data.kib_connd_waitq);
-                cfs_waitq_broadcast(&kiblnd_data.kib_failover_waitq);
+               wake_up_all(&kiblnd_data.kib_connd_waitq);
+               wake_up_all(&kiblnd_data.kib_failover_waitq);
 
-                i = 2;
-                while (cfs_atomic_read(&kiblnd_data.kib_nthreads) != 0) {
+               i = 2;
+               while (cfs_atomic_read(&kiblnd_data.kib_nthreads) != 0) {
                         i++;
                         CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* power of 2? */
                                "Waiting for %d threads to terminate\n",
@@ -2975,10 +2975,10 @@ kiblnd_base_startup(void)
                 CFS_INIT_LIST_HEAD(&kiblnd_data.kib_peers[i]);
 
        spin_lock_init(&kiblnd_data.kib_connd_lock);
-        CFS_INIT_LIST_HEAD(&kiblnd_data.kib_connd_conns);
-        CFS_INIT_LIST_HEAD(&kiblnd_data.kib_connd_zombies);
-        cfs_waitq_init(&kiblnd_data.kib_connd_waitq);
-       cfs_waitq_init(&kiblnd_data.kib_failover_waitq);
+       CFS_INIT_LIST_HEAD(&kiblnd_data.kib_connd_conns);
+       CFS_INIT_LIST_HEAD(&kiblnd_data.kib_connd_zombies);
+       init_waitqueue_head(&kiblnd_data.kib_connd_waitq);
+       init_waitqueue_head(&kiblnd_data.kib_failover_waitq);
 
        kiblnd_data.kib_scheds = cfs_percpt_alloc(lnet_cpt_table(),
                                                  sizeof(*sched));
@@ -2990,7 +2990,7 @@ kiblnd_base_startup(void)
 
                spin_lock_init(&sched->ibs_lock);
                CFS_INIT_LIST_HEAD(&sched->ibs_conns);
-               cfs_waitq_init(&sched->ibs_waitq);
+               init_waitqueue_head(&sched->ibs_waitq);
 
                nthrs = cfs_cpt_weight(lnet_cpt_table(), i);
                if (*kiblnd_tunables.kib_nscheds > 0) {
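
The two startup hunks swap cfs_waitq_init() for init_waitqueue_head() on each embedded wait queue. A minimal sketch of initialising one per-CPT scheduler; the struct and the native INIT_LIST_HEAD() are illustrative, not the patch's kib_sched_info / CFS_INIT_LIST_HEAD():

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/wait.h>

struct sched_info {
	spinlock_t		ibs_lock;
	struct list_head	ibs_conns;
	wait_queue_head_t	ibs_waitq;
};

/* every embedded wait_queue_head_t must be initialised before use */
static void sched_info_init(struct sched_info *sched)
{
	spin_lock_init(&sched->ibs_lock);
	INIT_LIST_HEAD(&sched->ibs_conns);
	init_waitqueue_head(&sched->ibs_waitq);	/* was cfs_waitq_init() */
}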