LU-4629 lnet: fix issues found by Klocwork Insight tool
[fs/lustre-release.git] lnet/klnds/o2iblnd/o2iblnd.c
index 3f5ff4e..7a5944f 100644
@@ -346,7 +346,7 @@ kiblnd_create_peer(lnet_ni_t *ni, kib_peer_t **peerp, lnet_nid_t nid)
         peer->ibp_nid = nid;
         peer->ibp_error = 0;
         peer->ibp_last_alive = 0;
-        cfs_atomic_set(&peer->ibp_refcount, 1);  /* 1 ref for caller */
+       atomic_set(&peer->ibp_refcount, 1);  /* 1 ref for caller */
 
         CFS_INIT_LIST_HEAD(&peer->ibp_list);     /* not in the peer table yet */
         CFS_INIT_LIST_HEAD(&peer->ibp_conns);
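
This hunk sets the pattern for the whole patch: the libcfs cfs_atomic_* wrappers are dropped in favor of the kernel's native atomic API, which Klocwork Insight can analyze directly. A minimal sketch of the refcount-initialization idiom, with hypothetical example_* names standing in for the kiblnd types:

    #include <linux/atomic.h>

    struct example_peer {
            atomic_t ep_refcount;           /* native atomic_t, no wrapper */
    };

    static void example_peer_init(struct example_peer *peer)
    {
            atomic_set(&peer->ep_refcount, 1);      /* 1 ref for the caller */
    }
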
@@ -358,7 +358,7 @@ kiblnd_create_peer(lnet_ni_t *ni, kib_peer_t **peerp, lnet_nid_t nid)
         LASSERT (net->ibn_shutdown == 0);
 
         /* npeers only grows with the global lock held */
-        cfs_atomic_inc(&net->ibn_npeers);
+       atomic_inc(&net->ibn_npeers);
 
        write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
 
@@ -372,7 +372,7 @@ kiblnd_destroy_peer (kib_peer_t *peer)
         kib_net_t *net = peer->ibp_ni->ni_data;
 
         LASSERT (net != NULL);
-        LASSERT (cfs_atomic_read(&peer->ibp_refcount) == 0);
+       LASSERT (atomic_read(&peer->ibp_refcount) == 0);
         LASSERT (!kiblnd_peer_active(peer));
         LASSERT (peer->ibp_connecting == 0);
         LASSERT (peer->ibp_accepting == 0);
@@ -385,7 +385,7 @@ kiblnd_destroy_peer (kib_peer_t *peer)
          * they are destroyed, so we can be assured that _all_ state to do
          * with this peer has been cleaned up when its refcount drops to
          * zero. */
-        cfs_atomic_dec(&net->ibn_npeers);
+       atomic_dec(&net->ibn_npeers);
 }
 
 kib_peer_t *
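
kiblnd_destroy_peer() asserts the refcount is already zero, so the decrement itself lives in a decref helper. A hedged sketch of that idiom (the example_* names are illustrative, not the actual kiblnd_peer_decref):

    #include <linux/atomic.h>

    void example_peer_destroy(struct example_peer *peer);   /* frees peer state */

    /* Drop one reference; the last one out destroys the peer. */
    static inline void example_peer_decref(struct example_peer *peer)
    {
            if (atomic_dec_and_test(&peer->ep_refcount))
                    example_peer_destroy(peer);
    }
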
@@ -410,7 +410,7 @@ kiblnd_find_peer_locked (lnet_nid_t nid)
 
                 CDEBUG(D_NET, "got peer [%p] -> %s (%d) version: %x\n",
                        peer, libcfs_nid2str(nid),
-                       cfs_atomic_read(&peer->ibp_refcount),
+                      atomic_read(&peer->ibp_refcount),
                        peer->ibp_version);
                 return peer;
         }
@@ -455,7 +455,7 @@ kiblnd_get_peer_info (lnet_ni_t *ni, int index,
                                 continue;
 
                         *nidp = peer->ibp_nid;
-                        *count = cfs_atomic_read(&peer->ibp_refcount);
+                       *count = atomic_read(&peer->ibp_refcount);
 
                        read_unlock_irqrestore(&kiblnd_data.kib_global_lock,
                                               flags);
@@ -612,7 +612,7 @@ kiblnd_debug_conn (kib_conn_t *conn)
        spin_lock(&conn->ibc_lock);
 
         CDEBUG(D_CONSOLE, "conn[%d] %p [version %x] -> %s: \n",
-               cfs_atomic_read(&conn->ibc_refcount), conn,
+              atomic_read(&conn->ibc_refcount), conn,
                conn->ibc_version, libcfs_nid2str(conn->ibc_peer->ibp_nid));
         CDEBUG(D_CONSOLE, "   state %d nposted %d/%d cred %d o_cred %d r_cred %d\n",
                conn->ibc_state, conn->ibc_noops_posted,
@@ -738,7 +738,7 @@ kiblnd_create_conn(kib_peer_t *peer, struct rdma_cm_id *cmid,
        int                     i;
 
        LASSERT(net != NULL);
-       LASSERT(!cfs_in_interrupt());
+       LASSERT(!in_interrupt());
 
        dev = net->ibn_dev;
 
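
cfs_in_interrupt() maps straight onto the kernel's in_interrupt(). The assertion documents that connection creation may sleep, which is only legal in process context; a minimal sketch of the same guard around a sleeping allocation (assumed names):

    #include <linux/bug.h>
    #include <linux/hardirq.h>      /* in_interrupt() */
    #include <linux/slab.h>

    static void *example_alloc(size_t size)
    {
            BUG_ON(in_interrupt()); /* GFP_KERNEL may sleep */
            return kmalloc(size, GFP_KERNEL);
    }
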
@@ -790,19 +790,19 @@ kiblnd_create_conn(kib_peer_t *peer, struct rdma_cm_id *cmid,
                goto failed_2;
        }
 
-        if (dev->ibd_hdev->ibh_ibdev != cmid->device) {
-                /* wakeup failover thread and teardown connection */
-                if (kiblnd_dev_can_failover(dev)) {
-                        cfs_list_add_tail(&dev->ibd_fail_list,
-                                      &kiblnd_data.kib_failed_devs);
-                        cfs_waitq_signal(&kiblnd_data.kib_failover_waitq);
-                }
+       if (dev->ibd_hdev->ibh_ibdev != cmid->device) {
+               /* wakeup failover thread and teardown connection */
+               if (kiblnd_dev_can_failover(dev)) {
+                       cfs_list_add_tail(&dev->ibd_fail_list,
+                                     &kiblnd_data.kib_failed_devs);
+                       wake_up(&kiblnd_data.kib_failover_waitq);
+               }
 
                write_unlock_irqrestore(glock, flags);
-                CERROR("cmid HCA(%s), kib_dev(%s) need failover\n",
-                       cmid->device->name, dev->ibd_ifname);
-                goto failed_2;
-        }
+               CERROR("cmid HCA(%s), kib_dev(%s) need failover\n",
+                      cmid->device->name, dev->ibd_ifname);
+               goto failed_2;
+       }
 
         kiblnd_hdev_addref_locked(dev->ibd_hdev);
         conn->ibc_hdev = dev->ibd_hdev;
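
cfs_waitq_signal() becomes wake_up(). The failover path queues the ailing device and pokes the failover thread; wake_up() is cheap and safe to call with a spinlock held. A hedged sketch of the producer side (all example_* names are hypothetical):

    #include <linux/list.h>
    #include <linux/spinlock.h>
    #include <linux/wait.h>

    static LIST_HEAD(example_failed_devs);
    static DEFINE_SPINLOCK(example_lock);
    static DECLARE_WAIT_QUEUE_HEAD(example_failover_waitq);

    struct example_dev {
            struct list_head ed_fail_list;
    };

    /* Queue a failing device and wake the failover thread. */
    static void example_queue_failover(struct example_dev *dev)
    {
            spin_lock(&example_lock);
            list_add_tail(&dev->ed_fail_list, &example_failed_devs);
            spin_unlock(&example_lock);
            wake_up(&example_failover_waitq);
    }
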
@@ -873,7 +873,7 @@ kiblnd_create_conn(kib_peer_t *peer, struct rdma_cm_id *cmid,
         LIBCFS_FREE(init_qp_attr, sizeof(*init_qp_attr));
 
         /* 1 ref for caller and each rxmsg */
-        cfs_atomic_set(&conn->ibc_refcount, 1 + IBLND_RX_MSGS(version));
+       atomic_set(&conn->ibc_refcount, 1 + IBLND_RX_MSGS(version));
         conn->ibc_nrx = IBLND_RX_MSGS(version);
 
         /* post receives */
@@ -912,7 +912,7 @@ kiblnd_create_conn(kib_peer_t *peer, struct rdma_cm_id *cmid,
         conn->ibc_state = state;
 
         /* 1 more conn */
-        cfs_atomic_inc(&net->ibn_nconns);
+       atomic_inc(&net->ibn_nconns);
         return conn;
 
  failed_2:
@@ -926,69 +926,69 @@ kiblnd_create_conn(kib_peer_t *peer, struct rdma_cm_id *cmid,
 void
 kiblnd_destroy_conn (kib_conn_t *conn)
 {
-        struct rdma_cm_id *cmid = conn->ibc_cmid;
-        kib_peer_t        *peer = conn->ibc_peer;
-        int                rc;
-
-        LASSERT (!cfs_in_interrupt());
-        LASSERT (cfs_atomic_read(&conn->ibc_refcount) == 0);
-        LASSERT (cfs_list_empty(&conn->ibc_early_rxs));
-        LASSERT (cfs_list_empty(&conn->ibc_tx_noops));
-        LASSERT (cfs_list_empty(&conn->ibc_tx_queue));
-        LASSERT (cfs_list_empty(&conn->ibc_tx_queue_rsrvd));
-        LASSERT (cfs_list_empty(&conn->ibc_tx_queue_nocred));
-        LASSERT (cfs_list_empty(&conn->ibc_active_txs));
-        LASSERT (conn->ibc_noops_posted == 0);
-        LASSERT (conn->ibc_nsends_posted == 0);
-
-        switch (conn->ibc_state) {
-        default:
-                /* conn must be completely disengaged from the network */
-                LBUG();
-
-        case IBLND_CONN_DISCONNECTED:
-                /* connvars should have been freed already */
-                LASSERT (conn->ibc_connvars == NULL);
-                break;
+       struct rdma_cm_id *cmid = conn->ibc_cmid;
+       kib_peer_t        *peer = conn->ibc_peer;
+       int                rc;
+
+       LASSERT (!in_interrupt());
+       LASSERT (atomic_read(&conn->ibc_refcount) == 0);
+       LASSERT (cfs_list_empty(&conn->ibc_early_rxs));
+       LASSERT (cfs_list_empty(&conn->ibc_tx_noops));
+       LASSERT (cfs_list_empty(&conn->ibc_tx_queue));
+       LASSERT (cfs_list_empty(&conn->ibc_tx_queue_rsrvd));
+       LASSERT (cfs_list_empty(&conn->ibc_tx_queue_nocred));
+       LASSERT (cfs_list_empty(&conn->ibc_active_txs));
+       LASSERT (conn->ibc_noops_posted == 0);
+       LASSERT (conn->ibc_nsends_posted == 0);
+
+       switch (conn->ibc_state) {
+       default:
+               /* conn must be completely disengaged from the network */
+               LBUG();
+
+       case IBLND_CONN_DISCONNECTED:
+               /* connvars should have been freed already */
+               LASSERT (conn->ibc_connvars == NULL);
+               break;
 
-        case IBLND_CONN_INIT:
-                break;
-        }
+       case IBLND_CONN_INIT:
+               break;
+       }
 
-        /* conn->ibc_cmid might be destroyed by CM already */
-        if (cmid != NULL && cmid->qp != NULL)
-                rdma_destroy_qp(cmid);
+       /* conn->ibc_cmid might be destroyed by CM already */
+       if (cmid != NULL && cmid->qp != NULL)
+               rdma_destroy_qp(cmid);
 
-        if (conn->ibc_cq != NULL) {
-                rc = ib_destroy_cq(conn->ibc_cq);
-                if (rc != 0)
-                        CWARN("Error destroying CQ: %d\n", rc);
-        }
+       if (conn->ibc_cq != NULL) {
+               rc = ib_destroy_cq(conn->ibc_cq);
+               if (rc != 0)
+                       CWARN("Error destroying CQ: %d\n", rc);
+       }
 
-        if (conn->ibc_rx_pages != NULL)
-                kiblnd_unmap_rx_descs(conn);
+       if (conn->ibc_rx_pages != NULL)
+               kiblnd_unmap_rx_descs(conn);
 
-        if (conn->ibc_rxs != NULL) {
-                LIBCFS_FREE(conn->ibc_rxs,
-                            IBLND_RX_MSGS(conn->ibc_version) * sizeof(kib_rx_t));
-        }
+       if (conn->ibc_rxs != NULL) {
+               LIBCFS_FREE(conn->ibc_rxs,
+                           IBLND_RX_MSGS(conn->ibc_version) * sizeof(kib_rx_t));
+       }
 
-        if (conn->ibc_connvars != NULL)
-                LIBCFS_FREE(conn->ibc_connvars, sizeof(*conn->ibc_connvars));
+       if (conn->ibc_connvars != NULL)
+               LIBCFS_FREE(conn->ibc_connvars, sizeof(*conn->ibc_connvars));
 
-        if (conn->ibc_hdev != NULL)
-                kiblnd_hdev_decref(conn->ibc_hdev);
+       if (conn->ibc_hdev != NULL)
+               kiblnd_hdev_decref(conn->ibc_hdev);
 
-        /* See CAVEAT EMPTOR above in kiblnd_create_conn */
-        if (conn->ibc_state != IBLND_CONN_INIT) {
-                kib_net_t *net = peer->ibp_ni->ni_data;
+       /* See CAVEAT EMPTOR above in kiblnd_create_conn */
+       if (conn->ibc_state != IBLND_CONN_INIT) {
+               kib_net_t *net = peer->ibp_ni->ni_data;
 
-                kiblnd_peer_decref(peer);
-                rdma_destroy_id(cmid);
-                cfs_atomic_dec(&net->ibn_nconns);
-        }
+               kiblnd_peer_decref(peer);
+               rdma_destroy_id(cmid);
+               atomic_dec(&net->ibn_nconns);
+       }
 
-        LIBCFS_FREE(conn, sizeof(*conn));
+       LIBCFS_FREE(conn, sizeof(*conn));
 }
 
 int
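
The re-indented kiblnd_destroy_conn() also shows the teardown ordering the driver relies on: QP before CQ before the buffers, with the cm_id destroyed last (and only for connections that got past IBLND_CONN_INIT). A hedged sketch under the verbs API of this era, when ib_destroy_cq() still returned an error code:

    #include <rdma/ib_verbs.h>
    #include <rdma/rdma_cm.h>

    /* Tear down inside-out: the QP references the CQ, and the cm_id
     * owns the QP.  Error handling is elided in this sketch. */
    static void example_conn_teardown(struct rdma_cm_id *cmid, struct ib_cq *cq)
    {
            if (cmid != NULL && cmid->qp != NULL)
                    rdma_destroy_qp(cmid);          /* QP first */
            if (cq != NULL)
                    ib_destroy_cq(cq);              /* then the CQ */
            if (cmid != NULL)
                    rdma_destroy_id(cmid);          /* cm_id last */
    }
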
@@ -1212,7 +1212,7 @@ kiblnd_alloc_pages(kib_pages_t **pp, int cpt, int npages)
 
         for (i = 0; i < npages; i++) {
                p->ibp_pages[i] = cfs_page_cpt_alloc(lnet_cpt_table(), cpt,
-                                                    __GFP_IO);
+                                                    GFP_NOFS);
                 if (p->ibp_pages[i] == NULL) {
                         CERROR("Can't allocate page %d of %d\n", i, npages);
                         kiblnd_free_pages(p);
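
Beyond the wrapper removal, this hunk changes the allocation flags: a bare __GFP_IO cannot block, so allocations fail easily under memory pressure, while GFP_NOFS may block and start I/O but will not recurse into filesystem reclaim, which is the safe choice on a Lustre I/O path. A minimal sketch of the same allocation (the helper name is assumed):

    #include <linux/gfp.h>

    /* Allocate an RX buffer page without risking re-entry into
     * filesystem reclaim under memory pressure. */
    static struct page *example_alloc_rx_page(void)
    {
            return alloc_page(GFP_NOFS);
    }
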
@@ -1325,7 +1325,7 @@ kiblnd_current_hdev(kib_dev_t *dev)
                if (i++ % 50 == 0)
                        CDEBUG(D_NET, "%s: Wait for failover\n",
                               dev->ibd_ifname);
-               cfs_schedule_timeout(cfs_time_seconds(1) / 100);
+               schedule_timeout(cfs_time_seconds(1) / 100);
 
                read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
        }
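
cfs_schedule_timeout() maps to schedule_timeout(), with one caveat worth knowing: schedule_timeout() only sleeps if the task state was changed first, and this loop does not appear to set it, so in TASK_RUNNING the call returns at once and the wait degrades to a yielding retry. The conventional timed sleep looks like this sketch:

    #include <linux/jiffies.h>
    #include <linux/sched.h>

    /* Set the task state before schedule_timeout(); in TASK_RUNNING
     * it would return immediately without sleeping. */
    static void example_sleep_ms(unsigned int ms)
    {
            set_current_state(TASK_UNINTERRUPTIBLE);
            schedule_timeout(msecs_to_jiffies(ms));
    }
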
@@ -1672,7 +1672,7 @@ kiblnd_fmr_pool_map(kib_fmr_poolset_t *fps, __u64 *pages, int npages,
                spin_unlock(&fps->fps_lock);
                CDEBUG(D_NET, "Another thread is allocating new "
                       "FMR pool, waiting for her to complete\n");
-               cfs_schedule();
+               schedule();
                goto again;
 
        }
@@ -1875,7 +1875,7 @@ kiblnd_pool_alloc_node(kib_poolset_t *ps)
                 CDEBUG(D_NET, "Another thread is allocating new "
                        "%s pool, waiting for her to complete\n",
                        ps->ps_name);
-                cfs_schedule();
+               schedule();
                 goto again;
         }
 
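
cfs_schedule() becomes a bare schedule(); with the task still runnable this is a cooperative yield while the other thread finishes growing the pool, after which "goto again" retries under the lock. A hedged sketch of the yield-and-retry idiom (the example_* type is assumed and the pool-grow step is elided):

    #include <linux/sched.h>
    #include <linux/spinlock.h>
    #include <linux/types.h>

    struct example_pool {
            spinlock_t ep_lock;
            bool       ep_growing;  /* another thread is refilling */
            void      *ep_free;     /* one free node, for brevity */
    };

    static void *example_pool_get(struct example_pool *p)
    {
    again:
            spin_lock(&p->ep_lock);
            if (p->ep_free != NULL) {
                    void *node = p->ep_free;

                    p->ep_free = NULL;
                    spin_unlock(&p->ep_lock);
                    return node;
            }
            if (p->ep_growing) {
                    spin_unlock(&p->ep_lock);
                    schedule();     /* yield until the refill completes */
                    goto again;
            }
            spin_unlock(&p->ep_lock);
            return NULL;            /* caller would grow the pool here */
    }
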
@@ -2816,7 +2816,7 @@ kiblnd_base_shutdown(void)
         LASSERT (cfs_list_empty(&kiblnd_data.kib_devs));
 
         CDEBUG(D_MALLOC, "before LND base cleanup: kmem %d\n",
-               cfs_atomic_read(&libcfs_kmemory));
+              atomic_read(&libcfs_kmemory));
 
         switch (kiblnd_data.kib_init) {
         default:
@@ -2831,24 +2831,24 @@ kiblnd_base_shutdown(void)
                 LASSERT (cfs_list_empty(&kiblnd_data.kib_connd_zombies));
                 LASSERT (cfs_list_empty(&kiblnd_data.kib_connd_conns));
 
-                /* flag threads to terminate; wake and wait for them to die */
-                kiblnd_data.kib_shutdown = 1;
+               /* flag threads to terminate; wake and wait for them to die */
+               kiblnd_data.kib_shutdown = 1;
 
                /* NB: we really want to stop scheduler threads net by net
                 * instead of the whole module, this should be improved
                 * with dynamic configuration LNet */
                cfs_percpt_for_each(sched, i, kiblnd_data.kib_scheds)
-                       cfs_waitq_broadcast(&sched->ibs_waitq);
+                       wake_up_all(&sched->ibs_waitq);
 
-                cfs_waitq_broadcast(&kiblnd_data.kib_connd_waitq);
-                cfs_waitq_broadcast(&kiblnd_data.kib_failover_waitq);
+               wake_up_all(&kiblnd_data.kib_connd_waitq);
+               wake_up_all(&kiblnd_data.kib_failover_waitq);
 
-                i = 2;
-                while (cfs_atomic_read(&kiblnd_data.kib_nthreads) != 0) {
+               i = 2;
+               while (atomic_read(&kiblnd_data.kib_nthreads) != 0) {
                         i++;
                         CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* power of 2? */
                                "Waiting for %d threads to terminate\n",
-                               cfs_atomic_read(&kiblnd_data.kib_nthreads));
+                              atomic_read(&kiblnd_data.kib_nthreads));
                         cfs_pause(cfs_time_seconds(1));
                 }
 
@@ -2868,10 +2868,10 @@ kiblnd_base_shutdown(void)
                cfs_percpt_free(kiblnd_data.kib_scheds);
 
         CDEBUG(D_MALLOC, "after LND base cleanup: kmem %d\n",
-               cfs_atomic_read(&libcfs_kmemory));
+              atomic_read(&libcfs_kmemory));
 
-        kiblnd_data.kib_init = IBLND_INIT_NOTHING;
-        PORTAL_MODULE_UNUSE;
+       kiblnd_data.kib_init = IBLND_INIT_NOTHING;
+       module_put(THIS_MODULE);
 }
 
 void
@@ -2888,7 +2888,7 @@ kiblnd_shutdown (lnet_ni_t *ni)
                 goto out;
 
         CDEBUG(D_MALLOC, "before LND net cleanup: kmem %d\n",
-               cfs_atomic_read(&libcfs_kmemory));
+              atomic_read(&libcfs_kmemory));
 
        write_lock_irqsave(g_lock, flags);
        net->ibn_shutdown = 1;
@@ -2904,12 +2904,12 @@ kiblnd_shutdown (lnet_ni_t *ni)
 
                 /* Wait for all peer state to clean up */
                 i = 2;
-                while (cfs_atomic_read(&net->ibn_npeers) != 0) {
+               while (atomic_read(&net->ibn_npeers) != 0) {
                         i++;
                         CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* 2**n? */
                                "%s: waiting for %d peers to disconnect\n",
                                libcfs_nid2str(ni->ni_nid),
-                               cfs_atomic_read(&net->ibn_npeers));
+                              atomic_read(&net->ibn_npeers));
                         cfs_pause(cfs_time_seconds(1));
                 }
 
@@ -2924,7 +2924,7 @@ kiblnd_shutdown (lnet_ni_t *ni)
                 /* fall through */
 
         case IBLND_INIT_NOTHING:
-                LASSERT (cfs_atomic_read(&net->ibn_nconns) == 0);
+               LASSERT (atomic_read(&net->ibn_nconns) == 0);
 
                 if (net->ibn_dev != NULL &&
                     net->ibn_dev->ibd_nnets == 0)
@@ -2934,7 +2934,7 @@ kiblnd_shutdown (lnet_ni_t *ni)
         }
 
         CDEBUG(D_MALLOC, "after LND net cleanup: kmem %d\n",
-               cfs_atomic_read(&libcfs_kmemory));
+              atomic_read(&libcfs_kmemory));
 
         net->ibn_init = IBLND_INIT_NOTHING;
         ni->ni_data = NULL;
@@ -2956,8 +2956,8 @@ kiblnd_base_startup(void)
 
         LASSERT (kiblnd_data.kib_init == IBLND_INIT_NOTHING);
 
-        PORTAL_MODULE_USE;
-        memset(&kiblnd_data, 0, sizeof(kiblnd_data)); /* zero pointers, flags etc */
+       try_module_get(THIS_MODULE);
+       memset(&kiblnd_data, 0, sizeof(kiblnd_data)); /* zero pointers, flags etc */
 
        rwlock_init(&kiblnd_data.kib_global_lock);
 
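
PORTAL_MODULE_USE/UNUSE were libcfs macros over the module refcount; the patch calls try_module_get()/module_put() directly. Worth noting as a general idiom (not a change this patch makes): try_module_get() returns false while the module is unloading, so its result is conventionally checked, as in this sketch:

    #include <linux/module.h>

    static int example_startup(void)
    {
            if (!try_module_get(THIS_MODULE))       /* fails during unload */
                    return -ENODEV;
            /* ... initialization ... */
            return 0;
    }

    static void example_shutdown(void)
    {
            /* ... cleanup ... */
            module_put(THIS_MODULE);
    }
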
@@ -2975,10 +2975,10 @@ kiblnd_base_startup(void)
                 CFS_INIT_LIST_HEAD(&kiblnd_data.kib_peers[i]);
 
        spin_lock_init(&kiblnd_data.kib_connd_lock);
-        CFS_INIT_LIST_HEAD(&kiblnd_data.kib_connd_conns);
-        CFS_INIT_LIST_HEAD(&kiblnd_data.kib_connd_zombies);
-        cfs_waitq_init(&kiblnd_data.kib_connd_waitq);
-       cfs_waitq_init(&kiblnd_data.kib_failover_waitq);
+       CFS_INIT_LIST_HEAD(&kiblnd_data.kib_connd_conns);
+       CFS_INIT_LIST_HEAD(&kiblnd_data.kib_connd_zombies);
+       init_waitqueue_head(&kiblnd_data.kib_connd_waitq);
+       init_waitqueue_head(&kiblnd_data.kib_failover_waitq);
 
        kiblnd_data.kib_scheds = cfs_percpt_alloc(lnet_cpt_table(),
                                                  sizeof(*sched));
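
cfs_waitq_init() becomes init_waitqueue_head() (or DECLARE_WAIT_QUEUE_HEAD for statics). The consumer side of these queues (the connd, failover, and scheduler threads) blocks with the standard wait primitives; a hedged sketch of a waiter, with example_* names and a placeholder work test:

    #include <linux/sched.h>
    #include <linux/types.h>
    #include <linux/wait.h>

    static DECLARE_WAIT_QUEUE_HEAD(example_waitq);
    static int example_shutdown_flag;

    static bool example_have_work(void)
    {
            return false;   /* placeholder condition */
    }

    /* Service thread: sleep until there is work or shutdown. */
    static int example_thread(void *arg)
    {
            while (!example_shutdown_flag) {
                    if (wait_event_interruptible(example_waitq,
                                                 example_have_work() ||
                                                 example_shutdown_flag))
                            continue;       /* interrupted by a signal */
                    /* ... drain work ... */
            }
            return 0;
    }
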
@@ -2990,7 +2990,7 @@ kiblnd_base_startup(void)
 
                spin_lock_init(&sched->ibs_lock);
                CFS_INIT_LIST_HEAD(&sched->ibs_conns);
-               cfs_waitq_init(&sched->ibs_waitq);
+               init_waitqueue_head(&sched->ibs_waitq);
 
                nthrs = cfs_cpt_weight(lnet_cpt_table(), i);
                if (*kiblnd_tunables.kib_nscheds > 0) {