/*
 * Finalise a completed transmit descriptor and return it to its pool.
 *
 * Must be called in thread context (it may call lnet_finalize(), which can
 * block).  The tx must be fully idle: not queued, not mid-send and not
 * waiting for a peer response.  Up to two lnet messages attached to the tx
 * are finalized with the tx's completion status, but only AFTER the tx has
 * been unmapped and freed back to the pool, so their buffers are no longer
 * referenced by the descriptor.
 */
void
kiblnd_tx_done (lnet_ni_t *ni, kib_tx_t *tx)
{
	lnet_msg_t *lntmsg[2];
	kib_net_t  *net = ni->ni_data;
	int         rc;
	int         i;

	LASSERT (net != NULL);
	LASSERT (!in_interrupt());
	LASSERT (!tx->tx_queued);	/* mustn't be queued for sending */
	LASSERT (tx->tx_sending == 0);	/* mustn't be awaiting sent callback */
	LASSERT (!tx->tx_waiting);	/* mustn't be awaiting peer response */
	LASSERT (tx->tx_pool != NULL);

	kiblnd_unmap_tx(ni, tx);

	/* tx may have up to 2 lnet msgs to finalise; detach them first so the
	 * tx carries no references once it goes back to the pool */
	lntmsg[0] = tx->tx_lntmsg[0]; tx->tx_lntmsg[0] = NULL;
	lntmsg[1] = tx->tx_lntmsg[1]; tx->tx_lntmsg[1] = NULL;
	rc = tx->tx_status;

	if (tx->tx_conn != NULL) {
		LASSERT (ni == tx->tx_conn->ibc_peer->ibp_ni);

		kiblnd_conn_decref(tx->tx_conn);
		tx->tx_conn = NULL;
	}

	tx->tx_nwrq = 0;
	tx->tx_status = 0;

	kiblnd_pool_free_node(&tx->tx_pool->tpo_pool, &tx->tx_list);

	/* delay finalize until my descs have been freed */
	for (i = 0; i < 2; i++) {
		if (lntmsg[i] == NULL)
			continue;

		lnet_finalize(ni, lntmsg[i], rc);
	}
}
void
int
kiblnd_post_rx (kib_rx_t *rx, int credit)
{
- kib_conn_t *conn = rx->rx_conn;
- kib_net_t *net = conn->ibc_peer->ibp_ni->ni_data;
- struct ib_recv_wr *bad_wrq = NULL;
- struct ib_mr *mr;
- int rc;
+ kib_conn_t *conn = rx->rx_conn;
+ kib_net_t *net = conn->ibc_peer->ibp_ni->ni_data;
+ struct ib_recv_wr *bad_wrq = NULL;
+ struct ib_mr *mr;
+ int rc;
- LASSERT (net != NULL);
- LASSERT (!cfs_in_interrupt());
- LASSERT (credit == IBLND_POSTRX_NO_CREDIT ||
- credit == IBLND_POSTRX_PEER_CREDIT ||
- credit == IBLND_POSTRX_RSRVD_CREDIT);
+ LASSERT (net != NULL);
+ LASSERT (!in_interrupt());
+ LASSERT (credit == IBLND_POSTRX_NO_CREDIT ||
+ credit == IBLND_POSTRX_PEER_CREDIT ||
+ credit == IBLND_POSTRX_RSRVD_CREDIT);
- mr = kiblnd_find_dma_mr(conn->ibc_hdev, rx->rx_msgaddr, IBLND_MSG_SIZE);
- LASSERT (mr != NULL);
+ mr = kiblnd_find_dma_mr(conn->ibc_hdev, rx->rx_msgaddr, IBLND_MSG_SIZE);
+ LASSERT (mr != NULL);
rx->rx_sge.lkey = mr->lkey;
rx->rx_sge.addr = rx->rx_msgaddr;
{
struct page *page;
- if (vaddr >= VMALLOC_START &&
- vaddr < VMALLOC_END) {
+ if (is_vmalloc_addr((void *)vaddr)) {
page = vmalloc_to_page ((void *)vaddr);
LASSERT (page != NULL);
return page;
static int
kiblnd_fmr_map_tx(kib_net_t *net, kib_tx_t *tx, kib_rdma_desc_t *rd, int nob)
{
- kib_hca_dev_t *hdev = tx->tx_pool->tpo_hdev;
+ kib_hca_dev_t *hdev;
__u64 *pages = tx->tx_pages;
kib_fmr_poolset_t *fps;
int npages;
int rc;
int i;
+ LASSERT(tx->tx_pool != NULL);
+ LASSERT(tx->tx_pool->tpo_pool.po_owner != NULL);
+
+ hdev = tx->tx_pool->tpo_hdev;
+
for (i = 0, npages = 0; i < rd->rd_nfrags; i++) {
for (size = 0; size < rd->rd_frags[i].rf_nob;
size += hdev->ibh_page_size) {
}
}
- LASSERT(tx->tx_pool != NULL);
- LASSERT(tx->tx_pool->tpo_pool.po_owner != NULL);
-
cpt = tx->tx_pool->tpo_pool.po_owner->ps_cpt;
fps = net->ibn_fmr_ps[cpt];
static int
kiblnd_pmr_map_tx(kib_net_t *net, kib_tx_t *tx, kib_rdma_desc_t *rd, int nob)
{
- kib_hca_dev_t *hdev = tx->tx_pool->tpo_hdev;
+ kib_hca_dev_t *hdev;
kib_pmr_poolset_t *pps;
__u64 iova;
int cpt;
int rc;
- iova = rd->rd_frags[0].rf_addr & ~hdev->ibh_page_mask;
-
LASSERT(tx->tx_pool != NULL);
LASSERT(tx->tx_pool->tpo_pool.po_owner != NULL);
+ hdev = tx->tx_pool->tpo_hdev;
+
+ iova = rd->rd_frags[0].rf_addr & ~hdev->ibh_page_mask;
+
cpt = tx->tx_pool->tpo_pool.po_owner->ps_cpt;
pps = net->ibn_pmr_ps[cpt];
int
kiblnd_init_rdma (kib_conn_t *conn, kib_tx_t *tx, int type,
- int resid, kib_rdma_desc_t *dstrd, __u64 dstcookie)
+ int resid, kib_rdma_desc_t *dstrd, __u64 dstcookie)
{
- kib_msg_t *ibmsg = tx->tx_msg;
- kib_rdma_desc_t *srcrd = tx->tx_rd;
- struct ib_sge *sge = &tx->tx_sge[0];
- struct ib_send_wr *wrq = &tx->tx_wrq[0];
- int rc = resid;
- int srcidx;
- int dstidx;
- int wrknob;
-
- LASSERT (!cfs_in_interrupt());
- LASSERT (tx->tx_nwrq == 0);
- LASSERT (type == IBLND_MSG_GET_DONE ||
- type == IBLND_MSG_PUT_DONE);
-
- srcidx = dstidx = 0;
+ kib_msg_t *ibmsg = tx->tx_msg;
+ kib_rdma_desc_t *srcrd = tx->tx_rd;
+ struct ib_sge *sge = &tx->tx_sge[0];
+ struct ib_send_wr *wrq = &tx->tx_wrq[0];
+ int rc = resid;
+ int srcidx;
+ int dstidx;
+ int wrknob;
+
+ LASSERT (!in_interrupt());
+ LASSERT (tx->tx_nwrq == 0);
+ LASSERT (type == IBLND_MSG_GET_DONE ||
+ type == IBLND_MSG_PUT_DONE);
+
+ srcidx = dstidx = 0;
while (resid > 0) {
if (srcidx >= srcrd->rd_nfrags) {
LASSERT (!tx->tx_queued); /* not queued for sending already */
LASSERT (conn->ibc_state >= IBLND_CONN_ESTABLISHED);
- tx->tx_queued = 1;
- tx->tx_deadline = jiffies + (*kiblnd_tunables.kib_timeout * CFS_HZ);
+ tx->tx_queued = 1;
+ tx->tx_deadline = jiffies + (*kiblnd_tunables.kib_timeout * HZ);
if (tx->tx_conn == NULL) {
kiblnd_conn_addref(conn);
CDEBUG(D_NET, "sending %d bytes in %d frags to %s\n",
payload_nob, payload_niov, libcfs_id2str(target));
- LASSERT (payload_nob == 0 || payload_niov > 0);
- LASSERT (payload_niov <= LNET_MAX_IOV);
+ LASSERT (payload_nob == 0 || payload_niov > 0);
+ LASSERT (payload_niov <= LNET_MAX_IOV);
- /* Thread context */
- LASSERT (!cfs_in_interrupt());
- /* payload is either all vaddrs or all pages */
- LASSERT (!(payload_kiov != NULL && payload_iov != NULL));
+ /* Thread context */
+ LASSERT (!in_interrupt());
+ /* payload is either all vaddrs or all pages */
+ LASSERT (!(payload_kiov != NULL && payload_iov != NULL));
- switch (type) {
- default:
- LBUG();
- return (-EIO);
+ switch (type) {
+ default:
+ LBUG();
+ return (-EIO);
case LNET_MSG_ACK:
LASSERT (payload_nob == 0);
kib_conn_t *conn = rx->rx_conn;
kib_tx_t *tx;
kib_msg_t *txmsg;
- int nob;
- int post_credit = IBLND_POSTRX_PEER_CREDIT;
- int rc = 0;
+ int nob;
+ int post_credit = IBLND_POSTRX_PEER_CREDIT;
+ int rc = 0;
- LASSERT (mlen <= rlen);
- LASSERT (!cfs_in_interrupt());
- /* Either all pages or all vaddrs */
- LASSERT (!(kiov != NULL && iov != NULL));
+ LASSERT (mlen <= rlen);
+ LASSERT (!in_interrupt());
+ /* Either all pages or all vaddrs */
+ LASSERT (!(kiov != NULL && iov != NULL));
- switch (rxmsg->ibm_type) {
- default:
- LBUG();
+ switch (rxmsg->ibm_type) {
+ default:
+ LBUG();
case IBLND_MSG_IMMEDIATE:
nob = offsetof(kib_msg_t, ibm_u.immediate.ibim_payload[rlen]);
}
int
-kiblnd_thread_start (int (*fn)(void *arg), void *arg)
+kiblnd_thread_start(int (*fn)(void *arg), void *arg, char *name)
{
- long pid = cfs_create_thread (fn, arg, 0);
+ struct task_struct *task = kthread_run(fn, arg, name);
- if (pid < 0)
- return ((int)pid);
+ if (IS_ERR(task))
+ return PTR_ERR(task);
- cfs_atomic_inc (&kiblnd_data.kib_nthreads);
- return (0);
+ atomic_inc(&kiblnd_data.kib_nthreads);
+ return 0;
}
/*
 * Called by each kiblnd daemon thread as it exits: decrement the global
 * thread count that kiblnd_thread_start() incremented, so module shutdown
 * can tell when all threads have gone.
 */
void
kiblnd_thread_fini (void)
{
	atomic_dec (&kiblnd_data.kib_nthreads);
}
/*
 * Record that we just heard from this peer.
 *
 * Lockless by design: concurrent callers all write the current time, so the
 * worst case is a slightly stale-but-recent timestamp.  The memory barrier
 * makes the new timestamp visible to other CPUs before any subsequent
 * state changes by the caller.
 */
void
kiblnd_peer_alive (kib_peer_t *peer)
{
	/* This is racy, but everyone's only writing cfs_time_current() */
	peer->ibp_last_alive = cfs_time_current();
	smp_mb();
}
void
kiblnd_set_conn_state(conn, IBLND_CONN_CLOSING);
- if (error != 0 &&
- kiblnd_dev_can_failover(dev)) {
- cfs_list_add_tail(&dev->ibd_fail_list,
- &kiblnd_data.kib_failed_devs);
- cfs_waitq_signal(&kiblnd_data.kib_failover_waitq);
- }
+ if (error != 0 &&
+ kiblnd_dev_can_failover(dev)) {
+ cfs_list_add_tail(&dev->ibd_fail_list,
+ &kiblnd_data.kib_failed_devs);
+ wake_up(&kiblnd_data.kib_failover_waitq);
+ }
spin_lock_irqsave(&kiblnd_data.kib_connd_lock, flags);
cfs_list_add_tail(&conn->ibc_list, &kiblnd_data.kib_connd_conns);
- cfs_waitq_signal(&kiblnd_data.kib_connd_waitq);
+ wake_up(&kiblnd_data.kib_connd_waitq);
spin_unlock_irqrestore(&kiblnd_data.kib_connd_lock, flags);
}
unsigned long flags;
kib_rx_t *rx;
- LASSERT(!cfs_in_interrupt());
+ LASSERT(!in_interrupt());
LASSERT(conn->ibc_state >= IBLND_CONN_ESTABLISHED);
write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
kiblnd_handle_rx(rx);
write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
- }
+ }
write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
}
/*
 * Tear down a connection that is past INIT: mark it DISCONNECTED, abort all
 * outstanding receives and queued/active transmits, then deliver any early
 * receives that were stashed while the connection was being established.
 *
 * Thread context only; aborting txs completes them via the tx_done path,
 * which may block.
 */
void
kiblnd_finalise_conn (kib_conn_t *conn)
{
	LASSERT (!in_interrupt());
	LASSERT (conn->ibc_state > IBLND_CONN_INIT);

	kiblnd_set_conn_state(conn, IBLND_CONN_DISCONNECTED);

	/* abort_receives moves QP state to IB_QPS_ERR. This is only required
	 * for connections that didn't get as far as being connected, because
	 * rdma_disconnect() does this for free. */
	kiblnd_abort_receives(conn);

	/* Complete all tx descs not waiting for sends to complete.
	 * NB we should be safe from RDMA now that the QP has changed state */

	kiblnd_abort_txs(conn, &conn->ibc_tx_noops);
	kiblnd_abort_txs(conn, &conn->ibc_tx_queue);
	kiblnd_abort_txs(conn, &conn->ibc_tx_queue_rsrvd);
	kiblnd_abort_txs(conn, &conn->ibc_tx_queue_nocred);
	kiblnd_abort_txs(conn, &conn->ibc_active_txs);

	kiblnd_handle_early_rxs(conn);
}
void
kiblnd_peer_connect_failed (kib_peer_t *peer, int active, int error)
{
- CFS_LIST_HEAD (zombies);
- unsigned long flags;
+ CFS_LIST_HEAD (zombies);
+ unsigned long flags;
- LASSERT (error != 0);
- LASSERT (!cfs_in_interrupt());
+ LASSERT (error != 0);
+ LASSERT (!in_interrupt());
write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
- if (active) {
- LASSERT (peer->ibp_connecting > 0);
- peer->ibp_connecting--;
- } else {
- LASSERT (peer->ibp_accepting > 0);
- peer->ibp_accepting--;
- }
+ if (active) {
+ LASSERT (peer->ibp_connecting > 0);
+ peer->ibp_connecting--;
+ } else {
+ LASSERT (peer->ibp_accepting > 0);
+ peer->ibp_accepting--;
+ }
- if (peer->ibp_connecting != 0 ||
+ if (peer->ibp_connecting != 0 ||
peer->ibp_accepting != 0) {
/* another connection attempt under way... */
write_unlock_irqrestore(&kiblnd_data.kib_global_lock,
active = (conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT);
- CDEBUG(D_NET,"%s: active(%d), version(%x), status(%d)\n",
- libcfs_nid2str(peer->ibp_nid), active,
- conn->ibc_version, status);
+ CDEBUG(D_NET,"%s: active(%d), version(%x), status(%d)\n",
+ libcfs_nid2str(peer->ibp_nid), active,
+ conn->ibc_version, status);
- LASSERT (!cfs_in_interrupt());
- LASSERT ((conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT &&
- peer->ibp_connecting > 0) ||
- (conn->ibc_state == IBLND_CONN_PASSIVE_WAIT &&
- peer->ibp_accepting > 0));
+ LASSERT (!in_interrupt());
+ LASSERT ((conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT &&
+ peer->ibp_connecting > 0) ||
+ (conn->ibc_state == IBLND_CONN_PASSIVE_WAIT &&
+ peer->ibp_accepting > 0));
LIBCFS_FREE(conn->ibc_connvars, sizeof(*conn->ibc_connvars));
conn->ibc_connvars = NULL;
lnet_nid_t nid;
struct rdma_conn_param cp;
kib_rej_t rej;
- int version = IBLND_MSG_VERSION;
- unsigned long flags;
- int rc;
- struct sockaddr_in *peer_addr;
- LASSERT (!cfs_in_interrupt());
+ int version = IBLND_MSG_VERSION;
+ unsigned long flags;
+ int rc;
+ struct sockaddr_in *peer_addr;
+ LASSERT (!in_interrupt());
- /* cmid inherits 'context' from the corresponding listener id */
- ibdev = (kib_dev_t *)cmid->context;
- LASSERT (ibdev != NULL);
+ /* cmid inherits 'context' from the corresponding listener id */
+ ibdev = (kib_dev_t *)cmid->context;
+ LASSERT (ibdev != NULL);
memset(&rej, 0, sizeof(rej));
rej.ibr_magic = IBLND_MSG_MAGIC;
void
kiblnd_rejected (kib_conn_t *conn, int reason, void *priv, int priv_nob)
{
- kib_peer_t *peer = conn->ibc_peer;
+ kib_peer_t *peer = conn->ibc_peer;
- LASSERT (!cfs_in_interrupt());
- LASSERT (conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT);
+ LASSERT (!in_interrupt());
+ LASSERT (conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT);
- switch (reason) {
- case IB_CM_REJ_STALE_CONN:
- kiblnd_reconnect(conn, IBLND_MSG_VERSION, 0,
- IBLND_REJECT_CONN_STALE, NULL);
- break;
+ switch (reason) {
+ case IB_CM_REJ_STALE_CONN:
+ kiblnd_reconnect(conn, IBLND_MSG_VERSION, 0,
+ IBLND_REJECT_CONN_STALE, NULL);
+ break;
case IB_CM_REJ_INVALID_SERVICE_ID:
CNETERR("%s rejected: no listener at %d\n",
case IBLND_REJECT_MSG_QUEUE_SIZE:
CERROR("%s rejected: incompatible message queue depth %d, %d\n",
- libcfs_nid2str(peer->ibp_nid), cp->ibcp_queue_depth,
- IBLND_MSG_QUEUE_SIZE(conn->ibc_version));
+ libcfs_nid2str(peer->ibp_nid),
+ cp != NULL ? cp->ibcp_queue_depth :
+ IBLND_MSG_QUEUE_SIZE(rej->ibr_version),
+ IBLND_MSG_QUEUE_SIZE(conn->ibc_version));
break;
case IBLND_REJECT_RDMA_FRAGS:
CERROR("%s rejected: incompatible # of RDMA fragments %d, %d\n",
- libcfs_nid2str(peer->ibp_nid), cp->ibcp_max_frags,
- IBLND_RDMA_FRAGS(conn->ibc_version));
+ libcfs_nid2str(peer->ibp_nid),
+ cp != NULL ? cp->ibcp_max_frags :
+ IBLND_RDMA_FRAGS(rej->ibr_version),
+ IBLND_RDMA_FRAGS(conn->ibc_version));
break;
case IBLND_REJECT_NO_RESOURCES:
/*
 * Actively disconnect a CLOSING connection: issue the RDMA-CM disconnect,
 * finalise the connection (aborting all queued/active work), and notify
 * LNet about the peer's last-alive state.
 *
 * Only the connd thread may call this, and only in thread context.
 */
void
kiblnd_disconnect_conn (kib_conn_t *conn)
{
	LASSERT (!in_interrupt());
	LASSERT (current == kiblnd_data.kib_connd);
	LASSERT (conn->ibc_state == IBLND_CONN_CLOSING);

	rdma_disconnect(conn->ibc_cmid);
	kiblnd_finalise_conn(conn);

	kiblnd_peer_notify(conn->ibc_peer);
}
int
kiblnd_connd (void *arg)
{
- cfs_waitlink_t wait;
- unsigned long flags;
- kib_conn_t *conn;
- int timeout;
- int i;
- int dropped_lock;
- int peer_index = 0;
- unsigned long deadline = jiffies;
+ wait_queue_t wait;
+ unsigned long flags;
+ kib_conn_t *conn;
+ int timeout;
+ int i;
+ int dropped_lock;
+ int peer_index = 0;
+ unsigned long deadline = jiffies;
- cfs_daemonize ("kiblnd_connd");
- cfs_block_allsigs ();
+ cfs_block_allsigs ();
- cfs_waitlink_init (&wait);
- kiblnd_data.kib_connd = current;
+ init_waitqueue_entry_current (&wait);
+ kiblnd_data.kib_connd = current;
spin_lock_irqsave(&kiblnd_data.kib_connd_lock, flags);
if (chunk == 0)
chunk = 1;
- for (i = 0; i < chunk; i++) {
- kiblnd_check_conns(peer_index);
- peer_index = (peer_index + 1) %
- kiblnd_data.kib_peer_hash_size;
- }
+ for (i = 0; i < chunk; i++) {
+ kiblnd_check_conns(peer_index);
+ peer_index = (peer_index + 1) %
+ kiblnd_data.kib_peer_hash_size;
+ }
- deadline += p * CFS_HZ;
+ deadline += p * HZ;
spin_lock_irqsave(&kiblnd_data.kib_connd_lock, flags);
}
continue;
/* Nothing to do for 'timeout' */
- cfs_set_current_state(CFS_TASK_INTERRUPTIBLE);
- cfs_waitq_add(&kiblnd_data.kib_connd_waitq, &wait);
+ set_current_state(TASK_INTERRUPTIBLE);
+ add_wait_queue(&kiblnd_data.kib_connd_waitq, &wait);
spin_unlock_irqrestore(&kiblnd_data.kib_connd_lock, flags);
- cfs_waitq_timedwait(&wait, CFS_TASK_INTERRUPTIBLE, timeout);
+ waitq_timedwait(&wait, TASK_INTERRUPTIBLE, timeout);
- cfs_set_current_state(CFS_TASK_RUNNING);
- cfs_waitq_del(&kiblnd_data.kib_connd_waitq, &wait);
+ set_current_state(TASK_RUNNING);
+ remove_wait_queue(&kiblnd_data.kib_connd_waitq, &wait);
spin_lock_irqsave(&kiblnd_data.kib_connd_lock, flags);
}
conn->ibc_scheduled = 1;
cfs_list_add_tail(&conn->ibc_sched_list, &sched->ibs_conns);
- if (cfs_waitq_active(&sched->ibs_waitq))
- cfs_waitq_signal(&sched->ibs_waitq);
+ if (waitqueue_active(&sched->ibs_waitq))
+ wake_up(&sched->ibs_waitq);
}
spin_unlock_irqrestore(&sched->ibs_lock, flags);
long id = (long)arg;
struct kib_sched_info *sched;
kib_conn_t *conn;
- cfs_waitlink_t wait;
+ wait_queue_t wait;
unsigned long flags;
struct ib_wc wc;
- char name[20];
int did_something;
int busy_loops = 0;
int rc;
- snprintf(name, sizeof(name), "kiblnd_sd_%02ld_%02ld",
- KIB_THREAD_CPT(id), KIB_THREAD_TID(id));
-
- cfs_daemonize(name);
cfs_block_allsigs();
- cfs_waitlink_init(&wait);
+ init_waitqueue_entry_current(&wait);
sched = kiblnd_data.kib_scheds[KIB_THREAD_CPT(id)];
rc = cfs_cpt_bind(lnet_cpt_table(), sched->ibs_cpt);
if (rc != 0) {
- CWARN("Failed to bind %s on CPT %d, please verify whether "
+ CWARN("Failed to bind on CPT %d, please verify whether "
"all CPUs are healthy and reload modules if necessary, "
"otherwise your system might under risk of low "
- "performance\n", name, sched->ibs_cpt);
+ "performance\n", sched->ibs_cpt);
}
spin_lock_irqsave(&sched->ibs_lock, flags);
if (busy_loops++ >= IBLND_RESCHED) {
spin_unlock_irqrestore(&sched->ibs_lock, flags);
- cfs_cond_resched();
+ cond_resched();
busy_loops = 0;
spin_lock_irqsave(&sched->ibs_lock, flags);
kiblnd_conn_addref(conn);
cfs_list_add_tail(&conn->ibc_sched_list,
&sched->ibs_conns);
- if (cfs_waitq_active(&sched->ibs_waitq))
- cfs_waitq_signal(&sched->ibs_waitq);
+ if (waitqueue_active(&sched->ibs_waitq))
+ wake_up(&sched->ibs_waitq);
} else {
conn->ibc_scheduled = 0;
}
if (did_something)
continue;
- cfs_set_current_state(CFS_TASK_INTERRUPTIBLE);
- cfs_waitq_add_exclusive(&sched->ibs_waitq, &wait);
+ set_current_state(TASK_INTERRUPTIBLE);
+ add_wait_queue_exclusive(&sched->ibs_waitq, &wait);
spin_unlock_irqrestore(&sched->ibs_lock, flags);
- cfs_waitq_wait(&wait, CFS_TASK_INTERRUPTIBLE);
+ waitq_wait(&wait, TASK_INTERRUPTIBLE);
busy_loops = 0;
- cfs_waitq_del(&sched->ibs_waitq, &wait);
- cfs_set_current_state(CFS_TASK_RUNNING);
+ remove_wait_queue(&sched->ibs_waitq, &wait);
+ set_current_state(TASK_RUNNING);
spin_lock_irqsave(&sched->ibs_lock, flags);
}
kiblnd_failover_thread(void *arg)
{
rwlock_t *glock = &kiblnd_data.kib_global_lock;
- kib_dev_t *dev;
- cfs_waitlink_t wait;
- unsigned long flags;
- int rc;
+ kib_dev_t *dev;
+ wait_queue_t wait;
+ unsigned long flags;
+ int rc;
- LASSERT (*kiblnd_tunables.kib_dev_failover != 0);
+ LASSERT (*kiblnd_tunables.kib_dev_failover != 0);
- cfs_daemonize ("kiblnd_failover");
- cfs_block_allsigs ();
+ cfs_block_allsigs ();
- cfs_waitlink_init(&wait);
+ init_waitqueue_entry_current(&wait);
write_lock_irqsave(glock, flags);
while (!kiblnd_data.kib_shutdown) {
/* long sleep if no more pending failover */
long_sleep = cfs_list_empty(&kiblnd_data.kib_failed_devs);
- cfs_set_current_state(CFS_TASK_INTERRUPTIBLE);
- cfs_waitq_add(&kiblnd_data.kib_failover_waitq, &wait);
+ set_current_state(TASK_INTERRUPTIBLE);
+ add_wait_queue(&kiblnd_data.kib_failover_waitq, &wait);
write_unlock_irqrestore(glock, flags);
rc = schedule_timeout(long_sleep ? cfs_time_seconds(10) :
cfs_time_seconds(1));
- cfs_set_current_state(CFS_TASK_RUNNING);
- cfs_waitq_del(&kiblnd_data.kib_failover_waitq, &wait);
+ set_current_state(TASK_RUNNING);
+ remove_wait_queue(&kiblnd_data.kib_failover_waitq, &wait);
write_lock_irqsave(glock, flags);
if (!long_sleep || rc != 0)