/*
* Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
+ *
+ * Copyright (c) 2012, Intel Corporation.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
lnet_eq_wait_lock();
lnet_res_lh_initialize(&the_lnet.ln_eq_container, &eq->eq_lh);
- cfs_list_add(&eq->eq_list, &the_lnet.ln_eq_container.rec_active);
+ list_add(&eq->eq_list, &the_lnet.ln_eq_container.rec_active);
lnet_eq_wait_unlock();
lnet_res_unlock(LNET_LOCK_EX);
refs = eq->eq_refs;
lnet_res_lh_invalidate(&eq->eq_lh);
- cfs_list_del(&eq->eq_list);
+ list_del(&eq->eq_list);
lnet_eq_free_locked(eq);
out:
lnet_eq_wait_unlock();
#ifdef __KERNEL__
/* Wake anyone waiting in LNetEQPoll() */
- if (cfs_waitq_active(&the_lnet.ln_eq_waitq))
- cfs_waitq_broadcast(&the_lnet.ln_eq_waitq);
+ if (waitqueue_active(&the_lnet.ln_eq_waitq))
+ wake_up_all(&the_lnet.ln_eq_waitq);
#else
# ifndef HAVE_LIBPTHREAD
/* LNetEQPoll() calls into _the_ LND to wait for action */
/*
 * NOTE(review): the span below is a mangled unified-diff fragment, not
 * plain C.  Lines prefixed "-" are removed by the patch, lines prefixed
 * "+" are added; unprefixed lines are context whose leading space (and
 * indentation) was lost in extraction.  The patch converts the legacy
 * libcfs wait-queue wrappers (cfs_waitlink_t, cfs_waitq_*) to the native
 * Linux wait-queue API (wait_queue_t, add_wait_queue()/remove_wait_queue(),
 * waitqueue_active()/wake_up_all()).
 *
 * lnet_eq_wait_locked(): block the caller on the_lnet.ln_eq_waitq until an
 * EQ event arrives or *timeout_ms expires.  Entered with (and, per the
 * re-lock at the bottom, returned holding) the EQ wait lock -- hence the
 * sparse __must_hold() annotation the patch adds.  *timeout_ms is
 * decremented in place by the time actually slept, so callers can loop.
 * Returns -1 immediately when tms == 0 (caller asked not to wait and no
 * event is pending); otherwise returns `wait` -- whose assignment is NOT
 * visible in this fragment (diff context lines appear to be missing, e.g.
 * the schedule()/waitq wait bookkeeping and the close of the else branch),
 * so the body as seen here is incomplete.  TODO: recover the full hunk
 * before applying.
 */
static int
lnet_eq_wait_locked(int *timeout_ms)
/* + line: sparse annotation -- caller must hold ln_eq_wait_lock. */
+__must_hold(&the_lnet.ln_eq_wait_lock)
{
int tms = *timeout_ms;
int wait;
/* libcfs wait link becomes a native wait-queue entry. */
- cfs_waitlink_t wl;
+ wait_queue_t wl;
cfs_time_t now;
if (tms == 0)
return -1; /* don't want to wait and no new event */
/* Register on ln_eq_waitq as interruptible BEFORE dropping the lock,
 * so a wake-up between unlock and sleep is not lost. */
- cfs_waitlink_init(&wl);
- cfs_set_current_state(CFS_TASK_INTERRUPTIBLE);
- cfs_waitq_add(&the_lnet.ln_eq_waitq, &wl);
+ init_waitqueue_entry_current(&wl);
+ set_current_state(TASK_INTERRUPTIBLE);
+ add_wait_queue(&the_lnet.ln_eq_waitq, &wl);
lnet_eq_wait_unlock();
if (tms < 0) {
/* tms < 0: wait indefinitely (no timeout). */
- cfs_waitq_wait(&wl, CFS_TASK_INTERRUPTIBLE);
+ waitq_wait(&wl, TASK_INTERRUPTIBLE);
} else {
struct timeval tv;
now = cfs_time_current();
/* Bounded wait; cfs_time_seconds(tms)/1000 converts ms to jiffies.
 * NOTE(review): continuation-line indentation was lost with the
 * diff context, so the "+" replacement line no longer aligns with
 * its continuation below. */
- cfs_waitq_timedwait(&wl, CFS_TASK_INTERRUPTIBLE,
+ waitq_timedwait(&wl, TASK_INTERRUPTIBLE,
cfs_time_seconds(tms) / 1000);
/* Charge the elapsed wall time against the caller's budget. */
cfs_duration_usec(cfs_time_sub(cfs_time_current(), now), &tv);
tms -= (int)(tv.tv_sec * 1000 + tv.tv_usec / 1000);
*timeout_ms = tms;
/* Re-take the wait lock and deregister before returning.
 * NOTE(review): the close of the else block and the assignment of
 * `wait` are absent here -- missing diff context, not real code flow. */
lnet_eq_wait_lock();
- cfs_waitq_del(&the_lnet.ln_eq_waitq, &wl);
+ remove_wait_queue(&the_lnet.ln_eq_waitq, &wl);
return wait;
}