])
#
+# LIBCFS_ADD_WAIT_QUEUE_EXCLUSIVE
+#
+# Kernel 2.6.34 added __add_wait_queue_exclusive(); define HAVE___ADD_WAIT_QUEUE_EXCLUSIVE when the running kernel provides it.
+#
+AC_DEFUN([LIBCFS_ADD_WAIT_QUEUE_EXCLUSIVE],
+[AC_MSG_CHECKING([if __add_wait_queue_exclusive exists])
+LB_LINUX_TRY_COMPILE([
+ #include <linux/wait.h>
+],[
+ wait_queue_head_t queue;
+ wait_queue_t wait;
+
+ __add_wait_queue_exclusive(&queue, &wait);
+],[
+ AC_MSG_RESULT(yes)
+ AC_DEFINE(HAVE___ADD_WAIT_QUEUE_EXCLUSIVE, 1,
+ [__add_wait_queue_exclusive exists])
+],[
+ AC_MSG_RESULT(no)
+])
+])
+
+#
# LIBCFS_PROG_LINUX
#
# LNet linux kernel checks
LIBCFS_SOCK_MAP_FD_2ARG
# 2.6.32
LIBCFS_STACKTRACE_OPS_HAVE_WALK_STACK
+# 2.6.34
+LIBCFS_ADD_WAIT_QUEUE_EXCLUSIVE
])
#
void cfs_waitq_add(cfs_waitq_t *waitq, cfs_waitlink_t *link);
void cfs_waitq_add_exclusive(cfs_waitq_t *waitq,
cfs_waitlink_t *link);
+void cfs_waitq_add_exclusive_head(cfs_waitq_t *waitq,
+ cfs_waitlink_t *link);
void cfs_waitq_del(cfs_waitq_t *waitq, cfs_waitlink_t *link);
int cfs_waitq_active(cfs_waitq_t *waitq);
void cfs_waitq_signal(cfs_waitq_t *waitq);
}
EXPORT_SYMBOL(cfs_waitq_add);
+#ifndef HAVE___ADD_WAIT_QUEUE_EXCLUSIVE
+/* Compat for kernels < 2.6.34: mark @wait exclusive, then tail-add it (called here under the waitq lock). */
+static inline void __add_wait_queue_exclusive(wait_queue_head_t *q,
+ wait_queue_t *wait)
+{
+ wait->flags |= WQ_FLAG_EXCLUSIVE;
+ __add_wait_queue(q, wait);
+}
+
+#endif /* HAVE___ADD_WAIT_QUEUE_EXCLUSIVE */
+
void
cfs_waitq_add_exclusive(cfs_waitq_t *waitq,
cfs_waitlink_t *link)
}
EXPORT_SYMBOL(cfs_waitq_add_exclusive);
+/**
+ * Add an exclusive waiter at the head of @waitq (LIFO order): the Linux
+ * wait_queue_t (kernels < 2.6.34) keeps exclusive waiters in a FIFO list,
+ * which is not always desirable because all threads are woken up again and
+ * again even when only a few of them need to be active most of the time;
+ * that hurts performance, since the cache is polluted by many threads.
+ *
+ * A LIFO list avoids this, because by default we always wake up the most
+ * recently active thread first.
+ *
+ * NB: do not mix non-exclusive and exclusive waits on the same waitq
+ * when cfs_waitq_add_exclusive_head() is used.
+ */
+void
+cfs_waitq_add_exclusive_head(cfs_waitq_t *waitq, cfs_waitlink_t *link)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&LINUX_WAITQ_HEAD(waitq)->lock, flags);
+ __add_wait_queue_exclusive(LINUX_WAITQ_HEAD(waitq), LINUX_WAITQ(link));
+ spin_unlock_irqrestore(&LINUX_WAITQ_HEAD(waitq)->lock, flags);
+}
+EXPORT_SYMBOL(cfs_waitq_add_exclusive_head);
+
void
cfs_waitq_del(cfs_waitq_t *waitq, cfs_waitlink_t *link)
{
(void)link;
}
+void cfs_waitq_add_exclusive_head(struct cfs_waitq *waitq, struct cfs_waitlink *link)
+{
+ cfs_waitq_add_exclusive(waitq, link); /* NOTE(review): plain (FIFO) exclusive add — LIFO head insertion presumably unneeded in this build; confirm */
+}
+
void cfs_waitq_del(struct cfs_waitq *waitq, struct cfs_waitlink *link)
{
LASSERT(waitq != NULL);
* wait for @condition to become true, but no longer than timeout, specified
* by @info.
*/
-#define __l_wait_event(wq, condition, info, ret, excl) \
+#define __l_wait_event(wq, condition, info, ret, l_add_wait) \
do { \
cfs_waitlink_t __wait; \
cfs_duration_t __timeout = info->lwi_timeout; \
break; \
\
cfs_waitlink_init(&__wait); \
- if (excl) \
- cfs_waitq_add_exclusive(&wq, &__wait); \
- else \
- cfs_waitq_add(&wq, &__wait); \
+ l_add_wait(&wq, &__wait); \
\
/* Block all signals (just the non-fatal ones if no timeout). */ \
if (info->lwi_on_signal != NULL && (__timeout == 0 || __allow_intr)) \
} while (0)
#else /* !__KERNEL__ */
-#define __l_wait_event(wq, condition, info, ret, excl) \
+#define __l_wait_event(wq, condition, info, ret, l_add_wait) \
do { \
long __timeout = info->lwi_timeout; \
long __now; \
int __ret; \
struct l_wait_info *__info = (info); \
\
- __l_wait_event(wq, condition, __info, __ret, 0); \
+ __l_wait_event(wq, condition, __info, \
+ __ret, cfs_waitq_add); \
__ret; \
})
int __ret; \
struct l_wait_info *__info = (info); \
\
- __l_wait_event(wq, condition, __info, __ret, 1); \
+ __l_wait_event(wq, condition, __info, \
+ __ret, cfs_waitq_add_exclusive); \
+ __ret; \
+})
+
+#define l_wait_event_exclusive_head(wq, condition, info) /* exclusive wait; waiter added at queue head via cfs_waitq_add_exclusive_head */ \
+({ \
+ int __ret; \
+ struct l_wait_info *__info = (info); \
+ \
+ __l_wait_event(wq, condition, __info, \
+ __ret, cfs_waitq_add_exclusive_head); \
+ __ret; \
+})
-#define l_cfs_wait_event(wq, condition) \
+#define l_wait_condition(wq, condition) /* non-exclusive wait with a default (zeroed) l_wait_info */ \
+({ \
+ struct l_wait_info lwi = { 0 }; \
+ l_wait_event(wq, condition, &lwi); \
+})
+#define l_wait_condition_exclusive(wq, condition) /* exclusive wait with a default (zeroed) l_wait_info */ \
+({ \
+ struct l_wait_info lwi = { 0 }; \
+ l_wait_event_exclusive(wq, condition, &lwi); \
+})
+
+#define l_wait_condition_exclusive_head(wq, condition) /* head-inserted exclusive wait with a default (zeroed) l_wait_info */ \
+({ \
+ struct l_wait_info lwi = { 0 }; \
+ l_wait_event_exclusive_head(wq, condition, &lwi); \
+})
+
#ifdef __KERNEL__
#define LIBLUSTRE_CLIENT (0)
#else
return rc;
}
- l_cfs_wait_event(thread->t_ctl_waitq, thread->t_flags & SVC_RUNNING);
+ l_wait_condition(thread->t_ctl_waitq, thread->t_flags & SVC_RUNNING);
return 0;
}
thread->t_flags = SVC_STOPPING;
cfs_waitq_signal(&thread->t_ctl_waitq);
- l_cfs_wait_event(thread->t_ctl_waitq, thread->t_flags & SVC_STOPPED);
+ l_wait_condition(thread->t_ctl_waitq, thread->t_flags & SVC_STOPPED);
}
cfs_cond_resched();
- l_wait_event_exclusive(svc->srv_waitq,
+ l_wait_event_exclusive_head(svc->srv_waitq,
ptlrpc_thread_stopping(thread) ||
ptlrpc_server_request_waiting(svc) ||
ptlrpc_server_request_pending(svc, 0) ||
while (!cfs_test_bit(HRT_STOPPING, &t->hrt_flags)) {
- l_cfs_wait_event(t->hrt_wait, hrt_dont_sleep(t, &replies));
+ l_wait_condition(t->hrt_wait, hrt_dont_sleep(t, &replies));
while (!cfs_list_empty(&replies)) {
struct ptlrpc_reply_state *rs;
cfs_complete(&t->hrt_completion);
GOTO(out, rc);
}
- l_cfs_wait_event(t->hrt_wait, cfs_test_bit(HRT_RUNNING, &t->hrt_flags));
+ l_wait_condition(t->hrt_wait, cfs_test_bit(HRT_RUNNING, &t->hrt_flags));
RETURN(0);
out:
return rc;