* Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
*
- * Copyright (c) 2011, 2013, Intel Corporation.
+ * Copyright (c) 2011, 2014, Intel Corporation.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
* @{
*/
+#include <linux/signal.h>
#include <libcfs/libcfs.h>
#include <lustre/lustre_idl.h>
#include <lustre_ver.h>
#include <lustre_cfg.h>
-#include <linux/lustre_lib.h>
/* target.c */
-struct kstatfs;
struct ptlrpc_request;
struct obd_export;
struct lu_target;
#ifdef HAVE_SERVER_SUPPORT
void target_client_add_cb(struct obd_device *obd, __u64 transno, void *cb_data,
- int error);
+ int error);
+int rev_import_init(struct obd_export *exp);
int target_handle_connect(struct ptlrpc_request *req);
int target_handle_disconnect(struct ptlrpc_request *req);
void target_destroy_export(struct obd_export *exp);
int target_pack_pool_reply(struct ptlrpc_request *req);
int do_set_info_async(struct obd_import *imp,
- int opcode, int version,
- obd_count keylen, void *key,
- obd_count vallen, void *val,
- struct ptlrpc_request_set *set);
-
-#define OBD_RECOVERY_MAX_TIME (obd_timeout * 18) /* b13079 */
+ int opcode, int version,
+ size_t keylen, void *key,
+ size_t vallen, void *val,
+ struct ptlrpc_request_set *set);
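/*
 * Illustrative call sketch (not part of this patch): key/value lengths are
 * now size_t instead of the old obd_count typedef.  The opcode, key and
 * value below are placeholders for whatever the caller actually sets.
 *
 *	char *key = KEY_CHECKSUM;
 *	int val = 1;
 *
 *	rc = do_set_info_async(imp, OST_SET_INFO, LUSTRE_OST_VERSION,
 *			       strlen(key), key, sizeof(val), &val, NULL);
 */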
void target_send_reply(struct ptlrpc_request *req, int rc, int fail_id);
-/* client.c */
-
-int client_sanobd_setup(struct obd_device *obddev, struct lustre_cfg* lcfg);
-struct client_obd *client_conn2cli(struct lustre_handle *conn);
-
-struct md_open_data;
-struct obd_client_handle {
- struct lustre_handle och_fh;
- struct lu_fid och_fid;
- struct md_open_data *och_mod;
- struct lustre_handle och_lease_handle; /* open lock for lease */
- __u32 och_magic;
- int och_flags;
-};
-#define OBD_CLIENT_HANDLE_MAGIC 0xd15ea5ed
-
-/* statfs_pack.c */
-void statfs_pack(struct obd_statfs *osfs, struct kstatfs *sfs);
-void statfs_unpack(struct kstatfs *sfs, struct obd_statfs *osfs);
-
-/* Until such time as we get_info the per-stripe maximum from the OST,
- * we define this to be 2T - 4k, which is the ext3 maxbytes. */
-#define LUSTRE_STRIPE_MAXBYTES 0x1fffffff000ULL
-
-/* Special values for remove LOV EA from disk */
-#define LOVEA_DELETE_VALUES(size, count, offset) (size == 0 && count == 0 && \
- offset == (typeof(offset))(-1))
-
-#define LMVEA_DELETE_VALUES(count, offset) ((count) == 0 && \
- (offset) == (typeof(offset))(-1))
-/* #define POISON_BULK 0 */
-
/*
 * l_wait_event is a flexible sleeping function, permitting simple caller
 * configuration of interrupt and timeout sensitivity along with actions to
 * be performed in the event of either excitation.
 */
#define LWI_INTR(cb, data) LWI_TIMEOUT_INTR(0, NULL, cb, data)
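/*
 * Typical usage sketch (illustrative): build a struct l_wait_info with one
 * of the LWI_* initializers above and pass it to l_wait_event() below.
 * "timeout_handler", "intr_handler" and "data" are hypothetical callbacks
 * and cookie, not names defined by this header.
 *
 *	struct l_wait_info lwi = LWI_TIMEOUT_INTR(cfs_time_seconds(obd_timeout),
 *						  timeout_handler,
 *						  intr_handler, data);
 *	rc = l_wait_event(waitq, condition, &lwi);
 */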
+#define LUSTRE_FATAL_SIGS \
+ (sigmask(SIGKILL) | sigmask(SIGINT) | sigmask(SIGTERM) | \
+ sigmask(SIGQUIT) | sigmask(SIGALRM))
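/*
 * Sketch of how this mask is meant to be used (assumed from the macro body
 * below): block every signal except the fatal set while sleeping, then
 * restore the old mask afterwards.
 *
 *	sigset_t blocked = cfs_block_sigsinv(LUSTRE_FATAL_SIGS);
 *
 *	... interruptible sleep; fatal signals can still wake us ...
 *
 *	cfs_restore_sigs(blocked);
 */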
+
+/*
+ * Wait Queue
+ */
+#ifndef HAVE___ADD_WAIT_QUEUE_EXCLUSIVE
+static inline void __add_wait_queue_exclusive(wait_queue_head_t *q,
+ wait_queue_t *wait)
+{
+ wait->flags |= WQ_FLAG_EXCLUSIVE;
+ __add_wait_queue(q, wait);
+}
+#endif /* HAVE___ADD_WAIT_QUEUE_EXCLUSIVE */
+
+/**
+ * wait_queue_t of Linux (version < 2.6.34) is a FIFO list for exclusively
+ * waiting threads, which is not always desirable because all threads will
+ * be woken up again and again, even though the caller may only need a few
+ * of them to be active most of the time.  This hurts performance because
+ * the cache can be polluted by many different threads.
+ *
+ * A LIFO list can resolve this problem: by default we always wake up the
+ * most recently active thread.
+ *
+ * NB: please don't mix non-exclusive and exclusive waits on the same
+ * waitq if add_wait_queue_exclusive_head() is used.
+ */
+#define add_wait_queue_exclusive_head(waitq, link)		\
+do {								\
+	unsigned long flags;					\
+								\
+	spin_lock_irqsave(&((waitq)->lock), flags);		\
+	__add_wait_queue_exclusive(waitq, link);		\
+	spin_unlock_irqrestore(&((waitq)->lock), flags);	\
+} while (0)
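/*
 * Usage sketch (illustrative; "svc_waitq" and "work_ready" are hypothetical
 * names): a service thread parks itself at the head of the queue, so the
 * most recently idle thread is woken first and cold threads stay asleep.
 *
 *	wait_queue_t waiter;
 *
 *	init_waitqueue_entry(&waiter, current);
 *	add_wait_queue_exclusive_head(&svc_waitq, &waiter);
 *	set_current_state(TASK_INTERRUPTIBLE);
 *	if (!work_ready)
 *		schedule();
 *	__set_current_state(TASK_RUNNING);
 *	remove_wait_queue(&svc_waitq, &waiter);
 */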
/*
 * wait for @condition to become true, but no longer than timeout, specified
 * by @info.
 */
if (condition) \
break; \
\
- init_waitqueue_entry_current(&__wait); \
+ init_waitqueue_entry(&__wait, current); \
l_add_wait(&wq, &__wait); \
\
/* Block all signals (just the non-fatal ones if no timeout). */ \
__blocked = cfs_block_sigsinv(0); \
\
for (;;) { \
- unsigned __wstate; \
- \
- __wstate = info->lwi_on_signal != NULL && \
- (__timeout == 0 || __allow_intr) ? \
- TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE; \
- \
set_current_state(TASK_INTERRUPTIBLE); \
\
if (condition) \
break; \
\
if (__timeout == 0) { \
- waitq_wait(&__wait, __wstate); \
+ schedule(); \
} else { \
cfs_duration_t interval = info->lwi_interval? \
min_t(cfs_duration_t, \
info->lwi_interval,__timeout):\
__timeout; \
- cfs_duration_t remaining = waitq_timedwait(&__wait, \
- __wstate, \
- interval); \
+ cfs_duration_t remaining = schedule_timeout(interval); \
__timeout = cfs_time_sub(__timeout, \
cfs_time_sub(interval, remaining));\
if (__timeout == 0) { \
\
if (condition) \
break; \
- if (cfs_signal_pending()) { \
+ if (signal_pending(current)) { \
if (info->lwi_on_signal != NULL && \
(__timeout == 0 || __allow_intr)) { \
if (info->lwi_on_signal != LWI_ON_SIGNAL_NOOP) \
\
cfs_restore_sigs(__blocked); \
\
- set_current_state(TASK_RUNNING); \
+ set_current_state(TASK_RUNNING); \
remove_wait_queue(&wq, &__wait); \
} while (0)
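/*
 * Interval polling sketch (illustrative): with lwi_interval set, the macro
 * above wakes up every "interval" to re-check @condition even if the wait
 * queue is never signalled.
 *
 *	struct l_wait_info lwi = LWI_TIMEOUT_INTERVAL(cfs_time_seconds(obd_timeout),
 *						      cfs_time_seconds(1),
 *						      NULL, NULL);
 *	rc = l_wait_event(waitq, condition, &lwi);
 */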
-
#define l_wait_event(wq, condition, info) \
({ \
int __ret; \