# include <string.h>
#else
# include <asm/semaphore.h>
+#include <linux/kp30.h> /* XXX just for LASSERT! */
#endif
#include <linux/portals_lib.h>
#include <linux/lustre_idl.h>
+#if BITS_PER_LONG > 32
+#define LPU64 "%lu"
+#define LPD64 "%ld"
+#define LPX64 "%#lx"
+#else
+#define LPU64 "%Lu"
+#define LPD64 "%Ld"
+#define LPX64 "%#Lx"
+#endif
+
#ifdef __KERNEL__
/* l_net.c */
struct ptlrpc_request;
struct obd_device;
+struct recovd_data;
+
int target_handle_connect(struct ptlrpc_request *req);
int target_handle_disconnect(struct ptlrpc_request *req);
int client_obd_connect(struct lustre_handle *conn, struct obd_device *obd,
- char *cluuid);
+ obd_uuid_t cluuid);
int client_obd_disconnect(struct lustre_handle *conn);
int client_obd_setup(struct obd_device *obddev, obd_count len, void *buf);
int client_obd_cleanup(struct obd_device * obddev);
struct client_obd *client_conn2cli(struct lustre_handle *conn);
+int target_revoke_connection(struct recovd_data *rd, int phase);
+
/* l_lock.c */
struct lustre_lock {
int l_depth;
/* page.c */
#define CB_PHASE_START 12
#define CB_PHASE_FINISH 13
+
+/*
+ * io_cb_data: io callback data merged into one struct to simplify
+ * memory management. This may turn out to be too simple.
+ */
+struct io_cb_data;
+typedef int (*brw_callback_t)(struct io_cb_data *, int err, int phase);
+
struct io_cb_data {
wait_queue_head_t waitq;
atomic_t refcount;
int complete;
int err;
struct ptlrpc_bulk_desc *desc;
+ brw_callback_t cb;
+ void *data;
};
+
int ll_sync_io_cb(struct io_cb_data *data, int err, int phase);
struct io_cb_data *ll_init_cb(void);
-inline void lustre_put_page(struct page *page);
-struct page *lustre_get_page_read(struct inode *dir, unsigned long index);
-struct page *lustre_get_page_write(struct inode *dir, unsigned long index);
-int lustre_commit_write(struct page *page, unsigned from, unsigned to);
-void set_page_clean(struct page *page);
-void set_page_dirty(struct page *page);
/* simple.c */
struct obd_run_ctxt;
void push_ctxt(struct obd_run_ctxt *save, struct obd_run_ctxt *new);
void pop_ctxt(struct obd_run_ctxt *saved);
-#ifdef OBD_CTXT_DEBUG
-#define OBD_SET_CTXT_MAGIC(ctxt) (ctxt)->magic = OBD_RUN_CTXT_MAGIC
-#else
-#define OBD_SET_CTXT_MAGIC(ctxt) do {} while(0)
-#endif
struct dentry *simple_mkdir(struct dentry *dir, char *name, int mode);
int lustre_fread(struct file *file, char *str, int len, loff_t *off);
int lustre_fwrite(struct file *file, const char *str, int len, loff_t *off);
if (!de || IS_ERR(de))
return;
shrink_dcache_parent(de);
+ LASSERT(atomic_read(&de->d_count) > 0);
dput(de);
}
struct obd_statfs;
struct statfs;
-void obd_statfs_pack(struct obd_statfs *osfs, struct statfs *sfs);
-void obd_statfs_unpack(struct obd_statfs *osfs, struct statfs *sfs);
+void statfs_pack(struct obd_statfs *osfs, struct statfs *sfs);
+void statfs_unpack(struct statfs *sfs, struct obd_statfs *osfs);
+void obd_statfs_pack(struct obd_statfs *tgt, struct obd_statfs *src);
+static inline void
+obd_statfs_unpack(struct obd_statfs *tgt, struct obd_statfs *src)
+{
+ obd_statfs_pack(tgt, src);
+}
#include <linux/portals_lib.h>
#define OBD_IOC_DEC_FS_USE_COUNT _IO ('f', 133 )
+#define OBD_IOC_OPEN _IOWR('f', 134, long)
+#define OBD_IOC_CLOSE _IOWR('f', 135, long)
+
+/*
+ * l_wait_event is a flexible sleeping function, permitting simple caller
+ * configuration of interrupt and timeout sensitivity along with actions to
+ * be performed in the event of either exception.
+ *
+ * Common usage looks like this:
+ *
+ * struct l_wait_info lwi = LWI_TIMEOUT_INTR(timeout, timeout_handler,
+ * intr_handler, callback_data);
+ * rc = l_wait_event(waitq, condition, &lwi);
+ *
+ * (LWI_TIMEOUT and LWI_INTR macros are available for timeout- and
+ * interrupt-only variants, respectively.)
+ *
+ * If a timeout is specified, the timeout_handler will be invoked in the event
+ * that the timeout expires before the process is awakened. (Note that any
+ * waking of the process will restart the timeout, even if the condition is
+ * not satisfied and the process immediately returns to sleep. This might be
+ * considered a bug.) If the timeout_handler returns non-zero, l_wait_event
+ * will return -ETIMEDOUT and the caller will continue. If the handler returns
+ * zero instead, the process will go back to sleep until it is awakened by the
+ * waitq or some similar mechanism, or an interrupt occurs (if the caller has
+ * asked for interrupts to be detected). The timeout will only fire once, so
+ * callers should take care that a timeout_handler which returns zero will take
+ * future steps to awaken the process. N.B. that these steps must include making
+ * the provided condition become true.
+ *
+ * If the interrupt flag (lwi_signals) is non-zero, then the process will be
+ * interruptible, and will be awakened by any "killable" signal (SIGTERM,
+ * SIGKILL or SIGINT). If a timeout is also specified, then the process will
+ * only become interruptible _after_ the timeout has expired, though it can be
+ * awakened by a signal that was delivered before the timeout and is still
+ * pending when the timeout expires. If a timeout is not specified, the process
+ * will be interruptible at all times during l_wait_event.
+ */
+
struct l_wait_info {
long lwi_timeout;
int (*lwi_on_timeout)(void *);
lwi_cb_data: data \
})
-#define LWI_INTR(signals, cb, data) \
+#define LWI_INTR(cb, data) \
((struct l_wait_info) { \
- lwi_signals: signals, \
+ lwi_signals: 1, \
lwi_on_signal: cb, \
lwi_cb_data: data \
})
-#define LWI_TIMEOUT_INTR(time, time_cb, signals, sig_cb, data) \
+#define LWI_TIMEOUT_INTR(time, time_cb, sig_cb, data) \
((struct l_wait_info) { \
lwi_timeout: time, \
lwi_on_timeout: time_cb, \
- lwi_signals: signals, \
+ lwi_signals: 1, \
lwi_on_signal: sig_cb, \
lwi_cb_data: data \
})
do { \
wait_queue_t __wait; \
long __state; \
+ int __timed_out = 0; \
init_waitqueue_entry(&__wait, current); \
\
add_wait_queue(&wq, &__wait); \
- __state = TASK_UNINTERRUPTIBLE; \
+ if (info->lwi_signals && !info->lwi_timeout) \
+ __state = TASK_INTERRUPTIBLE; \
+ else \
+ __state = TASK_UNINTERRUPTIBLE; \
for (;;) { \
- set_current_state(__state); \
- if (condition) \
+ set_current_state(__state); \
+ if (condition) \
+ break; \
+ if (__state == TASK_INTERRUPTIBLE && l_killable_pending(current)) { \
+ CERROR("lwe: interrupt\n"); \
+ if (info->lwi_on_signal) \
+ info->lwi_on_signal(info->lwi_cb_data); \
+ ret = -EINTR; \
+ break; \
+ } \
+ if (info->lwi_timeout && !__timed_out) { \
+ if (schedule_timeout(info->lwi_timeout) == 0) { \
+ CERROR("lwe: timeout\n"); \
+ __timed_out = 1; \
+ if (!info->lwi_on_timeout || \
+ info->lwi_on_timeout(info->lwi_cb_data)) { \
+ ret = -ETIMEDOUT; \
break; \
- /* We only become INTERRUPTIBLE if a timeout has fired, and \
- * the caller has given us some signals to care about. \
- * \
- * XXXshaver we should check against info->wli_signals here, \
- * XXXshaver instead of just using l_killable_pending, perhaps. \
- */ \
- if (__state == TASK_INTERRUPTIBLE && \
- l_killable_pending(current)) { \
- if (info->lwi_on_signal) \
+ } \
+ /* We'll take signals after a timeout. */ \
+ if (info->lwi_signals) { \
+ __state = TASK_INTERRUPTIBLE; \
+ /* Check for a pending interrupt. */ \
+ if (info->lwi_signals && l_killable_pending(current)) { \
+ CERROR("lwe: pending interrupt\n"); \
+ if (info->lwi_on_signal) \
info->lwi_on_signal(info->lwi_cb_data); \
- ret = -EINTR; \
- break; \
- } \
- if (info->lwi_timeout) { \
- if (schedule_timeout(info->lwi_timeout) == 0) { \
- /* We'll take signals only after a timeout. */ \
- if (info->lwi_signals) \
- __state = TASK_INTERRUPTIBLE; \
- if (info->lwi_on_timeout && \
- info->lwi_on_timeout(info->lwi_cb_data)) { \
- ret = -ETIMEDOUT; \
- break; \
- } \
+ ret = -EINTR; \
+ break; \
} \
- } else { \
- schedule(); \
+ } \
} \
+ } else { \
+ schedule(); \
+ } \
} \
current->state = TASK_RUNNING; \
remove_wait_queue(&wq, &__wait); \