#ifndef _LUSTRE_LIB_H
#define _LUSTRE_LIB_H
-#include <asm/types.h>
-
#ifndef __KERNEL__
# include <string.h>
#else
# include <asm/semaphore.h>
#endif
-
+#include <linux/types.h>
#include <linux/portals_lib.h>
+#include <linux/kp30.h> /* XXX just for LASSERT! */
#include <linux/lustre_idl.h>
+/* printf() format strings for 64-bit (__u64/__s64) values.  When long
+ * is 64 bits a u64 prints as a plain "long"; on 32-bit kernels it must
+ * use the "long long" (%L...) forms.  Guarded so a definition supplied
+ * by an earlier (portals) header takes precedence. */
+#ifndef LPU64
+#if BITS_PER_LONG > 32
+#define LPU64 "%lu"
+#define LPD64 "%ld"
+#define LPX64 "%#lx"
+#else
+#define LPU64 "%Lu"
+#define LPD64 "%Ld"
+#define LPX64 "%#Lx"
+#endif
+#endif
+
#ifdef __KERNEL__
/* l_net.c */
struct ptlrpc_request;
+struct obd_device;
+struct recovd_data;
+struct recovd_obd;
+#include <linux/lustre_ha.h>
+
+/* Generic target-side handlers for incoming (dis)connect requests. */
int target_handle_connect(struct ptlrpc_request *req);
int target_handle_disconnect(struct ptlrpc_request *req);
+/* Common client-obd connect/disconnect/setup/cleanup helpers shared by
+ * the client device types (implemented in l_net.c). */
+int client_obd_connect(struct lustre_handle *conn, struct obd_device *obd,
+ obd_uuid_t cluuid, struct recovd_obd *recovd,
+ ptlrpc_recovery_cb_t recover);
+int client_obd_disconnect(struct lustre_handle *conn);
+int client_obd_setup(struct obd_device *obddev, obd_count len, void *buf);
+int client_obd_cleanup(struct obd_device * obddev);
+/* Map a connection handle to its client_obd private data. */
+struct client_obd *client_conn2cli(struct lustre_handle *conn);
+
+/* Recovery callback: revoke a target connection during failover. */
+int target_revoke_connection(struct recovd_data *rd, int phase);
/* l_lock.c */
struct lustre_lock {
void l_lock_init(struct lustre_lock *);
void l_lock(struct lustre_lock *);
void l_unlock(struct lustre_lock *);
+int l_has_lock(struct lustre_lock *);
+
+/* Phase values handed to obd_brw_set->brw_callback(). */
+#define CB_PHASE_START 12
+#define CB_PHASE_FINISH 13
+/* This list head doesn't need to be locked, because it's only manipulated by
+ * one thread at a time. */
+struct obd_brw_set {
+ struct list_head brw_desc_head; /* list of ptlrpc_bulk_desc */
+ wait_queue_head_t brw_waitq;
+ atomic_t brw_refcount;
+ int brw_flags;
-/* page.c */
-inline void lustre_put_page(struct page *page);
-struct page *lustre_get_page_read(struct inode *dir, unsigned long index);
-struct page *lustre_get_page_write(struct inode *dir, unsigned long index);
-int lustre_commit_write(struct page *page, unsigned from, unsigned to);
-void set_page_clean(struct page *page);
-void set_page_dirty(struct page *page);
+ /* invoked with CB_PHASE_START/CB_PHASE_FINISH; presumably as the set
+ * starts and completes I/O -- confirm against callers */
+ int (*brw_callback)(struct obd_brw_set *, int phase);
+};
/* simple.c */
struct obd_run_ctxt;
-void push_ctxt(struct obd_run_ctxt *save, struct obd_run_ctxt *new);
-void pop_ctxt(struct obd_run_ctxt *saved);
-#ifdef OBD_CTXT_DEBUG
-#define OBD_SET_CTXT_MAGIC(ctxt) (ctxt)->magic = OBD_RUN_CTXT_MAGIC
-#else
-#define OBD_SET_CTXT_MAGIC(ctxt) do {} while(0)
-#endif
+struct obd_ucred;
+/* Switch the current task's filesystem context (and credentials) to
+ * new_ctx, saving the old one in *save; pop_ctxt restores it.  Calls
+ * must be strictly paired. */
+void push_ctxt(struct obd_run_ctxt *save, struct obd_run_ctxt *new_ctx,
+ struct obd_ucred *cred);
+void pop_ctxt(struct obd_run_ctxt *saved, struct obd_run_ctxt *new_ctx,
+ struct obd_ucred *cred);
+/* Create a directory/device node under 'dir'; returns the new dentry. */
struct dentry *simple_mkdir(struct dentry *dir, char *name, int mode);
+struct dentry *simple_mknod(struct dentry *dir, char *name, int mode);
+/* Kernel-space file I/O helpers (positional read/write/sync). */
int lustre_fread(struct file *file, char *str, int len, loff_t *off);
int lustre_fwrite(struct file *file, const char *str, int len, loff_t *off);
int lustre_fsync(struct file *file);
if (!de || IS_ERR(de))
return;
shrink_dcache_parent(de);
+ LASSERT(atomic_read(&de->d_count) > 0);
dput(de);
}
struct obd_statfs;
struct statfs;
-void obd_statfs_pack(struct obd_statfs *osfs, struct statfs *sfs);
-void obd_statfs_unpack(struct obd_statfs *osfs, struct statfs *sfs);
+/* Convert between the kernel's struct statfs and the Lustre wire
+ * struct obd_statfs. */
+void statfs_pack(struct obd_statfs *osfs, struct statfs *sfs);
+void statfs_unpack(struct statfs *sfs, struct obd_statfs *osfs);
+/* Endian-convert an obd_statfs for the wire. */
+void obd_statfs_pack(struct obd_statfs *tgt, struct obd_statfs *src);
+/* NOTE(review): unpack simply re-uses pack, i.e. it assumes the
+ * conversion is its own inverse (a symmetric byte-swap) -- confirm
+ * against the obd_statfs_pack() implementation. */
+static inline void
+obd_statfs_unpack(struct obd_statfs *tgt, struct obd_statfs *src)
+{
+ obd_statfs_pack(tgt, src);
+}
#include <linux/portals_lib.h>
data->ioc_len = obd_ioctl_packlen(data);
data->ioc_version = OBD_IOCTL_VERSION;
- if (*pbuf && obd_ioctl_packlen(data) > max)
+ if (*pbuf && data->ioc_len > max)
return 1;
if (*pbuf == NULL) {
*pbuf = malloc(data->ioc_len);
return 0;
}
+/* Reverse of the pack routine above: copy the obd_ioctl_data header
+ * out of the flattened buffer 'pbuf' into *data, then copy each inline
+ * buffer from the trailing bulk area back into the caller-supplied
+ * ioc_inlbuf[123] pointers (which are preserved across the header
+ * copy).  Returns 0 on success, 1 if pbuf is NULL.
+ * NOTE(review): 'max' is accepted but never checked against ioc_len or
+ * the inline lengths -- confirm whether a bounds check is intended. */
+static inline int obd_ioctl_unpack(struct obd_ioctl_data *data, char *pbuf,
+ int max)
+{
+ char *ptr;
+ struct obd_ioctl_data *overlay;
+
+ if (!pbuf)
+ return 1;
+ overlay = (struct obd_ioctl_data *)pbuf;
+
+ /* Preserve the caller's buffer pointers */
+ overlay->ioc_inlbuf1 = data->ioc_inlbuf1;
+ overlay->ioc_inlbuf2 = data->ioc_inlbuf2;
+ overlay->ioc_inlbuf3 = data->ioc_inlbuf3;
+
+ memcpy(data, pbuf, sizeof(*data));
+
+ /* inline data lives immediately after the header; LOGU presumably
+ * copies each length-delimited region and advances ptr -- confirm */
+ ptr = overlay->ioc_bulk;
+ if (data->ioc_inlbuf1)
+ LOGU(data->ioc_inlbuf1, data->ioc_inllen1, ptr);
+ if (data->ioc_inlbuf2)
+ LOGU(data->ioc_inlbuf2, data->ioc_inllen2, ptr);
+ if (data->ioc_inlbuf3)
+ LOGU(data->ioc_inlbuf3, data->ioc_inllen3, ptr);
+
+ return 0;
+}
#else
+#include <linux/obd_support.h>
+
/* buffer MUST be at least the size of obd_ioctl_hdr */
static inline int obd_ioctl_getdata(char **buf, int *len, void *arg)
{
int err;
ENTRY;
-
err = copy_from_user(&hdr, (void *)arg, sizeof(hdr));
if ( err ) {
EXIT;
#define OBD_IOC_UUID2DEV _IOWR('f', 130, long)
#define OBD_IOC_RECOVD_NEWCONN _IOWR('f', 131, long)
-#define OBD_IOC_LOV_CONFIG _IOWR('f', 132, long)
+#define OBD_IOC_LOV_SET_CONFIG _IOWR('f', 132, long)
+#define OBD_IOC_LOV_GET_CONFIG _IOWR('f', 133, long)
+/* Compatibility alias for the old single LOV_CONFIG ioctl. */
+#define OBD_IOC_LOV_CONFIG OBD_IOC_LOV_SET_CONFIG
+
+#define OBD_IOC_OPEN _IOWR('f', 134, long)
+#define OBD_IOC_CLOSE _IOWR('f', 135, long)
+
+#define OBD_IOC_RECOVD_FAILCONN _IOWR('f', 136, long)
+/* Renumbered from 133, which is now taken by LOV_GET_CONFIG above. */
-#define OBD_IOC_DEC_FS_USE_COUNT _IO ('f', 133 )
+#define OBD_IOC_DEC_FS_USE_COUNT _IO ('f', 139 )
+
+#define OBD_GET_VERSION _IOWR ('f', 144, long)
+
+/*
+ * l_wait_event is a flexible sleeping function, permitting simple caller
+ * configuration of interrupt and timeout sensitivity along with actions to
+ * be performed in the event of either exception.
+ *
+ * Common usage looks like this:
+ *
+ * struct l_wait_info lwi = LWI_TIMEOUT_INTR(timeout, timeout_handler,
+ * intr_handler, callback_data);
+ * rc = l_wait_event(waitq, condition, &lwi);
+ *
+ * (LWI_TIMEOUT and LWI_INTR macros are available for timeout- and
+ * interrupt-only variants, respectively.)
+ *
+ * If a timeout is specified, the timeout_handler will be invoked in the event
+ * that the timeout expires before the process is awakened. (Note that any
+ * waking of the process will restart the timeout, even if the condition is
+ * not satisfied and the process immediately returns to sleep. This might be
+ * considered a bug.) If the timeout_handler returns non-zero, l_wait_event
+ * will return -ETIMEDOUT and the caller will continue. If the handler returns
+ * zero instead, the process will go back to sleep until it is awakened by the
+ * waitq or some similar mechanism, or an interrupt occurs (if the caller has
+ * asked for interrupts to be detected). The timeout will only fire once, so
+ * callers should take care that a timeout_handler which returns zero will take
+ * future steps to awaken the process. N.B. that these steps must include
+ * making the provided condition become true.
+ *
+ * If the interrupt flag (lwi_signals) is non-zero, then the process will be
+ * interruptible, and will be awakened by any "killable" signal (SIGTERM,
+ * SIGKILL or SIGINT). If a timeout is also specified, then the process will
+ * only become interruptible _after_ the timeout has expired, though it can be
+ * awakened by a signal that was delivered before the timeout and is still
+ * pending when the timeout expires. If a timeout is not specified, the process
+ * will be interruptible at all times during l_wait_event.
+ */
+/* Sleep-policy descriptor for l_wait_event(); see the comment above. */
+struct l_wait_info {
+ long lwi_timeout; /* passed to schedule_timeout(), i.e. jiffies; 0 = wait forever */
+ int (*lwi_on_timeout)(void *); /* non-zero return => wait ends with -ETIMEDOUT */
+ long lwi_signals; /* non-zero => killable signals can end the wait */
+ int (*lwi_on_signal)(void *); /* XXX return is ignored for now */
+ void *lwi_cb_data; /* opaque argument for both callbacks */
+};
+/* Convenience constructors for struct l_wait_info: timeout-only,
+ * interrupt-only, and combined.  These use the old GNU "name: value"
+ * designated-initialiser syntax (gcc extension); fields not mentioned
+ * are zero-initialised. */
+#define LWI_TIMEOUT(time, cb, data) \
+((struct l_wait_info) { \
+ lwi_timeout: time, \
+ lwi_on_timeout: cb, \
+ lwi_cb_data: data \
+})
+#define LWI_INTR(cb, data) \
+((struct l_wait_info) { \
+ lwi_signals: 1, \
+ lwi_on_signal: cb, \
+ lwi_cb_data: data \
+})
+#define LWI_TIMEOUT_INTR(time, time_cb, sig_cb, data) \
+((struct l_wait_info) { \
+ lwi_timeout: time, \
+ lwi_on_timeout: time_cb, \
+ lwi_signals: 1, \
+ lwi_on_signal: sig_cb, \
+ lwi_cb_data: data \
+})
/* XXX this should be one mask-check */
#define l_killable_pending(task) \
sigismember(&(task->pending.signal), SIGINT) || \
sigismember(&(task->pending.signal), SIGTERM))
-/*
- * Like wait_event_interruptible, but we're only interruptible by KILL, INT, or
- * TERM.
- *
- * XXXshaver These are going away soon, I hope.
- */
-#define __l_wait_event_killable(wq, condition, ret) \
-do { \
- wait_queue_t __wait; \
- init_waitqueue_entry(&__wait, current); \
- \
- add_wait_queue(&wq, &__wait); \
- for (;;) { \
- set_current_state(TASK_INTERRUPTIBLE); \
- if (condition) \
- break; \
- if (!signal_pending(current) || \
- !l_killable_pending(current)) { \
- schedule(); \
- continue; \
- } \
- ret = -ERESTARTSYS; \
- break; \
- } \
- current->state = TASK_RUNNING; \
- remove_wait_queue(&wq, &__wait); \
-} while(0)
-
-#define l_wait_event_killable(wq, condition) \
-({ \
- int __ret = 0; \
- if (!(condition)) \
- __l_wait_event_killable(wq, condition, __ret); \
- __ret; \
-})
-
-#define __l_wait_event_timeout(wq, condition, timeout, ret) \
-do { \
- wait_queue_t __wait; \
- init_waitqueue_entry(&__wait, current); \
- \
- add_wait_queue(&wq, &__wait); \
- for (;;) { \
- set_current_state(TASK_INTERRUPTIBLE); \
- if (condition) \
- break; \
- if (timeout) \
- schedule_timeout(timeout); \
- else \
- schedule(); \
- } \
- current->state = TASK_RUNNING; \
- remove_wait_queue(&wq, &__wait); \
+/*
+ * Worker for l_wait_event() below; see the big comment above for the
+ * full semantics.  Key points of this implementation:
+ *  - sleeps TASK_INTERRUPTIBLE only when signals are wanted and no
+ *    timeout is armed; with a timeout it sleeps TASK_UNINTERRUPTIBLE
+ *    until the timeout has fired once, then honours signals.
+ *  - the timeout fires at most once (__timed_out); if lwi_on_timeout
+ *    is absent or returns non-zero, the wait ends with -ETIMEDOUT.
+ *  - a pending killable signal ends the wait with -EINTR, after
+ *    invoking lwi_on_signal (whose return value is ignored).
+ *  - 'condition' is re-tested on every wakeup before any exit path.
+ */
+#define __l_wait_event(wq, condition, info, ret) \
+do { \
+ wait_queue_t __wait; \
+ long __state; \
+ int __timed_out = 0; \
+ init_waitqueue_entry(&__wait, current); \
+ \
+ add_wait_queue(&wq, &__wait); \
+ if (info->lwi_signals && !info->lwi_timeout) \
+ __state = TASK_INTERRUPTIBLE; \
+ else \
+ __state = TASK_UNINTERRUPTIBLE; \
+ for (;;) { \
+ set_current_state(__state); \
+ if (condition) \
+ break; \
+ if (__state == TASK_INTERRUPTIBLE && l_killable_pending(current)) {\
+ if (info->lwi_on_signal) \
+ info->lwi_on_signal(info->lwi_cb_data); \
+ ret = -EINTR; \
+ break; \
+ } \
+ if (info->lwi_timeout && !__timed_out) { \
+ if (schedule_timeout(info->lwi_timeout) == 0) { \
+ __timed_out = 1; \
+ if (!info->lwi_on_timeout || \
+ info->lwi_on_timeout(info->lwi_cb_data)) { \
+ ret = -ETIMEDOUT; \
+ break; \
+ } \
+ /* We'll take signals after a timeout. */ \
+ if (info->lwi_signals) { \
+ __state = TASK_INTERRUPTIBLE; \
+ /* Check for a pending interrupt. */ \
+ if (info->lwi_signals && l_killable_pending(current)) {\
+ if (info->lwi_on_signal) \
+ info->lwi_on_signal(info->lwi_cb_data); \
+ ret = -EINTR; \
+ break; \
+ } \
+ } \
+ } \
+ } else { \
+ schedule(); \
+ } \
+ } \
+ current->state = TASK_RUNNING; \
+ remove_wait_queue(&wq, &__wait); \
} while(0)
-#define l_wait_event_timeout(wq, condition, timeout) \
-({ \
- int __ret = 0; \
- if (!(condition)) \
- __l_wait_event_timeout(wq, condition, timeout, __ret); \
- __ret; \
+/* Sleep on 'wq' until 'condition' is true, following the timeout and
+ * interrupt policy in *info (see struct l_wait_info above).  Evaluates
+ * to 0 on success, -ETIMEDOUT or -EINTR on exception; returns
+ * immediately with 0 if the condition already holds. */
+#define l_wait_event(wq, condition, info) \
+({ \
+ int __ret = 0; \
+ struct l_wait_info *__info = (info); \
+ if (!(condition)) \
+ __l_wait_event(wq, condition, __info, __ret); \
+ __ret; \
})
#endif /* _LUSTRE_LIB_H */