2. Apply libcfs portability wrappers to more files.
3. Add missing ENTRY/EXIT annotations to functions.
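
For reference, a minimal sketch of the two patterns applied throughout this patch, reusing the client_obd/cl_sem names that appear below; the helper example_do_connect() is purely illustrative. The libcfs wrappers (mutex_down()/mutex_up(), cfs_current(), cfs_time_*, ...) replace Linux-only primitives, and ENTRY/RETURN bracket the function so entry and exit show up in the debug log on every platform:

    static int example_connect(struct client_obd *cli)
    {
            int rc;
            ENTRY;                        /* log function entry */

            mutex_down(&cli->cl_sem);     /* portable wrapper around down() */
            rc = example_do_connect(cli); /* hypothetical helper, not in the tree */
            mutex_up(&cli->cl_sem);       /* portable wrapper around up() */

            RETURN(rc);                   /* log the exit value, then return */
    }
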
#undef LPROCFS
#endif
-#include <sys/mount.h>
+#include <libcfs/libcfs.h>
#define kstatfs statfs
/*
#error Do not #include this file directly. #include <lprocfs_status.h> instead
#endif
+#define IT_OPEN 0x0001
+#define IT_CREAT 0x0002
+#define IT_READDIR 0x0004
+#define IT_GETATTR 0x0008
+#define IT_LOOKUP 0x0010
+#define IT_UNLINK 0x0020
+#define IT_GETXATTR 0x0040
+#define IT_EXEC 0x0080
+#define IT_PIN 0x0100
+#define IT_CHDIR 0x0200
+
+
#endif
#ifdef __KERNEL__
-/*
- * XXX Liang:
- * liblustre.h has defined all of them in user space.
- * Where should we put them in OSX?
- */
-#define IT_OPEN 0x0001
-#define IT_CREAT 0x0002
-#define IT_READDIR 0x0004
-#define IT_GETATTR 0x0008
-#define IT_LOOKUP 0x0010
-#define IT_UNLINK 0x0020
-#define IT_GETXATTR 0x0040
-#define IT_EXEC 0x0080
-#define IT_PIN 0x0100
-#define IT_CHDIR 0x0200
-
struct iattr {
unsigned int ia_valid;
umode_t ia_mode;
#error Do not #include this file directly. #include <lustre_net.h> instead
#endif
+#include <libcfs/libcfs.h>
#include <netat/sysglue.h>
-#include <libcfs/darwin/darwin-prim.h>
#undef WITH_GROUP_INFO
/* use the macro's argument to avoid unused warnings */
#define down(a) do { (void)a; } while (0)
+#define mutex_down(a) down(a)
#define up(a) do { (void)a; } while (0)
+#define mutex_up(a) up(a)
#define down_read(a) do { (void)a; } while (0)
#define up_read(a) do { (void)a; } while (0)
#define down_write(a) do { (void)a; } while (0)
if (owner) {
++lock->l_depth;
} else {
- down(&lock->l_sem);
+ mutex_down(&lock->l_sem);
spin_lock(&lock->l_spin);
lock->l_owner = cfs_current();
lock->l_depth = 0;
if (--lock->l_depth < 0) {
lock->l_owner = NULL;
spin_unlock(&lock->l_spin);
- up(&lock->l_sem);
+ mutex_up(&lock->l_sem);
return;
}
spin_unlock(&lock->l_spin);
}
#ifdef __KERNEL__
-#include <linux/lustre_version.h>
void l_check_ns_lock(struct ldlm_namespace *ns)
{
- static unsigned long next_msg;
+ static cfs_time_t next_msg;
- if (!l_has_lock(&ns->ns_lock) && time_after(jiffies, next_msg)) {
+ if (!l_has_lock(&ns->ns_lock) && cfs_time_after(cfs_time_current(), next_msg)) {
CERROR("namespace %s lock not held when it should be; tell "
"phil\n", ns->ns_name);
libcfs_debug_dumpstack(NULL);
- next_msg = jiffies + 60 * HZ;
+ next_msg = cfs_time_shift(60);
}
}
{
struct client_obd *cli = &obddev->u.cli;
+ ENTRY;
if (!cli->cl_import)
RETURN(-EINVAL);
class_destroy_import(cli->cl_import);
int rc;
ENTRY;
- down(&cli->cl_sem);
+ mutex_down(&cli->cl_sem);
rc = class_connect(dlm_handle, obd, cluuid);
if (rc)
GOTO(out_sem, rc);
class_export_put(exp);
}
out_sem:
- up(&cli->cl_sem);
+ mutex_up(&cli->cl_sem);
return rc;
}
RETURN(-EINVAL);
}
- down(&cli->cl_sem);
+ mutex_down(&cli->cl_sem);
if (!cli->cl_conn_count) {
CERROR("disconnecting disconnected device (%s)\n",
obd->obd_name);
if (!rc && err)
rc = err;
out_sem:
- up(&cli->cl_sem);
+ mutex_up(&cli->cl_sem);
RETURN(rc);
}
int target_handle_reconnect(struct lustre_handle *conn, struct obd_export *exp,
struct obd_uuid *cluuid)
{
+ ENTRY;
if (exp->exp_connection) {
struct lustre_handle *hdl;
hdl = &exp->exp_imp_reverse->imp_remote_handle;
{
struct obd_device *obd = data;
+ ENTRY;
spin_lock_bh(&obd->obd_processing_task_lock);
if (!obd->obd_recovering) {
spin_unlock_bh(&obd->obd_processing_task_lock);
target_finish_recovery(obd);
ptlrpc_run_recovery_over_upcall(obd);
+ EXIT;
}
static void target_recovery_expired(unsigned long castmeharder)
#define DEBUG_SUBSYSTEM S_LDLM
#ifdef __KERNEL__
-# include <linux/slab.h>
-# include <linux/module.h>
-# include <lustre_dlm.h>
+# include <libcfs/libcfs.h>
#else
# include <liblustre.h>
# include <libcfs/kp30.h>
{
int i, rc;
+ ENTRY;
l_lock(&ns->ns_lock);
for (i = 0; i < RES_HASH_SIZE; i++) {
struct list_head *tmp, *next;
{
int need_to_run;
+ ENTRY;
spin_lock_bh(&waiting_locks_spinlock);
need_to_run = !list_empty(&expired_lock_thread.elt_expired_locks);
spin_unlock_bh(&waiting_locks_spinlock);
LDLM_ERROR(lock, "enqueue wait took %luus from %lu",
total_enqueue_wait, lock->l_enqueued_time.tv_sec);
- down(&lock->l_resource->lr_lvb_sem);
+ mutex_down(&lock->l_resource->lr_lvb_sem);
if (lock->l_resource->lr_lvb_len) {
buffers = 2;
size[1] = lock->l_resource->lr_lvb_len;
}
- up(&lock->l_resource->lr_lvb_sem);
+ mutex_up(&lock->l_resource->lr_lvb_sem);
req = ptlrpc_prep_req(lock->l_export->exp_imp_reverse,
LDLM_CP_CALLBACK, buffers, size, NULL);
if (buffers == 2) {
void *lvb;
- down(&lock->l_resource->lr_lvb_sem);
+ mutex_down(&lock->l_resource->lr_lvb_sem);
lvb = lustre_msg_buf(req->rq_reqmsg, 1,
lock->l_resource->lr_lvb_len);
memcpy(lvb, lock->l_resource->lr_lvb_data,
lock->l_resource->lr_lvb_len);
- up(&lock->l_resource->lr_lvb_sem);
+ mutex_up(&lock->l_resource->lr_lvb_sem);
}
LDLM_DEBUG(lock, "server preparing completion AST (after %ldus wait)",
sizeof(body->lock_handle1));
ldlm_lock2desc(lock, &body->lock_desc);
- down(&lock->l_resource->lr_lvb_sem);
+ mutex_down(&lock->l_resource->lr_lvb_sem);
size = lock->l_resource->lr_lvb_len;
- up(&lock->l_resource->lr_lvb_sem);
+ mutex_up(&lock->l_resource->lr_lvb_sem);
req->rq_replen = lustre_msg_size(1, &size);
req->rq_send_state = LUSTRE_IMP_FULL;
} else {
int buffers = 1;
- down(&lock->l_resource->lr_lvb_sem);
+ mutex_down(&lock->l_resource->lr_lvb_sem);
if (lock->l_resource->lr_lvb_len) {
size[1] = lock->l_resource->lr_lvb_len;
buffers = 2;
}
- up(&lock->l_resource->lr_lvb_sem);
+ mutex_up(&lock->l_resource->lr_lvb_sem);
if (OBD_FAIL_CHECK_ONCE(OBD_FAIL_LDLM_ENQUEUE_EXTENT_ERR))
GOTO(out, rc = -ENOMEM);
l_unlock(&lock->l_resource->lr_namespace->ns_lock);
if (rc == 0) {
- down(&lock->l_resource->lr_lvb_sem);
+ mutex_down(&lock->l_resource->lr_lvb_sem);
size[1] = lock->l_resource->lr_lvb_len;
if (size[1] > 0) {
void *lvb = lustre_msg_buf(req->rq_repmsg,
memcpy(lvb, lock->l_resource->lr_lvb_data,
size[1]);
}
- up(&lock->l_resource->lr_lvb_sem);
+ mutex_up(&lock->l_resource->lr_lvb_sem);
} else {
ldlm_resource_unlink_lock(lock);
ldlm_lock_destroy(lock);
/* XXX boiler-plate */
{
- char name[sizeof(current->comm)];
+ char name[CFS_CURPROC_COMM_MAX];
snprintf(name, sizeof(name) - 1, "ldlm_bl_%02d",
bltd->bltd_num);
cfs_daemonize(name);
int ldlm_get_ref(void)
{
int rc = 0;
- down(&ldlm_ref_sem);
+ ENTRY;
+ mutex_down(&ldlm_ref_sem);
if (++ldlm_refcount == 1) {
rc = ldlm_setup();
if (rc)
ldlm_refcount--;
}
- up(&ldlm_ref_sem);
+ mutex_up(&ldlm_ref_sem);
RETURN(rc);
}
void ldlm_put_ref(int force)
{
- down(&ldlm_ref_sem);
+ ENTRY;
+ mutex_down(&ldlm_ref_sem);
if (ldlm_refcount == 1) {
int rc = ldlm_cleanup(force);
if (rc)
} else {
ldlm_refcount--;
}
- up(&ldlm_ref_sem);
+ mutex_up(&ldlm_ref_sem);
EXIT;
}
struct obd_import *imp;
struct obd_device *obd;
+ ENTRY;
if (lock->l_conn_export == NULL) {
static cfs_time_t next_dump = 0, last_dump = 0;
{
int i, rc = LDLM_ITER_CONTINUE;
+ ENTRY;
l_lock(&ns->ns_lock);
for (i = 0; i < RES_HASH_SIZE; i++) {
struct list_head *tmp, *next;
struct ldlm_lock *lock;
struct ldlm_reply *reply;
+ ENTRY;
atomic_dec(&req->rq_import->imp_replay_inflight);
if (rc != ELDLM_OK)
GOTO(out, rc);
int size[2];
int flags;
+ ENTRY;
/*
* If granted mode matches the requested mode, this lock is granted.
*
cfs_mem_cache_t *ldlm_resource_slab, *ldlm_lock_slab;
-DECLARE_MUTEX(ldlm_namespace_lock);
+struct semaphore ldlm_namespace_lock;
struct list_head ldlm_namespace_list = CFS_LIST_HEAD_INIT(ldlm_namespace_list);
cfs_proc_dir_entry_t *ldlm_type_proc_dir = NULL;
cfs_proc_dir_entry_t *ldlm_ns_proc_dir = NULL;
ns->ns_nr_unused = 0;
ns->ns_max_unused = LDLM_DEFAULT_LRU_SIZE;
- down(&ldlm_namespace_lock);
+ mutex_down(&ldlm_namespace_lock);
list_add(&ns->ns_list_chain, &ldlm_namespace_list);
- up(&ldlm_namespace_lock);
+ mutex_up(&ldlm_namespace_lock);
ldlm_proc_namespace(ns);
RETURN(ns);
/* Cleanup, but also free, the namespace */
int ldlm_namespace_free(struct ldlm_namespace *ns, int force)
{
+ ENTRY;
if (!ns)
RETURN(ELDLM_OK);
- down(&ldlm_namespace_lock);
+ mutex_down(&ldlm_namespace_lock);
list_del(&ns->ns_list_chain);
- up(&ldlm_namespace_lock);
+ mutex_up(&ldlm_namespace_lock);
/* At shutdown time, don't call the cancellation callback */
ldlm_namespace_cleanup(ns, 0);
/* Although this is technically a lock inversion risk (lvb_sem
* should be taken before DLM lock), this resource was just
* created, so nobody else can take the lvb_sem yet. -p */
- down(&res->lr_lvb_sem);
+ mutex_down(&res->lr_lvb_sem);
/* Drop the dlm lock, because lvbo_init can touch the disk */
l_unlock(&ns->ns_lock);
OBD_FAIL_TIMEOUT(OBD_FAIL_LDLM_CREATE_RESOURCE, 2);
rc = ns->ns_lvbo->lvbo_init(res);
- up(&res->lr_lvb_sem);
+ mutex_up(&res->lr_lvb_sem);
if (rc)
CERROR("lvbo_init failed for resource "LPU64"/"LPU64
": rc %d\n", name.name[0], name.name[1], rc);
{
struct list_head *tmp;
- down(&ldlm_namespace_lock);
+ mutex_down(&ldlm_namespace_lock);
list_for_each(tmp, &ldlm_namespace_list) {
struct ldlm_namespace *ns;
ldlm_namespace_dump(level, ns);
}
- up(&ldlm_namespace_lock);
+ mutex_up(&ldlm_namespace_lock);
}
void ldlm_namespace_dump(int level, struct ldlm_namespace *ns)
--- /dev/null
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE plist PUBLIC "-//Apple Computer//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
+<plist version="1.0">
+<dict>
+ <key>CFBundleDevelopmentRegion</key>
+ <string>English</string>
+ <key>CFBundleExecutable</key>
+ <string>lov</string>
+ <key>CFBundleIconFile</key>
+ <string></string>
+ <key>CFBundleIdentifier</key>
+ <string>com.clusterfs.lustre.lov</string>
+ <key>CFBundleInfoDictionaryVersion</key>
+ <string>6.0</string>
+ <key>CFBundlePackageType</key>
+ <string>KEXT</string>
+ <key>CFBundleSignature</key>
+ <string>????</string>
+ <key>CFBundleVersion</key>
+ <string>1.0.1</string>
+ <key>OSBundleCompatibleVersion</key>
+ <string>1.0.0</string>
+ <key>OSBundleLibraries</key>
+ <dict>
+ <key>com.apple.kernel.bsd</key>
+ <string>1.1</string>
+ <key>com.apple.kernel.iokit</key>
+ <string>1.0.0b1</string>
+ <key>com.apple.kernel.mach</key>
+ <string>1.0.0b1</string>
+ <key>com.clusterfs.lustre.libcfs</key>
+ <string>1.0.0</string>
+ <key>com.clusterfs.lustre.portals</key>
+ <string>1.0.0</string>
+ <key>com.clusterfs.lustre.lvfs</key>
+ <string>1.0.0</string>
+ <key>com.clusterfs.lustre.obdclass</key>
+ <string>1.0.0</string>
+ <key>com.clusterfs.lustre.osc</key>
+ <string>1.0.0</string>
+ </dict>
+</dict>
+</plist>
endif
if MODULES
+if LINUX
modulefs_DATA = lov$(KMODEXT)
+endif
+
+if DARWIN
+macos_PROGRAMS := lov
+
+lov_SOURCES := \
+ lov_log.c \
+ lov_obd.c \
+ lov_pack.c \
+ lov_request.c \
+ lov_merge.c \
+ lov_qos.c \
+ lov_offset.c \
+ lov_internal.h
+
+lov_CFLAGS := $(EXTRA_KCFLAGS)
+lov_LDFLAGS := $(EXTRA_KLDFLAGS)
+lov_LDADD := $(EXTRA_KLIBS)
+
+plist_DATA := Info.plist
+
+#install-data-hook: fix-kext-ownership
+
+endif # DARWIN
+
endif # MODULES
DIST_SOURCES = $(lov-objs:.o=.c) lov_internal.h
return NULL;
atomic_set(&llh->llh_refcount, 2);
llh->llh_stripe_count = lsm->lsm_stripe_count;
- INIT_LIST_HEAD(&llh->llh_handle.h_link);
+ CFS_INIT_LIST_HEAD(&llh->llh_handle.h_link);
class_handle_hash(&llh->llh_handle, lov_llh_addref);
return llh;
}
#endif
#define DEBUG_SUBSYSTEM S_LOV
#ifdef __KERNEL__
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/slab.h>
-#include <linux/pagemap.h>
-#include <asm/div64.h>
-#include <linux/seq_file.h>
+#include <libcfs/libcfs.h>
#else
#include <liblustre.h>
#endif
#define DEBUG_SUBSYSTEM S_LOV
#ifdef __KERNEL__
-#include <asm/div64.h>
+#include <libcfs/libcfs.h>
#else
#include <liblustre.h>
#endif
LASSERT_SPIN_LOCKED(&lsm->lsm_lock);
#ifdef __KERNEL__
- LASSERT(lsm->lsm_lock_owner == current);
+ LASSERT(lsm->lsm_lock_owner == cfs_current());
#endif
for (i = 0, loi = lsm->lsm_oinfo; i < lsm->lsm_stripe_count;
LASSERT_SPIN_LOCKED(&lsm->lsm_lock);
#ifdef __KERNEL__
- LASSERT(lsm->lsm_lock_owner == current);
+ LASSERT(lsm->lsm_lock_owner == cfs_current());
#endif
if (shrink) {
#endif
#define DEBUG_SUBSYSTEM S_LOV
#ifdef __KERNEL__
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/slab.h>
-#include <linux/pagemap.h>
-#include <linux/seq_file.h>
-#include <asm/div64.h>
+#include <libcfs/libcfs.h>
#else
#include <liblustre.h>
#endif
struct lustre_handle conn = {0, };
struct obd_import *imp;
#ifdef __KERNEL__
- struct proc_dir_entry *lov_proc_dir;
+ cfs_proc_dir_entry_t *lov_proc_dir;
#endif
int rc;
ENTRY;
lov_proc_dir = lprocfs_srch(obd->obd_proc_entry, "target_obds");
if (lov_proc_dir) {
struct obd_device *osc_obd = class_conn2obd(&conn);
- struct proc_dir_entry *osc_symlink;
+ cfs_proc_dir_entry_t *osc_symlink;
char name[MAX_STRING_SIZE];
LASSERT(osc_obd != NULL);
static int lov_disconnect_obd(struct obd_device *obd, struct lov_tgt_desc *tgt)
{
- struct proc_dir_entry *lov_proc_dir;
+ cfs_proc_dir_entry_t *lov_proc_dir;
struct obd_device *osc_obd = class_exp2obd(tgt->ltd_exp);
struct lov_obd *lov = &obd->u.lov;
int rc;
lov_proc_dir = lprocfs_srch(obd->obd_proc_entry, "target_obds");
if (lov_proc_dir) {
- struct proc_dir_entry *osc_symlink;
+ cfs_proc_dir_entry_t *osc_symlink;
osc_symlink = lprocfs_srch(lov_proc_dir, osc_obd->obd_name);
if (osc_symlink) {
int rc;
struct obd_uuid *uuid;
+ ENTRY;
if (strcmp(watched->obd_type->typ_name, "osc")) {
CERROR("unexpected notification of %s %s!\n",
watched->obd_type->typ_name,
watched->obd_name);
- return -EINVAL;
+ RETURN(-EINVAL);
}
uuid = &watched->u.cli.cl_import->imp_target_uuid;
lprocfs_obd_setup(obd, lvars.obd_vars);
#ifdef LPROCFS
{
- struct proc_dir_entry *entry;
+ cfs_proc_dir_entry_t *entry;
entry = create_proc_entry("target_obd", 0444,
obd->obd_proc_entry);
{
struct lov_obd *lov = &obd->u.lov;
+ ENTRY;
lprocfs_obd_cleanup(obd);
if (lov->tgts) {
int i;
struct lov_request_set *set = NULL;
struct lov_obd *lov;
struct obd_statfs osfs;
- unsigned long maxage;
+ cfs_time_t maxage;
struct lov_request *req;
int rc = 0;
ENTRY;
if (!lov->desc.ld_active_tgt_count)
RETURN(-EIO);
- maxage = jiffies - lov->desc.ld_qos_maxage * HZ;
+ maxage = cfs_time_shift(-lov->desc.ld_qos_maxage);
obd_statfs(exp->exp_obd, &osfs, maxage);
rc = lov_prep_create_set(exp, ea, src_oa, oti, &set);
struct lov_async_page *lap;
int rc;
+ ENTRY;
LASSERT(loi == NULL);
ASSERT_LSM_MAGIC(lsm);
struct lov_async_page *lap;
int rc;
+ ENTRY;
LASSERT(loi == NULL);
ASSERT_LSM_MAGIC(lsm);
struct lov_async_page *lap;
int rc;
+ ENTRY;
LASSERT(loi == NULL);
ASSERT_LSM_MAGIC(lsm);
struct lov_obd *lov = &exp->exp_obd->u.lov;
int rc = 0, i, err;
+ ENTRY;
LASSERT(loi == NULL);
ASSERT_LSM_MAGIC(lsm);
struct lov_async_page *lap;
int rc;
+ ENTRY;
LASSERT(loi == NULL);
ASSERT_LSM_MAGIC(lsm);
} while(0)
static int lov_statfs(struct obd_device *obd, struct obd_statfs *osfs,
- unsigned long max_age)
+ cfs_time_t max_age)
{
struct lov_obd *lov = &obd->u.lov;
struct obd_statfs lov_sfs;
void lov_stripe_lock(struct lov_stripe_md *md)
{
- LASSERT(md->lsm_lock_owner != current);
+ LASSERT(md->lsm_lock_owner != cfs_current());
spin_lock(&md->lsm_lock);
LASSERT(md->lsm_lock_owner == NULL);
- md->lsm_lock_owner = current;
+ md->lsm_lock_owner = cfs_current();
}
EXPORT_SYMBOL(lov_stripe_lock);
void lov_stripe_unlock(struct lov_stripe_md *md)
{
- LASSERT(md->lsm_lock_owner == current);
+ LASSERT(md->lsm_lock_owner == cfs_current());
md->lsm_lock_owner = NULL;
spin_unlock(&md->lsm_lock);
}
MODULE_DESCRIPTION("Lustre Logical Object Volume OBD driver");
MODULE_LICENSE("GPL");
-module_init(lov_init);
-module_exit(lov_exit);
+cfs_module(lov, "1.0.0", lov_init, lov_exit);
#endif
#define DEBUG_SUBSYSTEM S_LOV
#ifdef __KERNEL__
-#include <asm/div64.h>
+#include <libcfs/libcfs.h>
#else
#include <liblustre.h>
#endif
struct lov_obd *lov = &exp->exp_obd->u.lov;
obd_id last_id = 0;
+ ENTRY;
for (i = 0; i < lump->lmm_stripe_count; i++) {
__u32 len = sizeof(last_id);
oexp = lov->tgts[lump->lmm_objects[i].l_ost_idx].ltd_exp;
#define DEBUG_SUBSYSTEM S_LOV
#ifdef __KERNEL__
-#include <linux/types.h>
-#include <linux/random.h>
+#include <libcfs/libcfs.h>
#else
#include <liblustre.h>
#endif
#define DEBUG_SUBSYSTEM S_LOV
#ifdef __KERNEL__
-#include <asm/div64.h>
+#include <libcfs/libcfs.h>
#else
#include <liblustre.h>
#endif
set->set_count = 0;
set->set_completes = 0;
set->set_success = 0;
- INIT_LIST_HEAD(&set->set_list);
+ CFS_INIT_LIST_HEAD(&set->set_list);
atomic_set(&set->set_refcount, 1);
}
}
static int mdc_statfs(struct obd_device *obd, struct obd_statfs *osfs,
- unsigned long max_age)
+ cfs_time_t max_age)
{
struct ptlrpc_request *req;
struct obd_statfs *msfs;
--- /dev/null
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE plist PUBLIC "-//Apple Computer//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
+<plist version="1.0">
+<dict>
+ <key>CFBundleDevelopmentRegion</key>
+ <string>English</string>
+ <key>CFBundleExecutable</key>
+ <string>obdecho</string>
+ <key>CFBundleIconFile</key>
+ <string></string>
+ <key>CFBundleIdentifier</key>
+ <string>com.clusterfs.lustre.obdecho</string>
+ <key>CFBundleInfoDictionaryVersion</key>
+ <string>6.0</string>
+ <key>CFBundlePackageType</key>
+ <string>KEXT</string>
+ <key>CFBundleSignature</key>
+ <string>????</string>
+ <key>CFBundleVersion</key>
+ <string>1.0.1</string>
+ <key>OSBundleCompatibleVersion</key>
+ <string>1.0.0</string>
+ <key>OSBundleLibraries</key>
+ <dict>
+ <key>com.apple.kernel.bsd</key>
+ <string>1.1</string>
+ <key>com.apple.kernel.iokit</key>
+ <string>1.0.0b1</string>
+ <key>com.apple.kernel.mach</key>
+ <string>1.0.0b1</string>
+ <key>com.clusterfs.lustre.libcfs</key>
+ <string>1.0.0</string>
+ <key>com.clusterfs.lustre.lnet</key>
+ <string>1.0.0</string>
+ <key>com.clusterfs.lustre.lvfs</key>
+ <string>1.0.0</string>
+ <key>com.clusterfs.lustre.obdclass</key>
+ <string>1.0.0</string>
+ <key>com.clusterfs.lustre.ptlrpc</key>
+ <string>1.0.0</string>
+ <key>com.clusterfs.lustre.osc</key>
+ <string>1.0.0</string>
+ </dict>
+</dict>
+</plist>
endif
if MODULES
+if LINUX
modulefs_DATA = obdecho$(KMODEXT)
+endif
+
+if DARWIN
+macos_PROGRAMS := obdecho
+obdecho_SOURCES := \
+ lproc_echo.c \
+ echo.c \
+ echo_client.c
+
+obdecho_CFLAGS := $(EXTRA_KCFLAGS)
+obdecho_LDFLAGS := $(EXTRA_KLDFLAGS)
+obdecho_LDADD := $(EXTRA_KLIBS)
+
+plist_DATA := Info.plist
+
+#install-data-hook: fix-kext-ownership
+endif # DARWIN
+
endif # MODULES
MOSTLYCLEANFILES := @MOSTLYCLEANFILES@
{
struct obd_device *obd = class_exp2obd(exp);
+ ENTRY;
if (!obd) {
CERROR("invalid client cookie "LPX64"\n",
exp->exp_handle.h_cookie);
RETURN(-EINVAL);
}
- return 0;
+ RETURN(0);
}
static int echo_getattr(struct obd_export *exp, struct obdo *oa,
struct obd_device *obd = class_exp2obd(exp);
obd_id id = oa->o_id;
+ ENTRY;
if (!obd) {
CERROR("invalid client cookie "LPX64"\n",
exp->exp_handle.h_cookie);
obdo_cpy_md(oa, &obd->u.echo.eo_oa, oa->o_valid);
oa->o_id = id;
- return 0;
+ RETURN(0);
}
static int echo_setattr(struct obd_export *exp, struct obdo *oa,
{
struct obd_device *obd = class_exp2obd(exp);
+ ENTRY;
if (!obd) {
CERROR("invalid client cookie "LPX64"\n",
exp->exp_handle.h_cookie);
oti->oti_ack_locks[0].lock = obd->u.echo.eo_nl_lock;
}
- return 0;
+ RETURN(0);
}
static void
/* XXX Bug 3413; wait for a bit to ensure the BL callback has
* happened before calling ldlm_namespace_free() */
set_current_state (TASK_UNINTERRUPTIBLE);
- schedule_timeout (cfs_time_seconds(1));
+ cfs_schedule_timeout (CFS_TASK_UNINT, cfs_time_seconds(1));
ldlm_namespace_free(obd->obd_namespace, obd->obd_force);
int gfp_mask = (i < ECHO_PERSISTENT_PAGES/2) ?
CFS_ALLOC_STD : CFS_ALLOC_HIGHUSER;
- pg = alloc_pages (gfp_mask, 0);
+ pg = cfs_alloc_page (gfp_mask);
if (pg == NULL) {
echo_persistent_pages_fini ();
return (-ENOMEM);
struct lprocfs_static_vars lvars;
int rc;
+ ENTRY;
printk(KERN_INFO "Lustre: Echo OBD driver; info@clusterfs.com\n");
LASSERT(CFS_PAGE_SIZE % OBD_ECHO_BLOCK_SIZE == 0);
MODULE_DESCRIPTION("Lustre Testing Echo OBD driver");
MODULE_LICENSE("GPL");
-cfs_module(obdecho, "1.0.0", obdecho_init, obdecho_exit)
+cfs_module(obdecho, "1.0.0", obdecho_init, obdecho_exit);
#define DEBUG_SUBSYSTEM S_ECHO
#ifdef __KERNEL__
-#include <linux/version.h>
-#include <linux/module.h>
-#include <linux/fs.h>
-#include <linux/completion.h>
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
-#include <linux/iobuf.h>
-#endif
-#include <asm/div64.h>
-#include <linux/smp_lock.h>
+#include <libcfs/libcfs.h>
#else
#include <liblustre.h>
#endif
obd_count npages, i;
struct echo_async_page *eap;
struct echo_async_state eas;
- struct list_head *pos, *n;
int rc = 0;
unsigned long flags;
- CFS_LIST_HEAD(pages);
+ struct echo_async_page **aps = NULL;
+
+ ENTRY;
#if 0
int verify;
int gfp_mask;
eas.eas_lsm = lsm;
CFS_INIT_LIST_HEAD(&eas.eas_avail);
+ OBD_ALLOC(aps, npages * sizeof aps[0]);
+ if (aps == NULL)
+ RETURN(-ENOMEM);
+
/* prepare the group of pages that we're going to be keeping
* in flight */
for (i = 0; i < npages; i++) {
if (page == NULL)
GOTO(out, rc = -ENOMEM);
- page->private = 0;
- list_add_tail(&PAGE_LIST(page), &pages);
-
OBD_ALLOC(eap, sizeof(*eap));
- if (eap == NULL)
+ if (eap == NULL) {
+ cfs_free_page(page);
GOTO(out, rc = -ENOMEM);
+ }
eap->eap_magic = EAP_MAGIC;
eap->eap_page = page;
eap->eap_eas = &eas;
- page->private = (unsigned long)eap;
list_add_tail(&eap->eap_item, &eas.eas_avail);
+ aps[i] = eap;
}
/* first we spin queueing io and being woken by its completion */
spin_unlock_irqrestore(&eas.eas_lock, flags);
out:
- list_for_each_safe(pos, n, &pages) {
- cfs_page_t *page = list_entry(pos, cfs_page_t,
- PAGE_LIST_ENTRY);
+ if (aps != NULL) {
+ for (i = 0; i < npages; ++i) {
+ cfs_page_t *page;
- list_del(&PAGE_LIST(page));
- if (page->private != 0) {
- eap = (struct echo_async_page *)page->private;
+ /* OBD_ALLOC() zeroed the array; stop at the first
+ * slot that was never filled (setup failed early) */
+ eap = aps[i];
+ if (eap == NULL)
+ break;
+ page = eap->eap_page;
if (eap->eap_cookie != NULL)
- obd_teardown_async_page(exp, lsm, NULL,
+ obd_teardown_async_page(exp, lsm, NULL,
eap->eap_cookie);
OBD_FREE(eap, sizeof(*eap));
+ cfs_free_page(page);
}
- cfs_free_page(page);
+ OBD_FREE(aps, npages * sizeof aps[0]);
}
RETURN(rc);
struct obd_export *exp;
int rc;
+ ENTRY;
rc = class_connect(conn, src, cluuid);
if (rc == 0) {
exp = class_conn2export(conn);
--- /dev/null
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE plist PUBLIC "-//Apple Computer//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
+<plist version="1.0">
+<dict>
+ <key>CFBundleDevelopmentRegion</key>
+ <string>English</string>
+ <key>CFBundleExecutable</key>
+ <string>osc</string>
+ <key>CFBundleIconFile</key>
+ <string></string>
+ <key>CFBundleIdentifier</key>
+ <string>com.clusterfs.lustre.osc</string>
+ <key>CFBundleInfoDictionaryVersion</key>
+ <string>6.0</string>
+ <key>CFBundlePackageType</key>
+ <string>KEXT</string>
+ <key>CFBundleSignature</key>
+ <string>????</string>
+ <key>CFBundleVersion</key>
+ <string>1.0.1</string>
+ <key>OSBundleCompatibleVersion</key>
+ <string>1.0.0</string>
+ <key>OSBundleLibraries</key>
+ <dict>
+ <key>com.apple.kernel.bsd</key>
+ <string>1.1</string>
+ <key>com.apple.kernel.iokit</key>
+ <string>1.0.0b1</string>
+ <key>com.apple.kernel.mach</key>
+ <string>1.0.0b1</string>
+ <key>com.clusterfs.lustre.libcfs</key>
+ <string>1.0.0</string>
+ <key>com.clusterfs.lustre.lnet</key>
+ <string>1.0.0</string>
+ <key>com.clusterfs.lustre.lvfs</key>
+ <string>1.0.0</string>
+ <key>com.clusterfs.lustre.obdclass</key>
+ <string>1.0.0</string>
+ <key>com.clusterfs.lustre.ptlrpc</key>
+ <string>1.0.0</string>
+ </dict>
+</dict>
+</plist>
endif
if MODULES
+
+if LINUX
modulefs_DATA = osc$(KMODEXT)
endif
+if DARWIN
+macos_PROGRAMS := osc
+
+osc_SOURCES := \
+ osc_create.c \
+ osc_request.c
+
+osc_CFLAGS := $(EXTRA_KCFLAGS)
+osc_LDFLAGS := $(EXTRA_KLDFLAGS)
+osc_LDADD := $(EXTRA_KLIBS)
+
+plist_DATA := Info.plist
+
+#install-data-hook: fix-kext-ownership
+endif # DARWIN
+
+endif # MODULES
+
MOSTLYCLEANFILES := @MOSTLYCLEANFILES@
DIST_SOURCES = $(osc-objs:%.o=%.c) osc_internal.h
#define DEBUG_SUBSYSTEM S_OSC
#ifdef __KERNEL__
-# include <linux/version.h>
-# include <linux/module.h>
-# include <linux/mm.h>
-# include <linux/highmem.h>
-# include <linux/ctype.h>
-# include <linux/init.h>
-# if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0))
-# include <linux/workqueue.h>
-# include <linux/smp_lock.h>
-# else
-# include <linux/locks.h>
-# endif
+# include <libcfs/libcfs.h>
#else /* __KERNEL__ */
# include <liblustre.h>
#endif
#define DEBUG_SUBSYSTEM S_OSC
#ifdef __KERNEL__
-# include <linux/version.h>
-# include <linux/module.h>
-# include <linux/mm.h>
-# include <linux/highmem.h>
-# include <linux/ctype.h>
-# include <linux/init.h>
-# if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0))
-# include <linux/workqueue.h>
-# include <linux/smp_lock.h>
-# else
-# include <linux/locks.h>
-# endif
+# include <libcfs/libcfs.h>
#else /* __KERNEL__ */
# include <liblustre.h>
#endif
struct list_head *l, *tmp;
struct osc_cache_waiter *ocw;
+ ENTRY;
list_for_each_safe(l, tmp, &cli->cl_cache_waiters) {
/* if we can't dirty more, we must wait until some is written */
if (cli->cl_dirty + PAGE_SIZE > cli->cl_dirty_max) {
if (pga->count > nob_read) {
/* EOF inside this page */
- ptr = kmap(pga->pg) + (pga->off & ~PAGE_MASK);
+ ptr = cfs_kmap(pga->pg) + (pga->off & ~PAGE_MASK);
memset(ptr + nob_read, 0, pga->count - nob_read);
- kunmap(pga->pg);
+ cfs_kunmap(pga->pg);
page_count--;
pga++;
break;
/* zero remaining pages */
while (page_count-- > 0) {
- ptr = kmap(pga->pg) + (pga->off & ~PAGE_MASK);
+ ptr = cfs_kmap(pga->pg) + (pga->off & ~PAGE_MASK);
memset(ptr, 0, pga->count);
- kunmap(pga->pg);
+ cfs_kunmap(pga->pg);
pga++;
}
}
LASSERT (pg_count > 0);
while (nob > 0 && pg_count > 0) {
- char *ptr = kmap(pga->pg);
+ char *ptr = cfs_kmap(pga->pg);
int off = pga->off & ~PAGE_MASK;
int count = pga->count > nob ? nob : pga->count;
cksum = crc32_le(cksum, ptr + off, count);
- kunmap(pga->pg);
+ cfs_kunmap(pga->pg);
LL_CDEBUG_PAGE(D_PAGE, pga->pg, "off %d checksum %x\n",
off, cksum);
int rc;
struct ptlrpc_request_pool *pool;
+ ENTRY;
opc = ((cmd & OBD_BRW_WRITE) != 0) ? OST_WRITE : OST_READ;
pool = ((cmd & OBD_BRW_WRITE) != 0) ? cli->cl_rq_pool : NULL;
OBD_FAIL_RETURN(OBD_FAIL_OSC_BRW_PREP_REQ, -ENOMEM);
req = ptlrpc_prep_req_pool(imp, opc, 3, size, NULL, pool);
if (req == NULL)
- return (-ENOMEM);
+ RETURN (-ENOMEM);
if (opc == OST_WRITE)
desc = ptlrpc_prep_bulk_imp (req, page_count,
LASSERTF((pg->off & ~PAGE_MASK) + pg->count <= PAGE_SIZE,
"i: %d pg: %p off: "LPU64", count: %u\n", i, pg,
pg->off, pg->count);
+#ifdef __LINUX__
LASSERTF(i == 0 || pg->off > pg_prev->off,
"i %d p_c %u pg %p [pri %lu ind %lu] off "LPU64
" prev_pg %p [pri %lu ind %lu] off "LPU64"\n",
pg->pg, pg->pg->private, pg->pg->index, pg->off,
pg_prev->pg, pg_prev->pg->private, pg_prev->pg->index,
pg_prev->off);
+#else
+ LASSERTF(i == 0 || pg->off > pg_prev->off,
+ "i %d p_c %u\n", i, page_count);
+#endif
LASSERT((pga[0].flag & OBD_BRW_SRVLOCK) ==
(pg->flag & OBD_BRW_SRVLOCK));
*niocountp = niocount;
*requested_nobp = requested_nob;
*reqp = req;
- return (0);
+ RETURN (0);
out:
ptlrpc_req_finished (req);
- return (rc);
+ RETURN (rc);
}
static void check_write_csum(__u32 cli, __u32 srv, int requested_nob,
static void osc_ap_completion(struct client_obd *cli, struct obdo *oa,
struct osc_async_page *oap, int sent, int rc)
{
+ ENTRY;
osc_exit_cache(cli, oap, sent);
oap->oap_async_flags = 0;
oap->oap_interrupted = 0;
oap->oap_caller_ops->ap_completion(oap->oap_caller_data, oap->oap_cmd,
oa, rc);
+ EXIT;
}
static int brw_interpret_oap(struct ptlrpc_request *request,
struct list_head *pos;
int i, rc;
+ ENTRY;
LASSERT(!list_empty(rpc_list));
OBD_ALLOC(pga, sizeof(*pga) * page_count);
pga[i].count = oap->oap_count;
pga[i].flag = oap->oap_brw_flags;
CDEBUG(0, "put page %p index %lu oap %p flg %x to pga\n",
- pga[i].pg, oap->oap_page->index, oap, pga[i].flag);
+ pga[i].pg, cfs_page_index(oap->oap_page), oap, pga[i].flag);
i++;
}
struct osc_async_page *oap = NULL;
struct osc_brw_async_args *aa;
struct obd_async_page_ops *ops;
- LIST_HEAD(rpc_list);
+ CFS_LIST_HEAD(rpc_list);
unsigned int ending_offset;
unsigned starting_offset = 0;
ENTRY;
* XXX nikita: this assertion should be adjusted when lustre
* starts using PG_writeback for pages being written out.
*/
-#if defined(__KERNEL__)
+#if defined(__KERNEL__) && defined(__LINUX__)
LASSERT(PageLocked(oap->oap_page));
#endif
/* If there is a gap at the start of this page, it can't merge
LASSERT(sizeof(*aa) <= sizeof(request->rq_async_args));
aa = (struct osc_brw_async_args *)&request->rq_async_args;
- INIT_LIST_HEAD(&aa->aa_oaps);
+ CFS_INIT_LIST_HEAD(&aa->aa_oaps);
list_splice(&rpc_list, &aa->aa_oaps);
- INIT_LIST_HEAD(&rpc_list);
+ CFS_INIT_LIST_HEAD(&rpc_list);
if (cmd == OBD_BRW_READ) {
lprocfs_oh_tally_log2(&cli->cl_read_page_hist, page_count);
struct osc_cache_waiter ocw;
struct l_wait_info lwi = { 0 };
+ ENTRY;
CDEBUG(D_CACHE, "dirty: %ld dirty_max: %ld dropped: %lu grant: %lu\n",
cli->cl_dirty, cli->cl_dirty_max, cli->cl_lost_grant,
cli->cl_avail_grant);
* of queued writes and create a discontiguous rpc stream */
if (cli->cl_dirty_max < PAGE_SIZE || cli->cl_ar.ar_force_sync ||
loi->loi_ar.ar_force_sync)
- return(-EDQUOT);
+ RETURN(-EDQUOT);
/* Hopefully normal case - cache space and write credits available */
if (cli->cl_dirty + PAGE_SIZE <= cli->cl_dirty_max &&
cli->cl_avail_grant >= PAGE_SIZE) {
/* account for ourselves */
osc_consume_write_grant(cli, oap);
- return(0);
+ RETURN(0);
}
/* Make sure that there are write rpcs in flight to wait for. This
oap->oap_page = page;
oap->oap_obj_off = offset;
- INIT_LIST_HEAD(&oap->oap_pending_item);
- INIT_LIST_HEAD(&oap->oap_urgent_item);
- INIT_LIST_HEAD(&oap->oap_rpc_item);
+ CFS_INIT_LIST_HEAD(&oap->oap_pending_item);
+ CFS_INIT_LIST_HEAD(&oap->oap_urgent_item);
+ CFS_INIT_LIST_HEAD(&oap->oap_rpc_item);
oap->oap_occ.occ_interrupted = osc_occ_interrupted;
}
l_lock(&lock->l_resource->lr_namespace->ns_lock);
#ifdef __KERNEL__
+#ifdef __LINUX__
+ /* Liang XXX: Darwin and WinNT checks should be added */
if (lock->l_ast_data && lock->l_ast_data != data) {
struct inode *new_inode = data;
struct inode *old_inode = lock->l_ast_data;
new_inode, new_inode->i_ino, new_inode->i_generation);
}
#endif
+#endif
lock->l_ast_data = data;
lock->l_flags |= (flags & LDLM_FL_NO_LRU);
l_unlock(&lock->l_resource->lr_namespace->ns_lock);
}
static int osc_statfs(struct obd_device *obd, struct obd_statfs *osfs,
- unsigned long max_age)
+ cfs_time_t max_age)
{
struct obd_statfs *msfs;
struct ptlrpc_request *request;
GOTO(out, err);
default:
CDEBUG(D_INODE, "unrecognised ioctl %#x by %s\n",
- cmd, current->comm);
+ cmd, cfs_curproc_comm());
GOTO(out, err = -ENOTTY);
}
out:
struct client_obd *cli;
int rc = 0;
+ ENTRY;
LASSERT(imp->imp_obd == obd);
switch (event) {
{
int rc;
+ ENTRY;
rc = ptlrpcd_addref();
if (rc)
- return rc;
+ RETURN(rc);
rc = client_obd_setup(obd, len, buf);
if (rc) {
struct client_obd *cli = &obd->u.cli;
int rc;
+ ENTRY;
ptlrpc_lprocfs_unregister_obd(obd);
lprocfs_obd_cleanup(obd);
MODULE_DESCRIPTION("Lustre Object Storage Client (OSC)");
MODULE_LICENSE("GPL");
-module_init(osc_init);
-module_exit(osc_exit);
+cfs_module(osc, "1.0.0", osc_init, osc_exit);
#endif
--- /dev/null
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE plist PUBLIC "-//Apple Computer//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
+<plist version="1.0">
+<dict>
+ <key>CFBundleDevelopmentRegion</key>
+ <string>English</string>
+ <key>CFBundleExecutable</key>
+ <string>lvfs</string>
+ <key>CFBundleIconFile</key>
+ <string></string>
+ <key>CFBundleIdentifier</key>
+ <string>com.clusterfs.lustre.lvfs</string>
+ <key>CFBundleInfoDictionaryVersion</key>
+ <string>6.0</string>
+ <key>CFBundlePackageType</key>
+ <string>KEXT</string>
+ <key>CFBundleSignature</key>
+ <string>????</string>
+ <key>CFBundleVersion</key>
+ <string>1.0.1</string>
+ <key>OSBundleCompatibleVersion</key>
+ <string>1.0.0</string>
+ <key>OSBundleLibraries</key>
+ <dict>
+ <key>com.apple.kernel.bsd</key>
+ <string>1.1</string>
+ <key>com.apple.kernel.iokit</key>
+ <string>1.0.0b1</string>
+ <key>com.apple.kernel.mach</key>
+ <string>1.0.0b1</string>
+ <key>com.clusterfs.lustre.libcfs</key>
+ <string>1.0.0</string>
+ <key>com.clusterfs.lustre.lnet</key>
+ <string>1.0.0</string>
+ </dict>
+</dict>
+</plist>
struct obd_import *imp = req->rq_import;
struct ptlrpc_bulk_desc *desc;
+ ENTRY;
LASSERT(type == BULK_PUT_SINK || type == BULK_GET_SOURCE);
desc = new_bulk(npages, type, portal);
if (desc == NULL)
struct obd_export *exp = req->rq_export;
struct ptlrpc_bulk_desc *desc;
+ ENTRY;
LASSERT(type == BULK_PUT_SOURCE || type == BULK_GET_SINK);
desc = new_bulk(npages, type, portal);
{
struct ptlrpc_request_set *set;
+ ENTRY;
OBD_ALLOC(set, sizeof *set);
if (!set)
RETURN(NULL);
struct obd_import *imp = req->rq_import;
unsigned long flags;
+ ENTRY;
atomic_dec(&imp->imp_replay_inflight);
if (!req->rq_replied) {
struct ptlrpc_connect_async_args *aa;
unsigned long flags;
+ ENTRY;
spin_lock_irqsave(&imp->imp_lock, flags);
if (imp->imp_state == LUSTRE_IMP_CLOSED) {
spin_unlock_irqrestore(&imp->imp_lock, flags);
static int completed_replay_interpret(struct ptlrpc_request *req,
void * data, int rc)
{
+ ENTRY;
atomic_dec(&req->rq_import->imp_replay_inflight);
if (req->rq_status == 0) {
ptlrpc_import_recovery_state_machine(req->rq_import);
char *target_start;
int target_len;
+ ENTRY;
if (imp->imp_state == LUSTRE_IMP_EVICTED) {
deuuidify(imp->imp_target_uuid.uuid, NULL,
&target_start, &target_len);
{
int rc = 0;
+ ENTRY;
RETURN(rc);
}
#ifndef __KERNEL__
#include <liblustre.h>
-#else
-#include <linux/fs.h>
#endif
#include <obd_class.h>
}
#if RS_DEBUG
-LIST_HEAD(ptlrpc_rs_debug_lru);
+CFS_LIST_HEAD(ptlrpc_rs_debug_lru);
spinlock_t ptlrpc_rs_debug_lock;
#define PTLRPC_RS_DEBUG_LRU_ADD(rs) \
#ifndef __KERNEL__
#include <liblustre.h>
#else
-#include <linux/version.h>
-#include <asm/semaphore.h>
#define DEBUG_SUBSYSTEM S_RPC
#endif
cfs_block_allsigs();
- LASSERTF(strlen(data->name) < sizeof(current->comm),
+ LASSERTF(strlen(data->name) < CFS_CURPROC_COMM_MAX,
"name %d > len %d\n",
- (int)strlen(data->name), (int)sizeof(current->comm));
- THREAD_NAME(current->comm, sizeof(current->comm) - 1, "%s", data->name);
+ (int)strlen(data->name), CFS_CURPROC_COMM_MAX);
+ THREAD_NAME(cfs_curproc_comm(), CFS_CURPROC_COMM_MAX - 1, "%s", data->name);
unlock_kernel();
/* Record that the thread is running */
cfs_duration_t time_to_next_ping;
struct list_head *iter;
- down(&pinger_sem);
+ mutex_down(&pinger_sem);
list_for_each(iter, &pinger_imports) {
struct obd_import *imp =
list_entry(iter, struct obd_import,
cfs_time_seconds(update_interval))))
ptlrpc_update_next_ping(imp);
}
- up(&pinger_sem);
+ mutex_up(&pinger_sem);
/* Wait until the next ping time, or until we're stopped. We
* sleep here smaller interval of two possible (ping or
thread->t_flags = SVC_STOPPED;
cfs_waitq_signal(&thread->t_ctl_waitq);
- CDEBUG(D_NET, "pinger thread exiting, process %d\n", current->pid);
+ CDEBUG(D_NET, "pinger thread exiting, process %d\n", cfs_curproc_pid());
return 0;
}
OBD_ALLOC(pinger_thread, sizeof(*pinger_thread));
if (pinger_thread == NULL)
RETURN(-ENOMEM);
- init_waitqueue_head(&pinger_thread->t_ctl_waitq);
+ cfs_waitq_init(&pinger_thread->t_ctl_waitq);
d.name = "ll_ping";
d.thread = pinger_thread;
/* CLONE_VM and CLONE_FILES just avoid a needless copy, because we
* just drop the VM and FILES in ptlrpc_daemonize() right away. */
- rc = kernel_thread(ptlrpc_pinger_main, &d, CLONE_VM | CLONE_FILES);
+ rc = cfs_kernel_thread(ptlrpc_pinger_main, &d, CLONE_VM | CLONE_FILES);
if (rc < 0) {
CERROR("cannot start thread: %d\n", rc);
OBD_FREE(pinger_thread, sizeof(*pinger_thread));
if (pinger_thread == NULL)
RETURN(-EALREADY);
- down(&pinger_sem);
+ mutex_down(&pinger_sem);
pinger_thread->t_flags = SVC_STOPPING;
cfs_waitq_signal(&pinger_thread->t_ctl_waitq);
- up(&pinger_sem);
+ mutex_up(&pinger_sem);
l_wait_event(pinger_thread->t_ctl_waitq,
(pinger_thread->t_flags & SVC_STOPPED), &lwi);
if (!list_empty(&imp->imp_pinger_chain))
RETURN(-EALREADY);
- down(&pinger_sem);
+ mutex_down(&pinger_sem);
CDEBUG(D_HA, "adding pingable import %s->%s\n",
imp->imp_obd->obd_uuid.uuid, imp->imp_target_uuid.uuid);
ptlrpc_update_next_ping(imp);
class_import_get(imp);
ptlrpc_pinger_wake_up();
- up(&pinger_sem);
+ mutex_up(&pinger_sem);
RETURN(0);
}
if (list_empty(&imp->imp_pinger_chain))
RETURN(-ENOENT);
- down(&pinger_sem);
+ mutex_down(&pinger_sem);
list_del_init(&imp->imp_pinger_chain);
CDEBUG(D_HA, "removing pingable import %s->%s\n",
imp->imp_obd->obd_uuid.uuid, imp->imp_target_uuid.uuid);
class_import_put(imp);
- up(&pinger_sem);
+ mutex_up(&pinger_sem);
RETURN(0);
}
set = pd->pd_set;
/* add rpcs into set */
- down(&pinger_sem);
+ mutex_down(&pinger_sem);
list_for_each(iter, &pinger_imports) {
struct obd_import *imp =
list_entry(iter, struct obd_import, imp_pinger_chain);
}
}
pd->pd_this_ping = curtime;
- up(&pinger_sem);
+ mutex_up(&pinger_sem);
/* Might be empty, that's OK. */
if (set->set_remaining == 0)
}
/* Expire all the requests that didn't come back. */
- down(&pinger_sem);
+ mutex_down(&pinger_sem);
list_for_each(iter, &set->set_requests) {
req = list_entry(iter, struct ptlrpc_request,
rq_set_chain);
CDEBUG(D_HA, "pinger initiate expire_one_request\n");
ptlrpc_expire_one_request(req);
}
- up(&pinger_sem);
+ mutex_up(&pinger_sem);
ptlrpc_set_destroy(set);
pd->pd_set = NULL;
void ptlrpc_pinger_sending_on_import(struct obd_import *imp)
{
- down(&pinger_sem);
+ mutex_down(&pinger_sem);
ptlrpc_update_next_ping(imp);
if (pinger_args.pd_set == NULL &&
time_before(imp->imp_next_ping, pinger_args.pd_next_ping)) {
imp->imp_next_ping, cfs_time_current());
pinger_args.pd_next_ping = imp->imp_next_ping;
}
- up(&pinger_sem);
+ mutex_up(&pinger_sem);
}
int ptlrpc_pinger_add_import(struct obd_import *imp)
imp->imp_obd->obd_uuid.uuid, imp->imp_target_uuid.uuid);
ptlrpc_pinger_sending_on_import(imp);
- down(&pinger_sem);
+ mutex_down(&pinger_sem);
list_add_tail(&imp->imp_pinger_chain, &pinger_imports);
class_import_get(imp);
- up(&pinger_sem);
+ mutex_up(&pinger_sem);
RETURN(0);
}
if (list_empty(&imp->imp_pinger_chain))
RETURN(-ENOENT);
- down(&pinger_sem);
+ mutex_down(&pinger_sem);
list_del_init(&imp->imp_pinger_chain);
CDEBUG(D_HA, "removing pingable import %s->%s\n",
imp->imp_obd->obd_uuid.uuid, imp->imp_target_uuid.uuid);
class_import_put(imp);
- up(&pinger_sem);
+ mutex_up(&pinger_sem);
RETURN(0);
}
{
int rc = 0;
+ ENTRY;
memset(pc, 0, sizeof(*pc));
init_completion(&pc->pc_starting);
init_completion(&pc->pc_finishing);
int rc = 0;
ENTRY;
- down(&ptlrpcd_sem);
+ mutex_down(&ptlrpcd_sem);
if (++ptlrpcd_users != 1)
GOTO(out, rc);
GOTO(out, rc);
}
out:
- up(&ptlrpcd_sem);
+ mutex_up(&ptlrpcd_sem);
RETURN(rc);
}
void ptlrpcd_decref(void)
{
- down(&ptlrpcd_sem);
+ mutex_down(&ptlrpcd_sem);
if (--ptlrpcd_users == 0) {
ptlrpcd_stop(&ptlrpcd_pc);
ptlrpcd_stop(&ptlrpcd_recovery_pc);
}
- up(&ptlrpcd_sem);
+ mutex_up(&ptlrpcd_sem);
}
LASSERT(ctxt);
- down(&ctxt->loc_sem);
+ mutex_down(&ctxt->loc_sem);
if (ctxt->loc_imp == NULL) {
CWARN("no import for ctxt %p\n", ctxt);
GOTO(out, rc = 0);
llcd_send(llcd);
}
out:
- up(&ctxt->loc_sem);
+ mutex_up(&ctxt->loc_sem);
return rc;
}
EXPORT_SYMBOL(llog_obd_repl_cancel);
if (exp && (ctxt->loc_imp == exp->exp_imp_reverse)) {
CDEBUG(D_HA, "reverse import disconnected, put llcd %p:%p\n",
ctxt->loc_llcd, ctxt);
- down(&ctxt->loc_sem);
+ mutex_down(&ctxt->loc_sem);
if (ctxt->loc_llcd != NULL) {
llcd_put(ctxt->loc_llcd);
ctxt->loc_llcd = NULL;
}
ctxt->loc_imp = NULL;
- up(&ctxt->loc_sem);
+ mutex_up(&ctxt->loc_sem);
} else {
rc = llog_cancel(ctxt, NULL, 0, NULL, OBD_LLOG_FL_SENDNOW);
}
continue;
}
- down(&llcd->llcd_ctxt->loc_sem);
+ mutex_down(&llcd->llcd_ctxt->loc_sem);
if (llcd->llcd_ctxt->loc_imp == NULL) {
- up(&llcd->llcd_ctxt->loc_sem);
+ mutex_up(&llcd->llcd_ctxt->loc_sem);
CWARN("import will be destroyed, put "
"llcd %p:%p\n", llcd, llcd->llcd_ctxt);
llcd_put(llcd);
continue;
}
- up(&llcd->llcd_ctxt->loc_sem);
+ mutex_up(&llcd->llcd_ctxt->loc_sem);
if (!import || (import == LP_POISON)) {
CERROR("No import %p (llcd=%p, ctxt=%p)\n",
request->rq_reply_portal = LDLM_CANCEL_REPLY_PORTAL;
request->rq_replen = lustre_msg_size(0, NULL);
- down(&llcd->llcd_ctxt->loc_sem);
+ mutex_down(&llcd->llcd_ctxt->loc_sem);
if (llcd->llcd_ctxt->loc_imp == NULL) {
- up(&llcd->llcd_ctxt->loc_sem);
+ mutex_up(&llcd->llcd_ctxt->loc_sem);
CWARN("import will be destroyed, put "
"llcd %p:%p\n", llcd, llcd->llcd_ctxt);
llcd_put(llcd);
ptlrpc_req_finished(request);
continue;
}
- up(&llcd->llcd_ctxt->loc_sem);
+ mutex_up(&llcd->llcd_ctxt->loc_sem);
rc = ptlrpc_queue_wait(request);
ptlrpc_req_finished(request);
CFS_INIT_LIST_HEAD(&lcm->lcm_thread_idle);
spin_lock_init(&lcm->lcm_thread_lock);
atomic_set(&lcm->lcm_thread_numidle, 0);
- init_waitqueue_head(&lcm->lcm_waitq);
+ cfs_waitq_init(&lcm->lcm_waitq);
CFS_INIT_LIST_HEAD(&lcm->lcm_llcd_pending);
CFS_INIT_LIST_HEAD(&lcm->lcm_llcd_resend);
CFS_INIT_LIST_HEAD(&lcm->lcm_llcd_free);
int rc;
ENTRY;
- up(&data->llpa_sem);
+ mutex_up(&data->llpa_sem);
lock_kernel();
ptlrpc_daemonize(); /* thread does IO to log files */
THREAD_NAME(cfs_curproc_comm(), CFS_CURPROC_COMM_MAX - 1, "llog_process");
int rc;
ENTRY;
- down(&llpa.llpa_sem);
+ mutex_down(&llpa.llpa_sem);
llpa.llpa_ctxt = ctxt;
llpa.llpa_cb = handle;
llpa.llpa_arg = arg;
llog_sync(ctxt, NULL);
}
- down(&ctxt->loc_sem);
+ mutex_down(&ctxt->loc_sem);
ctxt->loc_gen = *gen;
llcd = llcd_grab();
if (llcd == NULL) {
CERROR("couldn't get an llcd\n");
- up(&ctxt->loc_sem);
+ mutex_up(&ctxt->loc_sem);
RETURN(-ENOMEM);
}
llcd->llcd_ctxt = ctxt;
ctxt->loc_llcd = llcd;
- up(&ctxt->loc_sem);
+ mutex_up(&ctxt->loc_sem);
rc = llog_recovery_generic(ctxt, ctxt->llog_proc_cb, logid);
if (rc != 0)
#define DEBUG_SUBSYSTEM S_RPC
#ifdef __KERNEL__
-# include <linux/config.h>
-# include <linux/module.h>
-# include <linux/kmod.h>
-# include <linux/list.h>
+# include <libcfs/libcfs.h>
#else
# include <liblustre.h>
#endif
struct obd_device *obd = imp->imp_obd;
int rc = 0;
+ ENTRY;
LASSERT(obd);
/* When deactivating, mark import invalid, and abort in-flight