-/* -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil; -*-
- * vim:expandtab:shiftwidth=4:tabstop=4:
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
*
- * Copyright (c) 2004 Cluster File Systems, Inc.
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
*
- * This file is part of Lustre, http://www.lustre.org.
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
*
- * Lustre is free software; you can redistribute it and/or modify it under
- * the terms of version 2 of the GNU General Public License as published by
- * the Free Software Foundation. Lustre is distributed in the hope that it
- * will be useful, but WITHOUT ANY WARRANTY; without even the implied
- * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details. You should have received a
- * copy of the GNU General Public License along with Lustre; if not, write
- * to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139,
- * USA.
+ * Copyright (c) 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
*/
#include <libcfs/libcfs.h>
-#if _X86_
+#if defined(_X86_)
void __declspec (naked) FASTCALL
-atomic_add(
+cfs_atomic_add(
int i,
- atomic_t *v
+ cfs_atomic_t *v
)
{
// ECX = i
}
void __declspec (naked) FASTCALL
-atomic_sub(
+cfs_atomic_sub(
int i,
- atomic_t *v
+ cfs_atomic_t *v
)
{
// ECX = i
}
void __declspec (naked) FASTCALL
-atomic_inc(
- atomic_t *v
+cfs_atomic_inc(
+ cfs_atomic_t *v
)
{
//InterlockedIncrement((PULONG)(&((v)->counter)));
}
void __declspec (naked) FASTCALL
-atomic_dec(
- atomic_t *v
+cfs_atomic_dec(
+ cfs_atomic_t *v
)
{
// ECX = v ; [ECX][0] = v->counter
}
int __declspec (naked) FASTCALL
-atomic_sub_and_test(
+cfs_atomic_sub_and_test(
int i,
- atomic_t *v
+ cfs_atomic_t *v
)
{
}
int __declspec (naked) FASTCALL
-atomic_inc_and_test(
- atomic_t *v
+cfs_atomic_inc_and_test(
+ cfs_atomic_t *v
)
{
// ECX = v ; [ECX][0] = v->counter
}
int __declspec (naked) FASTCALL
-atomic_dec_and_test(
- atomic_t *v
+cfs_atomic_dec_and_test(
+ cfs_atomic_t *v
)
{
// ECX = v ; [ECX][0] = v->counter
}
}
-#else
+#elif defined(_AMD64_)
void FASTCALL
-atomic_add(
+cfs_atomic_add(
int i,
- atomic_t *v
+ cfs_atomic_t *v
)
{
InterlockedExchangeAdd( (PULONG)(&((v)->counter)) , (LONG) (i));
}
void FASTCALL
-atomic_sub(
+cfs_atomic_sub(
int i,
- atomic_t *v
+ cfs_atomic_t *v
)
{
InterlockedExchangeAdd( (PULONG)(&((v)->counter)) , (LONG) (-1*i));
}
void FASTCALL
-atomic_inc(
- atomic_t *v
+cfs_atomic_inc(
+ cfs_atomic_t *v
)
{
InterlockedIncrement((PULONG)(&((v)->counter)));
}
void FASTCALL
-atomic_dec(
- atomic_t *v
+cfs_atomic_dec(
+ cfs_atomic_t *v
)
{
InterlockedDecrement((PULONG)(&((v)->counter)));
}
int FASTCALL
-atomic_sub_and_test(
+cfs_atomic_sub_and_test(
int i,
- atomic_t *v
+ cfs_atomic_t *v
)
{
int counter, result;
}
int FASTCALL
-atomic_inc_and_test(
- atomic_t *v
+cfs_atomic_inc_and_test(
+ cfs_atomic_t *v
)
{
int counter, result;
}
int FASTCALL
-atomic_dec_and_test(
- atomic_t *v
+cfs_atomic_dec_and_test(
+ cfs_atomic_t *v
)
{
int counter, result;
do {
counter = v->counter;
- result = counter + 1;
+ result = counter - 1;
	} while ( InterlockedCompareExchange(
			&(v->counter),
			result,
			counter) != counter);

	return (result == 0);
}
+#else
+
+#error CPU arch type isn't specified.
+
#endif
+/**
+ * atomic_add_return - add integer and return
+ * \param v pointer of type atomic_t
+ * \param i integer value to add
+ *
+ * Atomically adds \a i to \a v and returns \a i + \a v
+ */
+int FASTCALL cfs_atomic_add_return(int i, cfs_atomic_t *v)
+{
+ int counter, result;
+
+ do {
+
+ counter = v->counter;
+ result = counter + i;
+
+ } while ( InterlockedCompareExchange(
+ &(v->counter),
+ result,
+ counter) != counter);
+
+ return result;
+
+}
+
+/**
+ * atomic_sub_return - subtract integer and return
+ * \param v pointer of type atomic_t
+ * \param i integer value to subtract
+ *
+ * Atomically subtracts \a i from \a v and returns \a v - \a i
+ */
+int FASTCALL cfs_atomic_sub_return(int i, cfs_atomic_t *v)
+{
+ return cfs_atomic_add_return(-i, v);
+}
+
+int FASTCALL cfs_atomic_dec_and_lock(cfs_atomic_t *v, spinlock_t *lock)
+{
+ if (cfs_atomic_read(v) != 1)
+ return 0;
+
+ spin_lock(lock);
+ if (cfs_atomic_dec_and_test(v))
+ return 1;
+ spin_unlock(lock);
+ return 0;
+}
+
/*
* rw spinlock
void
-rwlock_init(rwlock_t * rwlock)
+rwlock_init(rwlock_t *rwlock)
{
- spin_lock_init(&rwlock->guard);
- rwlock->count = 0;
+ spin_lock_init(&rwlock->guard);
+ rwlock->count = 0;
}
void
-rwlock_fini(rwlock_t * rwlock)
+cfs_rwlock_fini(rwlock_t *rwlock)
{
}
void
-read_lock(rwlock_t * rwlock)
+read_lock(rwlock_t *rwlock)
{
cfs_task_t * task = cfs_current();
PTASK_SLOT slot = NULL;
slot->irql = KeRaiseIrqlToDpcLevel();
- while (TRUE) {
- spin_lock(&rwlock->guard);
- if (rwlock->count >= 0)
- break;
- spin_unlock(&rwlock->guard);
- }
+ while (TRUE) {
+ spin_lock(&rwlock->guard);
+ if (rwlock->count >= 0)
+ break;
+ spin_unlock(&rwlock->guard);
+ }
rwlock->count++;
spin_unlock(&rwlock->guard);
}
void
-read_unlock(rwlock_t * rwlock)
+read_unlock(rwlock_t *rwlock)
{
cfs_task_t * task = cfs_current();
PTASK_SLOT slot = NULL;
slot = CONTAINING_RECORD(task, TASK_SLOT, task);
ASSERT(slot->Magic == TASKSLT_MAGIC);
-
- spin_lock(&rwlock->guard);
+
+ spin_lock(&rwlock->guard);
ASSERT(rwlock->count > 0);
- rwlock->count--;
- if (rwlock < 0) {
- cfs_enter_debugger();
- }
+ rwlock->count--;
+	if (rwlock->count < 0)
+		cfs_enter_debugger();
spin_unlock(&rwlock->guard);
- KeLowerIrql(slot->irql);
+ KeLowerIrql(slot->irql);
}
void
-write_lock(rwlock_t * rwlock)
+write_lock(rwlock_t *rwlock)
{
cfs_task_t * task = cfs_current();
PTASK_SLOT slot = NULL;
slot->irql = KeRaiseIrqlToDpcLevel();
- while (TRUE) {
- spin_lock(&rwlock->guard);
- if (rwlock->count == 0)
- break;
- spin_unlock(&rwlock->guard);
- }
+ while (TRUE) {
+ spin_lock(&rwlock->guard);
+ if (rwlock->count == 0)
+ break;
+ spin_unlock(&rwlock->guard);
+ }
rwlock->count = -1;
spin_unlock(&rwlock->guard);
}
void
-write_unlock(rwlock_t * rwlock)
+write_unlock(rwlock_t *rwlock)
{
cfs_task_t * task = cfs_current();
PTASK_SLOT slot = NULL;
slot = CONTAINING_RECORD(task, TASK_SLOT, task);
ASSERT(slot->Magic == TASKSLT_MAGIC);
-
- spin_lock(&rwlock->guard);
+
+ spin_lock(&rwlock->guard);
ASSERT(rwlock->count == -1);
- rwlock->count = 0;
+ rwlock->count = 0;
spin_unlock(&rwlock->guard);
- KeLowerIrql(slot->irql);
+ KeLowerIrql(slot->irql);
}