kernel: atomic_*() functions rework

* Atomically changed variables no longer need to be declared volatile.
* Drop support for atomically getting and setting unaligned data.
* Introduce atomic_get_and_set[64](), which works the way atomic_set[64]()
  used to; atomic_set[64]() no longer returns the previous value (a short
  migration sketch follows the commit metadata below).
Pawel Dziepak 2013-11-05 22:32:59 +01:00
parent e7dba861fd
commit 077c84eb27
47 changed files with 360 additions and 253 deletions
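
The caller-visible change is the split of the old atomic_set[64]() into a
plain (void) atomic store plus an explicit get-and-set. A minimal migration
sketch against the signatures declared in this commit; the variable sFlag and
the function name are illustrative only:

#include <SupportDefs.h>

static int32 sFlag = 0;

static void
migration_example()
{
    // Before this commit callers wrote: int32 old = atomic_set(&sFlag, 1);
    // atomic_set() is now a plain atomic store...
    atomic_set(&sFlag, 1);
    // ...and the previous value has to be requested explicitly:
    int32 old = atomic_get_and_set(&sFlag, 0);
    (void)old;
}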


@@ -118,17 +118,19 @@ struct media_node;
 extern "C" {
 #endif
-extern int32 atomic_set(vint32 *value, int32 newValue);
-extern int32 atomic_test_and_set(vint32 *value, int32 newValue,
+extern void atomic_set(int32* value, int32 newValue);
+extern int32 atomic_get_and_set(int32* value, int32 newValue);
+extern int32 atomic_test_and_set(int32 *value, int32 newValue,
     int32 testAgainst);
-extern int32 atomic_get(vint32 *value);
-extern int64 atomic_set64(vint64 *value, int64 newValue);
-extern int64 atomic_test_and_set64(vint64 *value, int64 newValue,
+extern int32 atomic_get(int32 *value);
+extern void atomic_set64(int64* value, int64 newValue);
+extern int64 atomic_get_and_set64(int64* value, int64 newValue);
+extern int64 atomic_test_and_set64(int64 *value, int64 newValue,
     int64 testAgainst);
-extern int64 atomic_get64(vint64 *value);
-extern int64 atomic_add64(vint64 *value, int64 addValue);
-extern int64 atomic_and64(vint64 *value, int64 andValue);
-extern int64 atomic_or64(vint64 *value, int64 orValue);
+extern int64 atomic_get64(int64 *value);
+extern int64 atomic_add64(int64 *value, int64 addValue);
+extern int64 atomic_and64(int64 *value, int64 andValue);
+extern int64 atomic_or64(int64 *value, int64 orValue);
 extern size_t strnlen(const char *string, size_t count);


@@ -20,9 +20,9 @@ typedef ulong cpu_status;
 #if B_DEBUG_SPINLOCK_CONTENTION
 typedef struct {
-    vint32 lock;
-    vint32 count_low;
-    vint32 count_high;
+    int32 lock;
+    int32 count_low;
+    int32 count_high;
 } spinlock;
 # define B_SPINLOCK_INITIALIZER { 0, 0, 0 }
@@ -33,7 +33,7 @@ typedef ulong cpu_status;
 } while (false)
 # define B_SPINLOCK_IS_LOCKED(spinlock) ((spinlock)->lock > 0)
 #else
-typedef vint32 spinlock;
+typedef int32 spinlock;
 # define B_SPINLOCK_INITIALIZER 0
 # define B_INITIALIZE_SPINLOCK(lock) do { *(lock) = 0; } while (false)


@@ -396,8 +396,8 @@ private:
     int32 withLength);
 private:
-    vint32& _ReferenceCount();
-    const vint32& _ReferenceCount() const;
+    int32& _ReferenceCount();
+    const int32& _ReferenceCount() const;
     bool _IsShareable() const;
     void _FreePrivateData();


@@ -196,19 +196,21 @@ extern "C" {
 #endif
 /* Atomic functions; previous value is returned */
-extern int32 atomic_set(vint32 *value, int32 newValue);
-extern int32 atomic_test_and_set(vint32 *value, int32 newValue, int32 testAgainst);
-extern int32 atomic_add(vint32 *value, int32 addValue);
-extern int32 atomic_and(vint32 *value, int32 andValue);
-extern int32 atomic_or(vint32 *value, int32 orValue);
-extern int32 atomic_get(vint32 *value);
+extern void atomic_set(int32* value, int32 newValue);
+extern int32 atomic_get_and_set(int32* value, int32 newValue);
+extern int32 atomic_test_and_set(int32 *value, int32 newValue, int32 testAgainst);
+extern int32 atomic_add(int32 *value, int32 addValue);
+extern int32 atomic_and(int32 *value, int32 andValue);
+extern int32 atomic_or(int32 *value, int32 orValue);
+extern int32 atomic_get(int32 *value);
-extern int64 atomic_set64(vint64 *value, int64 newValue);
-extern int64 atomic_test_and_set64(vint64 *value, int64 newValue, int64 testAgainst);
-extern int64 atomic_add64(vint64 *value, int64 addValue);
-extern int64 atomic_and64(vint64 *value, int64 andValue);
-extern int64 atomic_or64(vint64 *value, int64 orValue);
-extern int64 atomic_get64(vint64 *value);
+extern void atomic_set64(int64* value, int64 newValue);
+extern int64 atomic_get_and_set64(int64* value, int64 newValue);
+extern int64 atomic_test_and_set64(int64 *value, int64 newValue, int64 testAgainst);
+extern int64 atomic_add64(int64 *value, int64 addValue);
+extern int64 atomic_and64(int64 *value, int64 andValue);
+extern int64 atomic_or64(int64 *value, int64 orValue);
+extern int64 atomic_get64(int64 *value);
 /* Other stuff */
 extern void* get_stack_frame(void);
@@ -240,15 +242,6 @@ extern void* get_stack_frame(void);
     __sync_fetch_and_and(valuePointer, andValue)
 #define atomic_or(valuePointer, orValue) \
     __sync_fetch_and_or(valuePointer, orValue)
-#define atomic_get(valuePointer) \
-    __sync_fetch_and_or(valuePointer, 0)
-    // No equivalent to atomic_get(). We simulate it via atomic or. On most
-    // (all?) 32+ bit architectures aligned 32 bit reads will be atomic anyway,
-    // though.
-// Note: No equivalent for atomic_set(). It could be simulated by a
-// get + atomic test and set loop, but calling the atomic_set() implementation
-// might be faster.
 #endif // B_USE_BUILTIN_ATOMIC_FUNCTIONS && __GNUC__ >= 4
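
The removed comment block describes simulating an atomic load with the OR
builtin. As a sketch of that trick (the helper name is hypothetical): ORing
with zero leaves the value untouched but makes the read part of an atomic
read-modify-write:

static inline int32
atomic_get_via_or(int32* value)
{
    // __sync_fetch_and_or() returns the previous value; OR with 0
    // changes nothing, so this behaves as an atomic load.
    return __sync_fetch_and_or(value, 0);
}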


@@ -30,7 +30,7 @@
 typedef uint32_t bus_addr_t;
 typedef uint32_t bus_size_t;
-#define atomic_readandclear_int(ptr) atomic_set((int32 *)(ptr), 0)
+#define atomic_readandclear_int(ptr) atomic_get_and_set((int32*)(ptr), 0)
 #define atomic_set_int(ptr, value) atomic_or((int32 *)(ptr), value)
 #define mtx_lock mutex_lock


@@ -50,6 +50,7 @@
 // #pragma mark - fssh_atomic.h
 #define atomic_set fssh_atomic_set
+#define atomic_get_and_set fssh_atomic_get_and_set
 #define atomic_test_and_set fssh_atomic_test_and_set
 #define atomic_add fssh_atomic_add
 #define atomic_and fssh_atomic_and


@@ -15,21 +15,23 @@ extern "C" {
 #endif
-int32_t fssh_atomic_set(vint32_t *value, int32_t newValue);
-int32_t fssh_atomic_test_and_set(vint32_t *value, int32_t newValue,
+void fssh_atomic_set(int32_t* value, int32_t newValue);
+int32_t fssh_atomic_get_and_set(int32_t* value, int32_t newValue);
+int32_t fssh_atomic_test_and_set(int32_t *value, int32_t newValue,
     int32_t testAgainst);
-int32_t fssh_atomic_add(vint32_t *value, int32_t addValue);
-int32_t fssh_atomic_and(vint32_t *value, int32_t andValue);
-int32_t fssh_atomic_or(vint32_t *value, int32_t orValue);
-int32_t fssh_atomic_get(vint32_t *value);
+int32_t fssh_atomic_add(int32_t *value, int32_t addValue);
+int32_t fssh_atomic_and(int32_t *value, int32_t andValue);
+int32_t fssh_atomic_or(int32_t *value, int32_t orValue);
+int32_t fssh_atomic_get(int32_t *value);
-int64_t fssh_atomic_set64(vint64_t *value, int64_t newValue);
-int64_t fssh_atomic_test_and_set64(vint64_t *value, int64_t newValue,
+void fssh_atomic_set64(int64_t* value, int64_t newValue);
+int64_t fssh_atomic_get_and_set64(int64_t* value, int64_t newValue);
+int64_t fssh_atomic_test_and_set64(int64_t *value, int64_t newValue,
     int64_t testAgainst);
-int64_t fssh_atomic_add64(vint64_t *value, int64_t addValue);
-int64_t fssh_atomic_and64(vint64_t *value, int64_t andValue);
-int64_t fssh_atomic_or64(vint64_t *value, int64_t orValue);
-int64_t fssh_atomic_get64(vint64_t *value);
+int64_t fssh_atomic_add64(int64_t *value, int64_t addValue);
+int64_t fssh_atomic_and64(int64_t *value, int64_t andValue);
+int64_t fssh_atomic_or64(int64_t *value, int64_t orValue);
+int64_t fssh_atomic_get64(int64_t *value);
 #ifdef __cplusplus
 }


@@ -47,7 +47,7 @@ status_t smp_per_cpu_init(struct kernel_args *args, int32 cpu);
 status_t smp_init_post_generic_syscalls(void);
 bool smp_trap_non_boot_cpus(int32 cpu, uint32* rendezVous);
 void smp_wake_up_non_boot_cpus(void);
-void smp_cpu_rendezvous(volatile uint32 *var, int current_cpu);
+void smp_cpu_rendezvous(uint32 *var, int current_cpu);
 void smp_send_ici(int32 targetCPU, int32 message, addr_t data, addr_t data2, addr_t data3,
     void *data_ptr, uint32 flags);
 void smp_send_multicast_ici(cpu_mask_t cpuMask, int32 message, addr_t data,
@@ -107,7 +107,7 @@ static inline bool
 try_acquire_write_seqlock_inline(seqlock* lock) {
     bool succeed = try_acquire_spinlock(&lock->lock);
     if (succeed)
-        atomic_add(&lock->count, 1);
+        atomic_add((int32*)&lock->count, 1);
     return succeed;
 }
@@ -115,26 +115,26 @@ try_acquire_write_seqlock_inline(seqlock* lock) {
 static inline void
 acquire_write_seqlock_inline(seqlock* lock) {
     acquire_spinlock(&lock->lock);
-    atomic_add(&lock->count, 1);
+    atomic_add((int32*)&lock->count, 1);
 }
 static inline void
 release_write_seqlock_inline(seqlock* lock) {
-    atomic_add(&lock->count, 1);
+    atomic_add((int32*)&lock->count, 1);
     release_spinlock(&lock->lock);
 }
 static inline uint32
 acquire_read_seqlock_inline(seqlock* lock) {
-    return atomic_get(&lock->count);
+    return atomic_get((int32*)&lock->count);
 }
 static inline bool
 release_read_seqlock_inline(seqlock* lock, uint32 count) {
-    uint32 current = atomic_get(&lock->count);
+    uint32 current = atomic_get((int32*)&lock->count);
     return count % 2 == 0 && current == count;
 }
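
For reference, a seqlock reader keeps retrying until
release_read_seqlock_inline() confirms the counter was even and unchanged
across the read. A hedged sketch of the read side; sTimeLock, sTimeData and
read_time are hypothetical names, and the seqlock is assumed to be
initialized elsewhere:

static seqlock sTimeLock;
static bigtime_t sTimeData;

static bigtime_t
read_time()
{
    bigtime_t result;
    uint32 count;
    do {
        count = acquire_read_seqlock_inline(&sTimeLock);
        result = sTimeData;
        // retry if a writer incremented the counter meanwhile
    } while (!release_read_seqlock_inline(&sTimeLock, count));
    return result;
}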


@@ -18,22 +18,34 @@ atomic_pointer_test_and_set(PointerType** _pointer, const PointerType* set,
     const PointerType* test)
 {
 #if LONG_MAX == INT_MAX
-    return (PointerType*)atomic_test_and_set((vint32*)_pointer, (int32)set,
+    return (PointerType*)atomic_test_and_set((int32*)_pointer, (int32)set,
         (int32)test);
 #else
-    return (PointerType*)atomic_test_and_set64((vint64*)_pointer, (int64)set,
+    return (PointerType*)atomic_test_and_set64((int64*)_pointer, (int64)set,
         (int64)test);
 #endif
 }
 template<typename PointerType> PointerType*
-atomic_pointer_set(PointerType** _pointer, const PointerType* set)
+atomic_pointer_get_and_set(PointerType** _pointer, const PointerType* set)
 {
 #if LONG_MAX == INT_MAX
-    return (PointerType*)atomic_set((vint32*)_pointer, (int32)set);
+    return (PointerType*)atomic_get_and_set((int32*)_pointer, (int32)set);
 #else
-    return (PointerType*)atomic_set64((vint64*)_pointer, (int64)set);
+    return (PointerType*)atomic_get_and_set64((int64*)_pointer, (int64)set);
 #endif
 }
+template<typename PointerType> void
+atomic_pointer_set(PointerType** _pointer, const PointerType* set)
+{
+    ASSERT((addr_t(_pointer) & (sizeof(PointerType*) - 1)) == 0);
+#if LONG_MAX == INT_MAX
+    atomic_set((int32*)_pointer, (int32)set);
+#else
+    atomic_set64((int64*)_pointer, (int64)set);
+#endif
+}
@@ -41,10 +53,11 @@ atomic_pointer_set(PointerType** _pointer, const PointerType* set)
 template<typename PointerType> PointerType*
 atomic_pointer_get(PointerType** _pointer)
 {
+    ASSERT((addr_t(_pointer) & (sizeof(PointerType*) - 1)) == 0);
 #if LONG_MAX == INT_MAX
-    return (PointerType*)atomic_get((vint32*)_pointer);
+    return (PointerType*)atomic_get((int32*)_pointer);
 #else
-    return (PointerType*)atomic_get64((vint64*)_pointer);
+    return (PointerType*)atomic_get64((int64*)_pointer);
 #endif
 }
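
A usage sketch for the renamed template: atomic_pointer_get_and_set()
publishes a new pointer and hands the previous one back to the caller, which
then owns it exclusively. Buffer, sCurrentBuffer and swap_buffer are
illustrative names:

struct Buffer;
static Buffer* sCurrentBuffer = NULL;

static Buffer*
swap_buffer(Buffer* newBuffer)
{
    // Publish newBuffer; the returned pointer is the old buffer, which
    // no other thread can obtain from sCurrentBuffer anymore.
    return atomic_pointer_get_and_set(&sCurrentBuffer, newBuffer);
}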


@@ -50,12 +50,12 @@ typedef struct _pthread_rwlockattr {
 typedef void (*pthread_key_destructor)(void *data);
 struct pthread_key {
-    vint32 sequence;
+    int32 sequence;
     pthread_key_destructor destructor;
 };
 struct pthread_key_data {
-    vint32 sequence;
+    int32 sequence;
     void *value;
 };


@@ -31,7 +31,7 @@ private:
 private:
     BDirectory fBaseDirectory;
-    vint32 fNextNumber;
+    int32 fNextNumber;
 };


@@ -30,7 +30,7 @@ protected:
     virtual void LastReferenceReleased();
 protected:
-    vint32 fReferenceCount;
+    int32 fReferenceCount;
 };


@@ -30,7 +30,7 @@ public:
     void GetUnchecked();
 private:
-    vint32 fUseCount;
+    int32 fUseCount;
     BWeakReferenceable* fObject;
 };


@@ -71,7 +71,7 @@ int32 recursive_lock_get_recursion(recursive_lock *lock);
 #define INIT_ONCE_UNINITIALIZED -1
 #define INIT_ONCE_INITIALIZED -4
-status_t __init_once(vint32* control, status_t (*initRoutine)(void*),
+status_t __init_once(int32* control, status_t (*initRoutine)(void*),
     void* data);
 #ifdef __cplusplus


@@ -32,12 +32,12 @@ public:
     return fString._IsShareable();
 }
-static vint32& DataRefCount(char* data)
+static int32& DataRefCount(char* data)
 {
     return *(((int32 *)data) - 2);
 }
-vint32& DataRefCount()
+int32& DataRefCount()
 {
     return DataRefCount(Data());
 }


@@ -28,7 +28,7 @@ struct ps2_dev {
     bool active;
     uint8 idx;
     sem_id result_sem;
-    vint32 flags;
+    int32 flags;
     uint8 * result_buf;
     int result_buf_idx;
     int result_buf_cnt;


@@ -264,7 +264,7 @@ HIDDevice::MaybeScheduleTransfer()
     if (fRemoved)
         return B_ERROR;
-    if (atomic_set(&fTransferScheduled, 1) != 0) {
+    if (atomic_get_and_set(&fTransferScheduled, 1) != 0) {
         // someone else already caused a transfer to be scheduled
         return B_OK;
     }
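
The pattern here is a one-shot scheduling flag: only the caller that flips
the flag from 0 to 1 queues the work; everyone else backs off. A standalone
sketch with hypothetical names (the completion path is assumed to reset the
flag):

#include <SupportDefs.h>

static int32 sWorkScheduled = 0;

static status_t
maybe_schedule_work()
{
    if (atomic_get_and_set(&sWorkScheduled, 1) != 0)
        return B_OK;    // work is already scheduled
    // ...queue the work here; the completion handler is assumed to call
    // atomic_set(&sWorkScheduled, 0) so scheduling can happen again.
    return B_OK;
}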


@@ -33,7 +33,7 @@ struct battery_driver_cookie {
 struct battery_device_cookie {
     battery_driver_cookie* driver_cookie;
-    vint32 stop_watching;
+    int32 stop_watching;
 };


@@ -35,7 +35,7 @@
 static device_manager_info *sDeviceManager;
 static ConditionVariable sFrequencyCondition;
-static vint32 sCurrentID;
+static int32 sCurrentID;
 static status_t


@@ -22,7 +22,7 @@ struct est_cookie {
     freq_info* available_states;
     uint8 number_states;
-    vint32 stop_watching;
+    int32 stop_watching;
 };


@@ -109,7 +109,7 @@ private:
     RPC::Server* fServer;
-    vint64 fId;
+    int64 fId;
     dev_t fDevId;
     InodeIdMap fInoIdMap;


@@ -187,7 +187,7 @@ put_request_buffer(arp_entry* entry, net_buffer* buffer)
 static void
 delete_request_buffer(arp_entry* entry)
 {
-    net_buffer* buffer = atomic_pointer_set(&entry->request_buffer,
+    net_buffer* buffer = atomic_pointer_get_and_set(&entry->request_buffer,
         kDeletedBuffer);
     if (buffer != NULL && buffer != kDeletedBuffer)
         gBufferModule->free(buffer);


@@ -251,7 +251,7 @@ put_request_buffer(ndp_entry* entry, net_buffer* buffer)
 static void
 delete_request_buffer(ndp_entry* entry)
 {
-    net_buffer* buffer = atomic_pointer_set(&entry->request_buffer,
+    net_buffer* buffer = atomic_pointer_get_and_set(&entry->request_buffer,
         kDeletedBuffer);
     if (buffer != NULL && buffer != kDeletedBuffer)
         gBufferModule->free(buffer);


@@ -166,12 +166,12 @@ static status_t read_data(net_buffer* _buffer, size_t offset, void* data,
 #if ENABLE_STATS
-static vint32 sAllocatedDataHeaderCount = 0;
-static vint32 sAllocatedNetBufferCount = 0;
-static vint32 sEverAllocatedDataHeaderCount = 0;
-static vint32 sEverAllocatedNetBufferCount = 0;
-static vint32 sMaxAllocatedDataHeaderCount = 0;
-static vint32 sMaxAllocatedNetBufferCount = 0;
+static int32 sAllocatedDataHeaderCount = 0;
+static int32 sAllocatedNetBufferCount = 0;
+static int32 sEverAllocatedDataHeaderCount = 0;
+static int32 sEverAllocatedNetBufferCount = 0;
+static int32 sMaxAllocatedDataHeaderCount = 0;
+static int32 sMaxAllocatedNetBufferCount = 0;
 #endif


@@ -90,7 +90,7 @@ private:
     ExpungeHandler fExpungeHandler;
     FlagsHandler fFlagsHandler;
-    vint32 fWatching;
+    int32 fWatching;
     BString fSelectedMailbox;


@@ -120,7 +120,7 @@ private:
     BString fCommandError;
-    vint32 fStopNow;
+    int32 fStopNow;
     bool fIsConnected;
 };


@@ -76,7 +76,7 @@ public:
     void RemoveAllDataSources();
     bigtime_t RefreshInterval() const
-        { return atomic_get64((vint64*)&fRefreshInterval); }
+        { return atomic_get64((int64*)&fRefreshInterval); }
 protected:
     virtual void AttachedToWindow();

@@ -67,7 +67,7 @@ public:
 protected:
     virtual void _WatchPowerStatus() = 0;
-    vint32 fIsWatching;
+    int32 fIsWatching;
     sem_id fWaitSem;
 private:


@@ -6,8 +6,15 @@
 #include <SupportDefs.h>
+void
+atomic_set(int32 *value, int32 newValue)
+{
+    *value = newValue;
+}
 int32
-atomic_set(vint32 *value, int32 newValue)
+atomic_get_and_set(int32 *value, int32 newValue)
 {
     int32 oldValue = *value;
     *value = newValue;
@@ -16,7 +23,7 @@ atomic_set(vint32 *value, int32 newValue)
 int32
-atomic_test_and_set(vint32 *value, int32 newValue, int32 testAgainst)
+atomic_test_and_set(int32 *value, int32 newValue, int32 testAgainst)
 {
     int32 oldValue = *value;
     if (oldValue == testAgainst)
@@ -26,7 +33,7 @@ atomic_test_and_set(vint32 *value, int32 newValue, int32 testAgainst)
 int32
-atomic_add(vint32 *value, int32 addValue)
+atomic_add(int32 *value, int32 addValue)
 {
     int32 oldValue = *value;
     *value += addValue;
@@ -35,7 +42,7 @@ atomic_add(vint32 *value, int32 addValue)
 int32
-atomic_and(vint32 *value, int32 andValue)
+atomic_and(int32 *value, int32 andValue)
 {
     int32 oldValue = *value;
     *value &= andValue;
@@ -44,7 +51,7 @@ atomic_and(vint32 *value, int32 andValue)
 int32
-atomic_or(vint32 *value, int32 orValue)
+atomic_or(int32 *value, int32 orValue)
 {
     int32 oldValue = *value;
     *value |= orValue;
@@ -53,14 +60,21 @@ atomic_or(vint32 *value, int32 orValue)
 int32
-atomic_get(vint32 *value)
+atomic_get(int32 *value)
 {
     return *value;
 }
+void
+atomic_set64(int64 *value, int64 newValue)
+{
+    *value = newValue;
+}
 int64
-atomic_set64(vint64 *value, int64 newValue)
+atomic_get_and_set64(int64 *value, int64 newValue)
 {
     int64 oldValue = *value;
     *value = newValue;
@@ -68,7 +82,7 @@ atomic_set64(vint64 *value, int64 newValue)
 }
 int64
-atomic_test_and_set64(vint64 *value, int64 newValue, int64 testAgainst)
+atomic_test_and_set64(int64 *value, int64 newValue, int64 testAgainst)
 {
     int64 oldValue = *value;
     if (oldValue == testAgainst)
@@ -77,7 +91,7 @@ atomic_test_and_set64(vint64 *value, int64 newValue, int64 testAgainst)
 }
 int64
-atomic_add64(vint64 *value, int64 addValue)
+atomic_add64(int64 *value, int64 addValue)
 {
     int64 oldValue = *value;
     *value += addValue;
@@ -85,7 +99,7 @@ atomic_add64(vint64 *value, int64 addValue)
 }
 int64
-atomic_and64(vint64 *value, int64 andValue)
+atomic_and64(int64 *value, int64 andValue)
 {
     int64 oldValue = *value;
     *value &= andValue;
@@ -93,7 +107,7 @@ atomic_and64(vint64 *value, int64 andValue)
 }
 int64
-atomic_or64(vint64 *value, int64 orValue)
+atomic_or64(int64 *value, int64 orValue)
 {
     int64 oldValue = *value;
     *value |= orValue;
@@ -101,7 +115,7 @@ atomic_or64(vint64 *value, int64 orValue)
 }
 int64
-atomic_get64(vint64 *value)
+atomic_get64(int64 *value)
 {
     return *value;
 }
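
These fallback implementations spell out what each operation means. As an
analogy only (not code from this commit), the same semantics in standard
C++11 atomics:

#include <atomic>
#include <cstdint>

static std::atomic<int32_t> sValue(0);

static void
cxx11_analogy()
{
    sValue.store(1);                    // atomic_set()
    int32_t old = sValue.exchange(2);   // atomic_get_and_set()
    old = sValue.fetch_add(3);          // atomic_add()
    int32_t current = sValue.load();    // atomic_get()
    (void)old;
    (void)current;
}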


@@ -183,14 +183,14 @@ BStringRef::operator&()
 // #pragma mark - BString
-inline vint32&
+inline int32&
 BString::_ReferenceCount()
 {
     return Private::DataRefCount(fPrivateData);
 }
-inline const vint32&
+inline const int32&
 BString::_ReferenceCount() const
 {
     return Private::DataRefCount(fPrivateData);


@@ -33,7 +33,7 @@ struct SignalEvent::EventSignal : Signal {
     bool MarkUsed()
     {
-        return atomic_set(&fInUse, 1) != 0;
+        return atomic_get_and_set(&fInUse, 1) != 0;
     }
     void SetUnused()
@@ -216,7 +216,7 @@ CreateThreadEvent::Create(const ThreadCreationAttributes& attributes)
 status_t
 CreateThreadEvent::Fire()
 {
-    bool wasPending = atomic_set(&fPendingDPC, 1) != 0;
+    bool wasPending = atomic_get_and_set(&fPendingDPC, 1) != 0;
     if (wasPending)
         return B_BUSY;


@@ -205,7 +205,7 @@ set_mtrr(void* _parameter, int cpu)
     // sCpuRendezvous2 before the last CPU has actually left the loop in
     // smp_cpu_rendezvous();
     if (cpu == 0)
-        atomic_set((vint32*)&sCpuRendezvous3, 0);
+        atomic_set((int32*)&sCpuRendezvous3, 0);
     disable_caches();
@@ -233,7 +233,7 @@ set_mtrrs(void* _parameter, int cpu)
     // sCpuRendezvous2 before the last CPU has actually left the loop in
     // smp_cpu_rendezvous();
     if (cpu == 0)
-        atomic_set((vint32*)&sCpuRendezvous3, 0);
+        atomic_set((int32*)&sCpuRendezvous3, 0);
     disable_caches();
@@ -259,7 +259,7 @@ init_mtrrs(void* _unused, int cpu)
     // sCpuRendezvous2 before the last CPU has actually left the loop in
     // smp_cpu_rendezvous();
     if (cpu == 0)
-        atomic_set((vint32*)&sCpuRendezvous3, 0);
+        atomic_set((int32*)&sCpuRendezvous3, 0);
     disable_caches();


@@ -108,7 +108,7 @@ X86PagingMethod32Bit::Method()
 X86PagingMethod32Bit::SetPageTableEntry(page_table_entry* entry,
     page_table_entry newEntry)
 {
-    return atomic_set((int32*)entry, newEntry);
+    return atomic_get_and_set((int32*)entry, newEntry);
 }


@@ -116,7 +116,7 @@ X86PagingMethod64Bit::Method()
 /*static*/ inline uint64
 X86PagingMethod64Bit::SetTableEntry(uint64* entry, uint64 newEntry)
 {
-    return atomic_set64((int64*)entry, newEntry);
+    return atomic_get_and_set64((int64*)entry, newEntry);
 }


@@ -158,7 +158,7 @@ X86PagingMethodPAE::PageDirEntryForAddress(
 X86PagingMethodPAE::SetPageTableEntry(pae_page_table_entry* entry,
     pae_page_table_entry newEntry)
 {
-    return atomic_set64((int64*)entry, newEntry);
+    return atomic_get_and_set64((int64*)entry, newEntry);
 }


@@ -165,7 +165,8 @@ EntryCache::Lookup(ino_t dirID, const char* name, ino_t& _nodeID)
     if (entry == NULL)
         return false;
-    int32 oldGeneration = atomic_set(&entry->generation, fCurrentGeneration);
+    int32 oldGeneration = atomic_get_and_set(&entry->generation,
+        fCurrentGeneration);
     if (oldGeneration == fCurrentGeneration || entry->index < 0) {
         // The entry is already in the current generation or is being moved to
         // it by another thread.


@@ -34,14 +34,14 @@ struct EntryCacheEntry {
     EntryCacheEntry* hash_link;
     ino_t node_id;
     ino_t dir_id;
-    vint32 generation;
-    vint32 index;
+    int32 generation;
+    int32 index;
     char name[1];
 };
 struct EntryCacheGeneration {
-    vint32 next_index;
+    int32 next_index;
     EntryCacheEntry** entries;
     EntryCacheGeneration();


@@ -30,15 +30,15 @@ const static uint32 kMaxUnusedVnodes = 8192;
 */
 static mutex sUnusedVnodesLock = MUTEX_INITIALIZER("unused vnodes");
 static list sUnusedVnodeList;
-static vuint32 sUnusedVnodes = 0;
+static uint32 sUnusedVnodes = 0;
 static const int32 kMaxHotVnodes = 1024;
 static rw_lock sHotVnodesLock = RW_LOCK_INITIALIZER("hot vnodes");
 static Vnode* sHotVnodes[kMaxHotVnodes];
-static vint32 sNextHotVnodeIndex = 0;
+static int32 sNextHotVnodeIndex = 0;
 static const int32 kUnusedVnodesCheckInterval = 64;
-static vint32 sUnusedVnodesCheckCount = 0;
+static int32 sUnusedVnodesCheckCount = 0;
 /*! Must be called with sHotVnodesLock write-locked.
@@ -48,7 +48,7 @@ flush_hot_vnodes_locked()
 {
     MutexLocker unusedLocker(sUnusedVnodesLock);
-    int32 count = std::min((int32)sNextHotVnodeIndex, kMaxHotVnodes);
+    int32 count = std::min(sNextHotVnodeIndex, kMaxHotVnodes);
     for (int32 i = 0; i < count; i++) {
         Vnode* vnode = sHotVnodes[i];
         if (vnode == NULL)
@@ -87,7 +87,7 @@ vnode_unused(Vnode* vnode)
     bool result = false;
     int32 checkCount = atomic_add(&sUnusedVnodesCheckCount, 1);
     if (checkCount == kUnusedVnodesCheckInterval) {
-        uint32 unusedCount = sUnusedVnodes;
+        uint32 unusedCount = atomic_get((int32*)&sUnusedVnodes);
         if (unusedCount > kMaxUnusedVnodes
             && low_resource_state(
                 B_KERNEL_RESOURCE_PAGES | B_KERNEL_RESOURCE_MEMORY)
@@ -164,7 +164,7 @@ vnode_to_be_freed(Vnode* vnode)
     if (vnode->IsHot()) {
         // node is hot -- remove it from the array
         // TODO: Maybe better completely flush the array while at it?
-        int32 count = sNextHotVnodeIndex;
+        int32 count = atomic_get(&sNextHotVnodeIndex);
         count = std::min(count, kMaxHotVnodes);
         for (int32 i = 0; i < count; i++) {
             if (sHotVnodes[i] == vnode) {


@@ -385,8 +385,8 @@ static mutex sIpcLock;
 static mutex sXsiMessageQueueLock;
 static uint32 sGlobalSequenceNumber = 1;
-static vint32 sXsiMessageCount = 0;
-static vint32 sXsiMessageQueueCount = 0;
+static int32 sXsiMessageCount = 0;
+static int32 sXsiMessageQueueCount = 0;
 // #pragma mark -
@@ -690,7 +690,7 @@ _user_xsi_msgget(key_t key, int flags)
     if (create) {
         // Create a new message queue for this key
-        if (sXsiMessageQueueCount >= MAX_XSI_MESSAGE_QUEUE) {
+        if (atomic_get(&sXsiMessageQueueCount) >= MAX_XSI_MESSAGE_QUEUE) {
             TRACE_ERROR(("xsi_msgget: reached limit of maximum number of "
                 "message queues\n"));
             return ENOSPC;


@@ -79,9 +79,9 @@ enum mailbox_source {
     MAILBOX_BCAST,
 };
-static vint32 sBootCPUSpin = 0;
+static int32 sBootCPUSpin = 0;
-static vint32 sEarlyCPUCall = 0;
+static int32 sEarlyCPUCall = 0;
 static void (*sEarlyCPUCallFunction)(void*, int);
 void* sEarlyCPUCallCookie;
@@ -109,7 +109,7 @@ static struct {
     spinlock *lock;
 } sLastCaller[NUM_LAST_CALLERS];
-static vint32 sLastIndex = 0;
+static int32 sLastIndex = 0;
     // Is incremented atomically. Must be % NUM_LAST_CALLERS before being used
    // as index into sLastCaller. Note that it has to be cast to uint32
     // before applying the modulo operation, since otherwise after overflowing
@@ -129,7 +129,7 @@ push_lock_caller(void* caller, spinlock* lock)
 static void*
 find_lock_caller(spinlock* lock)
 {
-    int32 lastIndex = (uint32)sLastIndex % NUM_LAST_CALLERS;
+    int32 lastIndex = (uint32)atomic_get(&sLastIndex) % NUM_LAST_CALLERS;
     for (int32 i = 0; i < NUM_LAST_CALLERS; i++) {
         int32 index = (NUM_LAST_CALLERS + lastIndex - 1 - i) % NUM_LAST_CALLERS;
@@ -522,7 +522,7 @@ bool
 try_acquire_write_seqlock(seqlock* lock) {
     bool succeed = try_acquire_spinlock(&lock->lock);
     if (succeed)
-        atomic_add(&lock->count, 1);
+        atomic_add((int32*)&lock->count, 1);
     return succeed;
 }
@@ -530,26 +530,26 @@ try_acquire_write_seqlock(seqlock* lock) {
 void
 acquire_write_seqlock(seqlock* lock) {
     acquire_spinlock(&lock->lock);
-    atomic_add(&lock->count, 1);
+    atomic_add((int32*)&lock->count, 1);
 }
 void
 release_write_seqlock(seqlock* lock) {
-    atomic_add(&lock->count, 1);
+    atomic_add((int32*)&lock->count, 1);
     release_spinlock(&lock->lock);
 }
 uint32
 acquire_read_seqlock(seqlock* lock) {
-    return atomic_get(&lock->count);
+    return atomic_get((int32*)&lock->count);
 }
 bool
 release_read_seqlock(seqlock* lock, uint32 count) {
-    uint32 current = atomic_get(&lock->count);
+    uint32 current = atomic_get((int32*)&lock->count);
     if (count % 2 == 1 || current != count) {
         PAUSE();
@@ -870,10 +870,10 @@ call_all_cpus_early(void (*function)(void*, int), void* cookie)
     uint32 cpuMask = (1 << sNumCPUs) - 2;
         // all CPUs but the boot cpu
-    sEarlyCPUCall = cpuMask;
+    atomic_set(&sEarlyCPUCall, cpuMask);
     // wait for all CPUs to finish
-    while ((sEarlyCPUCall & cpuMask) != 0)
+    while ((atomic_get(&sEarlyCPUCall) & cpuMask) != 0)
         PAUSE();
 }
@@ -1166,8 +1166,8 @@ smp_trap_non_boot_cpus(int32 cpu, uint32* rendezVous)
     smp_cpu_rendezvous(rendezVous, cpu);
-    while (sBootCPUSpin == 0) {
-        if ((sEarlyCPUCall & (1 << cpu)) != 0)
+    while (atomic_get(&sBootCPUSpin) == 0) {
+        if ((atomic_get(&sEarlyCPUCall) & (1 << cpu)) != 0)
             process_early_cpu_call(cpu);
         PAUSE();
@@ -1185,7 +1185,7 @@ smp_wake_up_non_boot_cpus()
     sICIEnabled = true;
     // resume non boot CPUs
-    sBootCPUSpin = 1;
+    atomic_set(&sBootCPUSpin, 1);
 }
@@ -1200,11 +1200,12 @@ smp_wake_up_non_boot_cpus()
     ensured via another rendez-vous) the variable can be reset.
 */
 void
-smp_cpu_rendezvous(volatile uint32* var, int current_cpu)
+smp_cpu_rendezvous(uint32* var, int current_cpu)
 {
-    atomic_or((vint32*)var, 1 << current_cpu);
+    atomic_or((int32*)var, 1 << current_cpu);
-    while (*var != (((uint32)1 << sNumCPUs) - 1))
+    uint32 allReady = ((uint32)1 << sNumCPUs) - 1;
+    while ((uint32)atomic_get((int32*)var) != allReady)
         PAUSE();
 }
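
smp_cpu_rendezvous() gives every CPU one bit in the mask: each caller ORs in
its own bit, then spins until all sNumCPUs bits are set. The same logic as a
standalone sketch (sRendezvous and the parameters are illustrative):

static uint32 sRendezvous = 0;

static void
rendezvous(int cpu, int numCPUs)
{
    atomic_or((int32*)&sRendezvous, 1 << cpu);
    uint32 allReady = ((uint32)1 << numCPUs) - 1;
    while ((uint32)atomic_get((int32*)&sRendezvous) != allReady)
        ;    // spin until every CPU has checked in
}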


@@ -1,4 +1,7 @@
 /*
+ * Copyright 2013, Paweł Dziepak, pdziepak@quarnos.org.
+ * Distributed under the terms of the MIT License.
+ *
 ** Copyright 2003, Marcus Overhagen. All rights reserved.
 ** Distributed under the terms of the OpenBeOS license.
 **
@@ -6,96 +9,133 @@
 ** Distributed under the terms of the NewOS License.
 */
 #include <asm_defs.h>
 .text
-/* int32 atomic_set(vint32 *value, int32 newValue) */
+/* void atomic_set(int32* value, int32 newValue) */
 FUNCTION(atomic_set):
-    movl 4(%esp),%edx
-    movl 8(%esp),%eax
+    movl 4(%esp), %edx
+    movl 8(%esp), %eax
     lock
-    xchg %eax,(%edx)
+    addl $0, (%esp)
+    movl %eax, (%edx)
     ret
 FUNCTION_END(atomic_set)
-/* int32 atomic_test_and_set(vint32 *value, int32 newValue, int32 testAgainst) */
+/* int32 atomic_get_and_set(int32* value, int32 newValue) */
+FUNCTION(atomic_get_and_set):
+    movl 4(%esp), %edx
+    movl 8(%esp), %eax
+    xchg %eax, (%edx)
+    ret
+FUNCTION_END(atomic_get_and_set)
+/* int32 atomic_test_and_set(int32* value, int32 newValue,
+    int32 testAgainst) */
 FUNCTION(atomic_test_and_set):
-    movl 4(%esp),%edx
-    movl 8(%esp),%ecx
-    movl 12(%esp),%eax
+    movl 4(%esp), %edx
+    movl 8(%esp), %ecx
+    movl 12(%esp), %eax
     lock
-    cmpxchgl %ecx,(%edx)
+    cmpxchgl %ecx, (%edx)
    ret
 FUNCTION_END(atomic_test_and_set)
-/* int32 atomic_add(vint32 *value, int32 addValue) */
+/* int32 atomic_add(int32* value, int32 addValue) */
 FUNCTION(atomic_add):
-    movl 4(%esp),%edx
-    movl 8(%esp),%eax
+    movl 4(%esp), %edx
+    movl 8(%esp), %eax
     lock
-    xaddl %eax,(%edx)
+    xaddl %eax, (%edx)
     ret
 FUNCTION_END(atomic_add)
-/* int32 atomic_and(vint32 *value, int32 andValue) */
+/* int32 atomic_and(int32* value, int32 andValue) */
 FUNCTION(atomic_and):
-    movl 4(%esp),%edx
-_atomic_and1:
-    movl 8(%esp),%ecx
-    movl (%edx),%eax
-    andl %eax,%ecx
+    movl 4(%esp), %edx
+1:
+    movl 8(%esp), %ecx
+    movl (%edx), %eax
+    andl %eax, %ecx
     lock
-    cmpxchgl %ecx,(%edx)
-    jnz _atomic_and1
+    cmpxchgl %ecx, (%edx)
+    jnz 1b
    ret
 FUNCTION_END(atomic_and)
-/* int32 atomic_or(vint32 *value, int32 orValue) */
+/* int32 atomic_or(int32* value, int32 orValue) */
 FUNCTION(atomic_or):
-    movl 4(%esp),%edx
-_atomic_or1:
-    movl 8(%esp),%ecx
-    movl (%edx),%eax
-    orl %eax,%ecx
+    movl 4(%esp), %edx
+1:
+    movl 8(%esp), %ecx
+    movl (%edx), %eax
+    orl %eax, %ecx
     lock
-    cmpxchgl %ecx,(%edx)
-    jnz _atomic_or1
+    cmpxchgl %ecx, (%edx)
+    jnz 1b
    ret
 FUNCTION_END(atomic_or)
-/* int32 atomic_get(vint32 *value) */
+/* int32 atomic_get(int32* value) */
 FUNCTION(atomic_get):
     movl 4(%esp), %edx
-_atomic_get1:
     movl (%edx), %eax
-    movl %eax, %ecx
     lock
-    cmpxchgl %ecx, (%edx)
-    jnz _atomic_get1
+    addl $0, (%esp)
    ret
 FUNCTION_END(atomic_get)
-/* int64 atomic_set64(vint64 *value, int64 newValue) */
+/* void atomic_set64(int64* value, int64 newValue) */
 FUNCTION(atomic_set64):
     push %esi
     push %ebx
     movl 12(%esp), %esi /* value */
     movl 16(%esp), %ebx /* newValue low */
     movl 20(%esp), %ecx /* newValue high */
-_atomic_set64_1:
+1:
     movl (%esi), %eax /* testAgainst low */
     movl 4(%esi), %edx /* testAgainst high */
     lock
     cmpxchg8b (%esi)
-    jnz _atomic_set64_1
+    jnz 1b
     pop %ebx
     pop %esi
    ret
 FUNCTION_END(atomic_set64)
-/* int64 atomic_test_and_set64(vint64 *value, int64 newValue, int64 testAgainst) */
+/* void atomic_get_and_set64(int64* value, int64 newValue) */
+FUNCTION(atomic_get_and_set64):
+    push %esi
+    push %ebx
+    movl 12(%esp), %esi /* value */
+    movl 16(%esp), %ebx /* newValue low */
+    movl 20(%esp), %ecx /* newValue high */
+1:
+    movl (%esi), %eax /* testAgainst low */
+    movl 4(%esi), %edx /* testAgainst high */
+    lock
+    cmpxchg8b (%esi)
+    jnz 1b
+    pop %ebx
+    pop %esi
+    ret
+FUNCTION_END(atomic_get_and_set64)
+/* int64 atomic_test_and_set64(int64* value, int64 newValue,
+    int64 testAgainst) */
 FUNCTION(atomic_test_and_set64):
     push %esi
     push %ebx
@@ -111,12 +151,13 @@ FUNCTION(atomic_test_and_set64):
    ret
 FUNCTION_END(atomic_test_and_set64)
-/* int64 atomic_add64(vint64 *value, int64 addValue) */
+/* int64 atomic_add64(int64* value, int64 addValue) */
 FUNCTION(atomic_add64):
     push %esi
     push %ebx
     movl 12(%esp), %esi
-_atomic_add64_1:
+1:
     movl (%esi), %eax
     movl 4(%esi), %edx
     movl %eax, %ebx
@@ -125,18 +166,18 @@ _atomic_add64_1:
     adcl 20(%esp), %ecx
     lock
     cmpxchg8b (%esi)
-    jnz _atomic_add64_1
+    jnz 1b
     pop %ebx
     pop %esi
    ret
 FUNCTION_END(atomic_add64)
-/* int64 atomic_and64(vint64 *value, int64 andValue) */
+/* int64 atomic_and64(int64* value, int64 andValue) */
 FUNCTION(atomic_and64):
     push %esi
     push %ebx
     movl 12(%esp), %esi
-_atomic_and64_1:
+1:
     movl (%esi), %eax
     movl 4(%esi), %edx
     movl %eax, %ebx
@@ -145,18 +186,19 @@ _atomic_and64_1:
     andl 20(%esp), %ecx
     lock
     cmpxchg8b (%esi)
-    jnz _atomic_and64_1
+    jnz 1b
     pop %ebx
     pop %esi
    ret
 FUNCTION_END(atomic_and64)
-/* int64 atomic_or64(vint64 *value, int64 orValue) */
+/* int64 atomic_or64(int64* value, int64 orValue) */
 FUNCTION(atomic_or64):
     push %esi
     push %ebx
     movl 12(%esp), %esi
-_atomic_or64_1:
+1:
     movl (%esi), %eax
     movl 4(%esi), %edx
     movl %eax, %ebx
@@ -165,26 +207,28 @@ _atomic_or64_1:
     orl 20(%esp), %ecx
     lock
     cmpxchg8b (%esi)
-    jnz _atomic_or64_1
+    jnz 1b
     pop %ebx
     pop %esi
    ret
 FUNCTION_END(atomic_or64)
-/* int64 atomic_get64(vint64 *value) */
+/* int64 atomic_get64(int64* value) */
 FUNCTION(atomic_get64):
     push %esi
     push %ebx
     movl 12(%esp), %esi
-_atomic_get64_1:
+1:
     movl (%esi), %eax
     movl 4(%esi), %edx
     movl %eax, %ebx
     movl %edx, %ecx
     lock
     cmpxchg8b (%esi)
-    jnz _atomic_get64_1
+    jnz 1b
     pop %ebx
     pop %esi
    ret
 FUNCTION_END(atomic_get64)
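
All of the 64-bit operations above share one shape: load the current value
into %edx:%eax, compute the desired value in %ecx:%ebx, and let lock
cmpxchg8b retry until no other CPU changed the value in between. The
equivalent compare-and-swap loop in C, expressed with the
atomic_test_and_set64() declared earlier (the function name is illustrative):

#include <SupportDefs.h>

static int64
and64_via_cas(int64* value, int64 andValue)
{
    int64 oldValue;
    do {
        oldValue = *value;
        // If *value changed between the read and the CAS, the CAS fails
        // (returns a different previous value) and we try again.
    } while (atomic_test_and_set64(value, oldValue & andValue, oldValue)
        != oldValue);
    return oldValue;
}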


@@ -1,4 +1,5 @@
 /*
+ * Copyright 2013, Paweł Dziepak, pdziepak@quarnos.org.
 * Copyright 2012, Alex Smith, alex@alex-smith.me.uk.
 * Distributed under the terms of the MIT License.
 */
@@ -9,15 +10,23 @@
 .text
-/* int32 atomic_set(vint32 *value, int32 newValue) */
+/* int32 atomic_set(int32* value, int32 newValue) */
 FUNCTION(atomic_set):
-    movl %esi, %eax
-    lock
-    xchgl %eax, (%rdi)
+    sfence
+    movl %esi, (%rdi)
    ret
 FUNCTION_END(atomic_set)
-/* int32 atomic_test_and_set(vint32 *value, int32 newValue, int32 testAgainst) */
+/* int32 atomic_get_and_set(int32* value, int32 newValue) */
+FUNCTION(atomic_get_and_set):
+    xchgl %esi, (%rdi)
+    ret
+FUNCTION_END(atomic_get_and_set)
+/* int32 atomic_test_and_set(int32* value, int32 newValue, int32 testAgainst) */
 FUNCTION(atomic_test_and_set):
     movl %edx, %eax
     lock
@@ -25,15 +34,16 @@ FUNCTION(atomic_test_and_set):
    ret
 FUNCTION_END(atomic_test_and_set)
-/* int32 atomic_add(vint32 *value, int32 addValue) */
+/* int32 atomic_add(int32* value, int32 addValue) */
 FUNCTION(atomic_add):
     movl %esi, %eax
     lock
-    xaddl %eax, (%rdi)
+    xaddl %esi, (%rdi)
    ret
 FUNCTION_END(atomic_add)
-/* int32 atomic_and(vint32 *value, int32 andValue) */
+/* int32 atomic_and(int32* value, int32 andValue) */
 FUNCTION(atomic_and):
     movl (%rdi), %eax
 1:  movl %eax, %edx
@@ -46,7 +56,8 @@ FUNCTION(atomic_and):
    ret
 FUNCTION_END(atomic_and)
-/* int32 atomic_or(vint32 *value, int32 orValue) */
+/* int32 atomic_or(int32* value, int32 orValue) */
 FUNCTION(atomic_or):
     movl (%rdi), %eax
 1:  movl %eax, %edx
@@ -59,24 +70,31 @@ FUNCTION(atomic_or):
    ret
 FUNCTION_END(atomic_or)
-/* int32 atomic_get(vint32 *value) */
+/* int32 atomic_get(int32* value) */
 FUNCTION(atomic_get):
     movl (%rdi), %eax
-1:  lock
-    cmpxchgl %eax, (%rdi)
-    jnz 1b
+    lfence
    ret
 FUNCTION_END(atomic_get)
-/* int64 atomic_set64(vint64 *value, int64 newValue) */
+/* int64 atomic_set64(int64* value, int64 newValue) */
 FUNCTION(atomic_set64):
-    movq %rsi, %rax
-    lock
-    xchgq %rax, (%rdi)
+    sfence
+    movq %rsi, (%rdi)
    ret
 FUNCTION_END(atomic_set64)
-/* int64 atomic_test_and_set64(vint64 *value, int64 newValue, int64 testAgainst) */
+/* int64 atomic_get_and_set64(int64* value, int64 newValue) */
+FUNCTION(atomic_get_and_set64):
+    xchgq %rsi, (%rdi)
+    ret
+FUNCTION_END(atomic_get_and_set64)
+/* int64 atomic_test_and_set64(int64* value, int64 newValue,
+    int64 testAgainst) */
 FUNCTION(atomic_test_and_set64):
     movq %rdx, %rax
     lock
@@ -84,7 +102,8 @@ FUNCTION(atomic_test_and_set64):
    ret
 FUNCTION_END(atomic_test_and_set64)
-/* int64 atomic_add64(vint64 *value, int64 addValue) */
+/* int64 atomic_add64(int64* value, int64 addValue) */
 FUNCTION(atomic_add64):
     movq %rsi, %rax
     lock
@@ -92,7 +111,8 @@ FUNCTION(atomic_add64):
    ret
 FUNCTION_END(atomic_add64)
-/* int64 atomic_and64(vint64 *value, int64 andValue) */
+/* int64 atomic_and64(int64* value, int64 andValue) */
 FUNCTION(atomic_and64):
     movq (%rdi), %rax
 1:  movq %rax, %rdx
@@ -105,7 +125,8 @@ FUNCTION(atomic_and64):
    ret
 FUNCTION_END(atomic_and64)
-/* int64 atomic_or64(vint64 *value, int64 orValue) */
+/* int64 atomic_or64(int64* value, int64 orValue) */
 FUNCTION(atomic_or64):
     movq (%rdi), %rax
 1:  movq %rax, %rdx
@@ -118,11 +139,10 @@ FUNCTION(atomic_or64):
    ret
 FUNCTION_END(atomic_or64)
-/* int64 atomic_get64(vint64 *value) */
+/* int64 atomic_get64(int64* value) */
 FUNCTION(atomic_get64):
     movq (%rdi), %rax
-1:  lock
-    cmpxchgq %rax, (%rdi)
-    jnz 1b
+    lfence
    ret
 FUNCTION_END(atomic_get64)


@@ -16,7 +16,7 @@ enum {
 status_t
-__init_once(vint32* control, status_t (*initRoutine)(void*), void* data)
+__init_once(int32* control, status_t (*initRoutine)(void*), void* data)
 {
     // Algorithm:
     // The control variable goes through at most four states:
@@ -41,7 +41,7 @@ __init_once(vint32* control, status_t (*initRoutine)(void*), void* data)
     // we're the first -- perform the initialization
     initRoutine(data);
-    value = atomic_set(control, STATE_INITIALIZED);
+    value = atomic_get_and_set(control, STATE_INITIALIZED);
     // If someone else is waiting, we need to delete the semaphore.
     if (value >= 0)
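
A usage sketch for __init_once(): the control variable starts at
INIT_ONCE_UNINITIALIZED (defined in the lock.h hunk above) and the routine
runs exactly once, however many threads race on it. init_tables and
ensure_initialized are hypothetical names, and the kernel's lock.h is assumed
to be included:

static int32 sInitOnce = INIT_ONCE_UNINITIALIZED;

static status_t
init_tables(void* /*data*/)
{
    // one-time setup goes here
    return B_OK;
}

static void
ensure_initialized()
{
    __init_once(&sInitOnce, &init_tables, NULL);
}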


@@ -106,7 +106,7 @@ pthread_key_delete(pthread_key_t key)
     if (key < 0 || key >= PTHREAD_KEYS_MAX)
         return EINVAL;
-    int32 sequence = atomic_set(&sKeyTable[key].sequence,
+    int32 sequence = atomic_get_and_set(&sKeyTable[key].sequence,
         PTHREAD_UNUSED_SEQUENCE);
     if (sequence == PTHREAD_UNUSED_SEQUENCE)
         return EINVAL;


@@ -26,7 +26,8 @@ init_function_canceled(void* data)
     pthread_once_t* onceControl = (pthread_once_t*)data;
     // reset the control state to uninitialized
-    int32 value = atomic_set((vint32*)&onceControl->state, STATE_UNINITIALIZED);
+    int32 value = atomic_get_and_set((int32*)&onceControl->state,
+        STATE_UNINITIALIZED);
     // If someone has set a semaphore, delete it.
     if (value >= 0)
@@ -66,7 +67,8 @@ pthread_once(pthread_once_t* onceControl, void (*initRoutine)(void))
     initRoutine();
     pthread_cleanup_pop(false);
-    value = atomic_set((vint32*)&onceControl->state, STATE_INITIALIZED);
+    value = atomic_get_and_set((int32*)&onceControl->state,
+        STATE_INITIALIZED);
     // If someone else is waiting, we need to delete the semaphore.
     if (value >= 0)
@@ -105,7 +107,7 @@ pthread_once(pthread_once_t* onceControl, void (*initRoutine)(void))
             return 0;
         } else if (value == STATE_SPINNING) {
             // out of semaphores -- spin
-            while (atomic_get((vint32*)&onceControl->state) == STATE_SPINNING);
+            while (atomic_get((int32*)&onceControl->state) == STATE_SPINNING);
         }
     }
 }
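
For reference, the POSIX-level behavior this file implements:

#include <pthread.h>

static pthread_once_t sOnce = PTHREAD_ONCE_INIT;

static void
init_globals(void)
{
    // runs exactly once, even with concurrent callers
}

void
use_globals(void)
{
    pthread_once(&sOnce, &init_globals);
    // the one-time initialization has completed past this point
}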


@@ -10,86 +10,100 @@
 #include "fssh_atomic.h"
-int32_t
-fssh_atomic_set(vint32_t *value, int32_t newValue)
+void
+fssh_atomic_set(int32_t* value, int32_t newValue)
 {
-    return atomic_set((vint32*)value, newValue);
+    atomic_set((int32*)value, newValue);
 }
 int32_t
-fssh_atomic_test_and_set(vint32_t *value, int32_t newValue, int32_t testAgainst)
+fssh_atomic_get_and_set(int32_t* value, int32_t newValue)
 {
-    return atomic_test_and_set((vint32*)value, newValue, testAgainst);
+    return atomic_get_and_set((int32*)value, newValue);
 }
 int32_t
-fssh_atomic_add(vint32_t *value, int32_t addValue)
+fssh_atomic_test_and_set(int32_t *value, int32_t newValue, int32_t testAgainst)
 {
-    return atomic_add((vint32*)value, addValue);
+    return atomic_test_and_set((int32*)value, newValue, testAgainst);
 }
 int32_t
-fssh_atomic_and(vint32_t *value, int32_t andValue)
+fssh_atomic_add(int32_t *value, int32_t addValue)
 {
-    return atomic_and((vint32*)value, andValue);
+    return atomic_add((int32*)value, addValue);
 }
 int32_t
-fssh_atomic_or(vint32_t *value, int32_t orValue)
+fssh_atomic_and(int32_t *value, int32_t andValue)
 {
-    return atomic_or((vint32*)value, orValue);
+    return atomic_and((int32*)value, andValue);
 }
 int32_t
-fssh_atomic_get(vint32_t *value)
+fssh_atomic_or(int32_t *value, int32_t orValue)
 {
-    return atomic_get((vint32*)value);
+    return atomic_or((int32*)value, orValue);
 }
+int32_t
+fssh_atomic_get(int32_t *value)
+{
+    return atomic_get((int32*)value);
+}
+void
+fssh_atomic_set64(int64_t *value, int64_t newValue)
+{
+    atomic_set64((int64*)value, newValue);
+}
 int64_t
-fssh_atomic_set64(vint64_t *value, int64_t newValue)
+fssh_atomic_get_and_set64(int64_t* value, int64_t newValue)
 {
-    return atomic_set64((vint64*)value, newValue);
+    return atomic_get_and_set64((int64*)value, newValue);
 }
 int64_t
-fssh_atomic_test_and_set64(vint64_t *value, int64_t newValue, int64_t testAgainst)
+fssh_atomic_test_and_set64(int64_t *value, int64_t newValue, int64_t testAgainst)
 {
-    return atomic_test_and_set64((vint64 *)value, newValue, testAgainst);
+    return atomic_test_and_set64((int64 *)value, newValue, testAgainst);
 }
 int64_t
-fssh_atomic_add64(vint64_t *value, int64_t addValue)
+fssh_atomic_add64(int64_t *value, int64_t addValue)
 {
-    return atomic_add64((vint64*)value, addValue);
+    return atomic_add64((int64*)value, addValue);
 }
 int64_t
-fssh_atomic_and64(vint64_t *value, int64_t andValue)
+fssh_atomic_and64(int64_t *value, int64_t andValue)
 {
-    return atomic_and64((vint64*)value, andValue);
+    return atomic_and64((int64*)value, andValue);
 }
 int64_t
-fssh_atomic_or64(vint64_t *value, int64_t orValue)
+fssh_atomic_or64(int64_t *value, int64_t orValue)
 {
-    return atomic_or64((vint64*)value, orValue);
+    return atomic_or64((int64*)value, orValue);
 }
 int64_t
-fssh_atomic_get64(vint64_t *value)
+fssh_atomic_get64(int64_t *value)
 {
-    return atomic_get64((vint64*)value);
+    return atomic_get64((int64*)value);
 }


@@ -3609,10 +3609,10 @@ common_lock_node(int fd, bool kernel)
     // We need to set the locking atomically - someone
     // else might set one at the same time
 #ifdef __x86_64__
-    if (fssh_atomic_test_and_set64((vint64_t *)&vnode->mandatory_locked_by,
+    if (fssh_atomic_test_and_set64((int64_t *)&vnode->mandatory_locked_by,
         (fssh_addr_t)descriptor, 0) != 0)
 #else
-    if (fssh_atomic_test_and_set((vint32_t *)&vnode->mandatory_locked_by,
+    if (fssh_atomic_test_and_set((int32_t *)&vnode->mandatory_locked_by,
         (fssh_addr_t)descriptor, 0) != 0)
 #endif
         status = FSSH_B_BUSY;
@@ -3637,10 +3637,10 @@ common_unlock_node(int fd, bool kernel)
     // We need to set the locking atomically - someone
     // else might set one at the same time
 #ifdef __x86_64__
-    if (fssh_atomic_test_and_set64((vint64_t *)&vnode->mandatory_locked_by,
+    if (fssh_atomic_test_and_set64((int64_t *)&vnode->mandatory_locked_by,
         0, (fssh_addr_t)descriptor) != (int64_t)descriptor)
 #else
-    if (fssh_atomic_test_and_set((vint32_t *)&vnode->mandatory_locked_by,
+    if (fssh_atomic_test_and_set((int32_t *)&vnode->mandatory_locked_by,
         0, (fssh_addr_t)descriptor) != (int32_t)descriptor)
 #endif
         status = FSSH_B_BAD_VALUE;