Merge pull request #88 from archshift/remove-atomic

Removed common/atomic, instead using std::atomic

commit 5d95d038a0
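The replacement is roughly one-to-one: each of the removed Common::Atomic* wrappers has a direct std::atomic member-function equivalent. A minimal sketch of the mapping, assuming a 32-bit counter like the ones the old headers operated on (uint32_t stands in for the project's u32 typedef so the sketch is self-contained):

#include <atomic>
#include <cstdint>

std::atomic<uint32_t> target{0};

void mapping() {
    target.fetch_add(5);                                  // was Common::AtomicAdd
    target.fetch_and(0xFFu);                              // was Common::AtomicAnd
    target.fetch_or(0x100u);                              // was Common::AtomicOr
    target.fetch_add(1);                                  // was Common::AtomicIncrement
    target.fetch_sub(1);                                  // was Common::AtomicDecrement
    uint32_t v = target.load(std::memory_order_acquire);  // was Common::AtomicLoadAcquire
    target.store(v, std::memory_order_release);           // was Common::AtomicStoreRelease
}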
					
@@ -23,9 +23,6 @@ set(SRCS
             )
 
 set(HEADERS
-            atomic.h
-            atomic_gcc.h
-            atomic_win32.h
             bit_field.h
             break_points.h
             chunk_file.h
@@ -1,16 +0,0 @@
-// Copyright 2013 Dolphin Emulator Project
-// Licensed under GPLv2
-// Refer to the license.txt file included.
-
-#pragma once
-
-#ifdef _WIN32
-
-#include "common/atomic_win32.h"
-
-#else
-
-// GCC-compatible compiler assumed!
-#include "common/atomic_gcc.h"
-
-#endif
@@ -1,110 +0,0 @@
-// Copyright 2013 Dolphin Emulator Project
-// Licensed under GPLv2
-// Refer to the license.txt file included.
-
-#pragma once
-
-#include "common/common.h"
-
-// Atomic operations are performed in a single step by the CPU. It is
-// impossible for other threads to see the operation "half-done."
-//
-// Some atomic operations can be combined with different types of memory
-// barriers called "Acquire semantics" and "Release semantics", defined below.
-//
-// Acquire semantics: Future memory accesses cannot be relocated to before the
-//                    operation.
-//
-// Release semantics: Past memory accesses cannot be relocated to after the
-//                    operation.
-//
-// These barriers affect not only the compiler, but also the CPU.
-
-namespace Common
-{
-
-inline void AtomicAdd(volatile u32& target, u32 value) {
-    __sync_add_and_fetch(&target, value);
-}
-
-inline void AtomicAnd(volatile u32& target, u32 value) {
-    __sync_and_and_fetch(&target, value);
-}
-
-inline void AtomicDecrement(volatile u32& target) {
-    __sync_add_and_fetch(&target, -1);
-}
-
-inline void AtomicIncrement(volatile u32& target) {
-    __sync_add_and_fetch(&target, 1);
-}
-
-inline u32 AtomicLoad(volatile u32& src) {
-    return src; // 32-bit reads are always atomic.
-}
-inline u32 AtomicLoadAcquire(volatile u32& src) {
-    // Keep the compiler from caching any memory references.
-    u32 result = src; // 32-bit reads are always atomic.
-    //__sync_synchronize(); // TODO: May not be necessary.
-    // Compiler instruction only. x86 loads always have acquire semantics.
-    __asm__ __volatile__ ( "":::"memory" );
-    return result;
-}
-
-inline void AtomicOr(volatile u32& target, u32 value) {
-    __sync_or_and_fetch(&target, value);
-}
-
-inline void AtomicStore(volatile u32& dest, u32 value) {
-    dest = value; // 32-bit writes are always atomic.
-}
-inline void AtomicStoreRelease(volatile u32& dest, u32 value) {
-    __sync_lock_test_and_set(&dest, value); // TODO: Wrong! This function has acquire semantics.
-}
-
-}
-
-// Old code kept here for reference in case we need the parts with __asm__ __volatile__.
-#if 0
-LONG SyncInterlockedIncrement(LONG *Dest)
-{
-#if defined(__GNUC__) && defined (__GNUC_MINOR__) && ((4 < __GNUC__) || (4 == __GNUC__ && 1 <= __GNUC_MINOR__))
-  return __sync_add_and_fetch(Dest, 1);
-#else
-  register int result;
-  __asm__ __volatile__("lock; xadd %0,%1"
-                       : "=r" (result), "=m" (*Dest)
-                       : "0" (1), "m" (*Dest)
-                       : "memory");
-  return result;
-#endif
-}
-
-LONG SyncInterlockedExchangeAdd(LONG *Dest, LONG Val)
-{
-#if defined(__GNUC__) && defined (__GNUC_MINOR__) && ((4 < __GNUC__) || (4 == __GNUC__ && 1 <= __GNUC_MINOR__))
-  return __sync_add_and_fetch(Dest, Val);
-#else
-  register int result;
-  __asm__ __volatile__("lock; xadd %0,%1"
-                       : "=r" (result), "=m" (*Dest)
-                       : "0" (Val), "m" (*Dest)
-                       : "memory");
-  return result;
-#endif
-}
-
-LONG SyncInterlockedExchange(LONG *Dest, LONG Val)
-{
-#if defined(__GNUC__) && defined (__GNUC_MINOR__) && ((4 < __GNUC__) || (4 == __GNUC__ && 1 <= __GNUC_MINOR__))
-  return __sync_lock_test_and_set(Dest, Val);
-#else
-  register int result;
-  __asm__ __volatile__("lock; xchg %0,%1"
-                       : "=r" (result), "=m" (*Dest)
-                       : "0" (Val), "m" (*Dest)
-                       : "memory");
-  return result;
-#endif
-}
-#endif
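The TODO in the removed GCC header flags the bug this PR eliminates: __sync_lock_test_and_set is an atomic exchange with acquire semantics, so AtomicStoreRelease never provided the release ordering its name promises on weakly ordered architectures. With std::atomic the intent is stated directly; a minimal sketch (flag is an illustrative name, not from the PR):

#include <atomic>
#include <cstdint>

std::atomic<uint32_t> flag{0};

void publish() {
    // A true release store: all earlier writes in this thread become visible
    // to any thread that later reads flag with acquire ordering.
    flag.store(1, std::memory_order_release);
}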
@@ -1,69 +0,0 @@
-// Copyright 2013 Dolphin Emulator Project
-// Licensed under GPLv2
-// Refer to the license.txt file included.
-
-#pragma once
-
-#include "common/common.h"
-#include <intrin.h>
-#include <Windows.h>
-
-// Atomic operations are performed in a single step by the CPU. It is
-// impossible for other threads to see the operation "half-done."
-//
-// Some atomic operations can be combined with different types of memory
-// barriers called "Acquire semantics" and "Release semantics", defined below.
-//
-// Acquire semantics: Future memory accesses cannot be relocated to before the
-//                    operation.
-//
-// Release semantics: Past memory accesses cannot be relocated to after the
-//                    operation.
-//
-// These barriers affect not only the compiler, but also the CPU.
-//
-// NOTE: Acquire and Release are not differentiated right now. They perform a
-// full memory barrier instead of a "one-way" memory barrier. The newest
-// Windows SDK has Acquire and Release versions of some Interlocked* functions.
-
-namespace Common
-{
-
-inline void AtomicAdd(volatile u32& target, u32 value) {
-    InterlockedExchangeAdd((volatile LONG*)&target, (LONG)value);
-}
-
-inline void AtomicAnd(volatile u32& target, u32 value) {
-    _InterlockedAnd((volatile LONG*)&target, (LONG)value);
-}
-
-inline void AtomicIncrement(volatile u32& target) {
-    InterlockedIncrement((volatile LONG*)&target);
-}
-
-inline void AtomicDecrement(volatile u32& target) {
-    InterlockedDecrement((volatile LONG*)&target);
-}
-
-inline u32 AtomicLoad(volatile u32& src) {
-    return src; // 32-bit reads are always atomic.
-}
-inline u32 AtomicLoadAcquire(volatile u32& src) {
-    u32 result = src; // 32-bit reads are always atomic.
-    _ReadBarrier(); // Compiler instruction only. x86 loads always have acquire semantics.
-    return result;
-}
-
-inline void AtomicOr(volatile u32& target, u32 value) {
-    _InterlockedOr((volatile LONG*)&target, (LONG)value);
-}
-
-inline void AtomicStore(volatile u32& dest, u32 value) {
-    dest = value; // 32-bit writes are always atomic.
-}
-inline void AtomicStoreRelease(volatile u32& dest, u32 value) {
-    _WriteBarrier(); // Compiler instruction only. x86 stores always have release semantics.
-    dest = value; // 32-bit writes are always atomic.
-}
-
-}
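The NOTE in the removed Win32 header points at the other gap std::atomic closes: the Interlocked* calls used there are full barriers, while std::memory_order lets each operation request exactly the ordering it needs. A brief sketch of the distinction (illustrative names only):

#include <atomic>
#include <cstdint>

std::atomic<uint32_t> counter{0};

void orderings() {
    counter.fetch_add(1, std::memory_order_relaxed); // atomicity only, no ordering
    counter.fetch_add(1, std::memory_order_acq_rel); // acquire on the read half, release on the write half
    counter.fetch_add(1);                            // defaults to seq_cst, a full barrier
}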
@@ -4,10 +4,10 @@
 
 #include <vector>
 #include <cstdio>
+#include <atomic>
 
 #include "common/msg_handler.h"
 #include "common/std_mutex.h"
-#include "common/atomic.h"
 #include "common/chunk_file.h"
 
 #include "core/core_timing.h"
@@ -54,7 +54,7 @@ Event *eventPool = 0;
 Event *eventTsPool = 0;
 int allocatedTsEvents = 0;
 // Optimization to skip MoveEvents when possible.
-volatile u32 hasTsEvents = false;
+std::atomic<u32> hasTsEvents;
 
 // Downcount has been moved to currentMIPS, to save a couple of clocks in every ARM JIT block
 // as we can already reach that structure through a register.
@@ -202,7 +202,7 @@ void ScheduleEvent_Threadsafe(s64 cyclesIntoFuture, int event_type, u64 userdata
         tsLast->next = ne;
     tsLast = ne;
 
-    Common::AtomicStoreRelease(hasTsEvents, 1);
+    hasTsEvents.store(1, std::memory_order_release);
 }
 
 // Same as ScheduleEvent_Threadsafe(0, ...) EXCEPT if we are already on the CPU thread
@@ -484,7 +484,7 @@ void ProcessFifoWaitEvents()
 
 void MoveEvents()
 {
-    Common::AtomicStoreRelease(hasTsEvents, 0);
+    hasTsEvents.store(0, std::memory_order_release);
 
     std::lock_guard<std::recursive_mutex> lk(externalEventSection);
     // Move events from async queue into main queue
			
		||||
		Loading…
	
	
			
			x
			
			
		
	
		Reference in New Issue
	
	Block a user