mesa: update to the latest u_atomic.h

This requires GCC 4.1, which introduced the __sync builtins that the header's fallback path relies on.

Signed-off-by: Chia-I Wu <olvaffe@gmail.com>
Reviewed-by: Yiwei Zhang <zzyiwei@chromium.org>
Reviewed-by: Ryan Neph <ryanneph@google.com>
Acked-by: Gert Wollny <gert.wollny@collabora.com>
Branch: macos/master
Author: Chia-I Wu, 3 years ago
Parent: 10b89464a3
Commit: 50f448ee49
Changed files (lines changed):
  1. meson.build (4)
  2. src/gallium/auxiliary/util/u_atomic.h (352)
  3. src/gallium/meson.build (1)
  4. src/mesa/util/u_atomic.h (272)
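For context, a minimal caller-side sketch of how the p_atomic_* helpers provided by the rewritten header are typically consumed, e.g. for reference counting. The struct and function names are hypothetical and not part of the patch; it assumes src/mesa/util is reachable as "util/" on the include path:

#include <stdint.h>
#include <stdlib.h>
#include "util/u_atomic.h"

/* Hypothetical reference-counted object built on the p_atomic_* helpers. */
struct object {
   int32_t refcount;   /* only ever touched through p_atomic_* */
};

static inline void
object_reference(struct object *obj)
{
   p_atomic_inc(&obj->refcount);             /* atomic increment */
}

static inline void
object_unreference(struct object *obj)
{
   if (p_atomic_dec_zero(&obj->refcount))    /* atomic decrement, true when it hits zero */
      free(obj);
}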

@@ -44,6 +44,10 @@ revision = 3
cc = meson.get_compiler('c')
if cc.get_id() == 'gcc' and cc.version().version_compare('< 4.1')
  error('When using GCC, version 4.1 or later is required.')
endif

warnings = [
  '-Werror=implicit-function-declaration',
  '-Werror=missing-prototypes',
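The version check added above exists because the rewritten header's fallback path relies on the __sync builtins, which GCC has provided since 4.1. A minimal sketch of what that fallback boils down to (illustrative only, not part of the patch):

#include <stdint.h>

/* With GCC >= 4.1 this lowers to a single lock-prefixed add on x86. */
static inline int32_t
counter_next(int32_t *counter)
{
   return __sync_add_and_fetch(counter, 1);
}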

@@ -1,352 +0,0 @@
/**
* Many similar implementations exist. See for example libwsbm
* or the linux kernel include/atomic.h
*
* No copyright claimed on this file.
*
*/
#ifndef U_ATOMIC_H
#define U_ATOMIC_H
#include "pipe/p_compiler.h"
#include "pipe/p_defines.h"
/* Favor OS-provided implementations.
*
* Where no OS-provided implementation is available, fall back to
* locally coded assembly, compiler intrinsic or ultimately a
* mutex-based implementation.
*/
#if defined(PIPE_OS_SOLARIS)
#define PIPE_ATOMIC_OS_SOLARIS
#elif defined(PIPE_CC_MSVC)
#define PIPE_ATOMIC_MSVC_INTRINSIC
#elif (defined(PIPE_CC_MSVC) && defined(PIPE_ARCH_X86))
#define PIPE_ATOMIC_ASM_MSVC_X86
#elif (defined(PIPE_CC_GCC) && defined(PIPE_ARCH_X86))
#define PIPE_ATOMIC_ASM_GCC_X86
#elif (defined(PIPE_CC_GCC) && defined(PIPE_ARCH_X86_64))
#define PIPE_ATOMIC_ASM_GCC_X86_64
#elif defined(PIPE_CC_GCC) && (PIPE_CC_GCC_VERSION >= 401)
#define PIPE_ATOMIC_GCC_INTRINSIC
#else
#error "Unsupported platform"
#endif
#if defined(PIPE_ATOMIC_ASM_GCC_X86_64)
#define PIPE_ATOMIC "GCC x86_64 assembly"
#ifdef __cplusplus
extern "C" {
#endif
#define p_atomic_set(_v, _i) (*(_v) = (_i))
#define p_atomic_read(_v) (*(_v))
__attribute__((no_sanitize("memory")))
static inline boolean
p_atomic_dec_zero(int32_t *v)
{
   unsigned char c;

   __asm__ __volatile__("lock; decl %0; sete %1":"+m"(*v), "=qm"(c)
                        ::"memory");

   return c != 0;
}

static inline void
p_atomic_inc(int32_t *v)
{
   __asm__ __volatile__("lock; incl %0":"+m"(*v));
}

static inline void
p_atomic_dec(int32_t *v)
{
   __asm__ __volatile__("lock; decl %0":"+m"(*v));
}

static inline int32_t
p_atomic_cmpxchg(int32_t *v, int32_t old, int32_t _new)
{
   return __sync_val_compare_and_swap(v, old, _new);
}
#ifdef __cplusplus
}
#endif
#endif /* PIPE_ATOMIC_ASM_GCC_X86_64 */
#if defined(PIPE_ATOMIC_ASM_GCC_X86)
#define PIPE_ATOMIC "GCC x86 assembly"
#ifdef __cplusplus
extern "C" {
#endif
#define p_atomic_set(_v, _i) (*(_v) = (_i))
#define p_atomic_read(_v) (*(_v))
__attribute__((no_sanitize("memory")))
static inline boolean
p_atomic_dec_zero(int32_t *v)
{
   unsigned char c;

   __asm__ __volatile__("lock; decl %0; sete %1":"+m"(*v), "=qm"(c)
                        ::"memory");

   return c != 0;
}

static inline void
p_atomic_inc(int32_t *v)
{
   __asm__ __volatile__("lock; incl %0":"+m"(*v));
}

static inline void
p_atomic_dec(int32_t *v)
{
   __asm__ __volatile__("lock; decl %0":"+m"(*v));
}

static inline int32_t
p_atomic_cmpxchg(int32_t *v, int32_t old, int32_t _new)
{
   return __sync_val_compare_and_swap(v, old, _new);
}
#ifdef __cplusplus
}
#endif
#endif
/* Implementation using GCC-provided synchronization intrinsics
*/
#if defined(PIPE_ATOMIC_GCC_INTRINSIC)
#define PIPE_ATOMIC "GCC Sync Intrinsics"
#ifdef __cplusplus
extern "C" {
#endif
#define p_atomic_set(_v, _i) (*(_v) = (_i))
#define p_atomic_read(_v) (*(_v))
static inline boolean
p_atomic_dec_zero(int32_t *v)
{
   return (__sync_sub_and_fetch(v, 1) == 0);
}

static inline void
p_atomic_inc(int32_t *v)
{
   (void) __sync_add_and_fetch(v, 1);
}

static inline void
p_atomic_dec(int32_t *v)
{
   (void) __sync_sub_and_fetch(v, 1);
}

static inline int32_t
p_atomic_cmpxchg(int32_t *v, int32_t old, int32_t _new)
{
   return __sync_val_compare_and_swap(v, old, _new);
}
#ifdef __cplusplus
}
#endif
#endif
/* Unlocked version for single threaded environments, such as some
* windows kernel modules.
*/
#if defined(PIPE_ATOMIC_OS_UNLOCKED)
#define PIPE_ATOMIC "Unlocked"
#define p_atomic_set(_v, _i) (*(_v) = (_i))
#define p_atomic_read(_v) (*(_v))
#define p_atomic_dec_zero(_v) ((boolean) --(*(_v)))
#define p_atomic_inc(_v) ((void) (*(_v))++)
#define p_atomic_dec(_v) ((void) (*(_v))--)
#define p_atomic_cmpxchg(_v, old, _new) (*(_v) == old ? *(_v) = (_new) : *(_v))
#endif
/* Locally coded assembly for MSVC on x86:
*/
#if defined(PIPE_ATOMIC_ASM_MSVC_X86)
#define PIPE_ATOMIC "MSVC x86 assembly"
#ifdef __cplusplus
extern "C" {
#endif
#define p_atomic_set(_v, _i) (*(_v) = (_i))
#define p_atomic_read(_v) (*(_v))
__attribute__((no_sanitize("memory")))
static inline boolean
p_atomic_dec_zero(int32_t *v)
{
   unsigned char c;
   __asm {
      mov eax, [v]
      lock dec dword ptr [eax]
      sete byte ptr [c]
   }
   return c != 0;
}

static inline void
p_atomic_inc(int32_t *v)
{
   __asm {
      mov eax, [v]
      lock inc dword ptr [eax]
   }
}

static inline void
p_atomic_dec(int32_t *v)
{
   __asm {
      mov eax, [v]
      lock dec dword ptr [eax]
   }
}

static inline int32_t
p_atomic_cmpxchg(int32_t *v, int32_t old, int32_t _new)
{
   int32_t orig;
   __asm {
      mov ecx, [v]
      mov eax, [old]
      mov edx, [_new]
      lock cmpxchg [ecx], edx
      mov [orig], eax
   }
   return orig;
}
#ifdef __cplusplus
}
#endif
#endif
#if defined(PIPE_ATOMIC_MSVC_INTRINSIC)
#define PIPE_ATOMIC "MSVC Intrinsics"
#include <intrin.h>
#pragma intrinsic(_InterlockedIncrement)
#pragma intrinsic(_InterlockedDecrement)
#pragma intrinsic(_InterlockedCompareExchange)
#ifdef __cplusplus
extern "C" {
#endif
#define p_atomic_set(_v, _i) (*(_v) = (_i))
#define p_atomic_read(_v) (*(_v))
static inline boolean
p_atomic_dec_zero(int32_t *v)
{
   return _InterlockedDecrement((long *)v) == 0;
}

static inline void
p_atomic_inc(int32_t *v)
{
   _InterlockedIncrement((long *)v);
}

static inline void
p_atomic_dec(int32_t *v)
{
   _InterlockedDecrement((long *)v);
}

static inline int32_t
p_atomic_cmpxchg(int32_t *v, int32_t old, int32_t _new)
{
   return _InterlockedCompareExchange((long *)v, _new, old);
}
#ifdef __cplusplus
}
#endif
#endif
#if defined(PIPE_ATOMIC_OS_SOLARIS)
#define PIPE_ATOMIC "Solaris OS atomic functions"
#include <atomic.h>
#ifdef __cplusplus
extern "C" {
#endif
#define p_atomic_set(_v, _i) (*(_v) = (_i))
#define p_atomic_read(_v) (*(_v))
static inline boolean
p_atomic_dec_zero(int32_t *v)
{
   uint32_t n = atomic_dec_32_nv((uint32_t *) v);

   return n != 0;
}
#define p_atomic_inc(_v) atomic_inc_32((uint32_t *) _v)
#define p_atomic_dec(_v) atomic_dec_32((uint32_t *) _v)
#define p_atomic_cmpxchg(_v, _old, _new) \
atomic_cas_32( (uint32_t *) _v, (uint32_t) _old, (uint32_t) _new)
#ifdef __cplusplus
}
#endif
#endif
#ifndef PIPE_ATOMIC
#error "No pipe_atomic implementation selected"
#endif
#endif /* U_ATOMIC_H */
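The header removed above only covered int32_t. The replacement added below is size-generic: its MSVC and Solaris paths pick an intrinsic by operand size through a chain of sizeof comparisons that the compiler folds at build time. A minimal sketch of that idiom, using a hypothetical macro name and the GCC __sync builtins rather than the Windows/Solaris functions (illustrative only, not part of the patch):

#include <assert.h>
#include <stdint.h>

/* Hypothetical size-dispatch macro: the sizeof tests are compile-time
 * constants, so only the matching branch survives constant folding, and
 * the outer cast restores the operand's own type. */
#define my_atomic_inc_return(v) ((__typeof(*(v))) (                             \
   sizeof *(v) == sizeof(uint32_t) ? __sync_add_and_fetch((uint32_t *)(v), 1) : \
   sizeof *(v) == sizeof(uint64_t) ? __sync_add_and_fetch((uint64_t *)(v), 1) : \
   (assert(!"unsupported operand size"), 0)))

The same pattern, with more widths and the platform's own intrinsics, is what the new p_atomic_inc_return, p_atomic_add_return and p_atomic_cmpxchg macros below expand to.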

@@ -53,7 +53,6 @@ sources_libgallium = [
'auxiliary/util/u_half.h',
'auxiliary/util/u_prim.h',
'auxiliary/util/u_debug_describe.c',
'auxiliary/util/u_atomic.h',
'auxiliary/util/xxhash.h',
'auxiliary/cso_cache/cso_hash.h',
'auxiliary/cso_cache/cso_cache.h',

@@ -0,0 +1,272 @@
/**
* Many similar implementations exist. See for example libwsbm
* or the linux kernel include/atomic.h
*
* No copyright claimed on this file.
*
*/
#include "no_extern_c.h"
#ifndef U_ATOMIC_H
#define U_ATOMIC_H
#include <stdbool.h>
#include <stdint.h>
/* Favor OS-provided implementations.
*
* Where no OS-provided implementation is available, fall back to
* locally coded assembly, compiler intrinsic or ultimately a
* mutex-based implementation.
*/
#if defined(__sun)
#define PIPE_ATOMIC_OS_SOLARIS
#elif defined(_MSC_VER)
#define PIPE_ATOMIC_MSVC_INTRINSIC
#elif defined(__GNUC__)
#define PIPE_ATOMIC_GCC_INTRINSIC
#else
#error "Unsupported platform"
#endif
/* Implementation using GCC-provided synchronization intrinsics
*/
#if defined(PIPE_ATOMIC_GCC_INTRINSIC)
#define PIPE_ATOMIC "GCC Sync Intrinsics"
#if defined(USE_GCC_ATOMIC_BUILTINS)
/* The builtins with explicit memory model are available since GCC 4.7. */
#define p_atomic_set(_v, _i) __atomic_store_n((_v), (_i), __ATOMIC_RELEASE)
#define p_atomic_read(_v) __atomic_load_n((_v), __ATOMIC_ACQUIRE)
#define p_atomic_read_relaxed(_v) __atomic_load_n((_v), __ATOMIC_RELAXED)
#define p_atomic_dec_zero(v) (__atomic_sub_fetch((v), 1, __ATOMIC_ACQ_REL) == 0)
#define p_atomic_inc(v) (void) __atomic_add_fetch((v), 1, __ATOMIC_ACQ_REL)
#define p_atomic_dec(v) (void) __atomic_sub_fetch((v), 1, __ATOMIC_ACQ_REL)
#define p_atomic_add(v, i) (void) __atomic_add_fetch((v), (i), __ATOMIC_ACQ_REL)
#define p_atomic_inc_return(v) __atomic_add_fetch((v), 1, __ATOMIC_ACQ_REL)
#define p_atomic_dec_return(v) __atomic_sub_fetch((v), 1, __ATOMIC_ACQ_REL)
#define p_atomic_add_return(v, i) __atomic_add_fetch((v), (i), __ATOMIC_ACQ_REL)
#define p_atomic_xchg(v, i) __atomic_exchange_n((v), (i), __ATOMIC_ACQ_REL)
#define PIPE_NATIVE_ATOMIC_XCHG
#else
#define p_atomic_set(_v, _i) (*(_v) = (_i))
#define p_atomic_read(_v) (*(_v))
#define p_atomic_read_relaxed(_v) (*(_v))
#define p_atomic_dec_zero(v) (__sync_sub_and_fetch((v), 1) == 0)
#define p_atomic_inc(v) (void) __sync_add_and_fetch((v), 1)
#define p_atomic_dec(v) (void) __sync_sub_and_fetch((v), 1)
#define p_atomic_add(v, i) (void) __sync_add_and_fetch((v), (i))
#define p_atomic_inc_return(v) __sync_add_and_fetch((v), 1)
#define p_atomic_dec_return(v) __sync_sub_and_fetch((v), 1)
#define p_atomic_add_return(v, i) __sync_add_and_fetch((v), (i))
#endif
/* There is no __atomic_* compare and exchange that returns the current value.
* Also, GCC 5.4 seems unable to optimize a compound statement expression that
* uses an additional stack variable with __atomic_compare_exchange[_n].
*/
#define p_atomic_cmpxchg(v, old, _new) \
__sync_val_compare_and_swap((v), (old), (_new))
#endif
/* Unlocked version for single threaded environments, such as some
* windows kernel modules.
*/
#if defined(PIPE_ATOMIC_OS_UNLOCKED)
#define PIPE_ATOMIC "Unlocked"
#define p_atomic_set(_v, _i) (*(_v) = (_i))
#define p_atomic_read(_v) (*(_v))
#define p_atomic_read_relaxed(_v) (*(_v))
#define p_atomic_dec_zero(_v) (p_atomic_dec_return(_v) == 0)
#define p_atomic_inc(_v) ((void) p_atomic_inc_return(_v))
#define p_atomic_dec(_v) ((void) p_atomic_dec_return(_v))
#define p_atomic_add(_v, _i) ((void) p_atomic_add_return((_v), (_i)))
#define p_atomic_inc_return(_v) (++(*(_v)))
#define p_atomic_dec_return(_v) (--(*(_v)))
#define p_atomic_add_return(_v, _i) (*(_v) = *(_v) + (_i))
#define p_atomic_cmpxchg(_v, _old, _new) (*(_v) == (_old) ? (*(_v) = (_new), (_old)) : *(_v))
#endif
#if defined(PIPE_ATOMIC_MSVC_INTRINSIC)
#define PIPE_ATOMIC "MSVC Intrinsics"
/* We use the Windows header's Interlocked*64 functions instead of the
* _Interlocked*64 intrinsics wherever we can, as support for the latter varies
* with target CPU, whereas Windows headers take care of all portability
* issues: using intrinsics where available, falling back to library
* implementations where not.
*/
#ifndef WIN32_LEAN_AND_MEAN
#define WIN32_LEAN_AND_MEAN 1
#endif
#include <windows.h>
#include <intrin.h>
#include <assert.h>
/* MSVC supports decltype keyword, but it's only supported on C++ and doesn't
* quite work here; and if a C++-only solution is worthwhile, then it would be
* better to use templates / function overloading, instead of decltype magic.
* Therefore, we rely on implicit casting to LONGLONG for the functions that return
*/
#define p_atomic_set(_v, _i) (*(_v) = (_i))
#define p_atomic_read(_v) (*(_v))
#define p_atomic_read_relaxed(_v) (*(_v))
#define p_atomic_dec_zero(_v) \
(p_atomic_dec_return(_v) == 0)
#define p_atomic_inc(_v) \
((void) p_atomic_inc_return(_v))
#define p_atomic_inc_return(_v) (\
sizeof *(_v) == sizeof(short) ? _InterlockedIncrement16((short *) (_v)) : \
sizeof *(_v) == sizeof(long) ? _InterlockedIncrement ((long *) (_v)) : \
sizeof *(_v) == sizeof(__int64) ? InterlockedIncrement64 ((__int64 *)(_v)) : \
(assert(!"should not get here"), 0))
#define p_atomic_dec(_v) \
((void) p_atomic_dec_return(_v))
#define p_atomic_dec_return(_v) (\
sizeof *(_v) == sizeof(short) ? _InterlockedDecrement16((short *) (_v)) : \
sizeof *(_v) == sizeof(long) ? _InterlockedDecrement ((long *) (_v)) : \
sizeof *(_v) == sizeof(__int64) ? InterlockedDecrement64 ((__int64 *)(_v)) : \
(assert(!"should not get here"), 0))
#define p_atomic_add(_v, _i) \
((void) p_atomic_add_return((_v), (_i)))
#define p_atomic_add_return(_v, _i) (\
sizeof *(_v) == sizeof(char) ? _InterlockedExchangeAdd8 ((char *) (_v), (_i)) : \
sizeof *(_v) == sizeof(short) ? _InterlockedExchangeAdd16((short *) (_v), (_i)) : \
sizeof *(_v) == sizeof(long) ? _InterlockedExchangeAdd ((long *) (_v), (_i)) : \
sizeof *(_v) == sizeof(__int64) ? InterlockedExchangeAdd64((__int64 *)(_v), (_i)) : \
(assert(!"should not get here"), 0))
#define p_atomic_cmpxchg(_v, _old, _new) (\
sizeof *(_v) == sizeof(char) ? _InterlockedCompareExchange8 ((char *) (_v), (char) (_new), (char) (_old)) : \
sizeof *(_v) == sizeof(short) ? _InterlockedCompareExchange16((short *) (_v), (short) (_new), (short) (_old)) : \
sizeof *(_v) == sizeof(long) ? _InterlockedCompareExchange ((long *) (_v), (long) (_new), (long) (_old)) : \
sizeof *(_v) == sizeof(__int64) ? InterlockedCompareExchange64 ((__int64 *)(_v), (__int64)(_new), (__int64)(_old)) : \
(assert(!"should not get here"), 0))
#endif
#if defined(PIPE_ATOMIC_OS_SOLARIS)
#define PIPE_ATOMIC "Solaris OS atomic functions"
#include <atomic.h>
#include <assert.h>
#define p_atomic_set(_v, _i) (*(_v) = (_i))
#define p_atomic_read(_v) (*(_v))
#define p_atomic_dec_zero(v) (\
sizeof(*v) == sizeof(uint8_t) ? atomic_dec_8_nv ((uint8_t *)(v)) == 0 : \
sizeof(*v) == sizeof(uint16_t) ? atomic_dec_16_nv((uint16_t *)(v)) == 0 : \
sizeof(*v) == sizeof(uint32_t) ? atomic_dec_32_nv((uint32_t *)(v)) == 0 : \
sizeof(*v) == sizeof(uint64_t) ? atomic_dec_64_nv((uint64_t *)(v)) == 0 : \
(assert(!"should not get here"), 0))
#define p_atomic_inc(v) (void) (\
sizeof(*v) == sizeof(uint8_t) ? atomic_inc_8 ((uint8_t *)(v)) : \
sizeof(*v) == sizeof(uint16_t) ? atomic_inc_16((uint16_t *)(v)) : \
sizeof(*v) == sizeof(uint32_t) ? atomic_inc_32((uint32_t *)(v)) : \
sizeof(*v) == sizeof(uint64_t) ? atomic_inc_64((uint64_t *)(v)) : \
(assert(!"should not get here"), 0))
#define p_atomic_inc_return(v) (__typeof(*v))( \
sizeof(*v) == sizeof(uint8_t) ? atomic_inc_8_nv ((uint8_t *)(v)) : \
sizeof(*v) == sizeof(uint16_t) ? atomic_inc_16_nv((uint16_t *)(v)) : \
sizeof(*v) == sizeof(uint32_t) ? atomic_inc_32_nv((uint32_t *)(v)) : \
sizeof(*v) == sizeof(uint64_t) ? atomic_inc_64_nv((uint64_t *)(v)) : \
(assert(!"should not get here"), 0))
#define p_atomic_dec(v) (void) ( \
sizeof(*v) == sizeof(uint8_t) ? atomic_dec_8 ((uint8_t *)(v)) : \
sizeof(*v) == sizeof(uint16_t) ? atomic_dec_16((uint16_t *)(v)) : \
sizeof(*v) == sizeof(uint32_t) ? atomic_dec_32((uint32_t *)(v)) : \
sizeof(*v) == sizeof(uint64_t) ? atomic_dec_64((uint64_t *)(v)) : \
(assert(!"should not get here"), 0))
#define p_atomic_dec_return(v) (__typeof(*v))( \
sizeof(*v) == sizeof(uint8_t) ? atomic_dec_8_nv ((uint8_t *)(v)) : \
sizeof(*v) == sizeof(uint16_t) ? atomic_dec_16_nv((uint16_t *)(v)) : \
sizeof(*v) == sizeof(uint32_t) ? atomic_dec_32_nv((uint32_t *)(v)) : \
sizeof(*v) == sizeof(uint64_t) ? atomic_dec_64_nv((uint64_t *)(v)) : \
(assert(!"should not get here"), 0))
#define p_atomic_add(v, i) (void) ( \
sizeof(*v) == sizeof(uint8_t) ? atomic_add_8 ((uint8_t *)(v), (i)) : \
sizeof(*v) == sizeof(uint16_t) ? atomic_add_16((uint16_t *)(v), (i)) : \
sizeof(*v) == sizeof(uint32_t) ? atomic_add_32((uint32_t *)(v), (i)) : \
sizeof(*v) == sizeof(uint64_t) ? atomic_add_64((uint64_t *)(v), (i)) : \
(assert(!"should not get here"), 0))
#define p_atomic_add_return(v, i) (__typeof(*v)) ( \
sizeof(*v) == sizeof(uint8_t) ? atomic_add_8_nv ((uint8_t *)(v), (i)) : \
sizeof(*v) == sizeof(uint16_t) ? atomic_add_16_nv((uint16_t *)(v), (i)) : \
sizeof(*v) == sizeof(uint32_t) ? atomic_add_32_nv((uint32_t *)(v), (i)) : \
sizeof(*v) == sizeof(uint64_t) ? atomic_add_64_nv((uint64_t *)(v), (i)) : \
(assert(!"should not get here"), 0))
#define p_atomic_cmpxchg(v, old, _new) (__typeof(*v))( \
sizeof(*v) == sizeof(uint8_t) ? atomic_cas_8 ((uint8_t *)(v), (uint8_t )(old), (uint8_t )(_new)) : \
sizeof(*v) == sizeof(uint16_t) ? atomic_cas_16((uint16_t *)(v), (uint16_t)(old), (uint16_t)(_new)) : \
sizeof(*v) == sizeof(uint32_t) ? atomic_cas_32((uint32_t *)(v), (uint32_t)(old), (uint32_t)(_new)) : \
sizeof(*v) == sizeof(uint64_t) ? atomic_cas_64((uint64_t *)(v), (uint64_t)(old), (uint64_t)(_new)) : \
(assert(!"should not get here"), 0))
#endif
#ifndef PIPE_ATOMIC
#error "No pipe_atomic implementation selected"
#endif
#ifndef PIPE_NATIVE_ATOMIC_XCHG
static inline uint32_t p_atomic_xchg_32(uint32_t *v, uint32_t i)
{
   uint32_t actual = p_atomic_read(v);
   uint32_t expected;
   do {
      expected = actual;
      actual = p_atomic_cmpxchg(v, expected, i);
   } while (expected != actual);
   return actual;
}

static inline uint64_t p_atomic_xchg_64(uint64_t *v, uint64_t i)
{
   uint64_t actual = p_atomic_read(v);
   uint64_t expected;
   do {
      expected = actual;
      actual = p_atomic_cmpxchg(v, expected, i);
   } while (expected != actual);
   return actual;
}
#define p_atomic_xchg(v, i) (__typeof(*(v)))( \
sizeof(*(v)) == sizeof(uint32_t) ? p_atomic_xchg_32((uint32_t *)(v), (uint32_t)(i)) : \
sizeof(*(v)) == sizeof(uint64_t) ? p_atomic_xchg_64((uint64_t *)(v), (uint64_t)(i)) : \
(assert(!"should not get here"), 0))
#endif
#endif /* U_ATOMIC_H */
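To illustrate the compare-and-swap primitive that both p_atomic_cmpxchg and the emulated p_atomic_xchg above build on, here is a caller-side sketch of a lock-free "store the maximum" update; the function name is hypothetical and not part of the patch:

#include <stdint.h>
#include "util/u_atomic.h"

/* Retry loop: p_atomic_cmpxchg returns the previous value, so a mismatch
 * means another thread updated *max first and we retry against its value. */
static inline void
atomic_store_max(int32_t *max, int32_t value)
{
   int32_t cur = p_atomic_read(max);

   while (value > cur) {
      int32_t prev = p_atomic_cmpxchg(max, cur, value);
      if (prev == cur)
         break;     /* our value was installed */
      cur = prev;   /* lost the race; re-check against the new maximum */
   }
}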