internal/refcount.h: overhaul fencing and add _MSC_VER section.
Relax the memory_order on the counter decrement itself: mutable members
of the reference-counted structure are made visible on all processors by
the serialization that already protects them, independently of the
counter, so the decrement needs no release semantics of its own.
[Also re-format and minimize the dependency on other headers.]

Reviewed-by: Kurt Roeckx <kurt@roeckx.be>
(Merged from https://github.com/openssl/openssl/pull/6900)
This commit is contained in:
parent 86ed2e1cb0
commit 96d7852cbd

1 changed file with 62 additions and 11 deletions
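The reasoning above can be made concrete with a minimal sketch of the usage pattern the patch assumes. The OBJ type, its fields and OBJ_free() are hypothetical and not part of this commit; CRYPTO_RWLOCK, CRYPTO_THREAD_lock_free() and OPENSSL_free() are existing OpenSSL APIs. Writers touch obj->data only while holding obj->lock, and releasing the lock already publishes those writes with release semantics; that is what lets the decrement inside CRYPTO_DOWN_REF() stay relaxed.

/* Hypothetical reference-counted object, for illustration only. */
typedef struct obj_st {
    CRYPTO_REF_COUNT refcount;
    CRYPTO_RWLOCK *lock;        /* serializes writes to |data| */
    void *data;                 /* mutable state, written under lock */
} OBJ;

void OBJ_free(OBJ *obj)
{
    int i;

    if (obj == NULL)
        return;
    CRYPTO_DOWN_REF(&obj->refcount, &i, obj->lock);
    if (i > 0)
        return;
    /*
     * i == 0: CRYPTO_DOWN_REF() has already executed its conditional
     * acquire fence, so every write other threads made before dropping
     * their last reference is visible here and teardown is safe.
     */
    CRYPTO_THREAD_lock_free(obj->lock);
    OPENSSL_free(obj);
}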
@@ -1,5 +1,5 @@
 /*
- * Copyright 2016 The OpenSSL Project Authors. All Rights Reserved.
+ * Copyright 2016-2018 The OpenSSL Project Authors. All Rights Reserved.
  *
  * Licensed under the OpenSSL license (the "License"). You may not use
  * this file except in compliance with the License. You can obtain a copy
@@ -18,26 +18,36 @@
 
 # if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 201112L \
      && !defined(__STDC_NO_ATOMICS__)
 #  include <stdatomic.h>
 #  define HAVE_C11_ATOMICS
 # endif
 
 # if defined(HAVE_C11_ATOMICS) && defined(ATOMIC_INT_LOCK_FREE) \
      && ATOMIC_INT_LOCK_FREE > 0
 
 #  define HAVE_ATOMICS 1
 
 typedef _Atomic int CRYPTO_REF_COUNT;
 
-static ossl_inline int CRYPTO_UP_REF(_Atomic int *val, int *ret, void *lock)
+static inline int CRYPTO_UP_REF(_Atomic int *val, int *ret, void *lock)
 {
     *ret = atomic_fetch_add_explicit(val, 1, memory_order_relaxed) + 1;
     return 1;
 }
 
-static ossl_inline int CRYPTO_DOWN_REF(_Atomic int *val, int *ret, void *lock)
+/*
+ * Changes to shared structure other than reference counter have to be
+ * serialized. And any kind of serialization implies a release fence. This
+ * means that by the time reference counter is decremented all other
+ * changes are visible on all processors. Hence decrement itself can be
+ * relaxed. In case it hits zero, object will be destructed. Since it's
+ * last use of the object, destructor programmer might reason that access
+ * to mutable members doesn't have to be serialized anymore, which would
+ * otherwise imply an acquire fence. Hence conditional acquire fence...
+ */
+static inline int CRYPTO_DOWN_REF(_Atomic int *val, int *ret, void *lock)
 {
-    *ret = atomic_fetch_sub_explicit(val, 1, memory_order_release) - 1;
+    *ret = atomic_fetch_sub_explicit(val, 1, memory_order_relaxed) - 1;
     if (*ret == 0)
         atomic_thread_fence(memory_order_acquire);
     return 1;
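Note on the conditional fence: sketched below, outside the patch, are the two ways of writing the decrement in plain C11 atomics. Variant (a) mirrors the new code and relies on the external serialization described in the comment above to supply the release half of the synchronization; variant (b) is the conservative alternative that pays for full ordering on every decrement, whether or not the object is about to die.

#include <stdatomic.h>

/* (a) the patch's formulation: relaxed RMW, fence only on the last ref */
static int down_ref_fenced(_Atomic int *val)
{
    int n = atomic_fetch_sub_explicit(val, 1, memory_order_relaxed) - 1;

    if (n == 0)                 /* last reference: object is about to go */
        atomic_thread_fence(memory_order_acquire);
    return n;
}

/* (b) the conservative alternative: acquire-release on every decrement */
static int down_ref_acq_rel(_Atomic int *val)
{
    return atomic_fetch_sub_explicit(val, 1, memory_order_acq_rel) - 1;
}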
@@ -45,24 +55,65 @@ static ossl_inline int CRYPTO_DOWN_REF(_Atomic int *val, int *ret, void *lock)
 
 # elif defined(__GNUC__) && defined(__ATOMIC_RELAXED) && __GCC_ATOMIC_INT_LOCK_FREE > 0
 
 #  define HAVE_ATOMICS 1
 
 typedef int CRYPTO_REF_COUNT;
 
-static ossl_inline int CRYPTO_UP_REF(int *val, int *ret, void *lock)
+static __inline__ int CRYPTO_UP_REF(int *val, int *ret, void *lock)
 {
     *ret = __atomic_fetch_add(val, 1, __ATOMIC_RELAXED) + 1;
     return 1;
 }
 
-static ossl_inline int CRYPTO_DOWN_REF(int *val, int *ret, void *lock)
+static __inline__ int CRYPTO_DOWN_REF(int *val, int *ret, void *lock)
 {
-    *ret = __atomic_fetch_sub(val, 1, __ATOMIC_RELEASE) - 1;
+    *ret = __atomic_fetch_sub(val, 1, __ATOMIC_RELAXED) - 1;
     if (*ret == 0)
         __atomic_thread_fence(__ATOMIC_ACQUIRE);
     return 1;
 }
 
+# elif defined(_MSC_VER) && _MSC_VER>=1200
+
+#  define HAVE_ATOMICS 1
+
+typedef volatile int CRYPTO_REF_COUNT;
+
+#  if (defined(_M_ARM) && _M_ARM>=7) || defined(_M_ARM64)
+#   include <intrin.h>
+#   if defined(_M_ARM64) && !defined(_ARM_BARRIER_ISH)
+#    define _ARM_BARRIER_ISH _ARM64_BARRIER_ISH
+#   endif
+
+static __inline int CRYPTO_UP_REF(volatile int *val, int *ret, void *lock)
+{
+    *ret = _InterlockedExchangeAdd_nf(val, 1) + 1;
+    return 1;
+}
+
+static __inline int CRYPTO_DOWN_REF(volatile int *val, int *ret, void *lock)
+{
+    *ret = _InterlockedExchangeAdd_nf(val, -1) - 1;
+    if (*ret == 0)
+        __dmb(_ARM_BARRIER_ISH);
+    return 1;
+}
+#  else
+#   pragma intrinsic(_InterlockedExchangeAdd)
+
+static __inline int CRYPTO_UP_REF(volatile int *val, int *ret, void *lock)
+{
+    *ret = _InterlockedExchangeAdd(val, 1) + 1;
+    return 1;
+}
+
+static __inline int CRYPTO_DOWN_REF(volatile int *val, int *ret, void *lock)
+{
+    *ret = _InterlockedExchangeAdd(val, -1) - 1;
+    return 1;
+}
+#  endif
+
 # else
 
 typedef int CRYPTO_REF_COUNT;
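For readers less familiar with the MSVC intrinsics, here is the ARM branch of the new section again with editorial annotations mapping it onto the C11 version (the comments are not in the patch). The _nf ("no fence") interlocked variants are atomic but impose no ordering, like memory_order_relaxed, and __dmb(_ARM_BARRIER_ISH) issues an inner-shareable data memory barrier in place of atomic_thread_fence(memory_order_acquire). The plain _InterlockedExchangeAdd() used in the x86/x64 branch is documented as a full barrier, which is why that branch needs no explicit fence at all.

static __inline int CRYPTO_DOWN_REF(volatile int *val, int *ret, void *lock)
{
    /* atomic but unordered, i.e. memory_order_relaxed */
    *ret = _InterlockedExchangeAdd_nf(val, -1) - 1;
    if (*ret == 0)
        /* stands in for atomic_thread_fence(memory_order_acquire) */
        __dmb(_ARM_BARRIER_ISH);
    return 1;
}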