/*
 * Copyright 2016-2023 The OpenSSL Project Authors. All Rights Reserved.
 *
 * Licensed under the Apache License 2.0 (the "License"). You may not use
 * this file except in compliance with the License. You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://www.openssl.org/source/license.html
 */
#ifndef OSSL_INTERNAL_REFCOUNT_H
# define OSSL_INTERNAL_REFCOUNT_H
# ifndef RT_WITHOUT_PRAGMA_ONCE /* VBOX */
#  pragma once
# endif /* VBOX */

# include <openssl/e_os2.h>
# include <openssl/trace.h>

# if defined(OPENSSL_THREADS) && !defined(OPENSSL_DEV_NO_ATOMICS)
#  if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 201112L \
      && !defined(__STDC_NO_ATOMICS__)
#   include <stdatomic.h>
#   define HAVE_C11_ATOMICS
#  endif

#  if defined(HAVE_C11_ATOMICS) && defined(ATOMIC_INT_LOCK_FREE) \
      && ATOMIC_INT_LOCK_FREE > 0

#   define HAVE_ATOMICS 1

typedef _Atomic int CRYPTO_REF_COUNT;

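/*
 * Incrementing a reference count needs no ordering of its own: the caller
 * must already hold a valid reference, so there is nothing for the new
 * reference to synchronize with and a relaxed fetch-and-add suffices.
 */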
static inline int CRYPTO_UP_REF(_Atomic int *val, int *ret,
                                ossl_unused void *lock)
{
    *ret = atomic_fetch_add_explicit(val, 1, memory_order_relaxed) + 1;
    return 1;
}

/*
 * Changes to the shared structure other than the reference counter have to
 * be serialized, and any kind of serialization implies a release fence.
 * This means that by the time the reference counter is decremented, all
 * other changes are visible on all processors, so the decrement itself can
 * be relaxed. If it hits zero, the object will be destructed. Since that is
 * the last use of the object, the destructor's programmer might reason that
 * access to mutable members no longer has to be serialized, which would
 * otherwise imply an acquire fence. Hence the conditional acquire fence...
 */
static inline int CRYPTO_DOWN_REF(_Atomic int *val, int *ret,
                                  ossl_unused void *lock)
{
    *ret = atomic_fetch_sub_explicit(val, 1, memory_order_relaxed) - 1;
    if (*ret == 0)
        atomic_thread_fence(memory_order_acquire);
    return 1;
}

#  elif defined(__GNUC__) && defined(__ATOMIC_RELAXED) && __GCC_ATOMIC_INT_LOCK_FREE > 0
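/*
 * GCC/Clang __atomic builtins, mirroring the C11 variant above: relaxed
 * read-modify-write operations, with an acquire fence once the counter
 * drops to zero.
 */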

#   define HAVE_ATOMICS 1

typedef int CRYPTO_REF_COUNT;

static __inline__ int CRYPTO_UP_REF(int *val, int *ret, ossl_unused void *lock)
{
    *ret = __atomic_fetch_add(val, 1, __ATOMIC_RELAXED) + 1;
    return 1;
}

static __inline__ int CRYPTO_DOWN_REF(int *val, int *ret,
                                      ossl_unused void *lock)
{
    *ret = __atomic_fetch_sub(val, 1, __ATOMIC_RELAXED) - 1;
    if (*ret == 0)
        __atomic_thread_fence(__ATOMIC_ACQUIRE);
    return 1;
}
#  elif defined(__ICL) && defined(_WIN32)
#   define HAVE_ATOMICS 1
typedef volatile int CRYPTO_REF_COUNT;

static __inline int CRYPTO_UP_REF(volatile int *val, int *ret,
                                  ossl_unused void *lock)
{
    *ret = _InterlockedExchangeAdd((void *)val, 1) + 1;
    return 1;
}

static __inline int CRYPTO_DOWN_REF(volatile int *val, int *ret,
                                    ossl_unused void *lock)
{
    *ret = _InterlockedExchangeAdd((void *)val, -1) - 1;
    return 1;
}

#  elif defined(_MSC_VER) && _MSC_VER>=1200

#   define HAVE_ATOMICS 1

typedef volatile int CRYPTO_REF_COUNT;

#   if (defined(_M_ARM) && _M_ARM>=7 && !defined(_WIN32_WCE)) || defined(_M_ARM64)
#    include <intrin.h>
#    if defined(_M_ARM64) && !defined(_ARM_BARRIER_ISH)
#     define _ARM_BARRIER_ISH _ARM64_BARRIER_ISH
#    endif

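/*
 * The "_nf" (no fence) Interlocked intrinsics perform the atomic update
 * without a memory barrier; the explicit __dmb() on the zero path below
 * provides the barrier that the C11 variant expresses as an acquire fence.
 */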
static __inline int CRYPTO_UP_REF(volatile int *val, int *ret,
                                  ossl_unused void *lock)
{
    *ret = _InterlockedExchangeAdd_nf(val, 1) + 1;
    return 1;
}

static __inline int CRYPTO_DOWN_REF(volatile int *val, int *ret,
                                    ossl_unused void *lock)
{
    *ret = _InterlockedExchangeAdd_nf(val, -1) - 1;
    if (*ret == 0)
        __dmb(_ARM_BARRIER_ISH);
    return 1;
}
#   else
#    if !defined(_WIN32_WCE)
#     pragma intrinsic(_InterlockedExchangeAdd)
#    else
#     if _WIN32_WCE >= 0x600
       extern long __cdecl _InterlockedExchangeAdd(long volatile*, long);
#     else
       /* under Windows CE we still have old-style Interlocked* functions */
       extern long __cdecl InterlockedExchangeAdd(long volatile*, long);
#      define _InterlockedExchangeAdd InterlockedExchangeAdd
#     endif
#    endif

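/*
 * On these targets _InterlockedExchangeAdd() already acts as a full memory
 * barrier, so no additional fence is needed when the counter reaches zero.
 */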
static __inline int CRYPTO_UP_REF(volatile int *val, int *ret,
                                  ossl_unused void *lock)
{
    *ret = _InterlockedExchangeAdd((long volatile *)val, 1) + 1;
    return 1;
}

static __inline int CRYPTO_DOWN_REF(volatile int *val, int *ret,
                                    ossl_unused void *lock)
{
    *ret = _InterlockedExchangeAdd((long volatile *)val, -1) - 1;
    return 1;
}
#   endif

#  endif
# endif /* !OPENSSL_DEV_NO_ATOMICS */

/*
 * All the refcounting implementations above define HAVE_ATOMICS, so if it's
 * still undefined here (such as when OPENSSL_DEV_NO_ATOMICS is defined), it
 * means we need to implement a fallback. This fallback uses locks.
 */
# ifndef HAVE_ATOMICS

typedef int CRYPTO_REF_COUNT;

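/*
 * With no native atomics, CRYPTO_atomic_add() is expected to serialize
 * through the CRYPTO_RWLOCK passed as "lock", so callers must supply a
 * valid lock here; the atomic implementations above ignore that argument.
 */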
#  define CRYPTO_UP_REF(val, ret, lock) CRYPTO_atomic_add(val, 1, ret, lock)
#  define CRYPTO_DOWN_REF(val, ret, lock) CRYPTO_atomic_add(val, -1, ret, lock)

# endif

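/*
 * REF_ASSERT_ISNT() aborts via OPENSSL_die() in debug builds when a
 * reference count takes a value that should be impossible (for example
 * dropping below zero); otherwise it expands to nothing.
 */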
# if !defined(NDEBUG) && !defined(OPENSSL_NO_STDIO)
#  define REF_ASSERT_ISNT(test) \
    (void)((test) ? (OPENSSL_die("refcount error", __FILE__, __LINE__), 1) : 0)
# else
#  define REF_ASSERT_ISNT(i)
# endif

# define REF_PRINT_EX(text, count, object) \
    OSSL_TRACE3(REF_COUNT, "%p:%4d:%s\n", (object), (count), (text));
# define REF_PRINT_COUNT(text, object) \
    REF_PRINT_EX(text, object->references, (void *)object)

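/*
 * Illustrative usage sketch only: FOO, its "references" and "lock" members,
 * and the surrounding functions are hypothetical, but real callers follow
 * the same pattern.
 *
 *     int FOO_up_ref(FOO *f)
 *     {
 *         int i;
 *
 *         if (CRYPTO_UP_REF(&f->references, &i, f->lock) <= 0)
 *             return 0;
 *         REF_PRINT_COUNT("FOO", f);
 *         REF_ASSERT_ISNT(i < 2);
 *         return i > 1;
 *     }
 *
 *     void FOO_free(FOO *f)
 *     {
 *         int i;
 *
 *         if (f == NULL)
 *             return;
 *         CRYPTO_DOWN_REF(&f->references, &i, f->lock);
 *         REF_PRINT_COUNT("FOO", f);
 *         if (i > 0)
 *             return;
 *         REF_ASSERT_ISNT(i < 0);
 *         ... release other members, free f->lock, then free f ...
 *     }
 */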
#endif