GCC Code Coverage Report
Directory: ./
File: lib/libcompiler_rt/atomic.c
Date: 2017-11-13

            Exec   Total   Coverage
Lines:         0      43      0.0 %
Branches:      0     457      0.0 %
/*===-- atomic.c - Implement support functions for atomic operations.------===
 *
 *                     The LLVM Compiler Infrastructure
 *
 * This file is dual licensed under the MIT and the University of Illinois Open
 * Source Licenses. See LICENSE.TXT for details.
 *
 *===----------------------------------------------------------------------===
 *
 *  atomic.c defines a set of functions for performing atomic accesses on
 *  arbitrary-sized memory locations.  This design uses locks that should
 *  be fast in the uncontended case, for two reasons:
 *
 *  1) This code must work with C programs that do not link to anything
 *     (including pthreads) and so it should not depend on any pthread
 *     functions.
 *  2) Atomic operations, rather than explicit mutexes, are most commonly used
 *     on code where contended operations are rare.
 *
 *  To avoid needing a per-object lock, this code allocates an array of
 *  locks and hashes the object pointers to find the one that it should use.
 *  For operations that must be atomic on two locations, the lower lock is
 *  always acquired first, to avoid deadlock.
 *
 *===----------------------------------------------------------------------===
 */
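
// Sketch of how the functions in this file get used (struct S below is
// hypothetical): Clang emits calls to the generic entry points when an atomic
// access is too large to be lock free, e.g. a load of a 24-byte _Atomic
// struct lowers to roughly
//   __atomic_load(24, &s, &result, __ATOMIC_SEQ_CST);
// with the pointers passed untyped and the memory order as a plain int.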

#include <stdint.h>
#include <string.h>

#include "assembly.h"

// Clang objects if you redefine a builtin.  This little hack allows us to
// define a function with the same name as an intrinsic.
#pragma redefine_extname __atomic_load_c SYMBOL_NAME(__atomic_load)
#pragma redefine_extname __atomic_store_c SYMBOL_NAME(__atomic_store)
#pragma redefine_extname __atomic_exchange_c SYMBOL_NAME(__atomic_exchange)
#pragma redefine_extname __atomic_compare_exchange_c SYMBOL_NAME(__atomic_compare_exchange)

/// Number of locks.  This allocates one page on 32-bit platforms, two on
/// 64-bit.  This can be specified externally if a different trade-off between
/// memory usage and contention probability is required for a given platform.
#ifndef SPINLOCK_COUNT
#define SPINLOCK_COUNT (1<<10)
#endif
static const long SPINLOCK_MASK = SPINLOCK_COUNT - 1;

////////////////////////////////////////////////////////////////////////////////
// Platform-specific lock implementation.  Falls back to spinlocks if none is
// defined.  Each platform should define the Lock type, and corresponding
// lock() and unlock() functions.
////////////////////////////////////////////////////////////////////////////////
#ifdef __FreeBSD__
#include <errno.h>
#include <sys/types.h>
#include <machine/atomic.h>
#include <sys/umtx.h>
typedef struct _usem Lock;
__inline static void unlock(Lock *l) {
  __c11_atomic_store((_Atomic(uint32_t)*)&l->_count, 1, __ATOMIC_RELEASE);
  __c11_atomic_thread_fence(__ATOMIC_SEQ_CST);
  if (l->_has_waiters)
      _umtx_op(l, UMTX_OP_SEM_WAKE, 1, 0, 0);
}
__inline static void lock(Lock *l) {
  uint32_t old = 1;
  while (!__c11_atomic_compare_exchange_weak((_Atomic(uint32_t)*)&l->_count, &old,
        0, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED)) {
    _umtx_op(l, UMTX_OP_SEM_WAIT, 0, 0, 0);
    old = 1;
  }
}
/// locks for atomic operations
static Lock locks[SPINLOCK_COUNT] = { [0 ...  SPINLOCK_COUNT-1] = {0,1,0} };
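// Note: given FreeBSD's struct _usem layout (_has_waiters, _count, _flags),
// the {0,1,0} initializer above starts every semaphore with _count == 1,
// i.e. unlocked and with no sleeping waiters.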

#elif defined(__APPLE__)
#include <libkern/OSAtomic.h>
typedef OSSpinLock Lock;
__inline static void unlock(Lock *l) {
  OSSpinLockUnlock(l);
}
/// Locks a lock.  In the current implementation, this is potentially
/// unbounded in the contended case.
__inline static void lock(Lock *l) {
  OSSpinLockLock(l);
}
static Lock locks[SPINLOCK_COUNT]; // initialized to OS_SPINLOCK_INIT which is 0

#else
typedef _Atomic(uintptr_t) Lock;
/// Unlock a lock.  This is a release operation.
__inline static void unlock(Lock *l) {
  __c11_atomic_store(l, 0, __ATOMIC_RELEASE);
}
/// Locks a lock.  In the current implementation, this is potentially
/// unbounded in the contended case.
__inline static void lock(Lock *l) {
  uintptr_t old = 0;
  while (!__c11_atomic_compare_exchange_weak(l, &old, 1, __ATOMIC_ACQUIRE,
        __ATOMIC_RELAXED))
    old = 0;
}
/// locks for atomic operations
static Lock locks[SPINLOCK_COUNT];
#endif


/// Returns a lock to use for a given pointer.
static __inline Lock *lock_for_pointer(void *ptr) {
  intptr_t hash = (intptr_t)ptr;
  // Disregard the lowest 4 bits.  We want all values that may be part of the
  // same memory operation to hash to the same value and therefore use the same
  // lock.
  hash >>= 4;
  // Use the next bits as the basis for the hash
  intptr_t low = hash & SPINLOCK_MASK;
  // Now use the high(er) set of bits to perturb the hash, so that we don't
  // get collisions from atomic fields in a single object
  hash >>= 16;
  hash ^= low;
  // Return a pointer to the word to use
  return locks + (hash & SPINLOCK_MASK);
}
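// Worked example with the default SPINLOCK_COUNT of 1024 (mask 0x3ff): for
// ptr == (void *)0x7fff1234, hash >>= 4 gives 0x07fff123, low is 0x123,
// hash >>= 16 leaves 0x07ff, and 0x07ff ^ 0x123 == 0x6dc, so the function
// returns &locks[0x6dc & 0x3ff] == &locks[0x2dc].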

/// Macros for determining whether a size is lock free.  Clang cannot yet
/// codegen __atomic_is_lock_free(16), so for now we assume 16-byte values are
/// not lock free.
#define IS_LOCK_FREE_1 __c11_atomic_is_lock_free(1)
#define IS_LOCK_FREE_2 __c11_atomic_is_lock_free(2)
#define IS_LOCK_FREE_4 __c11_atomic_is_lock_free(4)
#define IS_LOCK_FREE_8 __c11_atomic_is_lock_free(8)
#define IS_LOCK_FREE_16 0

/// Macro that calls the compiler-generated lock-free versions of functions
/// when they exist.
#define LOCK_FREE_CASES() \
  do {\
  switch (size) {\
    case 2:\
      if (IS_LOCK_FREE_2) {\
        LOCK_FREE_ACTION(uint16_t);\
      }\
    case 4:\
      if (IS_LOCK_FREE_4) {\
        LOCK_FREE_ACTION(uint32_t);\
      }\
    case 8:\
      if (IS_LOCK_FREE_8) {\
        LOCK_FREE_ACTION(uint64_t);\
      }\
    case 16:\
      if (IS_LOCK_FREE_16) {\
        /* FIXME: __uint128_t isn't available on 32 bit platforms.
        LOCK_FREE_ACTION(__uint128_t);*/\
      }\
  }\
  } while (0)
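// Each LOCK_FREE_ACTION expands to code that returns from the enclosing
// function, so a taken case never reaches the next one; when no action fires,
// execution falls through the remaining cases (there are no breaks) and
// continues with the lock-based fallback after the macro invocation.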

/// An atomic load operation.  This is atomic with respect to the source
/// pointer only.
void __atomic_load_c(int size, void *src, void *dest, int model) {
#define LOCK_FREE_ACTION(type) \
    *((type*)dest) = __c11_atomic_load((_Atomic(type)*)src, model);\
    return;
  LOCK_FREE_CASES();
#undef LOCK_FREE_ACTION
  Lock *l = lock_for_pointer(src);
  lock(l);
  memcpy(dest, src, size);
  unlock(l);
}
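// The fallback above is atomic only against other callers that hash src to
// the same lock; it does not synchronize with code that accesses the location
// directly, which is why the compiler routes every access to an oversized
// _Atomic object through these functions.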

/// An atomic store operation.  This is atomic with respect to the destination
/// pointer only.
void __atomic_store_c(int size, void *dest, void *src, int model) {
#define LOCK_FREE_ACTION(type) \
    __c11_atomic_store((_Atomic(type)*)dest, *(type*)src, model);\
    return;
  LOCK_FREE_CASES();
#undef LOCK_FREE_ACTION
  Lock *l = lock_for_pointer(dest);
  lock(l);
  memcpy(dest, src, size);
  unlock(l);
}

/// Atomic compare and exchange operation.  If the value at *ptr is identical
/// to the value at *expected, then this copies the value at *desired to *ptr.
/// If they are not, then this stores the current value from *ptr in
/// *expected.
///
/// This function returns 1 if the exchange takes place or 0 if it fails.
int __atomic_compare_exchange_c(int size, void *ptr, void *expected,
    void *desired, int success, int failure) {
#define LOCK_FREE_ACTION(type) \
  return __c11_atomic_compare_exchange_strong((_Atomic(type)*)ptr, (type*)expected,\
      *(type*)desired, success, failure)
  LOCK_FREE_CASES();
#undef LOCK_FREE_ACTION
  Lock *l = lock_for_pointer(ptr);
  lock(l);
  if (memcmp(ptr, expected, size) == 0) {
    memcpy(ptr, desired, size);
    unlock(l);
    return 1;
  }
  memcpy(expected, ptr, size);
  unlock(l);
  return 0;
}
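// Call-site sketch (names hypothetical): for a 24-byte _Atomic struct,
//   atomic_compare_exchange_strong(&obj, &expected, desired);
// lowers to something like
//   __atomic_compare_exchange(24, &obj, &expected, &desired,
//                             __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
// which the pragma at the top of this file routes to the function above.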

/// Performs an atomic exchange operation between two pointers.  This is atomic
/// with respect to the target address.
void __atomic_exchange_c(int size, void *ptr, void *val, void *old, int model) {
#define LOCK_FREE_ACTION(type) \
    *(type*)old = __c11_atomic_exchange((_Atomic(type)*)ptr, *(type*)val,\
        model);\
    return;
  LOCK_FREE_CASES();
#undef LOCK_FREE_ACTION
  Lock *l = lock_for_pointer(ptr);
  lock(l);
  memcpy(old, ptr, size);
  memcpy(ptr, val, size);
  unlock(l);
}

////////////////////////////////////////////////////////////////////////////////
// Where the size is known at compile time, the compiler may emit calls to
// specialised versions of the above functions.
////////////////////////////////////////////////////////////////////////////////
#define OPTIMISED_CASES\
  OPTIMISED_CASE(1, IS_LOCK_FREE_1, uint8_t)\
  OPTIMISED_CASE(2, IS_LOCK_FREE_2, uint16_t)\
  OPTIMISED_CASE(4, IS_LOCK_FREE_4, uint32_t)\
  OPTIMISED_CASE(8, IS_LOCK_FREE_8, uint64_t)\
  /* FIXME: __uint128_t isn't available on 32 bit platforms.
  OPTIMISED_CASE(16, IS_LOCK_FREE_16, __uint128_t)*/\

#define OPTIMISED_CASE(n, lockfree, type)\
type __atomic_load_##n(type *src, int model) {\
  if (lockfree)\
    return __c11_atomic_load((_Atomic(type)*)src, model);\
  Lock *l = lock_for_pointer(src);\
  lock(l);\
  type val = *src;\
  unlock(l);\
  return val;\
}
OPTIMISED_CASES
#undef OPTIMISED_CASE
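// Instantiated with n == 4, the macro above yields (modulo formatting):
//   uint32_t __atomic_load_4(uint32_t *src, int model) {
//     if (IS_LOCK_FREE_4)
//       return __c11_atomic_load((_Atomic(uint32_t)*)src, model);
//     Lock *l = lock_for_pointer(src);
//     lock(l); uint32_t val = *src; unlock(l);
//     return val;
//   }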

#define OPTIMISED_CASE(n, lockfree, type)\
void __atomic_store_##n(type *dest, type val, int model) {\
  if (lockfree) {\
    __c11_atomic_store((_Atomic(type)*)dest, val, model);\
    return;\
  }\
  Lock *l = lock_for_pointer(dest);\
  lock(l);\
  *dest = val;\
  unlock(l);\
  return;\
}
OPTIMISED_CASES
#undef OPTIMISED_CASE

#define OPTIMISED_CASE(n, lockfree, type)\
type __atomic_exchange_##n(type *dest, type val, int model) {\
  if (lockfree)\
    return __c11_atomic_exchange((_Atomic(type)*)dest, val, model);\
  Lock *l = lock_for_pointer(dest);\
  lock(l);\
  type tmp = *dest;\
  *dest = val;\
  unlock(l);\
  return tmp;\
}
OPTIMISED_CASES
#undef OPTIMISED_CASE

#define OPTIMISED_CASE(n, lockfree, type)\
int __atomic_compare_exchange_##n(type *ptr, type *expected, type desired,\
    int success, int failure) {\
  if (lockfree)\
    return __c11_atomic_compare_exchange_strong((_Atomic(type)*)ptr, expected, desired,\
        success, failure);\
  Lock *l = lock_for_pointer(ptr);\
  lock(l);\
  if (*ptr == *expected) {\
    *ptr = desired;\
    unlock(l);\
    return 1;\
  }\
  *expected = *ptr;\
  unlock(l);\
  return 0;\
}
OPTIMISED_CASES
#undef OPTIMISED_CASE

////////////////////////////////////////////////////////////////////////////////
// Atomic read-modify-write operations for integers of various sizes.
////////////////////////////////////////////////////////////////////////////////
#define ATOMIC_RMW(n, lockfree, type, opname, op) \
type __atomic_fetch_##opname##_##n(type *ptr, type val, int model) {\
  if (lockfree) \
    return __c11_atomic_fetch_##opname((_Atomic(type)*)ptr, val, model);\
  Lock *l = lock_for_pointer(ptr);\
  lock(l);\
  type tmp = *ptr;\
  *ptr = tmp op val;\
  unlock(l);\
  return tmp;\
}
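// As with the C11 fetch operations, these return the value held before the
// update: with *ptr == 41, __atomic_fetch_add_4(ptr, 1, model) stores 42 and
// returns 41.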

#define OPTIMISED_CASE(n, lockfree, type) ATOMIC_RMW(n, lockfree, type, add, +)
OPTIMISED_CASES
#undef OPTIMISED_CASE
#define OPTIMISED_CASE(n, lockfree, type) ATOMIC_RMW(n, lockfree, type, sub, -)
OPTIMISED_CASES
#undef OPTIMISED_CASE
#define OPTIMISED_CASE(n, lockfree, type) ATOMIC_RMW(n, lockfree, type, and, &)
OPTIMISED_CASES
#undef OPTIMISED_CASE
#define OPTIMISED_CASE(n, lockfree, type) ATOMIC_RMW(n, lockfree, type, or, |)
OPTIMISED_CASES
#undef OPTIMISED_CASE
#define OPTIMISED_CASE(n, lockfree, type) ATOMIC_RMW(n, lockfree, type, xor, ^)
OPTIMISED_CASES
#undef OPTIMISED_CASE
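
// Usage sketch (illustrative): on a target whose widest lock-free atomic is
// 4 bytes, Clang lowers
//   _Atomic uint64_t c; __c11_atomic_fetch_add(&c, 1, __ATOMIC_RELAXED);
// to a call to __atomic_fetch_add_8(&c, 1, __ATOMIC_RELAXED) as defined above.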