// RUN: %clang_builtins %s %librt -o %t && %run %t
// REQUIRES: librt_has_atomic
//===-- atomic_test.c - Test support functions for atomic operations ------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file performs some simple testing of the support functions for the
// atomic builtins. All tests are single-threaded, so this is only a sanity
// check.
//
//===----------------------------------------------------------------------===//

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#undef NDEBUG
#include <assert.h>

// We test the library atomic functions directly, rather than via the C
// builtins. This should avoid confounding factors, ensuring that we actually
// test the functions themselves, regardless of how the builtins are lowered.
// We need to use asm labels because we can't redeclare the builtins.
// Note: we need to prepend an underscore to this name on e.g. macOS.
#define _STRINGIFY(x) #x
#define STRINGIFY(x) _STRINGIFY(x)
#define EXTERNAL_NAME(name) asm(STRINGIFY(__USER_LABEL_PREFIX__) #name)
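// For example, EXTERNAL_NAME(__atomic_load) expands to asm("__atomic_load")
// on ELF targets (where __USER_LABEL_PREFIX__ is empty) and to
// asm("___atomic_load") on Mach-O, where the prefix is `_`.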

bool __atomic_is_lock_free_c(size_t size, void *ptr)
    EXTERNAL_NAME(__atomic_is_lock_free);

void __atomic_load_c(int size, const void *src, void *dest,
                     int model) EXTERNAL_NAME(__atomic_load);

uint8_t __atomic_load_1(uint8_t *src, int model);
uint16_t __atomic_load_2(uint16_t *src, int model);
uint32_t __atomic_load_4(uint32_t *src, int model);
uint64_t __atomic_load_8(uint64_t *src, int model);

void __atomic_store_c(int size, void *dest, const void *src,
                      int model) EXTERNAL_NAME(__atomic_store);

void __atomic_store_1(uint8_t *dest, uint8_t val, int model);
void __atomic_store_2(uint16_t *dest, uint16_t val, int model);
void __atomic_store_4(uint32_t *dest, uint32_t val, int model);
void __atomic_store_8(uint64_t *dest, uint64_t val, int model);

void __atomic_exchange_c(int size, void *ptr, const void *val, void *old,
                         int model) EXTERNAL_NAME(__atomic_exchange);

uint8_t __atomic_exchange_1(uint8_t *dest, uint8_t val, int model);
uint16_t __atomic_exchange_2(uint16_t *dest, uint16_t val, int model);
uint32_t __atomic_exchange_4(uint32_t *dest, uint32_t val, int model);
uint64_t __atomic_exchange_8(uint64_t *dest, uint64_t val, int model);

int __atomic_compare_exchange_c(int size, void *ptr, void *expected,
                                const void *desired, int success, int failure)
    EXTERNAL_NAME(__atomic_compare_exchange);

bool __atomic_compare_exchange_1(uint8_t *ptr, uint8_t *expected,
                                 uint8_t desired, int success, int failure);
bool __atomic_compare_exchange_2(uint16_t *ptr, uint16_t *expected,
                                 uint16_t desired, int success, int failure);
bool __atomic_compare_exchange_4(uint32_t *ptr, uint32_t *expected,
                                 uint32_t desired, int success, int failure);
bool __atomic_compare_exchange_8(uint64_t *ptr, uint64_t *expected,
                                 uint64_t desired, int success, int failure);

uint8_t __atomic_fetch_add_1(uint8_t *ptr, uint8_t val, int model);
uint16_t __atomic_fetch_add_2(uint16_t *ptr, uint16_t val, int model);
uint32_t __atomic_fetch_add_4(uint32_t *ptr, uint32_t val, int model);
uint64_t __atomic_fetch_add_8(uint64_t *ptr, uint64_t val, int model);

uint8_t __atomic_fetch_sub_1(uint8_t *ptr, uint8_t val, int model);
uint16_t __atomic_fetch_sub_2(uint16_t *ptr, uint16_t val, int model);
uint32_t __atomic_fetch_sub_4(uint32_t *ptr, uint32_t val, int model);
uint64_t __atomic_fetch_sub_8(uint64_t *ptr, uint64_t val, int model);

uint8_t __atomic_fetch_and_1(uint8_t *ptr, uint8_t val, int model);
uint16_t __atomic_fetch_and_2(uint16_t *ptr, uint16_t val, int model);
uint32_t __atomic_fetch_and_4(uint32_t *ptr, uint32_t val, int model);
uint64_t __atomic_fetch_and_8(uint64_t *ptr, uint64_t val, int model);

uint8_t __atomic_fetch_or_1(uint8_t *ptr, uint8_t val, int model);
uint16_t __atomic_fetch_or_2(uint16_t *ptr, uint16_t val, int model);
uint32_t __atomic_fetch_or_4(uint32_t *ptr, uint32_t val, int model);
uint64_t __atomic_fetch_or_8(uint64_t *ptr, uint64_t val, int model);

uint8_t __atomic_fetch_xor_1(uint8_t *ptr, uint8_t val, int model);
uint16_t __atomic_fetch_xor_2(uint16_t *ptr, uint16_t val, int model);
uint32_t __atomic_fetch_xor_4(uint32_t *ptr, uint32_t val, int model);
uint64_t __atomic_fetch_xor_8(uint64_t *ptr, uint64_t val, int model);

// We conditionally test the *_16 atomic function variants based on the same
// condition that compiler_rt (atomic.c) uses to conditionally generate them.
// Currently atomic.c tests if __SIZEOF_INT128__ is defined (which can be the
// case on 32-bit platforms, by using -fforce-enable-int128), instead of using
// CRT_HAS_128BIT.

#ifdef __SIZEOF_INT128__
#define TEST_16
#endif

#ifdef TEST_16
typedef __uint128_t uint128_t;
typedef uint128_t maxuint_t;
uint128_t __atomic_load_16(uint128_t *src, int model);
void __atomic_store_16(uint128_t *dest, uint128_t val, int model);
uint128_t __atomic_exchange_16(uint128_t *dest, uint128_t val, int model);
bool __atomic_compare_exchange_16(uint128_t *ptr, uint128_t *expected,
                                  uint128_t desired, int success, int failure);
uint128_t __atomic_fetch_add_16(uint128_t *ptr, uint128_t val, int model);
uint128_t __atomic_fetch_sub_16(uint128_t *ptr, uint128_t val, int model);
uint128_t __atomic_fetch_and_16(uint128_t *ptr, uint128_t val, int model);
uint128_t __atomic_fetch_or_16(uint128_t *ptr, uint128_t val, int model);
uint128_t __atomic_fetch_xor_16(uint128_t *ptr, uint128_t val, int model);
#else
typedef uint64_t maxuint_t;
#endif

#define U8(value) ((uint8_t)(value))
#define U16(value) ((uint16_t)(value))
#define U32(value) ((uint32_t)(value))
#define U64(value) ((uint64_t)(value))

#ifdef TEST_16
#define V ((((uint128_t)0x4243444546474849) << 64) | 0x4a4b4c4d4e4f5051)
#define ONES ((((uint128_t)0x0101010101010101) << 64) | 0x0101010101010101)
#else
#define V 0x4243444546474849
#define ONES 0x0101010101010101
#endif
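// Every byte of ONES is 0x01 and every byte of V is distinct, so truncating
// either constant with U8/U16/U32/U64 (e.g. U8(ONES) == 0x01,
// U16(ONES) == 0x0101) still yields a usable test value at each width.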

#define LEN(array) (sizeof(array) / sizeof(array[0]))

__attribute__((aligned(16))) static const char data[] = {
    0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
    0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
    0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27,
    0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
};

uint8_t a8, b8;
uint16_t a16, b16;
uint32_t a32, b32;
uint64_t a64, b64;
#ifdef TEST_16
uint128_t a128, b128;
#endif

void set_a_values(maxuint_t value) {
  a8 = U8(value);
  a16 = U16(value);
  a32 = U32(value);
  a64 = U64(value);
#ifdef TEST_16
  a128 = value;
#endif
}

void set_b_values(maxuint_t value) {
  b8 = U8(value);
  b16 = U16(value);
  b32 = U32(value);
  b64 = U64(value);
#ifdef TEST_16
  b128 = value;
#endif
}

void test_loads(void) {
  static int atomic_load_models[] = {
      __ATOMIC_RELAXED,
      __ATOMIC_CONSUME,
      __ATOMIC_ACQUIRE,
      __ATOMIC_SEQ_CST,
  };
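  // An atomic load may not use __ATOMIC_RELEASE or __ATOMIC_ACQ_REL, so the
  // list above covers every memory order that is valid for loads.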

  for (int m = 0; m < LEN(atomic_load_models); m++) {
    int model = atomic_load_models[m];

    // Test with aligned data.
    for (int n = 1; n <= LEN(data); n++) {
      __attribute__((aligned(16))) char dst[LEN(data)] = {0};
      __atomic_load_c(n, data, dst, model);
      if (memcmp(dst, data, n) != 0)
        abort();
    }

    // Test with unaligned data.
    for (int n = 1; n < LEN(data); n++) {
      __attribute__((aligned(16))) char dst[LEN(data)] = {0};
      __atomic_load_c(n, data + 1, dst + 1, model);
      if (memcmp(dst + 1, data + 1, n) != 0)
        abort();
    }

    set_a_values(V + m);
    if (__atomic_load_1(&a8, model) != U8(V + m))
      abort();
    if (__atomic_load_2(&a16, model) != U16(V + m))
      abort();
    if (__atomic_load_4(&a32, model) != U32(V + m))
      abort();
    if (__atomic_load_8(&a64, model) != U64(V + m))
      abort();
#ifdef TEST_16
    if (__atomic_load_16(&a128, model) != V + m)
      abort();
#endif
  }
}

void test_stores(void) {
  static int atomic_store_models[] = {
      __ATOMIC_RELAXED,
      __ATOMIC_RELEASE,
      __ATOMIC_SEQ_CST,
  };
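  // An atomic store may not use __ATOMIC_CONSUME, __ATOMIC_ACQUIRE, or
  // __ATOMIC_ACQ_REL, so the list above covers every order valid for stores.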

  for (int m = 0; m < LEN(atomic_store_models); m++) {
    int model = atomic_store_models[m];

    // Test with aligned data.
    for (int n = 1; n <= LEN(data); n++) {
      __attribute__((aligned(16))) char dst[LEN(data)];
      __atomic_store_c(n, dst, data, model);
      if (memcmp(data, dst, n) != 0)
        abort();
    }

    // Test with unaligned data.
    for (int n = 1; n < LEN(data); n++) {
      __attribute__((aligned(16))) char dst[LEN(data)];
      __atomic_store_c(n, dst + 1, data + 1, model);
      if (memcmp(data + 1, dst + 1, n) != 0)
        abort();
    }

    __atomic_store_1(&a8, U8(V + m), model);
    if (a8 != U8(V + m))
      abort();
    __atomic_store_2(&a16, U16(V + m), model);
    if (a16 != U16(V + m))
      abort();
    __atomic_store_4(&a32, U32(V + m), model);
    if (a32 != U32(V + m))
      abort();
    __atomic_store_8(&a64, U64(V + m), model);
    if (a64 != U64(V + m))
      abort();
#ifdef TEST_16
    __atomic_store_16(&a128, V + m, model);
    if (a128 != V + m)
      abort();
#endif
  }
}

void test_exchanges(void) {
  static int atomic_exchange_models[] = {
      __ATOMIC_RELAXED,
      __ATOMIC_ACQUIRE,
      __ATOMIC_RELEASE,
      __ATOMIC_ACQ_REL,
      __ATOMIC_SEQ_CST,
  };

  set_a_values(V);

  for (int m = 0; m < LEN(atomic_exchange_models); m++) {
    int model = atomic_exchange_models[m];

    // Test with aligned data.
    for (int n = 1; n <= LEN(data); n++) {
      __attribute__((aligned(16))) char dst[LEN(data)];
      __attribute__((aligned(16))) char old[LEN(data)];
      for (int i = 0; i < LEN(dst); i++)
        dst[i] = i + m;
      __atomic_exchange_c(n, dst, data, old, model);
      for (int i = 0; i < n; i++) {
        if (dst[i] != 0x10 + i || old[i] != i + m)
          abort();
      }
    }

    // Test with unaligned data.
    for (int n = 1; n < LEN(data); n++) {
      __attribute__((aligned(16))) char dst[LEN(data)];
      __attribute__((aligned(16))) char old[LEN(data)];
      for (int i = 1; i < LEN(dst); i++)
        dst[i] = i - 1 + m;
      __atomic_exchange_c(n, dst + 1, data + 1, old + 1, model);
      for (int i = 1; i <= n; i++) {
        if (dst[i] != 0x10 + i || old[i] != i - 1 + m)
          abort();
      }
    }

    if (__atomic_exchange_1(&a8, U8(V + m + 1), model) != U8(V + m))
      abort();
    if (__atomic_exchange_2(&a16, U16(V + m + 1), model) != U16(V + m))
      abort();
    if (__atomic_exchange_4(&a32, U32(V + m + 1), model) != U32(V + m))
      abort();
    if (__atomic_exchange_8(&a64, U64(V + m + 1), model) != U64(V + m))
      abort();
#ifdef TEST_16
    if (__atomic_exchange_16(&a128, V + m + 1, model) != V + m)
      abort();
#endif
  }
}

void test_compare_exchanges(void) {
  static int atomic_compare_exchange_models[] = {
      __ATOMIC_RELAXED,
      __ATOMIC_CONSUME,
      __ATOMIC_ACQUIRE,
      __ATOMIC_SEQ_CST,
      __ATOMIC_RELEASE,
      __ATOMIC_ACQ_REL,
  };

  for (int m1 = 0; m1 < LEN(atomic_compare_exchange_models); m1++) {
    // Skip the last two models, __ATOMIC_RELEASE and __ATOMIC_ACQ_REL, as
    // failure orders. See <http://wg21.link/p0418> for details.
    for (int m2 = 0; m2 < LEN(atomic_compare_exchange_models) - 2; m2++) {
      int m_succ = atomic_compare_exchange_models[m1];
      int m_fail = atomic_compare_exchange_models[m2];

      // Test with aligned data.
      for (int n = 1; n <= LEN(data); n++) {
        __attribute__((aligned(16))) char dst[LEN(data)] = {0};
        __attribute__((aligned(16))) char exp[LEN(data)] = {0};
        if (!__atomic_compare_exchange_c(n, dst, exp, data, m_succ, m_fail))
          abort();
        if (memcmp(dst, data, n) != 0)
          abort();
        if (__atomic_compare_exchange_c(n, dst, exp, data, m_succ, m_fail))
          abort();
        if (memcmp(exp, data, n) != 0)
          abort();
      }

      // Test with unaligned data.
      for (int n = 1; n < LEN(data); n++) {
        __attribute__((aligned(16))) char dst[LEN(data)] = {0};
        __attribute__((aligned(16))) char exp[LEN(data)] = {0};
        if (!__atomic_compare_exchange_c(n, dst + 1, exp + 1, data + 1,
                                         m_succ, m_fail))
          abort();
        if (memcmp(dst + 1, data + 1, n) != 0)
          abort();
        if (__atomic_compare_exchange_c(n, dst + 1, exp + 1, data + 1, m_succ,
                                        m_fail))
          abort();
        if (memcmp(exp + 1, data + 1, n) != 0)
          abort();
      }

      set_a_values(ONES);
      set_b_values(ONES * 2);
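      // a holds ONES and b holds 2*ONES, so the first CAS of each pair below
      // must fail and write the current value of a into b (the expected
      // value); the second must then succeed and store the desired value.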

      if (__atomic_compare_exchange_1(&a8, &b8, U8(V + m1), m_succ, m_fail))
        abort();
      if (a8 != U8(ONES) || b8 != U8(ONES))
        abort();
      if (!__atomic_compare_exchange_1(&a8, &b8, U8(V + m1), m_succ, m_fail))
        abort();
      if (a8 != U8(V + m1) || b8 != U8(ONES))
        abort();

      if (__atomic_compare_exchange_2(&a16, &b16, U16(V + m1), m_succ, m_fail))
        abort();
      if (a16 != U16(ONES) || b16 != U16(ONES))
        abort();
      if (!__atomic_compare_exchange_2(&a16, &b16, U16(V + m1), m_succ,
                                       m_fail))
        abort();
      if (a16 != U16(V + m1) || b16 != U16(ONES))
        abort();

      if (__atomic_compare_exchange_4(&a32, &b32, U32(V + m1), m_succ, m_fail))
        abort();
      if (a32 != U32(ONES) || b32 != U32(ONES))
        abort();
      if (!__atomic_compare_exchange_4(&a32, &b32, U32(V + m1), m_succ,
                                       m_fail))
        abort();
      if (a32 != U32(V + m1) || b32 != U32(ONES))
        abort();

      if (__atomic_compare_exchange_8(&a64, &b64, U64(V + m1), m_succ, m_fail))
        abort();
      if (a64 != U64(ONES) || b64 != U64(ONES))
        abort();
      if (!__atomic_compare_exchange_8(&a64, &b64, U64(V + m1), m_succ,
                                       m_fail))
        abort();
      if (a64 != U64(V + m1) || b64 != U64(ONES))
        abort();

#ifdef TEST_16
      if (__atomic_compare_exchange_16(&a128, &b128, V + m1, m_succ, m_fail))
        abort();
      if (a128 != ONES || b128 != ONES)
        abort();
      if (!__atomic_compare_exchange_16(&a128, &b128, V + m1, m_succ, m_fail))
        abort();
      if (a128 != V + m1 || b128 != ONES)
        abort();
#endif
    }
  }
}

void test_fetch_op(void) {
  static int atomic_fetch_models[] = {
      __ATOMIC_RELAXED,
      __ATOMIC_CONSUME,
      __ATOMIC_ACQUIRE,
      __ATOMIC_RELEASE,
      __ATOMIC_ACQ_REL,
      __ATOMIC_SEQ_CST,
  };
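  // Atomic read-modify-write operations accept every memory order, so all six
  // orders are exercised for each fetch_* function.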

  for (int m = 0; m < LEN(atomic_fetch_models); m++) {
    int model = atomic_fetch_models[m];

    // Fetch add.

    set_a_values(V + m);
    set_b_values(0);
    b8 = __atomic_fetch_add_1(&a8, U8(ONES), model);
    if (b8 != U8(V + m) || a8 != U8(V + m + ONES))
      abort();
    b16 = __atomic_fetch_add_2(&a16, U16(ONES), model);
    if (b16 != U16(V + m) || a16 != U16(V + m + ONES))
      abort();
    b32 = __atomic_fetch_add_4(&a32, U32(ONES), model);
    if (b32 != U32(V + m) || a32 != U32(V + m + ONES))
      abort();
    b64 = __atomic_fetch_add_8(&a64, U64(ONES), model);
    if (b64 != U64(V + m) || a64 != U64(V + m + ONES))
      abort();
#ifdef TEST_16
    b128 = __atomic_fetch_add_16(&a128, ONES, model);
    if (b128 != V + m || a128 != V + m + ONES)
      abort();
#endif

    // Fetch sub.

    set_a_values(V + m);
    set_b_values(0);
    b8 = __atomic_fetch_sub_1(&a8, U8(ONES), model);
    if (b8 != U8(V + m) || a8 != U8(V + m - ONES))
      abort();
    b16 = __atomic_fetch_sub_2(&a16, U16(ONES), model);
    if (b16 != U16(V + m) || a16 != U16(V + m - ONES))
      abort();
    b32 = __atomic_fetch_sub_4(&a32, U32(ONES), model);
    if (b32 != U32(V + m) || a32 != U32(V + m - ONES))
      abort();
    b64 = __atomic_fetch_sub_8(&a64, U64(ONES), model);
    if (b64 != U64(V + m) || a64 != U64(V + m - ONES))
      abort();
#ifdef TEST_16
    b128 = __atomic_fetch_sub_16(&a128, ONES, model);
    if (b128 != V + m || a128 != V + m - ONES)
      abort();
#endif

    // Fetch and.

    set_a_values(V + m);
    set_b_values(0);
    b8 = __atomic_fetch_and_1(&a8, U8(V + m), model);
    if (b8 != U8(V + m) || a8 != U8(V + m))
      abort();
    b16 = __atomic_fetch_and_2(&a16, U16(V + m), model);
    if (b16 != U16(V + m) || a16 != U16(V + m))
      abort();
    b32 = __atomic_fetch_and_4(&a32, U32(V + m), model);
    if (b32 != U32(V + m) || a32 != U32(V + m))
      abort();
    b64 = __atomic_fetch_and_8(&a64, U64(V + m), model);
    if (b64 != U64(V + m) || a64 != U64(V + m))
      abort();
#ifdef TEST_16
    b128 = __atomic_fetch_and_16(&a128, V + m, model);
    if (b128 != V + m || a128 != V + m)
      abort();
#endif

    // Fetch or.

    set_a_values(V + m);
    set_b_values(0);
    b8 = __atomic_fetch_or_1(&a8, U8(ONES), model);
    if (b8 != U8(V + m) || a8 != U8((V + m) | ONES))
      abort();
    b16 = __atomic_fetch_or_2(&a16, U16(ONES), model);
    if (b16 != U16(V + m) || a16 != U16((V + m) | ONES))
      abort();
    b32 = __atomic_fetch_or_4(&a32, U32(ONES), model);
    if (b32 != U32(V + m) || a32 != U32((V + m) | ONES))
      abort();
    b64 = __atomic_fetch_or_8(&a64, U64(ONES), model);
    if (b64 != U64(V + m) || a64 != U64((V + m) | ONES))
      abort();
#ifdef TEST_16
    b128 = __atomic_fetch_or_16(&a128, ONES, model);
    if (b128 != V + m || a128 != ((V + m) | ONES))
      abort();
#endif

    // Fetch xor.

    set_a_values(V + m);
    set_b_values(0);
    b8 = __atomic_fetch_xor_1(&a8, U8(ONES), model);
    if (b8 != U8(V + m) || a8 != U8((V + m) ^ ONES))
      abort();
    b16 = __atomic_fetch_xor_2(&a16, U16(ONES), model);
    if (b16 != U16(V + m) || a16 != U16((V + m) ^ ONES))
      abort();
    b32 = __atomic_fetch_xor_4(&a32, U32(ONES), model);
    if (b32 != U32(V + m) || a32 != U32((V + m) ^ ONES))
      abort();
    b64 = __atomic_fetch_xor_8(&a64, U64(ONES), model);
    if (b64 != U64(V + m) || a64 != U64((V + m) ^ ONES))
      abort();
#ifdef TEST_16
    b128 = __atomic_fetch_xor_16(&a128, ONES, model);
    if (b128 != (V + m) || a128 != ((V + m) ^ ONES))
      abort();
#endif

    // Check overflow behavior: the sums and differences below wrap the
    // narrower types, and the implementation must wrap like unsigned
    // arithmetic instead of exhibiting signed-overflow UB.

    set_a_values(V + m);
    __atomic_fetch_add_1(&a8, U8(V), model);
    if (a8 != U8(V * 2 + m))
      abort();
    __atomic_fetch_sub_1(&a8, U8(V), model);
    if (a8 != U8(V + m))
      abort();
    __atomic_fetch_add_2(&a16, U16(V), model);
    if (a16 != U16(V * 2 + m))
      abort();
    __atomic_fetch_sub_2(&a16, U16(V), model);
    if (a16 != U16(V + m))
      abort();
    __atomic_fetch_add_4(&a32, U32(V), model);
    if (a32 != U32(V * 2 + m))
      abort();
    __atomic_fetch_sub_4(&a32, U32(V), model);
    if (a32 != U32(V + m))
      abort();
    __atomic_fetch_add_8(&a64, U64(V), model);
    if (a64 != U64(V * 2 + m))
      abort();
    __atomic_fetch_sub_8(&a64, U64(V), model);
    if (a64 != U64(V + m))
      abort();
#ifdef TEST_16
    __atomic_fetch_add_16(&a128, V, model);
    if (a128 != V * 2 + m)
      abort();
    __atomic_fetch_sub_16(&a128, V, model);
    if (a128 != V + m)
      abort();
#endif
  }
}

void test_is_lock_free(void) {
  // The result of __atomic_is_lock_free is architecture dependent, so we only
  // check for a true return value for the sizes where we know at compile time
  // that they are supported. If __atomic_always_lock_free() returns false for
  // a given size, we can only check that __atomic_is_lock_free() returns false
  // for unaligned values.
  // Note: This assumption will have to be revisited when we support an
  // architecture that allows for unaligned atomics.
  // XXX: Do any architectures report true for unaligned atomics?

  // All atomic.c implementations fall back to the non-specialized case for
  // size=0, so despite the operation being a no-op, they still take locks and
  // therefore __atomic_is_lock_free should return false.
  assert(!__atomic_is_lock_free_c(0, NULL) &&
         "size zero should never be lock-free");
  assert(!__atomic_is_lock_free_c(0, (void *)8) &&
         "size zero should never be lock-free");

  if (__atomic_always_lock_free(1, 0)) {
    assert(__atomic_is_lock_free_c(1, NULL) &&
           "aligned size=1 should always be lock-free");
    assert(__atomic_is_lock_free_c(1, (void *)1) &&
           "aligned size=1 should always be lock-free");
  }

  if (__atomic_always_lock_free(2, 0)) {
    assert(__atomic_is_lock_free_c(2, NULL) &&
           "aligned size=2 should always be lock-free");
    assert(__atomic_is_lock_free_c(2, (void *)2) &&
           "aligned size=2 should always be lock-free");
  }
  assert(!__atomic_is_lock_free_c(2, (void *)1) &&
         "unaligned size=2 should not be lock-free");

  if (__atomic_always_lock_free(4, 0)) {
    assert(__atomic_is_lock_free_c(4, NULL) &&
           "aligned size=4 should always be lock-free");
    assert(__atomic_is_lock_free_c(4, (void *)4) &&
           "aligned size=4 should always be lock-free");
  }
  assert(!__atomic_is_lock_free_c(4, (void *)3) &&
         "unaligned size=4 should not be lock-free");
  assert(!__atomic_is_lock_free_c(4, (void *)2) &&
         "unaligned size=4 should not be lock-free");
  assert(!__atomic_is_lock_free_c(4, (void *)1) &&
         "unaligned size=4 should not be lock-free");

  if (__atomic_always_lock_free(8, 0)) {
    assert(__atomic_is_lock_free_c(8, NULL) &&
           "aligned size=8 should always be lock-free");
    assert(__atomic_is_lock_free_c(8, (void *)8) &&
           "aligned size=8 should always be lock-free");
  }
  assert(!__atomic_is_lock_free_c(8, (void *)7) &&
         "unaligned size=8 should not be lock-free");
  assert(!__atomic_is_lock_free_c(8, (void *)4) &&
         "unaligned size=8 should not be lock-free");
  assert(!__atomic_is_lock_free_c(8, (void *)2) &&
         "unaligned size=8 should not be lock-free");
  assert(!__atomic_is_lock_free_c(8, (void *)1) &&
         "unaligned size=8 should not be lock-free");

  if (__atomic_always_lock_free(16, 0)) {
    assert(__atomic_is_lock_free_c(16, NULL) &&
           "aligned size=16 should always be lock-free");
    assert(__atomic_is_lock_free_c(16, (void *)16) &&
           "aligned size=16 should always be lock-free");
  }
  assert(!__atomic_is_lock_free_c(16, (void *)15) &&
         "unaligned size=16 should not be lock-free");
  assert(!__atomic_is_lock_free_c(16, (void *)8) &&
         "unaligned size=16 should not be lock-free");
  assert(!__atomic_is_lock_free_c(16, (void *)4) &&
         "unaligned size=16 should not be lock-free");
  assert(!__atomic_is_lock_free_c(16, (void *)2) &&
         "unaligned size=16 should not be lock-free");
  assert(!__atomic_is_lock_free_c(16, (void *)1) &&
         "unaligned size=16 should not be lock-free");

  // In the current implementation, sizes > 16 bytes are never lock-free:
  assert(!__atomic_is_lock_free_c(32, NULL) &&
         "aligned size=32 should not be lock-free");
  assert(!__atomic_is_lock_free_c(32, (void *)32) &&
         "aligned size=32 should not be lock-free");
  assert(!__atomic_is_lock_free_c(32, (void *)31) &&
         "unaligned size=32 should not be lock-free");

  // We also don't support non-power-of-two sizes:
  assert(!__atomic_is_lock_free_c(3, NULL) &&
         "aligned size=3 should not be lock-free");
  assert(!__atomic_is_lock_free_c(5, NULL) &&
         "aligned size=5 should not be lock-free");
  assert(!__atomic_is_lock_free_c(6, NULL) &&
         "aligned size=6 should not be lock-free");
  assert(!__atomic_is_lock_free_c(7, NULL) &&
         "aligned size=7 should not be lock-free");
  assert(!__atomic_is_lock_free_c(9, NULL) &&
         "aligned size=9 should not be lock-free");
  assert(!__atomic_is_lock_free_c(10, NULL) &&
         "aligned size=10 should not be lock-free");
  assert(!__atomic_is_lock_free_c(11, NULL) &&
         "aligned size=11 should not be lock-free");
  assert(!__atomic_is_lock_free_c(12, NULL) &&
         "aligned size=12 should not be lock-free");
  assert(!__atomic_is_lock_free_c(13, NULL) &&
         "aligned size=13 should not be lock-free");
  assert(!__atomic_is_lock_free_c(14, NULL) &&
         "aligned size=14 should not be lock-free");
  assert(!__atomic_is_lock_free_c(15, NULL) &&
         "aligned size=15 should not be lock-free");
  assert(!__atomic_is_lock_free_c(17, NULL) &&
         "aligned size=17 should not be lock-free");
}

int main() {
  test_loads();
  test_stores();
  test_exchanges();
  test_compare_exchanges();
  test_fetch_op();
  test_is_lock_free();
  return 0;
}