1 // Copyright 2015 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include "src/base/macros.h"
6 #include "src/base/platform/mutex.h"
7 #include "src/execution/arguments-inl.h"
8 #include "src/heap/factory.h"
9 #include "src/logging/counters.h"
10 #include "src/numbers/conversions-inl.h"
11 #include "src/objects/js-array-buffer-inl.h"
12 #include "src/runtime/runtime-utils.h"
13
14 // Implement Atomic accesses to ArrayBuffers and SharedArrayBuffers.
15 // https://tc39.es/ecma262/#sec-atomics
16
17 namespace v8 {
18 namespace internal {
19
20 // Other platforms have CSA support, see builtins-sharedarraybuffer-gen.h.
21 #if V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_PPC64 || \
22 V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_S390 || V8_TARGET_ARCH_S390X
23
24 namespace {
25
26 #if V8_CC_GNU
27
28 // GCC/Clang helpfully warn us that using 64-bit atomics on 32-bit platforms
29 // can be slow. Good to know, but we don't have a choice.
30 #ifdef V8_TARGET_ARCH_32_BIT
31 #pragma GCC diagnostic push
32 #pragma GCC diagnostic ignored "-Wpragmas"
33 #pragma GCC diagnostic ignored "-Watomic-alignment"
34 #endif // V8_TARGET_ARCH_32_BIT
35
// Sequentially-consistent atomic load of *p.
template <typename T>
inline T LoadSeqCst(T* p) {
  T loaded = __atomic_load_n(p, __ATOMIC_SEQ_CST);
  return loaded;
}
40
// Sequentially-consistent atomic store of |value| into *p.
template <typename T>
inline void StoreSeqCst(T* p, T value) {
  T* const destination = p;
  __atomic_store_n(destination, value, __ATOMIC_SEQ_CST);
}
45
// Atomically replaces *p with |value|; returns the previous contents.
template <typename T>
inline T ExchangeSeqCst(T* p, T value) {
  T previous = __atomic_exchange_n(p, value, __ATOMIC_SEQ_CST);
  return previous;
}
50
// Atomically stores |newval| into *p iff *p == |oldval|. Returns the value
// *p held before the operation (== oldval exactly when the swap happened).
template <typename T>
inline T CompareExchangeSeqCst(T* p, T oldval, T newval) {
  T expected = oldval;
  // On failure the builtin writes the observed value back into |expected|,
  // which is precisely what we want to return; the success flag is unused.
  __atomic_compare_exchange_n(p, &expected, newval, false, __ATOMIC_SEQ_CST,
                              __ATOMIC_SEQ_CST);
  return expected;
}
57
// Atomic fetch-and-add; returns the value held before the addition.
template <typename T>
inline T AddSeqCst(T* p, T value) {
  T previous = __atomic_fetch_add(p, value, __ATOMIC_SEQ_CST);
  return previous;
}
62
// Atomic fetch-and-subtract; returns the value held before the subtraction.
template <typename T>
inline T SubSeqCst(T* p, T value) {
  T previous = __atomic_fetch_sub(p, value, __ATOMIC_SEQ_CST);
  return previous;
}
67
// Atomic fetch-and-AND; returns the value held before the operation.
template <typename T>
inline T AndSeqCst(T* p, T value) {
  T previous = __atomic_fetch_and(p, value, __ATOMIC_SEQ_CST);
  return previous;
}
72
// Atomic fetch-and-OR; returns the value held before the operation.
template <typename T>
inline T OrSeqCst(T* p, T value) {
  T previous = __atomic_fetch_or(p, value, __ATOMIC_SEQ_CST);
  return previous;
}
77
// Atomic fetch-and-XOR; returns the value held before the operation.
template <typename T>
inline T XorSeqCst(T* p, T value) {
  T previous = __atomic_fetch_xor(p, value, __ATOMIC_SEQ_CST);
  return previous;
}
82
83 #ifdef V8_TARGET_ARCH_32_BIT
84 #pragma GCC diagnostic pop
85 #endif // V8_TARGET_ARCH_32_BIT
86
87 #elif V8_CC_MSVC
88
89 #define InterlockedExchange32 _InterlockedExchange
90 #define InterlockedCompareExchange32 _InterlockedCompareExchange
91 #define InterlockedCompareExchange8 _InterlockedCompareExchange8
92 #define InterlockedExchangeAdd32 _InterlockedExchangeAdd
93 #define InterlockedExchangeAdd16 _InterlockedExchangeAdd16
94 #define InterlockedExchangeAdd8 _InterlockedExchangeAdd8
95 #define InterlockedAnd32 _InterlockedAnd
96 #define InterlockedOr64 _InterlockedOr64
97 #define InterlockedOr32 _InterlockedOr
98 #define InterlockedXor32 _InterlockedXor
99
100 #if defined(V8_HOST_ARCH_ARM64)
101 #define InterlockedExchange8 _InterlockedExchange8
102 #endif
103
// Defines the full set of sequentially-consistent read-modify-write helpers
// for one integral |type| on top of the MSVC Interlocked* intrinsics.
// |suffix| picks the intrinsic width (8/16/32/64) and |vctype| is the exact
// parameter type that intrinsic family expects; bit_cast converts between
// |type| and |vctype| without changing the bit pattern.
// NOTE(review): SubSeqCst is implemented as an add of the negated value and
// so relies on two's-complement wrap-around of |vctype| -- confirm this is
// the intended overflow behavior on all MSVC targets.
#define ATOMIC_OPS(type, suffix, vctype)                                    \
  inline type ExchangeSeqCst(type* p, type value) {                         \
    return InterlockedExchange##suffix(reinterpret_cast<vctype*>(p),        \
                                       bit_cast<vctype>(value));            \
  }                                                                         \
  inline type CompareExchangeSeqCst(type* p, type oldval, type newval) {    \
    return InterlockedCompareExchange##suffix(reinterpret_cast<vctype*>(p), \
                                              bit_cast<vctype>(newval),     \
                                              bit_cast<vctype>(oldval));    \
  }                                                                         \
  inline type AddSeqCst(type* p, type value) {                              \
    return InterlockedExchangeAdd##suffix(reinterpret_cast<vctype*>(p),     \
                                          bit_cast<vctype>(value));         \
  }                                                                         \
  inline type SubSeqCst(type* p, type value) {                              \
    return InterlockedExchangeAdd##suffix(reinterpret_cast<vctype*>(p),     \
                                          -bit_cast<vctype>(value));        \
  }                                                                         \
  inline type AndSeqCst(type* p, type value) {                              \
    return InterlockedAnd##suffix(reinterpret_cast<vctype*>(p),             \
                                  bit_cast<vctype>(value));                 \
  }                                                                         \
  inline type OrSeqCst(type* p, type value) {                               \
    return InterlockedOr##suffix(reinterpret_cast<vctype*>(p),              \
                                 bit_cast<vctype>(value));                  \
  }                                                                         \
  inline type XorSeqCst(type* p, type value) {                              \
    return InterlockedXor##suffix(reinterpret_cast<vctype*>(p),             \
                                  bit_cast<vctype>(value));                 \
  }

// Instantiate the helpers for every integer element type Atomics supports.
ATOMIC_OPS(int8_t, 8, char)
ATOMIC_OPS(uint8_t, 8, char)
ATOMIC_OPS(int16_t, 16, short) /* NOLINT(runtime/int) */
ATOMIC_OPS(uint16_t, 16, short) /* NOLINT(runtime/int) */
ATOMIC_OPS(int32_t, 32, long) /* NOLINT(runtime/int) */
ATOMIC_OPS(uint32_t, 32, long) /* NOLINT(runtime/int) */
ATOMIC_OPS(int64_t, 64, __int64)
ATOMIC_OPS(uint64_t, 64, __int64)
143
// Deliberate stubs: calling either of these aborts via UNREACHABLE().
// NOTE(review): in this MSVC branch plain seq-cst load/store helpers are
// apparently never needed -- confirm that no code path reaches
// Runtime_AtomicsLoad64/Store64 under V8_CC_MSVC before relying on this.
template <typename T>
inline T LoadSeqCst(T* p) {
  UNREACHABLE();
}

template <typename T>
inline void StoreSeqCst(T* p, T value) {
  UNREACHABLE();
}
153
154 #undef ATOMIC_OPS
155
156 #undef InterlockedExchange32
157 #undef InterlockedCompareExchange32
158 #undef InterlockedCompareExchange8
159 #undef InterlockedExchangeAdd32
160 #undef InterlockedExchangeAdd16
161 #undef InterlockedExchangeAdd8
162 #undef InterlockedAnd32
163 #undef InterlockedOr64
164 #undef InterlockedOr32
165 #undef InterlockedXor32
166
167 #if defined(V8_HOST_ARCH_ARM64)
168 #undef InterlockedExchange8
169 #endif
170
171 #else
172
173 #error Unsupported platform!
174
175 #endif
176
// FromObject<T> unboxes a JS value into the raw element type T.
// The numeric specializations expect a Number and convert it via
// NumberToInt32/NumberToUint32 (the narrower 8/16-bit variants truncate
// implicitly on return); the 64-bit specializations expect a BigInt, which
// callers have already produced with BigInt::FromObject.
template <typename T>
T FromObject(Handle<Object> number);

template <>
inline uint8_t FromObject<uint8_t>(Handle<Object> number) {
  return NumberToUint32(*number);  // implicit truncation to 8 bits
}

template <>
inline int8_t FromObject<int8_t>(Handle<Object> number) {
  return NumberToInt32(*number);  // implicit truncation to 8 bits
}

template <>
inline uint16_t FromObject<uint16_t>(Handle<Object> number) {
  return NumberToUint32(*number);  // implicit truncation to 16 bits
}

template <>
inline int16_t FromObject<int16_t>(Handle<Object> number) {
  return NumberToInt32(*number);  // implicit truncation to 16 bits
}

template <>
inline uint32_t FromObject<uint32_t>(Handle<Object> number) {
  return NumberToUint32(*number);
}

template <>
inline int32_t FromObject<int32_t>(Handle<Object> number) {
  return NumberToInt32(*number);
}

template <>
inline uint64_t FromObject<uint64_t>(Handle<Object> bigint) {
  return Handle<BigInt>::cast(bigint)->AsUint64();
}

template <>
inline int64_t FromObject<int64_t>(Handle<Object> bigint) {
  return Handle<BigInt>::cast(bigint)->AsInt64();
}
219
// ToObject re-boxes a raw element value as a JS object:
//  - 8/16-bit values always fit in a Smi;
//  - 32-bit values can exceed Smi range, so they go through the factory
//    (which may allocate a heap number);
//  - 64-bit values become BigInts.
inline Object ToObject(Isolate* isolate, int8_t t) { return Smi::FromInt(t); }

inline Object ToObject(Isolate* isolate, uint8_t t) { return Smi::FromInt(t); }

inline Object ToObject(Isolate* isolate, int16_t t) { return Smi::FromInt(t); }

inline Object ToObject(Isolate* isolate, uint16_t t) { return Smi::FromInt(t); }

inline Object ToObject(Isolate* isolate, int32_t t) {
  return *isolate->factory()->NewNumber(t);
}

inline Object ToObject(Isolate* isolate, uint32_t t) {
  return *isolate->factory()->NewNumber(t);
}

inline Object ToObject(Isolate* isolate, int64_t t) {
  return *BigInt::FromInt64(isolate, t);
}

inline Object ToObject(Isolate* isolate, uint64_t t) {
  return *BigInt::FromUint64(isolate, t);
}
243
244 template <typename T>
245 struct Load {
Dov8::internal::__anonf83d7ef80111::Load246 static inline Object Do(Isolate* isolate, void* buffer, size_t index) {
247 T result = LoadSeqCst(static_cast<T*>(buffer) + index);
248 return ToObject(isolate, result);
249 }
250 };
251
252 template <typename T>
253 struct Store {
Dov8::internal::__anonf83d7ef80111::Store254 static inline void Do(Isolate* isolate, void* buffer, size_t index,
255 Handle<Object> obj) {
256 T value = FromObject<T>(obj);
257 StoreSeqCst(static_cast<T*>(buffer) + index, value);
258 }
259 };
260
261 template <typename T>
262 struct Exchange {
Dov8::internal::__anonf83d7ef80111::Exchange263 static inline Object Do(Isolate* isolate, void* buffer, size_t index,
264 Handle<Object> obj) {
265 T value = FromObject<T>(obj);
266 T result = ExchangeSeqCst(static_cast<T*>(buffer) + index, value);
267 return ToObject(isolate, result);
268 }
269 };
270
271 template <typename T>
DoCompareExchange(Isolate * isolate,void * buffer,size_t index,Handle<Object> oldobj,Handle<Object> newobj)272 inline Object DoCompareExchange(Isolate* isolate, void* buffer, size_t index,
273 Handle<Object> oldobj, Handle<Object> newobj) {
274 T oldval = FromObject<T>(oldobj);
275 T newval = FromObject<T>(newobj);
276 T result =
277 CompareExchangeSeqCst(static_cast<T*>(buffer) + index, oldval, newval);
278 return ToObject(isolate, result);
279 }
280
281 template <typename T>
282 struct Add {
Dov8::internal::__anonf83d7ef80111::Add283 static inline Object Do(Isolate* isolate, void* buffer, size_t index,
284 Handle<Object> obj) {
285 T value = FromObject<T>(obj);
286 T result = AddSeqCst(static_cast<T*>(buffer) + index, value);
287 return ToObject(isolate, result);
288 }
289 };
290
291 template <typename T>
292 struct Sub {
Dov8::internal::__anonf83d7ef80111::Sub293 static inline Object Do(Isolate* isolate, void* buffer, size_t index,
294 Handle<Object> obj) {
295 T value = FromObject<T>(obj);
296 T result = SubSeqCst(static_cast<T*>(buffer) + index, value);
297 return ToObject(isolate, result);
298 }
299 };
300
301 template <typename T>
302 struct And {
Dov8::internal::__anonf83d7ef80111::And303 static inline Object Do(Isolate* isolate, void* buffer, size_t index,
304 Handle<Object> obj) {
305 T value = FromObject<T>(obj);
306 T result = AndSeqCst(static_cast<T*>(buffer) + index, value);
307 return ToObject(isolate, result);
308 }
309 };
310
311 template <typename T>
312 struct Or {
Dov8::internal::__anonf83d7ef80111::Or313 static inline Object Do(Isolate* isolate, void* buffer, size_t index,
314 Handle<Object> obj) {
315 T value = FromObject<T>(obj);
316 T result = OrSeqCst(static_cast<T*>(buffer) + index, value);
317 return ToObject(isolate, result);
318 }
319 };
320
321 template <typename T>
322 struct Xor {
Dov8::internal::__anonf83d7ef80111::Xor323 static inline Object Do(Isolate* isolate, void* buffer, size_t index,
324 Handle<Object> obj) {
325 T value = FromObject<T>(obj);
326 T result = XorSeqCst(static_cast<T*>(buffer) + index, value);
327 return ToObject(isolate, result);
328 }
329 };
330
331 } // anonymous namespace
332
// Duplicated from objects.h
// V has parameters (Type, type, TYPE, C type)
// Covers only the element kinds whose values are plain Numbers; the
// BigInt-backed 64-bit kinds are dispatched explicitly by the callers.
#define INTEGER_TYPED_ARRAYS(V)       \
  V(Uint8, uint8, UINT8, uint8_t)     \
  V(Int8, int8, INT8, int8_t)         \
  V(Uint16, uint16, UINT16, uint16_t) \
  V(Int16, int16, INT16, int16_t)     \
  V(Uint32, uint32, UINT32, uint32_t) \
  V(Int32, int32, INT32, int32_t)
342
// If |sta|'s backing buffer has been detached, throws a TypeError
// (kDetachedOperation, labelled with |method_name|) and returns the failure
// sentinel from the enclosing function. Placed after value conversions,
// which can run user JS that detaches the buffer.
#define THROW_ERROR_RETURN_FAILURE_ON_DETACHED(isolate, sta, method_name) \
  do {                                                                    \
    if (V8_UNLIKELY(sta->WasDetached())) {                                \
      THROW_NEW_ERROR_RETURN_FAILURE(                                     \
          isolate, NewTypeError(MessageTemplate::kDetachedOperation,      \
                                isolate->factory()->NewStringFromAsciiChecked( \
                                    method_name)));                       \
    }                                                                     \
  } while (false)
352
// This is https://tc39.github.io/ecma262/#sec-getmodifysetvalueinbuffer
// but also includes the ToInteger/ToBigInt conversion that's part of
// https://tc39.github.io/ecma262/#sec-atomicreadmodifywrite
//
// Shared driver for the Atomics read-modify-write runtime fallbacks
// (exchange/add/sub/and/or/xor): converts the value operand, re-validates
// the array, then applies Op<ctype>::Do atomically to the addressed element
// and returns the old element value as a JS object.
template <template <typename> class Op>
Object GetModifySetValueInBuffer(RuntimeArguments args, Isolate* isolate,
                                 const char* method_name) {
  HandleScope scope(isolate);
  DCHECK_EQ(3, args.length());
  CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, sta, 0);
  CONVERT_SIZE_ARG_CHECKED(index, 1);
  CONVERT_ARG_HANDLE_CHECKED(Object, value_obj, 2);

  // Raw address of element 0 in the backing store; only dereferenced after
  // the detach checks below.
  uint8_t* source = static_cast<uint8_t*>(sta->GetBuffer()->backing_store()) +
                    sta->byte_offset();

  // BigInt64/BigUint64 arrays take the BigInt conversion path.
  if (sta->type() >= kExternalBigInt64Array) {
    Handle<BigInt> bigint;
    ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, bigint,
                                       BigInt::FromObject(isolate, value_obj));

    // The conversion above can run arbitrary JS (e.g. valueOf) that may
    // detach the buffer, so the detach check must come after it.
    THROW_ERROR_RETURN_FAILURE_ON_DETACHED(isolate, sta, method_name);

    CHECK_LT(index, sta->length());
    if (sta->type() == kExternalBigInt64Array) {
      return Op<int64_t>::Do(isolate, source, index, bigint);
    }
    DCHECK(sta->type() == kExternalBigUint64Array);
    return Op<uint64_t>::Do(isolate, source, index, bigint);
  }

  Handle<Object> value;
  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, value,
                                     Object::ToInteger(isolate, value_obj));

  // Same ordering as above: ToInteger can run user JS and detach the buffer.
  THROW_ERROR_RETURN_FAILURE_ON_DETACHED(isolate, sta, method_name);

  CHECK_LT(index, sta->length());

  switch (sta->type()) {
#define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype) \
  case kExternal##Type##Array:                        \
    return Op<ctype>::Do(isolate, source, index, value);

    INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE

    default:
      // Float/clamped element kinds never reach this runtime function.
      break;
  }

  UNREACHABLE();
}
405
// Runtime fallback for a 64-bit Atomics.load on a BigInt64/BigUint64 array.
// No argument conversion here can run user JS, so detachment and element
// type are only re-asserted with DCHECKs (presumably validated by the
// calling builtin -- confirm against the CSA caller).
RUNTIME_FUNCTION(Runtime_AtomicsLoad64) {
  HandleScope scope(isolate);
  DCHECK_EQ(2, args.length());
  CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, sta, 0);
  CONVERT_SIZE_ARG_CHECKED(index, 1);

  // Raw address of element 0 in the backing store.
  uint8_t* source = static_cast<uint8_t*>(sta->GetBuffer()->backing_store()) +
                    sta->byte_offset();

  DCHECK(sta->type() == kExternalBigInt64Array ||
         sta->type() == kExternalBigUint64Array);
  DCHECK(!sta->WasDetached());
  CHECK_LT(index, sta->length());
  if (sta->type() == kExternalBigInt64Array) {
    return Load<int64_t>::Do(isolate, source, index);
  }
  DCHECK(sta->type() == kExternalBigUint64Array);
  return Load<uint64_t>::Do(isolate, source, index);
}
425
// Runtime fallback for a 64-bit Atomics.store on a BigInt64/BigUint64 array.
// Returns the stored value (as a BigInt), matching Atomics.store semantics.
RUNTIME_FUNCTION(Runtime_AtomicsStore64) {
  HandleScope scope(isolate);
  DCHECK_EQ(3, args.length());
  CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, sta, 0);
  CONVERT_SIZE_ARG_CHECKED(index, 1);
  CONVERT_ARG_HANDLE_CHECKED(Object, value_obj, 2);

  // Raw address of element 0 in the backing store; only dereferenced after
  // the detach check below.
  uint8_t* source = static_cast<uint8_t*>(sta->GetBuffer()->backing_store()) +
                    sta->byte_offset();

  Handle<BigInt> bigint;
  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, bigint,
                                     BigInt::FromObject(isolate, value_obj));

  // BigInt::FromObject can run user JS that detaches the buffer, so the
  // detach check follows the conversion.
  THROW_ERROR_RETURN_FAILURE_ON_DETACHED(isolate, sta, "Atomics.store");

  DCHECK(sta->type() == kExternalBigInt64Array ||
         sta->type() == kExternalBigUint64Array);
  CHECK_LT(index, sta->length());
  if (sta->type() == kExternalBigInt64Array) {
    Store<int64_t>::Do(isolate, source, index, bigint);
    return *bigint;
  }
  DCHECK(sta->type() == kExternalBigUint64Array);
  Store<uint64_t>::Do(isolate, source, index, bigint);
  return *bigint;
}
453
// ES #sec-atomics.exchange
// Atomics.exchange( typedArray, index, value )
RUNTIME_FUNCTION(Runtime_AtomicsExchange) {
  return GetModifySetValueInBuffer<Exchange>(args, isolate, "Atomics.exchange");
}
457
// ES #sec-atomics.compareexchange
// Atomics.compareExchange( typedArray, index, expectedValue, replacementValue )
// Returns the element value observed before the operation.
RUNTIME_FUNCTION(Runtime_AtomicsCompareExchange) {
  HandleScope scope(isolate);
  DCHECK_EQ(4, args.length());
  CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, sta, 0);
  CONVERT_SIZE_ARG_CHECKED(index, 1);
  CONVERT_ARG_HANDLE_CHECKED(Object, old_value_obj, 2);
  CONVERT_ARG_HANDLE_CHECKED(Object, new_value_obj, 3);
  // NOTE(review): index is range-checked here, before the conversions, and
  // again (BigInt path only) after them; the plain-integer path relies on
  // this first check.
  CHECK_LT(index, sta->length());

  // Raw address of element 0 in the backing store; only dereferenced after
  // the detach checks below.
  uint8_t* source = static_cast<uint8_t*>(sta->GetBuffer()->backing_store()) +
                    sta->byte_offset();

  // BigInt64/BigUint64 arrays take the BigInt conversion path.
  if (sta->type() >= kExternalBigInt64Array) {
    Handle<BigInt> old_bigint;
    Handle<BigInt> new_bigint;
    ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
        isolate, old_bigint, BigInt::FromObject(isolate, old_value_obj));
    ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
        isolate, new_bigint, BigInt::FromObject(isolate, new_value_obj));

    // The conversions above can run user JS that detaches the buffer, so
    // the detach check must come after them.
    THROW_ERROR_RETURN_FAILURE_ON_DETACHED(isolate, sta,
                                           "Atomics.compareExchange");

    CHECK_LT(index, sta->length());
    if (sta->type() == kExternalBigInt64Array) {
      return DoCompareExchange<int64_t>(isolate, source, index, old_bigint,
                                        new_bigint);
    }
    DCHECK(sta->type() == kExternalBigUint64Array);
    return DoCompareExchange<uint64_t>(isolate, source, index, old_bigint,
                                       new_bigint);
  }

  Handle<Object> old_value;
  Handle<Object> new_value;
  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, old_value,
                                     Object::ToInteger(isolate, old_value_obj));
  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, new_value,
                                     Object::ToInteger(isolate, new_value_obj));

  // Same ordering: ToInteger can run user JS and detach the buffer.
  THROW_ERROR_RETURN_FAILURE_ON_DETACHED(isolate, sta,
                                         "Atomics.compareExchange");

  switch (sta->type()) {
#define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype)                  \
  case kExternal##Type##Array:                                         \
    return DoCompareExchange<ctype>(isolate, source, index, old_value, \
                                    new_value);

    INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE

    default:
      // Float/clamped element kinds never reach this runtime function.
      break;
  }

  UNREACHABLE();
}
516
// The five read-modify-write runtime fallbacks below differ only in the Op
// functor handed to the shared driver GetModifySetValueInBuffer.

// ES #sec-atomics.add
// Atomics.add( typedArray, index, value )
RUNTIME_FUNCTION(Runtime_AtomicsAdd) {
  return GetModifySetValueInBuffer<Add>(args, isolate, "Atomics.add");
}

// ES #sec-atomics.sub
// Atomics.sub( typedArray, index, value )
RUNTIME_FUNCTION(Runtime_AtomicsSub) {
  return GetModifySetValueInBuffer<Sub>(args, isolate, "Atomics.sub");
}

// ES #sec-atomics.and
// Atomics.and( typedArray, index, value )
RUNTIME_FUNCTION(Runtime_AtomicsAnd) {
  return GetModifySetValueInBuffer<And>(args, isolate, "Atomics.and");
}

// ES #sec-atomics.or
// Atomics.or( typedArray, index, value )
RUNTIME_FUNCTION(Runtime_AtomicsOr) {
  return GetModifySetValueInBuffer<Or>(args, isolate, "Atomics.or");
}

// ES #sec-atomics.xor
// Atomics.xor( typedArray, index, value )
RUNTIME_FUNCTION(Runtime_AtomicsXor) {
  return GetModifySetValueInBuffer<Xor>(args, isolate, "Atomics.xor");
}
546
547 #undef INTEGER_TYPED_ARRAYS
548
549 #else
550
// On architectures not listed in the #if above, Atomics are implemented
// entirely as CSA builtins (see builtins-sharedarraybuffer-gen.h), so these
// runtime entries exist only to satisfy the runtime-function table and must
// never be called.
RUNTIME_FUNCTION(Runtime_AtomicsLoad64) { UNREACHABLE(); }

RUNTIME_FUNCTION(Runtime_AtomicsStore64) { UNREACHABLE(); }

RUNTIME_FUNCTION(Runtime_AtomicsExchange) { UNREACHABLE(); }

RUNTIME_FUNCTION(Runtime_AtomicsCompareExchange) { UNREACHABLE(); }

RUNTIME_FUNCTION(Runtime_AtomicsAdd) { UNREACHABLE(); }

RUNTIME_FUNCTION(Runtime_AtomicsSub) { UNREACHABLE(); }

RUNTIME_FUNCTION(Runtime_AtomicsAnd) { UNREACHABLE(); }

RUNTIME_FUNCTION(Runtime_AtomicsOr) { UNREACHABLE(); }

RUNTIME_FUNCTION(Runtime_AtomicsXor) { UNREACHABLE(); }
568
569 #endif // V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_PPC64
570 // || V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_S390 || V8_TARGET_ARCH_S390X
571
572 } // namespace internal
573 } // namespace v8
574