// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/base/macros.h"
#include "src/base/platform/mutex.h"
#include "src/execution/arguments-inl.h"
#include "src/heap/factory.h"
#include "src/logging/counters.h"
#include "src/numbers/conversions-inl.h"
#include "src/objects/js-array-buffer-inl.h"
#include "src/objects/js-struct-inl.h"
#include "src/runtime/runtime-utils.h"

// Implement Atomic accesses to ArrayBuffers and SharedArrayBuffers.
// https://tc39.es/ecma262/#sec-atomics

namespace v8 {
namespace internal {

// Other platforms have CSA support, see builtins-sharedarraybuffer-gen.h.
#if V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_PPC64 || \
    V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_S390 || V8_TARGET_ARCH_S390X ||    \
    V8_TARGET_ARCH_RISCV64 || V8_TARGET_ARCH_LOONG64

namespace {

#if defined(V8_OS_STARBOARD)

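// No atomics implementation is provided for Starboard; every helper below
// aborts via UNIMPLEMENTED() if it is ever reached.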
template <typename T>
inline T ExchangeSeqCst(T* p, T value) {
  UNIMPLEMENTED();
}

template <typename T>
inline T CompareExchangeSeqCst(T* p, T oldval, T newval) {
  UNIMPLEMENTED();
}

template <typename T>
inline T AddSeqCst(T* p, T value) {
  UNIMPLEMENTED();
}

template <typename T>
inline T SubSeqCst(T* p, T value) {
  UNIMPLEMENTED();
}

template <typename T>
inline T AndSeqCst(T* p, T value) {
  UNIMPLEMENTED();
}

template <typename T>
inline T OrSeqCst(T* p, T value) {
  UNIMPLEMENTED();
}

template <typename T>
inline T XorSeqCst(T* p, T value) {
  UNIMPLEMENTED();
}

#elif V8_CC_GNU

// GCC/Clang helpfully warn us that using 64-bit atomics on 32-bit platforms
// can be slow. Good to know, but we don't have a choice.
#ifdef V8_TARGET_ARCH_32_BIT
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wpragmas"
#pragma GCC diagnostic ignored "-Watomic-alignment"
#endif  // V8_TARGET_ARCH_32_BIT

template <typename T>
inline T LoadSeqCst(T* p) {
  return __atomic_load_n(p, __ATOMIC_SEQ_CST);
}

template <typename T>
inline void StoreSeqCst(T* p, T value) {
  __atomic_store_n(p, value, __ATOMIC_SEQ_CST);
}

template <typename T>
inline T ExchangeSeqCst(T* p, T value) {
  return __atomic_exchange_n(p, value, __ATOMIC_SEQ_CST);
}

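// On failure, __atomic_compare_exchange_n writes the value observed in
// memory back into |oldval|, so returning |oldval| yields the previous
// memory contents whether or not the exchange succeeded.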
template <typename T>
inline T CompareExchangeSeqCst(T* p, T oldval, T newval) {
  (void)__atomic_compare_exchange_n(p, &oldval, newval, 0, __ATOMIC_SEQ_CST,
                                    __ATOMIC_SEQ_CST);
  return oldval;
}

template <typename T>
inline T AddSeqCst(T* p, T value) {
  return __atomic_fetch_add(p, value, __ATOMIC_SEQ_CST);
}

template <typename T>
inline T SubSeqCst(T* p, T value) {
  return __atomic_fetch_sub(p, value, __ATOMIC_SEQ_CST);
}

template <typename T>
inline T AndSeqCst(T* p, T value) {
  return __atomic_fetch_and(p, value, __ATOMIC_SEQ_CST);
}

template <typename T>
inline T OrSeqCst(T* p, T value) {
  return __atomic_fetch_or(p, value, __ATOMIC_SEQ_CST);
}

template <typename T>
inline T XorSeqCst(T* p, T value) {
  return __atomic_fetch_xor(p, value, __ATOMIC_SEQ_CST);
}

#ifdef V8_TARGET_ARCH_32_BIT
#pragma GCC diagnostic pop
#endif  // V8_TARGET_ARCH_32_BIT

#elif V8_CC_MSVC

#define InterlockedExchange32 _InterlockedExchange
#define InterlockedCompareExchange32 _InterlockedCompareExchange
#define InterlockedCompareExchange8 _InterlockedCompareExchange8
#define InterlockedExchangeAdd32 _InterlockedExchangeAdd
#define InterlockedExchangeAdd16 _InterlockedExchangeAdd16
#define InterlockedExchangeAdd8 _InterlockedExchangeAdd8
#define InterlockedAnd32 _InterlockedAnd
#define InterlockedOr64 _InterlockedOr64
#define InterlockedOr32 _InterlockedOr
#define InterlockedXor32 _InterlockedXor

#if defined(V8_HOST_ARCH_ARM64)
#define InterlockedExchange8 _InterlockedExchange8
#endif

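// ATOMIC_OPS pastes |suffix| onto the aliases above to pick the right
// Interlocked* intrinsic for the element width, and bit_casts between |type|
// and the intrinsic's |vctype|; e.g. ATOMIC_OPS(int8_t, 8, char) routes
// ExchangeSeqCst through _InterlockedExchange8 on a char*.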
#define ATOMIC_OPS(type, suffix, vctype)                                    \
  inline type ExchangeSeqCst(type* p, type value) {                         \
    return InterlockedExchange##suffix(reinterpret_cast<vctype*>(p),        \
                                       bit_cast<vctype>(value));            \
  }                                                                         \
  inline type CompareExchangeSeqCst(type* p, type oldval, type newval) {    \
    return InterlockedCompareExchange##suffix(reinterpret_cast<vctype*>(p), \
                                              bit_cast<vctype>(newval),     \
                                              bit_cast<vctype>(oldval));    \
  }                                                                         \
  inline type AddSeqCst(type* p, type value) {                              \
    return InterlockedExchangeAdd##suffix(reinterpret_cast<vctype*>(p),     \
                                          bit_cast<vctype>(value));         \
  }                                                                         \
  inline type SubSeqCst(type* p, type value) {                              \
    return InterlockedExchangeAdd##suffix(reinterpret_cast<vctype*>(p),     \
                                          -bit_cast<vctype>(value));        \
  }                                                                         \
  inline type AndSeqCst(type* p, type value) {                              \
    return InterlockedAnd##suffix(reinterpret_cast<vctype*>(p),             \
                                  bit_cast<vctype>(value));                 \
  }                                                                         \
  inline type OrSeqCst(type* p, type value) {                               \
    return InterlockedOr##suffix(reinterpret_cast<vctype*>(p),              \
                                 bit_cast<vctype>(value));                  \
  }                                                                         \
  inline type XorSeqCst(type* p, type value) {                              \
    return InterlockedXor##suffix(reinterpret_cast<vctype*>(p),             \
                                  bit_cast<vctype>(value));                 \
  }

ATOMIC_OPS(int8_t, 8, char)
ATOMIC_OPS(uint8_t, 8, char)
ATOMIC_OPS(int16_t, 16, short)  /* NOLINT(runtime/int) */
ATOMIC_OPS(uint16_t, 16, short) /* NOLINT(runtime/int) */
ATOMIC_OPS(int32_t, 32, long)   /* NOLINT(runtime/int) */
ATOMIC_OPS(uint32_t, 32, long)  /* NOLINT(runtime/int) */
ATOMIC_OPS(int64_t, 64, __int64)
ATOMIC_OPS(uint64_t, 64, __int64)

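// LoadSeqCst/StoreSeqCst are referenced only by the 64-bit Load/Store paths
// below; presumably no MSVC configuration of the architectures taking this
// fallback reaches them, so they are left UNREACHABLE rather than implemented.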
template <typename T>
inline T LoadSeqCst(T* p) {
  UNREACHABLE();
}

template <typename T>
inline void StoreSeqCst(T* p, T value) {
  UNREACHABLE();
}

#undef ATOMIC_OPS

#undef InterlockedExchange32
#undef InterlockedCompareExchange32
#undef InterlockedCompareExchange8
#undef InterlockedExchangeAdd32
#undef InterlockedExchangeAdd16
#undef InterlockedExchangeAdd8
#undef InterlockedAnd32
#undef InterlockedOr64
#undef InterlockedOr32
#undef InterlockedXor32

#if defined(V8_HOST_ARCH_ARM64)
#undef InterlockedExchange8
#endif

#else

#error Unsupported platform!

#endif

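// Conversions between JS values and the C types operated on above: integer
// TypedArray elements travel as Numbers (boxed as Smi where they fit),
// 64-bit elements as BigInts.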
template <typename T>
T FromObject(Handle<Object> number);

template <>
inline uint8_t FromObject<uint8_t>(Handle<Object> number) {
  return NumberToUint32(*number);
}

template <>
inline int8_t FromObject<int8_t>(Handle<Object> number) {
  return NumberToInt32(*number);
}

template <>
inline uint16_t FromObject<uint16_t>(Handle<Object> number) {
  return NumberToUint32(*number);
}

template <>
inline int16_t FromObject<int16_t>(Handle<Object> number) {
  return NumberToInt32(*number);
}

template <>
inline uint32_t FromObject<uint32_t>(Handle<Object> number) {
  return NumberToUint32(*number);
}

template <>
inline int32_t FromObject<int32_t>(Handle<Object> number) {
  return NumberToInt32(*number);
}

template <>
inline uint64_t FromObject<uint64_t>(Handle<Object> bigint) {
  return Handle<BigInt>::cast(bigint)->AsUint64();
}

template <>
inline int64_t FromObject<int64_t>(Handle<Object> bigint) {
  return Handle<BigInt>::cast(bigint)->AsInt64();
}

inline Object ToObject(Isolate* isolate, int8_t t) { return Smi::FromInt(t); }

inline Object ToObject(Isolate* isolate, uint8_t t) { return Smi::FromInt(t); }

inline Object ToObject(Isolate* isolate, int16_t t) { return Smi::FromInt(t); }

inline Object ToObject(Isolate* isolate, uint16_t t) {
  return Smi::FromInt(t);
}

inline Object ToObject(Isolate* isolate, int32_t t) {
  return *isolate->factory()->NewNumber(t);
}

inline Object ToObject(Isolate* isolate, uint32_t t) {
  return *isolate->factory()->NewNumber(t);
}

inline Object ToObject(Isolate* isolate, int64_t t) {
  return *BigInt::FromInt64(isolate, t);
}

inline Object ToObject(Isolate* isolate, uint64_t t) {
  return *BigInt::FromUint64(isolate, t);
}

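// Each Op below wraps one seq-cst primitive: Do() unboxes the JS value,
// applies the primitive at buffer[index], and boxes the previous value
// (or, for Load, the value read) back into a JS object.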
template <typename T>
struct Load {
  static inline Object Do(Isolate* isolate, void* buffer, size_t index) {
    T result = LoadSeqCst(static_cast<T*>(buffer) + index);
    return ToObject(isolate, result);
  }
};

template <typename T>
struct Store {
  static inline void Do(Isolate* isolate, void* buffer, size_t index,
                        Handle<Object> obj) {
    T value = FromObject<T>(obj);
    StoreSeqCst(static_cast<T*>(buffer) + index, value);
  }
};

template <typename T>
struct Exchange {
  static inline Object Do(Isolate* isolate, void* buffer, size_t index,
                          Handle<Object> obj) {
    T value = FromObject<T>(obj);
    T result = ExchangeSeqCst(static_cast<T*>(buffer) + index, value);
    return ToObject(isolate, result);
  }
};

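// CompareExchange takes two value operands, so it does not fit the
// single-operand Op template above and gets a standalone helper.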
template <typename T>
inline Object DoCompareExchange(Isolate* isolate, void* buffer, size_t index,
                                Handle<Object> oldobj, Handle<Object> newobj) {
  T oldval = FromObject<T>(oldobj);
  T newval = FromObject<T>(newobj);
  T result =
      CompareExchangeSeqCst(static_cast<T*>(buffer) + index, oldval, newval);
  return ToObject(isolate, result);
}

template <typename T>
struct Add {
  static inline Object Do(Isolate* isolate, void* buffer, size_t index,
                          Handle<Object> obj) {
    T value = FromObject<T>(obj);
    T result = AddSeqCst(static_cast<T*>(buffer) + index, value);
    return ToObject(isolate, result);
  }
};

template <typename T>
struct Sub {
  static inline Object Do(Isolate* isolate, void* buffer, size_t index,
                          Handle<Object> obj) {
    T value = FromObject<T>(obj);
    T result = SubSeqCst(static_cast<T*>(buffer) + index, value);
    return ToObject(isolate, result);
  }
};

template <typename T>
struct And {
  static inline Object Do(Isolate* isolate, void* buffer, size_t index,
                          Handle<Object> obj) {
    T value = FromObject<T>(obj);
    T result = AndSeqCst(static_cast<T*>(buffer) + index, value);
    return ToObject(isolate, result);
  }
};

template <typename T>
struct Or {
  static inline Object Do(Isolate* isolate, void* buffer, size_t index,
                          Handle<Object> obj) {
    T value = FromObject<T>(obj);
    T result = OrSeqCst(static_cast<T*>(buffer) + index, value);
    return ToObject(isolate, result);
  }
};

template <typename T>
struct Xor {
  static inline Object Do(Isolate* isolate, void* buffer, size_t index,
                          Handle<Object> obj) {
    T value = FromObject<T>(obj);
    T result = XorSeqCst(static_cast<T*>(buffer) + index, value);
    return ToObject(isolate, result);
  }
};

}  // anonymous namespace

// Duplicated from objects.h
// V has parameters (Type, type, TYPE, C type)
#define INTEGER_TYPED_ARRAYS(V)       \
  V(Uint8, uint8, UINT8, uint8_t)     \
  V(Int8, int8, INT8, int8_t)         \
  V(Uint16, uint16, UINT16, uint16_t) \
  V(Int16, int16, INT16, int16_t)     \
  V(Uint32, uint32, UINT32, uint32_t) \
  V(Int32, int32, INT32, int32_t)

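// Conversions such as Object::ToInteger and BigInt::FromObject can run
// arbitrary JS (e.g. via valueOf) and detach a non-shared ArrayBuffer, so
// callers re-check for detachment after every conversion.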
#define THROW_ERROR_RETURN_FAILURE_ON_DETACHED(isolate, sta, method_name)     \
  do {                                                                        \
    if (V8_UNLIKELY(sta->WasDetached())) {                                    \
      THROW_NEW_ERROR_RETURN_FAILURE(                                         \
          isolate, NewTypeError(MessageTemplate::kDetachedOperation,          \
                                isolate->factory()->NewStringFromAsciiChecked(\
                                    method_name)));                           \
    }                                                                         \
  } while (false)

// This is https://tc39.github.io/ecma262/#sec-getmodifysetvalueinbuffer
// but also includes the ToInteger/ToBigInt conversion that's part of
// https://tc39.github.io/ecma262/#sec-atomicreadmodifywrite
template <template <typename> class Op>
Object GetModifySetValueInBuffer(RuntimeArguments args, Isolate* isolate,
                                 const char* method_name) {
  HandleScope scope(isolate);
  DCHECK_EQ(3, args.length());
  Handle<JSTypedArray> sta = args.at<JSTypedArray>(0);
  size_t index = NumberToSize(args[1]);
  Handle<Object> value_obj = args.at(2);

  uint8_t* source = static_cast<uint8_t*>(sta->GetBuffer()->backing_store()) +
                    sta->byte_offset();

  if (sta->type() >= kExternalBigInt64Array) {
    Handle<BigInt> bigint;
    ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, bigint,
                                       BigInt::FromObject(isolate, value_obj));

    THROW_ERROR_RETURN_FAILURE_ON_DETACHED(isolate, sta, method_name);

    CHECK_LT(index, sta->length());
    if (sta->type() == kExternalBigInt64Array) {
      return Op<int64_t>::Do(isolate, source, index, bigint);
    }
    DCHECK(sta->type() == kExternalBigUint64Array);
    return Op<uint64_t>::Do(isolate, source, index, bigint);
  }

  Handle<Object> value;
  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, value,
                                     Object::ToInteger(isolate, value_obj));

  THROW_ERROR_RETURN_FAILURE_ON_DETACHED(isolate, sta, method_name);

  CHECK_LT(index, sta->length());

  switch (sta->type()) {
#define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype) \
  case kExternal##Type##Array:                        \
    return Op<ctype>::Do(isolate, source, index, value);

    INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE

    default:
      break;
  }

  UNREACHABLE();
}

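// Nothing below runs user JS between the caller's checks and the raw memory
// access, so detachment and the element type are only DCHECKed; the bounds
// check remains a CHECK.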
RUNTIME_FUNCTION(Runtime_AtomicsLoad64) {
  HandleScope scope(isolate);
  DCHECK_EQ(2, args.length());
  Handle<JSTypedArray> sta = args.at<JSTypedArray>(0);
  size_t index = NumberToSize(args[1]);

  uint8_t* source = static_cast<uint8_t*>(sta->GetBuffer()->backing_store()) +
                    sta->byte_offset();

  DCHECK(sta->type() == kExternalBigInt64Array ||
         sta->type() == kExternalBigUint64Array);
  DCHECK(!sta->WasDetached());
  CHECK_LT(index, sta->length());
  if (sta->type() == kExternalBigInt64Array) {
    return Load<int64_t>::Do(isolate, source, index);
  }
  DCHECK(sta->type() == kExternalBigUint64Array);
  return Load<uint64_t>::Do(isolate, source, index);
}

RUNTIME_FUNCTION(Runtime_AtomicsStore64) {
  HandleScope scope(isolate);
  DCHECK_EQ(3, args.length());
  Handle<JSTypedArray> sta = args.at<JSTypedArray>(0);
  size_t index = NumberToSize(args[1]);
  Handle<Object> value_obj = args.at(2);

  uint8_t* source = static_cast<uint8_t*>(sta->GetBuffer()->backing_store()) +
                    sta->byte_offset();

  Handle<BigInt> bigint;
  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, bigint,
                                     BigInt::FromObject(isolate, value_obj));

  THROW_ERROR_RETURN_FAILURE_ON_DETACHED(isolate, sta, "Atomics.store");

  DCHECK(sta->type() == kExternalBigInt64Array ||
         sta->type() == kExternalBigUint64Array);
  CHECK_LT(index, sta->length());
  if (sta->type() == kExternalBigInt64Array) {
    Store<int64_t>::Do(isolate, source, index, bigint);
    return *bigint;
  }
  DCHECK(sta->type() == kExternalBigUint64Array);
  Store<uint64_t>::Do(isolate, source, index, bigint);
  return *bigint;
}

RUNTIME_FUNCTION(Runtime_AtomicsExchange) {
  return GetModifySetValueInBuffer<Exchange>(args, isolate, "Atomics.exchange");
}

RUNTIME_FUNCTION(Runtime_AtomicsCompareExchange) {
  HandleScope scope(isolate);
  DCHECK_EQ(4, args.length());
  Handle<JSTypedArray> sta = args.at<JSTypedArray>(0);
  size_t index = NumberToSize(args[1]);
  Handle<Object> old_value_obj = args.at(2);
  Handle<Object> new_value_obj = args.at(3);
  CHECK_LT(index, sta->length());

  uint8_t* source = static_cast<uint8_t*>(sta->GetBuffer()->backing_store()) +
                    sta->byte_offset();

  if (sta->type() >= kExternalBigInt64Array) {
    Handle<BigInt> old_bigint;
    Handle<BigInt> new_bigint;
    ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
        isolate, old_bigint, BigInt::FromObject(isolate, old_value_obj));
    ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
        isolate, new_bigint, BigInt::FromObject(isolate, new_value_obj));

    THROW_ERROR_RETURN_FAILURE_ON_DETACHED(isolate, sta,
                                           "Atomics.compareExchange");

    CHECK_LT(index, sta->length());
    if (sta->type() == kExternalBigInt64Array) {
      return DoCompareExchange<int64_t>(isolate, source, index, old_bigint,
                                        new_bigint);
    }
    DCHECK(sta->type() == kExternalBigUint64Array);
    return DoCompareExchange<uint64_t>(isolate, source, index, old_bigint,
                                       new_bigint);
  }

  Handle<Object> old_value;
  Handle<Object> new_value;
  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, old_value,
                                     Object::ToInteger(isolate, old_value_obj));
  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, new_value,
                                     Object::ToInteger(isolate, new_value_obj));

  THROW_ERROR_RETURN_FAILURE_ON_DETACHED(isolate, sta,
                                         "Atomics.compareExchange");

  switch (sta->type()) {
#define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype)                  \
  case kExternal##Type##Array:                                         \
    return DoCompareExchange<ctype>(isolate, source, index, old_value, \
                                    new_value);

    INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE

    default:
      break;
  }

  UNREACHABLE();
}

// ES #sec-atomics.add
// Atomics.add( typedArray, index, value )
RUNTIME_FUNCTION(Runtime_AtomicsAdd) {
  return GetModifySetValueInBuffer<Add>(args, isolate, "Atomics.add");
}

// ES #sec-atomics.sub
// Atomics.sub( typedArray, index, value )
RUNTIME_FUNCTION(Runtime_AtomicsSub) {
  return GetModifySetValueInBuffer<Sub>(args, isolate, "Atomics.sub");
}

// ES #sec-atomics.and
// Atomics.and( typedArray, index, value )
RUNTIME_FUNCTION(Runtime_AtomicsAnd) {
  return GetModifySetValueInBuffer<And>(args, isolate, "Atomics.and");
}

// ES #sec-atomics.or
// Atomics.or( typedArray, index, value )
RUNTIME_FUNCTION(Runtime_AtomicsOr) {
  return GetModifySetValueInBuffer<Or>(args, isolate, "Atomics.or");
}

// ES #sec-atomics.xor
// Atomics.xor( typedArray, index, value )
RUNTIME_FUNCTION(Runtime_AtomicsXor) {
  return GetModifySetValueInBuffer<Xor>(args, isolate, "Atomics.xor");
}

#undef INTEGER_TYPED_ARRAYS

#else

RUNTIME_FUNCTION(Runtime_AtomicsLoad64) { UNREACHABLE(); }

RUNTIME_FUNCTION(Runtime_AtomicsStore64) { UNREACHABLE(); }

RUNTIME_FUNCTION(Runtime_AtomicsExchange) { UNREACHABLE(); }

RUNTIME_FUNCTION(Runtime_AtomicsCompareExchange) { UNREACHABLE(); }

RUNTIME_FUNCTION(Runtime_AtomicsAdd) { UNREACHABLE(); }

RUNTIME_FUNCTION(Runtime_AtomicsSub) { UNREACHABLE(); }

RUNTIME_FUNCTION(Runtime_AtomicsAnd) { UNREACHABLE(); }

RUNTIME_FUNCTION(Runtime_AtomicsOr) { UNREACHABLE(); }

RUNTIME_FUNCTION(Runtime_AtomicsXor) { UNREACHABLE(); }

#endif  // V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_PPC64
        // || V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_S390 || V8_TARGET_ARCH_S390X
        // || V8_TARGET_ARCH_RISCV64 || V8_TARGET_ARCH_LOONG64

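// The shared-struct accessors below are compiled unconditionally: they go
// through LookupIterator's seq-cst data-value accessors rather than the raw
// atomics above.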
RUNTIME_FUNCTION(Runtime_AtomicsLoadSharedStructField) {
  HandleScope scope(isolate);
  DCHECK_EQ(2, args.length());
  Handle<JSSharedStruct> shared_struct = args.at<JSSharedStruct>(0);
  Handle<Name> field_name;
  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, field_name,
                                     Object::ToName(isolate, args.at(1)));
  // Shared structs are prototypeless.
  LookupIterator it(isolate, shared_struct, field_name, LookupIterator::OWN);
  if (it.IsFound()) return *it.GetDataValue(kSeqCstAccess);
  return ReadOnlyRoots(isolate).undefined_value();
}

RUNTIME_FUNCTION(Runtime_AtomicsStoreSharedStructField) {
  HandleScope scope(isolate);
  DCHECK_EQ(3, args.length());
  Handle<JSSharedStruct> shared_struct = args.at<JSSharedStruct>(0);
  Handle<Name> field_name;
  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, field_name,
                                     Object::ToName(isolate, args.at(1)));
  Handle<Object> shared_value;
  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
      isolate, shared_value, Object::Share(isolate, args.at(2), kThrowOnError));
  // Shared structs are prototypeless.
  LookupIterator it(isolate, shared_struct, field_name, LookupIterator::OWN);
  if (it.IsFound()) {
    it.WriteDataValue(shared_value, kSeqCstAccess);
    return *shared_value;
  }
  // Shared structs are non-extensible. Instead of duplicating logic, call
  // Object::AddDataProperty to handle the error case.
  Maybe<bool> result =
      Object::AddDataProperty(&it, shared_value, NONE, Nothing<ShouldThrow>(),
                              StoreOrigin::kMaybeKeyed);
  DCHECK(result.IsNothing());
  USE(result);
  return ReadOnlyRoots(isolate).exception();
}

RUNTIME_FUNCTION(Runtime_AtomicsExchangeSharedStructField) {
  HandleScope scope(isolate);
  DCHECK_EQ(3, args.length());
  Handle<JSSharedStruct> shared_struct = args.at<JSSharedStruct>(0);
  Handle<Name> field_name;
  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, field_name,
                                     Object::ToName(isolate, args.at(1)));
  Handle<Object> shared_value;
  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
      isolate, shared_value, Object::Share(isolate, args.at(2), kThrowOnError));
  // Shared structs are prototypeless.
  LookupIterator it(isolate, shared_struct, field_name, LookupIterator::OWN);
  if (it.IsFound()) return *it.SwapDataValue(shared_value, kSeqCstAccess);
  // Shared structs are non-extensible. Instead of duplicating logic, call
  // Object::AddDataProperty to handle the error case.
  Maybe<bool> result =
      Object::AddDataProperty(&it, shared_value, NONE, Nothing<ShouldThrow>(),
                              StoreOrigin::kMaybeKeyed);
  DCHECK(result.IsNothing());
  USE(result);
  return ReadOnlyRoots(isolate).exception();
}

}  // namespace internal
}  // namespace v8