// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/runtime/runtime-utils.h"

#include "src/arguments.h"
#include "src/base/macros.h"
#include "src/base/platform/mutex.h"
#include "src/conversions-inl.h"
#include "src/factory.h"

// Implement Atomic accesses to SharedArrayBuffers as defined in the
// SharedArrayBuffer draft spec, found here
// https://github.com/tc39/ecmascript_sharedmem
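//
// A minimal JS usage sketch (assuming the Atomics builtins forward to the
// runtime functions defined in this file):
//
//   var sab = new SharedArrayBuffer(4);
//   var i32 = new Int32Array(sab);
//   Atomics.add(i32, 0, 1);  // eventually reaches Runtime_AtomicsAdd below.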

namespace v8 {
namespace internal {

namespace {

inline bool AtomicIsLockFree(uint32_t size) {
  return size == 1 || size == 2 || size == 4;
}

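// Compiler-specific sequentially consistent atomic primitives. The GCC/Clang
// branch uses the __atomic_* builtins; the MSVC branch maps the same helpers
// onto the Interlocked* intrinsics further below.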
#if V8_CC_GNU

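// Note: __atomic_compare_exchange_n writes the value actually observed back
// into |oldval| when the exchange fails, so returning |oldval| yields the
// previous memory contents on both success and failure, matching the
// compare-exchange semantics of the MSVC branch.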
template <typename T>
inline T CompareExchangeSeqCst(T* p, T oldval, T newval) {
  (void)__atomic_compare_exchange_n(p, &oldval, newval, 0, __ATOMIC_SEQ_CST,
                                    __ATOMIC_SEQ_CST);
  return oldval;
}

template <typename T>
inline T AddSeqCst(T* p, T value) {
  return __atomic_fetch_add(p, value, __ATOMIC_SEQ_CST);
}

template <typename T>
inline T SubSeqCst(T* p, T value) {
  return __atomic_fetch_sub(p, value, __ATOMIC_SEQ_CST);
}

template <typename T>
inline T AndSeqCst(T* p, T value) {
  return __atomic_fetch_and(p, value, __ATOMIC_SEQ_CST);
}

template <typename T>
inline T OrSeqCst(T* p, T value) {
  return __atomic_fetch_or(p, value, __ATOMIC_SEQ_CST);
}

template <typename T>
inline T XorSeqCst(T* p, T value) {
  return __atomic_fetch_xor(p, value, __ATOMIC_SEQ_CST);
}

template <typename T>
inline T ExchangeSeqCst(T* p, T value) {
  return __atomic_exchange_n(p, value, __ATOMIC_SEQ_CST);
}

#elif V8_CC_MSVC

#define InterlockedCompareExchange32 _InterlockedCompareExchange
#define InterlockedExchange32 _InterlockedExchange
#define InterlockedExchangeAdd32 _InterlockedExchangeAdd
#define InterlockedAnd32 _InterlockedAnd
#define InterlockedOr32 _InterlockedOr
#define InterlockedXor32 _InterlockedXor
#define InterlockedExchangeAdd16 _InterlockedExchangeAdd16
#define InterlockedCompareExchange8 _InterlockedCompareExchange8
#define InterlockedExchangeAdd8 _InterlockedExchangeAdd8

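// ATOMIC_OPS(type, suffix, vctype) stamps out the full set of SeqCst helpers
// for one integer |type| in terms of the Interlocked##suffix intrinsics;
// |vctype| is the parameter type those intrinsics expect.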
#define ATOMIC_OPS(type, suffix, vctype)                                    \
  inline type AddSeqCst(type* p, type value) {                              \
    return InterlockedExchangeAdd##suffix(reinterpret_cast<vctype*>(p),     \
                                          bit_cast<vctype>(value));         \
  }                                                                         \
  inline type SubSeqCst(type* p, type value) {                              \
    return InterlockedExchangeAdd##suffix(reinterpret_cast<vctype*>(p),     \
                                          -bit_cast<vctype>(value));        \
  }                                                                         \
  inline type AndSeqCst(type* p, type value) {                              \
    return InterlockedAnd##suffix(reinterpret_cast<vctype*>(p),             \
                                  bit_cast<vctype>(value));                 \
  }                                                                         \
  inline type OrSeqCst(type* p, type value) {                               \
    return InterlockedOr##suffix(reinterpret_cast<vctype*>(p),              \
                                 bit_cast<vctype>(value));                  \
  }                                                                         \
  inline type XorSeqCst(type* p, type value) {                              \
    return InterlockedXor##suffix(reinterpret_cast<vctype*>(p),             \
                                  bit_cast<vctype>(value));                 \
  }                                                                         \
  inline type ExchangeSeqCst(type* p, type value) {                         \
    return InterlockedExchange##suffix(reinterpret_cast<vctype*>(p),        \
                                       bit_cast<vctype>(value));            \
  }                                                                         \
                                                                            \
  inline type CompareExchangeSeqCst(type* p, type oldval, type newval) {    \
    return InterlockedCompareExchange##suffix(reinterpret_cast<vctype*>(p), \
                                              bit_cast<vctype>(newval),     \
                                              bit_cast<vctype>(oldval));    \
  }

ATOMIC_OPS(int8_t, 8, char)
ATOMIC_OPS(uint8_t, 8, char)
ATOMIC_OPS(int16_t, 16, short)  /* NOLINT(runtime/int) */
ATOMIC_OPS(uint16_t, 16, short) /* NOLINT(runtime/int) */
ATOMIC_OPS(int32_t, 32, long)   /* NOLINT(runtime/int) */
ATOMIC_OPS(uint32_t, 32, long)  /* NOLINT(runtime/int) */

#undef ATOMIC_OPS

#undef InterlockedCompareExchange32
#undef InterlockedExchange32
#undef InterlockedExchangeAdd32
#undef InterlockedAnd32
#undef InterlockedOr32
#undef InterlockedXor32
#undef InterlockedExchangeAdd16
#undef InterlockedCompareExchange8
#undef InterlockedExchangeAdd8

#else

#error Unsupported platform!

#endif

template <typename T>
T FromObject(Handle<Object> number);

template <>
inline uint8_t FromObject<uint8_t>(Handle<Object> number) {
  return NumberToUint32(*number);
}

template <>
inline int8_t FromObject<int8_t>(Handle<Object> number) {
  return NumberToInt32(*number);
}

template <>
inline uint16_t FromObject<uint16_t>(Handle<Object> number) {
  return NumberToUint32(*number);
}

template <>
inline int16_t FromObject<int16_t>(Handle<Object> number) {
  return NumberToInt32(*number);
}

template <>
inline uint32_t FromObject<uint32_t>(Handle<Object> number) {
  return NumberToUint32(*number);
}

template <>
inline int32_t FromObject<int32_t>(Handle<Object> number) {
  return NumberToInt32(*number);
}


inline Object* ToObject(Isolate* isolate, int8_t t) { return Smi::FromInt(t); }

inline Object* ToObject(Isolate* isolate, uint8_t t) { return Smi::FromInt(t); }

inline Object* ToObject(Isolate* isolate, int16_t t) { return Smi::FromInt(t); }

inline Object* ToObject(Isolate* isolate, uint16_t t) {
  return Smi::FromInt(t);
}


inline Object* ToObject(Isolate* isolate, int32_t t) {
  return *isolate->factory()->NewNumber(t);
}


inline Object* ToObject(Isolate* isolate, uint32_t t) {
  return *isolate->factory()->NewNumber(t);
}


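// Typed helpers: each converts the incoming JS number argument(s) to T,
// performs the sequentially consistent atomic operation on buffer[index], and
// boxes the previous value of the element back into a JS number.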
template <typename T>
inline Object* DoCompareExchange(Isolate* isolate, void* buffer, size_t index,
                                 Handle<Object> oldobj, Handle<Object> newobj) {
  T oldval = FromObject<T>(oldobj);
  T newval = FromObject<T>(newobj);
  T result =
      CompareExchangeSeqCst(static_cast<T*>(buffer) + index, oldval, newval);
  return ToObject(isolate, result);
}


template <typename T>
inline Object* DoAdd(Isolate* isolate, void* buffer, size_t index,
                     Handle<Object> obj) {
  T value = FromObject<T>(obj);
  T result = AddSeqCst(static_cast<T*>(buffer) + index, value);
  return ToObject(isolate, result);
}


template <typename T>
inline Object* DoSub(Isolate* isolate, void* buffer, size_t index,
                     Handle<Object> obj) {
  T value = FromObject<T>(obj);
  T result = SubSeqCst(static_cast<T*>(buffer) + index, value);
  return ToObject(isolate, result);
}


template <typename T>
inline Object* DoAnd(Isolate* isolate, void* buffer, size_t index,
                     Handle<Object> obj) {
  T value = FromObject<T>(obj);
  T result = AndSeqCst(static_cast<T*>(buffer) + index, value);
  return ToObject(isolate, result);
}


template <typename T>
inline Object* DoOr(Isolate* isolate, void* buffer, size_t index,
                    Handle<Object> obj) {
  T value = FromObject<T>(obj);
  T result = OrSeqCst(static_cast<T*>(buffer) + index, value);
  return ToObject(isolate, result);
}


template <typename T>
inline Object* DoXor(Isolate* isolate, void* buffer, size_t index,
                     Handle<Object> obj) {
  T value = FromObject<T>(obj);
  T result = XorSeqCst(static_cast<T*>(buffer) + index, value);
  return ToObject(isolate, result);
}


template <typename T>
inline Object* DoExchange(Isolate* isolate, void* buffer, size_t index,
                          Handle<Object> obj) {
  T value = FromObject<T>(obj);
  T result = ExchangeSeqCst(static_cast<T*>(buffer) + index, value);
  return ToObject(isolate, result);
}


// Uint8Clamped functions

uint8_t ClampToUint8(int32_t value) {
  if (value < 0) return 0;
  if (value > 255) return 255;
  return value;
}
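// e.g. ClampToUint8(-5) == 0, ClampToUint8(42) == 42,
// ClampToUint8(300) == 255.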


inline Object* DoCompareExchangeUint8Clamped(Isolate* isolate, void* buffer,
                                             size_t index,
                                             Handle<Object> oldobj,
                                             Handle<Object> newobj) {
  typedef int32_t convert_type;
  uint8_t oldval = ClampToUint8(FromObject<convert_type>(oldobj));
  uint8_t newval = ClampToUint8(FromObject<convert_type>(newobj));
  uint8_t result = CompareExchangeSeqCst(static_cast<uint8_t*>(buffer) + index,
                                         oldval, newval);
  return ToObject(isolate, result);
}


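// For Uint8Clamped arrays there is no single hardware read-modify-write that
// also clamps, so each op below is emulated with a compare-exchange retry
// loop: read the current value, compute the clamped result, and retry if
// another thread updated the element in the meantime.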
#define DO_UINT8_CLAMPED_OP(name, op)                                        \
  inline Object* Do##name##Uint8Clamped(Isolate* isolate, void* buffer,      \
                                        size_t index, Handle<Object> obj) {  \
    typedef int32_t convert_type;                                            \
    uint8_t* p = static_cast<uint8_t*>(buffer) + index;                      \
    convert_type operand = FromObject<convert_type>(obj);                    \
    uint8_t expected;                                                        \
    uint8_t result;                                                          \
    do {                                                                     \
      expected = *p;                                                         \
      result = ClampToUint8(static_cast<convert_type>(expected) op operand); \
    } while (CompareExchangeSeqCst(p, expected, result) != expected);        \
    return ToObject(isolate, expected);                                      \
  }

DO_UINT8_CLAMPED_OP(Add, +)
DO_UINT8_CLAMPED_OP(Sub, -)
DO_UINT8_CLAMPED_OP(And, &)
DO_UINT8_CLAMPED_OP(Or, | )
DO_UINT8_CLAMPED_OP(Xor, ^)

#undef DO_UINT8_CLAMPED_OP


inline Object* DoExchangeUint8Clamped(Isolate* isolate, void* buffer,
                                      size_t index, Handle<Object> obj) {
  typedef int32_t convert_type;
  uint8_t* p = static_cast<uint8_t*>(buffer) + index;
  uint8_t result = ClampToUint8(FromObject<convert_type>(obj));
  uint8_t expected;
  do {
    expected = *p;
  } while (CompareExchangeSeqCst(p, expected, result) != expected);
  return ToObject(isolate, expected);
}


}  // anonymous namespace

// Duplicated from objects.h
// V has parameters (Type, type, TYPE, C type, element_size)
#define INTEGER_TYPED_ARRAYS(V)          \
  V(Uint8, uint8, UINT8, uint8_t, 1)     \
  V(Int8, int8, INT8, int8_t, 1)         \
  V(Uint16, uint16, UINT16, uint16_t, 2) \
  V(Int16, int16, INT16, int16_t, 2)     \
  V(Uint32, uint32, UINT32, uint32_t, 4) \
  V(Int32, int32, INT32, int32_t, 4)
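// Consumers pass a TYPED_ARRAY_CASE macro as V; for example,
// V(Int32, int32, INT32, int32_t, 4) becomes
//   case kExternalInt32Array:
//     return DoAdd<int32_t>(isolate, source, index, value);
// inside the switch in Runtime_AtomicsAdd below.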

RUNTIME_FUNCTION(Runtime_ThrowNotIntegerSharedTypedArrayError) {
  HandleScope scope(isolate);
  DCHECK_EQ(1, args.length());
  CONVERT_ARG_HANDLE_CHECKED(Object, value, 0);
  THROW_NEW_ERROR_RETURN_FAILURE(
      isolate,
      NewTypeError(MessageTemplate::kNotIntegerSharedTypedArray, value));
}

RUNTIME_FUNCTION(Runtime_ThrowNotInt32SharedTypedArrayError) {
  HandleScope scope(isolate);
  DCHECK_EQ(1, args.length());
  CONVERT_ARG_HANDLE_CHECKED(Object, value, 0);
  THROW_NEW_ERROR_RETURN_FAILURE(
      isolate, NewTypeError(MessageTemplate::kNotInt32SharedTypedArray, value));
}

RUNTIME_FUNCTION(Runtime_ThrowInvalidAtomicAccessIndexError) {
  HandleScope scope(isolate);
  DCHECK_EQ(0, args.length());
  THROW_NEW_ERROR_RETURN_FAILURE(
      isolate, NewRangeError(MessageTemplate::kInvalidAtomicAccessIndex));
}

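// The Runtime_Atomics* entry points below all follow the same shape: verify
// that the typed array is backed by a SharedArrayBuffer and that the index is
// in bounds, compute the base address of the element storage, then dispatch
// on the element type to one of the typed helpers above.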
RUNTIME_FUNCTION(Runtime_AtomicsCompareExchange) {
  HandleScope scope(isolate);
  DCHECK_EQ(4, args.length());
  CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, sta, 0);
  CONVERT_SIZE_ARG_CHECKED(index, 1);
  CONVERT_NUMBER_ARG_HANDLE_CHECKED(oldobj, 2);
  CONVERT_NUMBER_ARG_HANDLE_CHECKED(newobj, 3);
  CHECK(sta->GetBuffer()->is_shared());
  CHECK_LT(index, NumberToSize(sta->length()));

  uint8_t* source = static_cast<uint8_t*>(sta->GetBuffer()->backing_store()) +
                    NumberToSize(sta->byte_offset());

  switch (sta->type()) {
#define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype, size) \
  case kExternal##Type##Array:                              \
    return DoCompareExchange<ctype>(isolate, source, index, oldobj, newobj);

    INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE

    case kExternalUint8ClampedArray:
      return DoCompareExchangeUint8Clamped(isolate, source, index, oldobj,
                                           newobj);

    default:
      break;
  }

  UNREACHABLE();
  return isolate->heap()->undefined_value();
}


RUNTIME_FUNCTION(Runtime_AtomicsAdd) {
  HandleScope scope(isolate);
  DCHECK_EQ(3, args.length());
  CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, sta, 0);
  CONVERT_SIZE_ARG_CHECKED(index, 1);
  CONVERT_NUMBER_ARG_HANDLE_CHECKED(value, 2);
  CHECK(sta->GetBuffer()->is_shared());
  CHECK_LT(index, NumberToSize(sta->length()));

  uint8_t* source = static_cast<uint8_t*>(sta->GetBuffer()->backing_store()) +
                    NumberToSize(sta->byte_offset());

  switch (sta->type()) {
#define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype, size) \
  case kExternal##Type##Array:                              \
    return DoAdd<ctype>(isolate, source, index, value);

    INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE

    case kExternalUint8ClampedArray:
      return DoAddUint8Clamped(isolate, source, index, value);

    default:
      break;
  }

  UNREACHABLE();
  return isolate->heap()->undefined_value();
}


RUNTIME_FUNCTION(Runtime_AtomicsSub) {
  HandleScope scope(isolate);
  DCHECK_EQ(3, args.length());
  CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, sta, 0);
  CONVERT_SIZE_ARG_CHECKED(index, 1);
  CONVERT_NUMBER_ARG_HANDLE_CHECKED(value, 2);
  CHECK(sta->GetBuffer()->is_shared());
  CHECK_LT(index, NumberToSize(sta->length()));

  uint8_t* source = static_cast<uint8_t*>(sta->GetBuffer()->backing_store()) +
                    NumberToSize(sta->byte_offset());

  switch (sta->type()) {
#define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype, size) \
  case kExternal##Type##Array:                              \
    return DoSub<ctype>(isolate, source, index, value);

    INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE

    case kExternalUint8ClampedArray:
      return DoSubUint8Clamped(isolate, source, index, value);

    default:
      break;
  }

  UNREACHABLE();
  return isolate->heap()->undefined_value();
}


RUNTIME_FUNCTION(Runtime_AtomicsAnd) {
  HandleScope scope(isolate);
  DCHECK_EQ(3, args.length());
  CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, sta, 0);
  CONVERT_SIZE_ARG_CHECKED(index, 1);
  CONVERT_NUMBER_ARG_HANDLE_CHECKED(value, 2);
  CHECK(sta->GetBuffer()->is_shared());
  CHECK_LT(index, NumberToSize(sta->length()));

  uint8_t* source = static_cast<uint8_t*>(sta->GetBuffer()->backing_store()) +
                    NumberToSize(sta->byte_offset());

  switch (sta->type()) {
#define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype, size) \
  case kExternal##Type##Array:                              \
    return DoAnd<ctype>(isolate, source, index, value);

    INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE

    case kExternalUint8ClampedArray:
      return DoAndUint8Clamped(isolate, source, index, value);

    default:
      break;
  }

  UNREACHABLE();
  return isolate->heap()->undefined_value();
}


RUNTIME_FUNCTION(Runtime_AtomicsOr) {
  HandleScope scope(isolate);
  DCHECK_EQ(3, args.length());
  CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, sta, 0);
  CONVERT_SIZE_ARG_CHECKED(index, 1);
  CONVERT_NUMBER_ARG_HANDLE_CHECKED(value, 2);
  CHECK(sta->GetBuffer()->is_shared());
  CHECK_LT(index, NumberToSize(sta->length()));

  uint8_t* source = static_cast<uint8_t*>(sta->GetBuffer()->backing_store()) +
                    NumberToSize(sta->byte_offset());

  switch (sta->type()) {
#define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype, size) \
  case kExternal##Type##Array:                              \
    return DoOr<ctype>(isolate, source, index, value);

    INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE

    case kExternalUint8ClampedArray:
      return DoOrUint8Clamped(isolate, source, index, value);

    default:
      break;
  }

  UNREACHABLE();
  return isolate->heap()->undefined_value();
}


RUNTIME_FUNCTION(Runtime_AtomicsXor) {
  HandleScope scope(isolate);
  DCHECK_EQ(3, args.length());
  CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, sta, 0);
  CONVERT_SIZE_ARG_CHECKED(index, 1);
  CONVERT_NUMBER_ARG_HANDLE_CHECKED(value, 2);
  CHECK(sta->GetBuffer()->is_shared());
  CHECK_LT(index, NumberToSize(sta->length()));

  uint8_t* source = static_cast<uint8_t*>(sta->GetBuffer()->backing_store()) +
                    NumberToSize(sta->byte_offset());

  switch (sta->type()) {
#define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype, size) \
  case kExternal##Type##Array:                              \
    return DoXor<ctype>(isolate, source, index, value);

    INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE

    case kExternalUint8ClampedArray:
      return DoXorUint8Clamped(isolate, source, index, value);

    default:
      break;
  }

  UNREACHABLE();
  return isolate->heap()->undefined_value();
}


RUNTIME_FUNCTION(Runtime_AtomicsExchange) {
  HandleScope scope(isolate);
  DCHECK_EQ(3, args.length());
  CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, sta, 0);
  CONVERT_SIZE_ARG_CHECKED(index, 1);
  CONVERT_NUMBER_ARG_HANDLE_CHECKED(value, 2);
  CHECK(sta->GetBuffer()->is_shared());
  CHECK_LT(index, NumberToSize(sta->length()));

  uint8_t* source = static_cast<uint8_t*>(sta->GetBuffer()->backing_store()) +
                    NumberToSize(sta->byte_offset());

  switch (sta->type()) {
#define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype, size) \
  case kExternal##Type##Array:                              \
    return DoExchange<ctype>(isolate, source, index, value);

    INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE

    case kExternalUint8ClampedArray:
      return DoExchangeUint8Clamped(isolate, source, index, value);

    default:
      break;
  }

  UNREACHABLE();
  return isolate->heap()->undefined_value();
}


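// Per the draft spec, Atomics.isLockFree(n) reports whether n-byte atomic
// accesses are lock-free; this implementation treats 1-, 2- and 4-byte
// accesses as lock-free on all supported platforms.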
RUNTIME_FUNCTION(Runtime_AtomicsIsLockFree) {
  HandleScope scope(isolate);
  DCHECK_EQ(1, args.length());
  CONVERT_NUMBER_ARG_HANDLE_CHECKED(size, 0);
  uint32_t usize = NumberToUint32(*size);
  return isolate->heap()->ToBoolean(AtomicIsLockFree(usize));
}
}  // namespace internal
}  // namespace v8