1 // Copyright 2016 the V8 project authors. All rights reserved. 2 // Use of this source code is governed by a BSD-style license that can be 3 // found in the LICENSE file. 4 5 // Note 1: Any file that includes this one should include object-macros-undef.h 6 // at the bottom. 7 8 // Note 2: This file is deliberately missing the include guards (the undeffing 9 // approach wouldn't work otherwise). 10 // 11 // PRESUBMIT_INTENTIONALLY_MISSING_INCLUDE_GUARD 12 13 // The accessors with RELAXED_, ACQUIRE_, and RELEASE_ prefixes should be used 14 // for fields that can be written to and read from multiple threads at the same 15 // time. See comments in src/base/atomicops.h for the memory ordering sematics. 16 17 #include <src/v8memory.h> 18 19 #define DECL_PRIMITIVE_ACCESSORS(name, type) \ 20 inline type name() const; \ 21 inline void set_##name(type value); 22 23 #define DECL_BOOLEAN_ACCESSORS(name) DECL_PRIMITIVE_ACCESSORS(name, bool) 24 25 #define DECL_INT_ACCESSORS(name) DECL_PRIMITIVE_ACCESSORS(name, int) 26 27 #define DECL_INT32_ACCESSORS(name) DECL_PRIMITIVE_ACCESSORS(name, int32_t) 28 29 #define DECL_UINT16_ACCESSORS(name) \ 30 inline uint16_t name() const; \ 31 inline void set_##name(int value); 32 33 #define DECL_UINT8_ACCESSORS(name) \ 34 inline uint8_t name() const; \ 35 inline void set_##name(int value); 36 37 #define DECL_ACCESSORS(name, type) \ 38 inline type* name() const; \ 39 inline void set_##name(type* value, \ 40 WriteBarrierMode mode = UPDATE_WRITE_BARRIER); 41 42 #define DECL_CAST(type) \ 43 V8_INLINE static type* cast(Object* object); \ 44 V8_INLINE static const type* cast(const Object* object); 45 46 #define CAST_ACCESSOR(type) \ 47 type* type::cast(Object* object) { \ 48 SLOW_DCHECK(object->Is##type()); \ 49 return reinterpret_cast<type*>(object); \ 50 } \ 51 const type* type::cast(const Object* object) { \ 52 SLOW_DCHECK(object->Is##type()); \ 53 return reinterpret_cast<const type*>(object); \ 54 } 55 56 #define INT_ACCESSORS(holder, name, 
offset) \ 57 int holder::name() const { return READ_INT_FIELD(this, offset); } \ 58 void holder::set_##name(int value) { WRITE_INT_FIELD(this, offset, value); } 59 60 #define INT32_ACCESSORS(holder, name, offset) \ 61 int32_t holder::name() const { return READ_INT32_FIELD(this, offset); } \ 62 void holder::set_##name(int32_t value) { \ 63 WRITE_INT32_FIELD(this, offset, value); \ 64 } 65 66 #define UINT16_ACCESSORS(holder, name, offset) \ 67 uint16_t holder::name() const { return READ_UINT16_FIELD(this, offset); } \ 68 void holder::set_##name(int value) { \ 69 DCHECK_GE(value, 0); \ 70 DCHECK_LE(value, static_cast<uint16_t>(-1)); \ 71 WRITE_UINT16_FIELD(this, offset, value); \ 72 } 73 74 #define UINT8_ACCESSORS(holder, name, offset) \ 75 uint8_t holder::name() const { return READ_UINT8_FIELD(this, offset); } \ 76 void holder::set_##name(int value) { \ 77 DCHECK_GE(value, 0); \ 78 DCHECK_LE(value, static_cast<uint8_t>(-1)); \ 79 WRITE_UINT8_FIELD(this, offset, value); \ 80 } 81 82 #define ACCESSORS_CHECKED2(holder, name, type, offset, get_condition, \ 83 set_condition) \ 84 type* holder::name() const { \ 85 type* value = type::cast(READ_FIELD(this, offset)); \ 86 DCHECK(get_condition); \ 87 return value; \ 88 } \ 89 void holder::set_##name(type* value, WriteBarrierMode mode) { \ 90 DCHECK(set_condition); \ 91 WRITE_FIELD(this, offset, value); \ 92 CONDITIONAL_WRITE_BARRIER(this, offset, value, mode); \ 93 } 94 #define ACCESSORS_CHECKED(holder, name, type, offset, condition) \ 95 ACCESSORS_CHECKED2(holder, name, type, offset, condition, condition) 96 97 #define ACCESSORS(holder, name, type, offset) \ 98 ACCESSORS_CHECKED(holder, name, type, offset, true) 99 100 #define WEAK_ACCESSORS_CHECKED2(holder, name, offset, get_condition, \ 101 set_condition) \ 102 MaybeObject* holder::name() const { \ 103 MaybeObject* value = READ_WEAK_FIELD(this, offset); \ 104 DCHECK(get_condition); \ 105 return value; \ 106 } \ 107 void holder::set_##name(MaybeObject* value, 
WriteBarrierMode mode) { \ 108 DCHECK(set_condition); \ 109 WRITE_WEAK_FIELD(this, offset, value); \ 110 CONDITIONAL_WEAK_WRITE_BARRIER(this, offset, value, mode); \ 111 } 112 113 #define WEAK_ACCESSORS_CHECKED(holder, name, offset, condition) \ 114 WEAK_ACCESSORS_CHECKED2(holder, name, offset, condition, condition) 115 116 #define WEAK_ACCESSORS(holder, name, offset) \ 117 WEAK_ACCESSORS_CHECKED(holder, name, offset, true) 118 119 // Getter that returns a Smi as an int and writes an int as a Smi. 120 #define SMI_ACCESSORS_CHECKED(holder, name, offset, condition) \ 121 int holder::name() const { \ 122 DCHECK(condition); \ 123 Object* value = READ_FIELD(this, offset); \ 124 return Smi::ToInt(value); \ 125 } \ 126 void holder::set_##name(int value) { \ 127 DCHECK(condition); \ 128 WRITE_FIELD(this, offset, Smi::FromInt(value)); \ 129 } 130 131 #define SMI_ACCESSORS(holder, name, offset) \ 132 SMI_ACCESSORS_CHECKED(holder, name, offset, true) 133 134 #define SYNCHRONIZED_SMI_ACCESSORS(holder, name, offset) \ 135 int holder::synchronized_##name() const { \ 136 Object* value = ACQUIRE_READ_FIELD(this, offset); \ 137 return Smi::ToInt(value); \ 138 } \ 139 void holder::synchronized_set_##name(int value) { \ 140 RELEASE_WRITE_FIELD(this, offset, Smi::FromInt(value)); \ 141 } 142 143 #define RELAXED_SMI_ACCESSORS(holder, name, offset) \ 144 int holder::relaxed_read_##name() const { \ 145 Object* value = RELAXED_READ_FIELD(this, offset); \ 146 return Smi::ToInt(value); \ 147 } \ 148 void holder::relaxed_write_##name(int value) { \ 149 RELAXED_WRITE_FIELD(this, offset, Smi::FromInt(value)); \ 150 } 151 152 #define BOOL_GETTER(holder, field, name, offset) \ 153 bool holder::name() const { return BooleanBit::get(field(), offset); } 154 155 #define BOOL_ACCESSORS(holder, field, name, offset) \ 156 bool holder::name() const { return BooleanBit::get(field(), offset); } \ 157 void holder::set_##name(bool value) { \ 158 set_##field(BooleanBit::set(field(), offset, value)); \ 159 } 
// Accessors for a value encoded by a BitField inside the word returned by
// field(); the setter rewrites the whole word via set_##field.
#define BIT_FIELD_ACCESSORS(holder, field, name, BitField)    \
  typename BitField::FieldType holder::name() const {         \
    return BitField::decode(field());                         \
  }                                                           \
  void holder::set_##name(typename BitField::FieldType value) { \
    set_##field(BitField::update(field(), value));            \
  }

// Defines a free predicate Is##type comparing an InstanceType against a
// single fixed instance type.
#define INSTANCE_TYPE_CHECKER(type, forinstancetype)    \
  V8_INLINE bool Is##type(InstanceType instance_type) { \
    return instance_type == forinstancetype;            \
  }

// Defines HeapObject::Is##type by delegating to InstanceTypeChecker with the
// object's map()->instance_type(). Extra variadic args are ignored here.
#define TYPE_CHECKER(type, ...)                                  \
  bool HeapObject::Is##type() const {                            \
    return InstanceTypeChecker::Is##type(map()->instance_type()); \
  }

// Converts a tagged heap-object pointer plus a byte offset into a raw
// Address, subtracting kHeapObjectTag to undo pointer tagging.
#define FIELD_ADDR(p, offset) \
  (reinterpret_cast<Address>(p) + offset - kHeapObjectTag)

// Plain (non-atomic) load of a tagged pointer field.
#define READ_FIELD(p, offset) \
  (*reinterpret_cast<Object* const*>(FIELD_ADDR(p, offset)))

// Plain (non-atomic) load of a maybe-weak tagged pointer field.
#define READ_WEAK_FIELD(p, offset) \
  (*reinterpret_cast<MaybeObject* const*>(FIELD_ADDR(p, offset)))

// Atomic acquire-load of a tagged pointer field.
#define ACQUIRE_READ_FIELD(p, offset)           \
  reinterpret_cast<Object*>(base::Acquire_Load( \
      reinterpret_cast<const base::AtomicWord*>(FIELD_ADDR(p, offset))))

// Atomic relaxed load of a tagged pointer field.
#define RELAXED_READ_FIELD(p, offset)           \
  reinterpret_cast<Object*>(base::Relaxed_Load( \
      reinterpret_cast<const base::AtomicWord*>(FIELD_ADDR(p, offset))))

// Atomic relaxed load of a maybe-weak tagged pointer field.
#define RELAXED_READ_WEAK_FIELD(p, offset)           \
  reinterpret_cast<MaybeObject*>(base::Relaxed_Load( \
      reinterpret_cast<const base::AtomicWord*>(FIELD_ADDR(p, offset))))

// With V8_CONCURRENT_MARKING, tagged-field stores are relaxed atomic stores
// (presumably so they do not race with concurrent readers such as the
// marker -- NOTE(review): confirm against the marking code); otherwise they
// are plain assignments.
// NOTE(review): the atomic variants end with a ';' inside the macro body, so
// the expansion of `WRITE_FIELD(...)` in an unbraced if/else differs from the
// non-atomic variants' expression form.
#ifdef V8_CONCURRENT_MARKING
#define WRITE_FIELD(p, offset, value)                               \
  base::Relaxed_Store(                                              \
      reinterpret_cast<base::AtomicWord*>(FIELD_ADDR(p, offset)),   \
      reinterpret_cast<base::AtomicWord>(value));
#define WRITE_WEAK_FIELD(p, offset, value)                          \
  base::Relaxed_Store(                                              \
      reinterpret_cast<base::AtomicWord*>(FIELD_ADDR(p, offset)),   \
      reinterpret_cast<base::AtomicWord>(value));
#else
#define WRITE_FIELD(p, offset, value) \
  (*reinterpret_cast<Object**>(FIELD_ADDR(p, offset)) = value)
#define WRITE_WEAK_FIELD(p, offset, value) \
  (*reinterpret_cast<MaybeObject**>(FIELD_ADDR(p, offset)) = value)
#endif

// Atomic release-store of a tagged pointer field (pairs with
// ACQUIRE_READ_FIELD).
#define RELEASE_WRITE_FIELD(p, offset, value)                       \
  base::Release_Store(                                              \
      reinterpret_cast<base::AtomicWord*>(FIELD_ADDR(p, offset)),   \
      reinterpret_cast<base::AtomicWord>(value));

// Atomic relaxed store of a tagged pointer field.
#define RELAXED_WRITE_FIELD(p, offset, value)                       \
  base::Relaxed_Store(                                              \
      reinterpret_cast<base::AtomicWord*>(FIELD_ADDR(p, offset)),   \
      reinterpret_cast<base::AtomicWord>(value));

// Unconditionally invokes both the marking and the generational write
// barrier for a just-written tagged field.
#define WRITE_BARRIER(object, offset, value)                                  \
  do {                                                                        \
    DCHECK_NOT_NULL(Heap::FromWritableHeapObject(object));                    \
    MarkingBarrier(object, HeapObject::RawField(object, offset), value);      \
    GenerationalBarrier(object, HeapObject::RawField(object, offset), value); \
  } while (false)

// Same as WRITE_BARRIER but for maybe-weak fields.
#define WEAK_WRITE_BARRIER(object, offset, value)                             \
  do {                                                                        \
    DCHECK_NOT_NULL(Heap::FromWritableHeapObject(object));                    \
    MarkingBarrier(object, HeapObject::RawMaybeWeakField(object, offset),     \
                   value);                                                    \
    GenerationalBarrier(object, HeapObject::RawMaybeWeakField(object, offset), \
                        value);                                               \
  } while (false)

// Barrier gated on `mode`: skipped entirely for SKIP_WRITE_BARRIER; the
// marking barrier runs only for UPDATE_WRITE_BARRIER, while the generational
// barrier runs for every non-skipped mode.
#define CONDITIONAL_WRITE_BARRIER(object, offset, value, mode)               \
  do {                                                                       \
    DCHECK_NOT_NULL(Heap::FromWritableHeapObject(object));                   \
    if (mode != SKIP_WRITE_BARRIER) {                                        \
      if (mode == UPDATE_WRITE_BARRIER) {                                    \
        MarkingBarrier(object, HeapObject::RawField(object, offset), value); \
      }                                                                      \
      GenerationalBarrier(object, HeapObject::RawField(object, offset),      \
                          value);                                            \
    }                                                                        \
  } while (false)

// Same as CONDITIONAL_WRITE_BARRIER but for maybe-weak fields.
#define CONDITIONAL_WEAK_WRITE_BARRIER(object, offset, value, mode)          \
  do {                                                                       \
    DCHECK_NOT_NULL(Heap::FromWritableHeapObject(object));                   \
    if (mode != SKIP_WRITE_BARRIER) {                                        \
      if (mode == UPDATE_WRITE_BARRIER) {                                    \
        MarkingBarrier(object, HeapObject::RawMaybeWeakField(object, offset), \
                       value);                                               \
      }                                                                      \
      GenerationalBarrier(                                                   \
          object, HeapObject::RawMaybeWeakField(object, offset), value);     \
    }                                                                        \
  } while (false)

// ---------------------------------------------------------------------------
// Raw typed field accessors: plain loads/stores of a fixed-width value at a
// byte offset inside an object (via FIELD_ADDR). The RELAXED_ variants use
// relaxed atomic operations instead of plain memory accesses.

// Doubles go through ReadDoubleValue/WriteDoubleValue helpers rather than a
// direct dereference (presumably for alignment handling -- NOTE(review):
// confirm in v8memory.h).
#define READ_DOUBLE_FIELD(p, offset) ReadDoubleValue(FIELD_ADDR(p, offset))

#define WRITE_DOUBLE_FIELD(p, offset, value) \
  WriteDoubleValue(FIELD_ADDR(p, offset), value)

#define READ_INT_FIELD(p, offset) \
  (*reinterpret_cast<const int*>(FIELD_ADDR(p, offset)))

#define WRITE_INT_FIELD(p, offset, value) \
  (*reinterpret_cast<int*>(FIELD_ADDR(p, offset)) = value)

#define RELAXED_READ_INTPTR_FIELD(p, offset) \
  static_cast<intptr_t>(base::Relaxed_Load(  \
      reinterpret_cast<const base::AtomicWord*>(FIELD_ADDR(p, offset))))

#define READ_INTPTR_FIELD(p, offset) \
  (*reinterpret_cast<const intptr_t*>(FIELD_ADDR(p, offset)))

#define RELAXED_WRITE_INTPTR_FIELD(p, offset, value)                \
  base::Relaxed_Store(                                              \
      reinterpret_cast<base::AtomicWord*>(FIELD_ADDR(p, offset)),   \
      static_cast<base::AtomicWord>(value));

#define WRITE_INTPTR_FIELD(p, offset, value) \
  (*reinterpret_cast<intptr_t*>(FIELD_ADDR(p, offset)) = value)

#define READ_UINT8_FIELD(p, offset) \
  (*reinterpret_cast<const uint8_t*>(FIELD_ADDR(p, offset)))

#define WRITE_UINT8_FIELD(p, offset, value) \
  (*reinterpret_cast<uint8_t*>(FIELD_ADDR(p, offset)) = value)

#define RELAXED_WRITE_INT8_FIELD(p, offset, value)                            \
  base::Relaxed_Store(reinterpret_cast<base::Atomic8*>(FIELD_ADDR(p, offset)), \
                      static_cast<base::Atomic8>(value));

#define READ_INT8_FIELD(p, offset) \
  (*reinterpret_cast<const int8_t*>(FIELD_ADDR(p, offset)))

#define RELAXED_READ_INT8_FIELD(p, offset) \
  static_cast<int8_t>(base::Relaxed_Load(  \
      reinterpret_cast<const base::Atomic8*>(FIELD_ADDR(p, offset))))

#define WRITE_INT8_FIELD(p, offset, value) \
  (*reinterpret_cast<int8_t*>(FIELD_ADDR(p, offset)) = value)

#define READ_UINT16_FIELD(p, offset) \
  (*reinterpret_cast<const uint16_t*>(FIELD_ADDR(p, offset)))

#define WRITE_UINT16_FIELD(p, offset, value) \
  (*reinterpret_cast<uint16_t*>(FIELD_ADDR(p, offset)) = value)

#define READ_INT16_FIELD(p, offset) \
  (*reinterpret_cast<const int16_t*>(FIELD_ADDR(p, offset)))

#define WRITE_INT16_FIELD(p, offset, value) \
  (*reinterpret_cast<int16_t*>(FIELD_ADDR(p, offset)) = value)

#define READ_UINT32_FIELD(p, offset) \
  (*reinterpret_cast<const uint32_t*>(FIELD_ADDR(p, offset)))

#define WRITE_UINT32_FIELD(p, offset, value) \
  (*reinterpret_cast<uint32_t*>(FIELD_ADDR(p, offset)) = value)

#define READ_INT32_FIELD(p, offset) \
  (*reinterpret_cast<const int32_t*>(FIELD_ADDR(p, offset)))

#define WRITE_INT32_FIELD(p, offset, value) \
  (*reinterpret_cast<int32_t*>(FIELD_ADDR(p, offset)) = value)

#define READ_FLOAT_FIELD(p, offset) \
  (*reinterpret_cast<const float*>(FIELD_ADDR(p, offset)))

#define WRITE_FLOAT_FIELD(p, offset, value) \
  (*reinterpret_cast<float*>(FIELD_ADDR(p, offset)) = value)

#define READ_UINT64_FIELD(p, offset) \
  (*reinterpret_cast<const uint64_t*>(FIELD_ADDR(p, offset)))

#define WRITE_UINT64_FIELD(p, offset, value) \
  (*reinterpret_cast<uint64_t*>(FIELD_ADDR(p, offset)) = value)

#define READ_INT64_FIELD(p, offset) \
  (*reinterpret_cast<const int64_t*>(FIELD_ADDR(p, offset)))

#define WRITE_INT64_FIELD(p, offset, value) \
  (*reinterpret_cast<int64_t*>(FIELD_ADDR(p, offset)) = value)

#define READ_BYTE_FIELD(p, offset) \
  (*reinterpret_cast<const byte*>(FIELD_ADDR(p, offset)))

#define RELAXED_READ_BYTE_FIELD(p, offset) \
  static_cast<byte>(base::Relaxed_Load(    \
      reinterpret_cast<const base::Atomic8*>(FIELD_ADDR(p, offset))))

#define WRITE_BYTE_FIELD(p, offset, value) \
  (*reinterpret_cast<byte*>(FIELD_ADDR(p, offset)) = value)

#define RELAXED_WRITE_BYTE_FIELD(p, offset, value)                            \
  base::Relaxed_Store(reinterpret_cast<base::Atomic8*>(FIELD_ADDR(p, offset)), \
                      static_cast<base::Atomic8>(value));

// Declares a heap-verification method only in VERIFY_HEAP builds; expands to
// nothing otherwise.
#ifdef VERIFY_HEAP
#define DECL_VERIFIER(Name) void Name##Verify(Isolate* isolate);
#else
#define DECL_VERIFIER(Name)
#endif

// Accessor pair for a DeoptimizationData element stored at the fixed index
// k##name##Index.
#define DEFINE_DEOPT_ELEMENT_ACCESSORS(name, type)                            \
  type* DeoptimizationData::name() { return type::cast(get(k##name##Index)); } \
  void DeoptimizationData::Set##name(type* value) {                           \
    set(k##name##Index, value);                                               \
  }

// Accessor pair for a per-entry DeoptimizationData slot: entry i's base index
// (IndexForEntry(i)) plus the fixed slot offset k##name##Offset.
#define DEFINE_DEOPT_ENTRY_ACCESSORS(name, type)                \
  type* DeoptimizationData::name(int i) {                       \
    return type::cast(get(IndexForEntry(i) + k##name##Offset)); \
  }                                                             \
  void DeoptimizationData::Set##name(int i, type* value) {      \
    set(IndexForEntry(i) + k##name##Offset, value);             \
  }