// Copyright 2006-2009 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#ifndef V8_GLOBALS_H_
#define V8_GLOBALS_H_

namespace v8 {
namespace internal {

// Processor architecture detection. For more info on what's defined, see:
//   http://msdn.microsoft.com/en-us/library/b0084kay.aspx
//   http://www.agner.org/optimize/calling_conventions.pdf
// or with gcc, run: "echo | gcc -E -dM -"
#if defined(_M_X64) || defined(__x86_64__)
#define V8_HOST_ARCH_X64 1
#define V8_HOST_ARCH_64_BIT 1
#define V8_HOST_CAN_READ_UNALIGNED 1
#elif defined(_M_IX86) || defined(__i386__)
#define V8_HOST_ARCH_IA32 1
#define V8_HOST_ARCH_32_BIT 1
#define V8_HOST_CAN_READ_UNALIGNED 1
#elif defined(__ARMEL__)
#define V8_HOST_ARCH_ARM 1
#define V8_HOST_ARCH_32_BIT 1
#else
#error Your host architecture was not detected as supported by v8
#endif

#if defined(V8_TARGET_ARCH_X64) || defined(V8_TARGET_ARCH_IA32)
#define V8_TARGET_CAN_READ_UNALIGNED 1
#elif V8_TARGET_ARCH_ARM
#else
#error Your target architecture is not supported by v8
#endif

// Support for an alternative bool type. It is only enabled if the code is
// compiled with USE_MYBOOL defined. It catches some nasty type bugs.
// For instance, 'bool b = "false";' results in b == true! This is a hidden
// source of bugs.
// However, redefining the bool type does have some negative impact on some
// platforms. It gives rise to compiler warnings (e.g. with MSVC) in the API
// header files when mixing code that uses the standard bool with code that
// uses the redefined version.
// This does not actually belong in the platform code, but needs to be
// defined here because the platform code uses bool, and platform.h is
// included very early in the main include file.

#ifdef USE_MYBOOL
typedef unsigned int __my_bool__;
#define bool __my_bool__  // use 'indirection' to avoid name clashes
#endif

typedef uint8_t byte;
typedef byte* Address;

// Define our own macros for writing 64-bit constants. This is less fragile
// than defining __STDC_CONSTANT_MACROS before including <stdint.h>, and it
// works on compilers that don't have it (like MSVC).
#if V8_HOST_ARCH_64_BIT
#ifdef _MSC_VER
#define V8_UINT64_C(x)  (x ## UI64)
#define V8_INT64_C(x)   (x ## I64)
#define V8_PTR_PREFIX "ll"
#else  // _MSC_VER
#define V8_UINT64_C(x)  (x ## UL)
#define V8_INT64_C(x)   (x ## L)
#define V8_PTR_PREFIX "l"
#endif  // _MSC_VER
#else  // V8_HOST_ARCH_64_BIT
#define V8_PTR_PREFIX ""
#endif  // V8_HOST_ARCH_64_BIT

#define V8PRIxPTR V8_PTR_PREFIX "x"
#define V8PRIdPTR V8_PTR_PREFIX "d"

// Fix for Mac OS X defining uintptr_t as "unsigned long":
#if defined(__APPLE__) && defined(__MACH__)
#undef V8PRIxPTR
#define V8PRIxPTR "lx"
#endif
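
// Illustrative usage of the macros above (a sketch, not compiled here; the
// variable names are made up). On a 64-bit host:
//
//   uint64_t mask = V8_UINT64_C(0xFFFFFFFF00000000);  // portable 64-bit literal
//   intptr_t ptr_bits = reinterpret_cast<intptr_t>(&mask);
//   printf("%" V8PRIxPTR "\n", ptr_bits);             // pointer-sized hex output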

// Code-point values in Unicode 4.0 are 21 bits wide.
typedef uint16_t uc16;
typedef int32_t uc32;

// -----------------------------------------------------------------------------
// Constants

const int KB = 1024;
const int MB = KB * KB;
const int GB = KB * KB * KB;
const int kMaxInt = 0x7FFFFFFF;
const int kMinInt = -kMaxInt - 1;

const uint32_t kMaxUInt32 = 0xFFFFFFFFu;

const int kCharSize = sizeof(char);      // NOLINT
const int kShortSize = sizeof(short);    // NOLINT
const int kIntSize = sizeof(int);        // NOLINT
const int kDoubleSize = sizeof(double);  // NOLINT
const int kPointerSize = sizeof(void*);  // NOLINT
const int kIntptrSize = sizeof(intptr_t);  // NOLINT

#if V8_HOST_ARCH_64_BIT
const int kPointerSizeLog2 = 3;
const intptr_t kIntptrSignBit = V8_INT64_C(0x8000000000000000);
#else
const int kPointerSizeLog2 = 2;
const intptr_t kIntptrSignBit = 0x80000000;
#endif

const int kObjectAlignmentBits = kPointerSizeLog2;
const intptr_t kObjectAlignment = 1 << kObjectAlignmentBits;
const intptr_t kObjectAlignmentMask = kObjectAlignment - 1;

// Desired alignment for pointers.
const intptr_t kPointerAlignment = (1 << kPointerSizeLog2);
const intptr_t kPointerAlignmentMask = kPointerAlignment - 1;


// Tag information for Failure.
const int kFailureTag = 3;
const int kFailureTagSize = 2;
const intptr_t kFailureTagMask = (1 << kFailureTagSize) - 1;


const int kBitsPerByte = 8;
const int kBitsPerByteLog2 = 3;
const int kBitsPerPointer = kPointerSize * kBitsPerByte;
const int kBitsPerInt = kIntSize * kBitsPerByte;


// Zap-value: The value used for zapping dead objects.
// Should be a recognizable hex value tagged as a heap object pointer.
#ifdef V8_HOST_ARCH_64_BIT
const Address kZapValue =
    reinterpret_cast<Address>(V8_UINT64_C(0xdeadbeedbeadbeed));
const Address kHandleZapValue =
    reinterpret_cast<Address>(V8_UINT64_C(0x1baddead0baddead));
const Address kFromSpaceZapValue =
    reinterpret_cast<Address>(V8_UINT64_C(0x1beefdad0beefdad));
#else
const Address kZapValue = reinterpret_cast<Address>(0xdeadbeed);
const Address kHandleZapValue = reinterpret_cast<Address>(0xbaddead);
const Address kFromSpaceZapValue = reinterpret_cast<Address>(0xbeefdad);
#endif


// -----------------------------------------------------------------------------
// Forward declarations for frequently used classes
// (sorted alphabetically)

class AccessorInfo;
class Allocation;
class Arguments;
class Assembler;
class BreakableStatement;
class Code;
class CodeGenerator;
class CodeStub;
class Context;
class Debug;
class Debugger;
class DebugInfo;
class Descriptor;
class DescriptorArray;
class Expression;
class ExternalReference;
class FixedArray;
class FunctionEntry;
class FunctionLiteral;
class FunctionTemplateInfo;
class NumberDictionary;
class StringDictionary;
class FreeStoreAllocationPolicy;
template <typename T> class Handle;
class Heap;
class HeapObject;
class IC;
class InterceptorInfo;
class IterationStatement;
class Array;
class JSArray;
class JSFunction;
class JSObject;
class LargeObjectSpace;
template <typename T, class P = FreeStoreAllocationPolicy> class List;
class LookupResult;
class MacroAssembler;
class Map;
class MapSpace;
class MarkCompactCollector;
class NewSpace;
class NodeVisitor;
class Object;
class OldSpace;
class Property;
class Proxy;
class RegExpNode;
struct RegExpCompileData;
class RegExpTree;
class RegExpCompiler;
class RegExpVisitor;
class Scope;
template<class Allocator = FreeStoreAllocationPolicy> class ScopeInfo;
class Script;
class Slot;
class Smi;
class Statement;
class String;
class Struct;
class SwitchStatement;
class AstVisitor;
class Variable;
class VariableProxy;
class RelocInfo;
class Deserializer;
class MessageLocation;
class ObjectGroup;
class TickSample;
class VirtualMemory;
class Mutex;
class ZoneScopeInfo;

typedef bool (*WeakSlotCallback)(Object** pointer);

// -----------------------------------------------------------------------------
// Miscellaneous

// NOTE: SpaceIterator depends on AllocationSpace enumeration values being
// consecutive.
enum AllocationSpace {
  NEW_SPACE,          // Semispaces collected with copying collector.
  OLD_POINTER_SPACE,  // May contain pointers to new space.
  OLD_DATA_SPACE,     // Must not have pointers to new space.
  CODE_SPACE,         // No pointers to new space, marked executable.
  MAP_SPACE,          // Only and all map objects.
  CELL_SPACE,         // Only and all cell objects.
  LO_SPACE,           // Promoted large objects.

  FIRST_SPACE = NEW_SPACE,
  LAST_SPACE = LO_SPACE
};
const int kSpaceTagSize = 3;
const int kSpaceTagMask = (1 << kSpaceTagSize) - 1;
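
// A minimal sketch of why consecutive enumeration values matter (shown for
// illustration only; ProcessSpace is a hypothetical helper): iterators such as
// SpaceIterator can walk the spaces numerically.
//
//   for (int space = FIRST_SPACE; space <= LAST_SPACE; space++) {
//     ProcessSpace(static_cast<AllocationSpace>(space));
//   }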


// A flag that indicates whether objects should be pretenured when
// allocated (allocated directly into the old generation) or not
// (allocated in the young generation if the object size and type
// allows).
enum PretenureFlag { NOT_TENURED, TENURED };

enum GarbageCollector { SCAVENGER, MARK_COMPACTOR };

enum Executability { NOT_EXECUTABLE, EXECUTABLE };


// A CodeDesc describes a buffer holding instructions and relocation
// information. The instructions start at the beginning of the buffer
// and grow forward, the relocation information starts at the end of
// the buffer and grows backward.
//
//  |<--------------- buffer_size ---------------->|
//  |<-- instr_size -->|        |<-- reloc_size -->|
//  +==================+========+==================+
//  |   instructions   |  free  |    reloc info    |
//  +==================+========+==================+
//  ^
//  |
//  buffer

struct CodeDesc {
  byte* buffer;
  int buffer_size;
  int instr_size;
  int reloc_size;
  Assembler* origin;
};
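
// Illustrative layout invariants implied by the diagram above (a sketch of the
// intended contract for a filled-in CodeDesc named 'desc', not an assertion
// checked in this header):
//
//   desc.instr_size + desc.reloc_size <= desc.buffer_size
//   instructions occupy [desc.buffer, desc.buffer + desc.instr_size)
//   reloc info occupies [desc.buffer + desc.buffer_size - desc.reloc_size,
//                        desc.buffer + desc.buffer_size)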


// Callback function on object slots, used for iterating heap object slots in
// HeapObjects, global pointers to heap objects, etc. The callback may change
// the value of the slot.
typedef void (*ObjectSlotCallback)(HeapObject** pointer);


// Callback function used for iterating objects in heap spaces,
// for example, scanning heap objects.
typedef int (*HeapObjectCallback)(HeapObject* obj);
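
// A sketch of callbacks matching the typedefs above (the function names and
// the Forwarded() helper are hypothetical):
//
//   static void UpdateSlot(HeapObject** p) { *p = Forwarded(*p); }
//   static int SizeOfObject(HeapObject* obj) { return obj->Size(); }
//
// UpdateSlot can then be passed wherever an ObjectSlotCallback is expected,
// and SizeOfObject wherever a HeapObjectCallback is expected.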


// Callback function used for checking constraints when copying/relocating
// objects. Returns true if an object can be copied/relocated from its
// old_addr to a new_addr.
typedef bool (*ConstraintCallback)(Address new_addr, Address old_addr);


// Callback function on inline caches, used for iterating over inline caches
// in compiled code.
typedef void (*InlineCacheCallback)(Code* code, Address ic);


// State for inline cache call sites. Aliased as IC::State.
enum InlineCacheState {
  // Has never been executed.
  UNINITIALIZED,
  // Has been executed but monomorphic state has been delayed.
  PREMONOMORPHIC,
  // Has been executed and only one receiver type has been seen.
  MONOMORPHIC,
  // Like MONOMORPHIC but check failed due to prototype.
  MONOMORPHIC_PROTOTYPE_FAILURE,
  // Multiple receiver types have been seen.
  MEGAMORPHIC,
  // Special states for debug break or step in prepare stubs.
  DEBUG_BREAK,
  DEBUG_PREPARE_STEP_IN
};


enum InLoopFlag {
  NOT_IN_LOOP,
  IN_LOOP
};


// Type of properties.
// Order of properties is significant.
// Must fit in the BitField PropertyDetails::TypeField.
// A copy of this is in mirror-delay.js.
enum PropertyType {
  NORMAL              = 0,  // only in slow mode
  FIELD               = 1,  // only in fast mode
  CONSTANT_FUNCTION   = 2,  // only in fast mode
  CALLBACKS           = 3,
  INTERCEPTOR         = 4,  // only in lookup results, not in descriptors.
  MAP_TRANSITION      = 5,  // only in fast mode
  CONSTANT_TRANSITION = 6,  // only in fast mode
  NULL_DESCRIPTOR     = 7,  // only in fast mode
  // All properties before MAP_TRANSITION are real.
  FIRST_PHANTOM_PROPERTY_TYPE = MAP_TRANSITION
};
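
// A minimal sketch of how the ordering is used (IsRealProperty is a
// hypothetical helper, not part of this header): everything that compares
// below MAP_TRANSITION is a real property.
//
//   static inline bool IsRealProperty(PropertyType type) {
//     return type < FIRST_PHANTOM_PROPERTY_TYPE;
//   }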


// Whether to remove map transitions and constant transitions from a
// DescriptorArray.
enum TransitionFlag {
  REMOVE_TRANSITIONS,
  KEEP_TRANSITIONS
};


// Union used for fast testing of specific double values.
union DoubleRepresentation {
  double value;
  int64_t bits;
  DoubleRepresentation(double x) { value = x; }
};
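
// Illustrative use (a sketch, not compiled here): comparing bit patterns lets
// code distinguish values that compare equal as doubles, e.g. -0.0 and +0.0.
//
//   DoubleRepresentation minus_zero(-0.0);
//   DoubleRepresentation plus_zero(0.0);
//   bool is_distinct = (minus_zero.bits != plus_zero.bits);  // true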


// AccessorCallback
struct AccessorDescriptor {
  Object* (*getter)(Object* object, void* data);
  Object* (*setter)(JSObject* object, Object* value, void* data);
  void* data;
};


// Logging and profiling.
// A StateTag represents a possible state of the VM. When compiled with
// ENABLE_LOGGING_AND_PROFILING, the logger maintains a stack of these.
// Creating a VMState object enters a state by pushing on the stack, and
// destroying a VMState object leaves a state by popping the current state
// from the stack.

#define STATE_TAG_LIST(V) \
  V(JS)                   \
  V(GC)                   \
  V(COMPILER)             \
  V(OTHER)                \
  V(EXTERNAL)

enum StateTag {
#define DEF_STATE_TAG(name) name,
  STATE_TAG_LIST(DEF_STATE_TAG)
#undef DEF_STATE_TAG
  // Pseudo-types.
  state_tag_count
};
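
// The list macro can be expanded more than once. A hedged sketch of deriving a
// printable name table from the same list (kStateNames is hypothetical and not
// defined by this header):
//
//   #define DEF_STATE_NAME(name) #name,
//   static const char* const kStateNames[] = { STATE_TAG_LIST(DEF_STATE_NAME) };
//   #undef DEF_STATE_NAME
//   // kStateNames[JS] == "JS", kStateNames[GC] == "GC", and so on.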


// -----------------------------------------------------------------------------
// Macros

// Testers for tagged values.

#define HAS_SMI_TAG(value) \
  ((reinterpret_cast<intptr_t>(value) & kSmiTagMask) == kSmiTag)

#define HAS_FAILURE_TAG(value) \
  ((reinterpret_cast<intptr_t>(value) & kFailureTagMask) == kFailureTag)
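
// Worked example (values illustrative; kSmiTag and kSmiTagMask are defined
// elsewhere): with kSmiTag == 0 and kSmiTagMask == 1, HAS_SMI_TAG(v) is true
// exactly when the low bit of v is clear; with kFailureTag == 3 and
// kFailureTagMask == 3 (see above), HAS_FAILURE_TAG(v) is true exactly when
// the two low bits of v are both set.
//
//   HAS_SMI_TAG(reinterpret_cast<Object*>(0x1234))      // true: ...0100
//   HAS_FAILURE_TAG(reinterpret_cast<Object*>(0x1237))  // true: ...0111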

// OBJECT_SIZE_ALIGN returns the value rounded up to the nearest multiple of
// the object alignment (i.e. a valid HeapObject size).
#define OBJECT_SIZE_ALIGN(value) \
  (((value) + kObjectAlignmentMask) & ~kObjectAlignmentMask)

// POINTER_SIZE_ALIGN returns the value aligned as a pointer.
#define POINTER_SIZE_ALIGN(value) \
  (((value) + kPointerAlignmentMask) & ~kPointerAlignmentMask)
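
// Worked example on a 32-bit host, where kObjectAlignment == 4 and the masks
// are 3 (the 64-bit case rounds to multiples of 8 instead):
//
//   OBJECT_SIZE_ALIGN(13)   // == 16: (13 + 3) & ~3
//   OBJECT_SIZE_ALIGN(16)   // == 16: already-aligned values are unchanged
//   POINTER_SIZE_ALIGN(6)   // == 8:  rounded up to the next pointer boundary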

// The expression OFFSET_OF(type, field) computes the byte-offset
// of the specified field relative to the containing type. This
// corresponds to 'offsetof' (in stddef.h), except that it doesn't
// use 0 or NULL, which causes a problem with the compiler warnings
// we have enabled (which is also why 'offsetof' doesn't seem to work).
// Here we simply use the non-zero value 4, which seems to work.
#define OFFSET_OF(type, field) \
  (reinterpret_cast<intptr_t>(&(reinterpret_cast<type*>(4)->field)) - 4)
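
// Illustrative use (the struct is hypothetical):
//
//   struct Sample { int a; double b; };
//   intptr_t offset_of_b = OFFSET_OF(Sample, b);  // byte offset of 'b' in Sample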


// The expression ARRAY_SIZE(a) is a compile-time constant of type
// size_t which represents the number of elements of the given
// array. You should only use ARRAY_SIZE on statically allocated
// arrays.
#define ARRAY_SIZE(a) \
  ((sizeof(a) / sizeof(*(a))) / \
   static_cast<size_t>(!(sizeof(a) % sizeof(*(a)))))
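
// Illustrative use: the second factor becomes zero (forcing a compile-time
// division-by-zero error) when sizeof(a) is not a multiple of the element
// size, which catches some cases of passing a pointer instead of an array.
//
//   static const int kPrimes[] = { 2, 3, 5, 7, 11 };
//   int count = ARRAY_SIZE(kPrimes);  // 5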


// The USE(x) template is used to silence C++ compiler warnings
// issued for (yet) unused variables (typically parameters).
template <typename T>
static inline void USE(T) { }


// FUNCTION_ADDR(f) gets the address of a C function f.
#define FUNCTION_ADDR(f) \
  (reinterpret_cast<v8::internal::Address>(reinterpret_cast<intptr_t>(f)))


// FUNCTION_CAST<F>(addr) casts an address into a function
// of type F. Used to invoke generated code from within C.
template <typename F>
F FUNCTION_CAST(Address addr) {
  return reinterpret_cast<F>(reinterpret_cast<intptr_t>(addr));
}
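
// Hedged sketch of the round trip (the function and variable names are made
// up): a C++ function is stored as an untyped Address, and generated code at a
// known entry address is invoked through a typed function pointer.
//
//   Address entry = FUNCTION_ADDR(SomeRuntimeFunction);
//   typedef int (*SimpleCall)();
//   SimpleCall call = FUNCTION_CAST<SimpleCall>(code_entry_address);
//   int result = call();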


// A macro to disallow the evil copy constructor and operator= functions.
// This should be used in the private: declarations for a class.
#define DISALLOW_COPY_AND_ASSIGN(TypeName) \
  TypeName(const TypeName&);               \
  void operator=(const TypeName&)


// A macro to disallow all the implicit constructors, namely the
// default constructor, copy constructor and operator= functions.
//
// This should be used in the private: declarations for a class
// that wants to prevent anyone from instantiating it. This is
// especially useful for classes containing only static methods.
#define DISALLOW_IMPLICIT_CONSTRUCTORS(TypeName) \
  TypeName();                                    \
  DISALLOW_COPY_AND_ASSIGN(TypeName)
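
// Typical use (the class is hypothetical): place the macro in the private
// section so attempts to copy the class fail to compile.
//
//   class ScopedThing {
//    public:
//     ScopedThing();
//     ~ScopedThing();
//    private:
//     DISALLOW_COPY_AND_ASSIGN(ScopedThing);
//   };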


// Support for tracking C++ memory allocation. Insert TRACK_MEMORY("Fisk")
// inside a C++ class and new and delete will be overloaded so logging is
// performed.
// This file (globals.h) is included before log.h, so we use direct calls to
// the Logger rather than the LOG macro.
#ifdef DEBUG
#define TRACK_MEMORY(name) \
  void* operator new(size_t size) { \
    void* result = ::operator new(size); \
    Logger::NewEvent(name, result, size); \
    return result; \
  } \
  void operator delete(void* object) { \
    Logger::DeleteEvent(name, object); \
    ::operator delete(object); \
  }
#else
#define TRACK_MEMORY(name)
#endif
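
// Hedged sketch of usage (the class and tag are made up): in DEBUG builds
// every allocation and deallocation of the class is reported to the Logger
// under the given tag; in release builds the macro expands to nothing.
//
//   class Segment {
//    public:
//     TRACK_MEMORY("Segment")
//     // ... rest of the class ...
//   };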

// Define used to help GCC inline more aggressively. Don't bother for debug
// builds; on GCC 3.4.5, using __attribute__((always_inline)) causes
// compilation errors in debug builds.
#if defined(__GNUC__) && !defined(DEBUG)
#if (__GNUC__ >= 4)
#define INLINE(header) inline header __attribute__((always_inline))
#else
#define INLINE(header) inline __attribute__((always_inline)) header
#endif
#else
#define INLINE(header) inline header
#endif

// The type-based aliasing rule allows the compiler to assume that pointers of
// different types (for some definition of different) never alias each other.
// Thus the following code does not work:
//
//   float f = foo();
//   int fbits = *(int*)(&f);
//
// The compiler 'knows' that the int pointer can't refer to f since the types
// don't match, so the compiler may cache f in a register, leaving random data
// in fbits. Using C++ style casts makes no difference; however, a pointer to
// char data is assumed to alias any other pointer. This is the 'memcpy
// exception'.
//
// bit_cast uses the memcpy exception to move the bits from a variable of one
// type to a variable of another type. Of course the end result is likely to
// be implementation dependent. Most compilers (gcc-4.2 and MSVC 2005)
// will completely optimize bit_cast away.
//
// There is an additional use for bit_cast.
// Recent gccs will warn when they see casts that may result in breakage due to
// the type-based aliasing rule. If you have checked that there is no breakage
// you can use bit_cast to cast one pointer type to another. This confuses gcc
// enough that it can no longer see that you have cast one pointer type to
// another, thus avoiding the warning.
template <class Dest, class Source>
inline Dest bit_cast(const Source& source) {
  // Compile time assertion: sizeof(Dest) == sizeof(Source).
  // A compile error here means your Dest and Source have different sizes.
  typedef char VerifySizesAreEqual[sizeof(Dest) == sizeof(Source) ? 1 : -1];

  Dest dest;
  memcpy(&dest, &source, sizeof(dest));
  return dest;
}
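
// Illustrative use (a sketch, not compiled here): reading the bit pattern of a
// double without running afoul of the aliasing rule, unlike the broken
// int-pointer cast shown above.
//
//   double d = 1.5;
//   int64_t dbits = bit_cast<int64_t>(d);  // copies the bytes via memcpy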


} }  // namespace v8::internal

#endif  // V8_GLOBALS_H_