1 // Copyright 2006-2009 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
4 // met:
5 //
6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided
11 // with the distribution.
12 // * Neither the name of Google Inc. nor the names of its
13 // contributors may be used to endorse or promote products derived
14 // from this software without specific prior written permission.
15 //
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27
28 #ifndef V8_GLOBALS_H_
29 #define V8_GLOBALS_H_
30
31 namespace v8 {
32 namespace internal {
33
// Processor architecture detection. For more info on what's defined, see:
//   http://msdn.microsoft.com/en-us/library/b0084kay.aspx
//   http://www.agner.org/optimize/calling_conventions.pdf
// or with gcc, run: "echo | gcc -E -dM -"
// Exactly one V8_HOST_ARCH_* macro and exactly one of
// V8_HOST_ARCH_32_BIT/V8_HOST_ARCH_64_BIT ends up defined.
#if defined(_M_X64) || defined(__x86_64__)
#define V8_HOST_ARCH_X64 1
#define V8_HOST_ARCH_64_BIT 1
#define V8_HOST_CAN_READ_UNALIGNED 1
#elif defined(_M_IX86) || defined(__i386__)
#define V8_HOST_ARCH_IA32 1
#define V8_HOST_ARCH_32_BIT 1
#define V8_HOST_CAN_READ_UNALIGNED 1
#elif defined(__ARMEL__)
#define V8_HOST_ARCH_ARM 1
#define V8_HOST_ARCH_32_BIT 1
#elif defined(_MIPS_ARCH_MIPS32R2)
#define V8_HOST_ARCH_MIPS 1
#define V8_HOST_ARCH_32_BIT 1
#else
#error Your host architecture was not detected as supported by v8
#endif
55
56 #if defined(V8_TARGET_ARCH_X64) || defined(V8_TARGET_ARCH_IA32)
57 #define V8_TARGET_CAN_READ_UNALIGNED 1
58 #elif V8_TARGET_ARCH_ARM
59 #elif V8_TARGET_ARCH_MIPS
60 #else
61 #error Your target architecture is not supported by v8
62 #endif
63
// Support for alternative bool type. This is only enabled if the code is
// compiled with USE_MYBOOL defined. This catches some nasty type bugs.
// For instance, 'bool b = "false";' results in b == true! This is a hidden
// source of bugs.
// However, redefining the bool type does have some negative impact on some
// platforms. It gives rise to compiler warnings (i.e. with
// MSVC) in the API header files when mixing code that uses the standard
// bool with code that uses the redefined version.
// This does not actually belong in the platform code, but needs to be
// defined here because the platform code uses bool, and platform.h is
// include very early in the main include file.

#ifdef USE_MYBOOL
typedef unsigned int __my_bool__;
#define bool __my_bool__  // use 'indirection' to avoid name clashes
#endif

// Raw byte of memory, and an untyped pointer into the heap/code space.
typedef uint8_t byte;
typedef byte* Address;
83
// Define our own macros for writing 64-bit constants. This is less fragile
// than defining __STDC_CONSTANT_MACROS before including <stdint.h>, and it
// works on compilers that don't have it (like MSVC).
#if V8_HOST_ARCH_64_BIT
#ifdef _MSC_VER
#define V8_UINT64_C(x)  (x ## UI64)
#define V8_INT64_C(x)   (x ## I64)
#define V8_PTR_PREFIX "ll"
#else  // _MSC_VER
#define V8_UINT64_C(x)  (x ## UL)
#define V8_INT64_C(x)   (x ## L)
#define V8_PTR_PREFIX "l"
#endif  // _MSC_VER
#else  // V8_HOST_ARCH_64_BIT
#define V8_PTR_PREFIX ""
#endif  // V8_HOST_ARCH_64_BIT

// printf format specifiers for pointer-sized integers.
#define V8PRIxPTR V8_PTR_PREFIX "x"
#define V8PRIdPTR V8_PTR_PREFIX "d"

// Fix for Mac OS X defining uintptr_t as "unsigned long":
#if defined(__APPLE__) && defined(__MACH__)
#undef V8PRIxPTR
#define V8PRIxPTR "lx"
#endif

#if defined(__APPLE__) && defined(__MACH__)
#define USING_MAC_ABI
#endif

// Code-point values in Unicode 4.0 are 21 bits wide: uc16 holds a UTF-16
// code unit, uc32 holds a full code point.
typedef uint16_t uc16;
typedef int32_t uc32;
117
// -----------------------------------------------------------------------------
// Constants

const int KB = 1024;
const int MB = KB * KB;
const int GB = KB * KB * KB;
const int kMaxInt = 0x7FFFFFFF;
const int kMinInt = -kMaxInt - 1;  // written this way to avoid overflow

const uint32_t kMaxUInt32 = 0xFFFFFFFFu;

// Sizes of fundamental types, in bytes.
const int kCharSize     = sizeof(char);      // NOLINT
const int kShortSize    = sizeof(short);     // NOLINT
const int kIntSize      = sizeof(int);       // NOLINT
const int kDoubleSize   = sizeof(double);    // NOLINT
const int kPointerSize  = sizeof(void*);     // NOLINT
const int kIntptrSize   = sizeof(intptr_t);  // NOLINT
136 #if V8_HOST_ARCH_64_BIT
137 const int kPointerSizeLog2 = 3;
138 const intptr_t kIntptrSignBit = V8_INT64_C(0x8000000000000000);
139 #else
140 const int kPointerSizeLog2 = 2;
141 const intptr_t kIntptrSignBit = 0x80000000;
142 #endif
143
144 const int kObjectAlignmentBits = kPointerSizeLog2;
145 const intptr_t kObjectAlignment = 1 << kObjectAlignmentBits;
146 const intptr_t kObjectAlignmentMask = kObjectAlignment - 1;
147
148 // Desired alignment for pointers.
149 const intptr_t kPointerAlignment = (1 << kPointerSizeLog2);
150 const intptr_t kPointerAlignmentMask = kPointerAlignment - 1;
151
152 // Desired alignment for maps.
153 #if V8_HOST_ARCH_64_BIT
154 const intptr_t kMapAlignmentBits = kObjectAlignmentBits;
155 #else
156 const intptr_t kMapAlignmentBits = kObjectAlignmentBits + 3;
157 #endif
158 const intptr_t kMapAlignment = (1 << kMapAlignmentBits);
159 const intptr_t kMapAlignmentMask = kMapAlignment - 1;
160
161 // Tag information for Failure.
162 const int kFailureTag = 3;
163 const int kFailureTagSize = 2;
164 const intptr_t kFailureTagMask = (1 << kFailureTagSize) - 1;
165
166
167 const int kBitsPerByte = 8;
168 const int kBitsPerByteLog2 = 3;
169 const int kBitsPerPointer = kPointerSize * kBitsPerByte;
170 const int kBitsPerInt = kIntSize * kBitsPerByte;
171
172
173 // Zap-value: The value used for zapping dead objects.
174 // Should be a recognizable hex value tagged as a heap object pointer.
175 #ifdef V8_HOST_ARCH_64_BIT
176 const Address kZapValue =
177 reinterpret_cast<Address>(V8_UINT64_C(0xdeadbeedbeadbeed));
178 const Address kHandleZapValue =
179 reinterpret_cast<Address>(V8_UINT64_C(0x1baddead0baddead));
180 const Address kFromSpaceZapValue =
181 reinterpret_cast<Address>(V8_UINT64_C(0x1beefdad0beefdad));
182 #else
183 const Address kZapValue = reinterpret_cast<Address>(0xdeadbeed);
184 const Address kHandleZapValue = reinterpret_cast<Address>(0xbaddead);
185 const Address kFromSpaceZapValue = reinterpret_cast<Address>(0xbeefdad);
186 #endif
187
188
189 // Number of bits to represent the page size for paged spaces. The value of 13
190 // gives 8K bytes per page.
191 const int kPageSizeBits = 13;
192
193
194 // Constants relevant to double precision floating point numbers.
195
196 // Quiet NaNs have bits 51 to 62 set, possibly the sign bit, and no
197 // other bits set.
198 const uint64_t kQuietNaNMask = static_cast<uint64_t>(0xfff) << 51;
199 // If looking only at the top 32 bits, the QNaN mask is bits 19 to 30.
200 const uint32_t kQuietNaNHighBitsMask = 0xfff << (51 - 32);
201
202
// -----------------------------------------------------------------------------
// Forward declarations for frequently used classes
// (sorted alphabetically)

class AccessorInfo;
class Allocation;
class Arguments;
class Array;
class Assembler;
class AssertNoAllocation;
class AstVisitor;
class BreakableStatement;
class Code;
class CodeGenerator;
class CodeStub;
class Context;
class Debug;
class Debugger;
class DebugInfo;
class Descriptor;
class DescriptorArray;
class Deserializer;
class Expression;
class ExternalReference;
class FixedArray;
class FreeStoreAllocationPolicy;
class FunctionEntry;
class FunctionLiteral;
class FunctionTemplateInfo;
template <typename T> class Handle;
class Heap;
class HeapObject;
class IC;
class InterceptorInfo;
class IterationStatement;
class JSArray;
class JSFunction;
class JSObject;
class LargeObjectSpace;
template <typename T, class P = FreeStoreAllocationPolicy> class List;
class LookupResult;
class MacroAssembler;
class Map;
class MapSpace;
class MarkCompactCollector;
class MessageLocation;
class Mutex;
class NewSpace;
class NodeVisitor;
class NumberDictionary;
class Object;
class ObjectGroup;
class OldSpace;
class Property;
class Proxy;
struct RegExpCompileData;
class RegExpCompiler;
class RegExpNode;
class RegExpTree;
class RegExpVisitor;
class RelocInfo;
class Scope;
template<class Allocator = FreeStoreAllocationPolicy> class ScopeInfo;
class Script;
class Slot;
class Smi;
class Statement;
class String;
class StringDictionary;
class Struct;
class SwitchStatement;
class TickSample;
class Variable;
class VariableProxy;
class VirtualMemory;
class ZoneScopeInfo;

// Visitor for weak handle slots; returns true if the slot should be retained.
typedef bool (*WeakSlotCallback)(Object** pointer);
281
// -----------------------------------------------------------------------------
// Miscellaneous

// NOTE: SpaceIterator depends on AllocationSpace enumeration values being
// consecutive.
enum AllocationSpace {
  NEW_SPACE,          // Semispaces collected with copying collector.
  OLD_POINTER_SPACE,  // May contain pointers to new space.
  OLD_DATA_SPACE,     // Must not have pointers to new space.
  CODE_SPACE,         // No pointers to new space, marked executable.
  MAP_SPACE,          // Only and all map objects.
  CELL_SPACE,         // Only and all cell objects.
  LO_SPACE,           // Promoted large objects.

  FIRST_SPACE = NEW_SPACE,
  LAST_SPACE = LO_SPACE,
  FIRST_PAGED_SPACE = OLD_POINTER_SPACE,
  LAST_PAGED_SPACE = CELL_SPACE
};
// Number of bits needed to encode an AllocationSpace value, and the
// corresponding mask.
const int kSpaceTagSize = 3;
const int kSpaceTagMask = (1 << kSpaceTagSize) - 1;


// A flag that indicates whether objects should be pretenured when
// allocated (allocated directly into the old generation) or not
// (allocated in the young generation if the object size and type
// allows).
enum PretenureFlag { NOT_TENURED, TENURED };

enum GarbageCollector { SCAVENGER, MARK_COMPACTOR };

enum Executability { NOT_EXECUTABLE, EXECUTABLE };

enum VisitMode { VISIT_ALL, VISIT_ALL_IN_SCAVENGE, VISIT_ONLY_STRONG };

// Flag indicating whether code is built in to the VM (one of the natives
// files).
enum NativesFlag { NOT_NATIVES_CODE, NATIVES_CODE };
321
322 // A CodeDesc describes a buffer holding instructions and relocation
323 // information. The instructions start at the beginning of the buffer
324 // and grow forward, the relocation information starts at the end of
325 // the buffer and grows backward.
326 //
327 // |<--------------- buffer_size ---------------->|
328 // |<-- instr_size -->| |<-- reloc_size -->|
329 // +==================+========+==================+
330 // | instructions | free | reloc info |
331 // +==================+========+==================+
332 // ^
333 // |
334 // buffer
335
336 struct CodeDesc {
337 byte* buffer;
338 int buffer_size;
339 int instr_size;
340 int reloc_size;
341 Assembler* origin;
342 };
343
344
345 // Callback function on object slots, used for iterating heap object slots in
346 // HeapObjects, global pointers to heap objects, etc. The callback allows the
347 // callback function to change the value of the slot.
348 typedef void (*ObjectSlotCallback)(HeapObject** pointer);
349
350
351 // Callback function used for iterating objects in heap spaces,
352 // for example, scanning heap objects.
353 typedef int (*HeapObjectCallback)(HeapObject* obj);
354
355
356 // Callback function used for checking constraints when copying/relocating
357 // objects. Returns true if an object can be copied/relocated from its
358 // old_addr to a new_addr.
359 typedef bool (*ConstraintCallback)(Address new_addr, Address old_addr);
360
361
362 // Callback function on inline caches, used for iterating over inline caches
363 // in compiled code.
364 typedef void (*InlineCacheCallback)(Code* code, Address ic);
365
366
// State for inline cache call sites. Aliased as IC::State.
enum InlineCacheState {
  // Has never been executed.
  UNINITIALIZED,
  // Has been executed but monomorphic state has been delayed.
  PREMONOMORPHIC,
  // Has been executed and only one receiver type has been seen.
  MONOMORPHIC,
  // Like MONOMORPHIC but check failed due to prototype.
  MONOMORPHIC_PROTOTYPE_FAILURE,
  // Multiple receiver types have been seen.
  MEGAMORPHIC,
  // Special states for debug break or step in prepare stubs.
  DEBUG_BREAK,
  DEBUG_PREPARE_STEP_IN
};


// Whether a call site sits inside a loop (used to bias IC heuristics).
enum InLoopFlag {
  NOT_IN_LOOP,
  IN_LOOP
};


// Bit flags passed to the CallFunction stub.
enum CallFunctionFlags {
  NO_CALL_FUNCTION_FLAGS = 0,
  RECEIVER_MIGHT_BE_VALUE = 1 << 0  // Receiver might not be a JSObject.
};
395
396
// Type of properties.
// Order of properties is significant.
// Must fit in the BitField PropertyDetails::TypeField.
// A copy of this is in mirror-debugger.js.
enum PropertyType {
  NORMAL              = 0,  // only in slow mode
  FIELD               = 1,  // only in fast mode
  CONSTANT_FUNCTION   = 2,  // only in fast mode
  CALLBACKS           = 3,
  INTERCEPTOR         = 4,  // only in lookup results, not in descriptors.
  MAP_TRANSITION      = 5,  // only in fast mode
  CONSTANT_TRANSITION = 6,  // only in fast mode
  NULL_DESCRIPTOR     = 7,  // only in fast mode
  // All properties before MAP_TRANSITION are real.
  FIRST_PHANTOM_PROPERTY_TYPE = MAP_TRANSITION
};


// Whether to remove map transitions and constant transitions from a
// DescriptorArray.
enum TransitionFlag {
  REMOVE_TRANSITIONS,
  KEEP_TRANSITIONS
};
421
422
// Union used for fast testing of specific double values: construct from a
// double, then inspect its raw IEEE-754 bit pattern through 'bits'.
union DoubleRepresentation {
  double value;
  int64_t bits;
  DoubleRepresentation(double x) { value = x; }
};
429
430
431 // AccessorCallback
432 struct AccessorDescriptor {
433 Object* (*getter)(Object* object, void* data);
434 Object* (*setter)(JSObject* object, Object* value, void* data);
435 void* data;
436 };
437
438
// Logging and profiling.
// A StateTag represents a possible state of the VM. When compiled with
// ENABLE_LOGGING_AND_PROFILING, the logger maintains a stack of these.
// Creating a VMState object enters a state by pushing on the stack, and
// destroying a VMState object leaves a state by popping the current state
// from the stack.

#define STATE_TAG_LIST(V) \
  V(JS)                   \
  V(GC)                   \
  V(COMPILER)             \
  V(OTHER)                \
  V(EXTERNAL)

enum StateTag {
#define DEF_STATE_TAG(name) name,
  STATE_TAG_LIST(DEF_STATE_TAG)
#undef DEF_STATE_TAG
  // Pseudo-types.
  state_tag_count
};
460
461
// -----------------------------------------------------------------------------
// Macros

// Testers for tag bits.

#define HAS_SMI_TAG(value) \
  ((reinterpret_cast<intptr_t>(value) & kSmiTagMask) == kSmiTag)

#define HAS_FAILURE_TAG(value) \
  ((reinterpret_cast<intptr_t>(value) & kFailureTagMask) == kFailureTag)

// OBJECT_SIZE_ALIGN returns the value aligned HeapObject size
#define OBJECT_SIZE_ALIGN(value) \
  (((value) + kObjectAlignmentMask) & ~kObjectAlignmentMask)

// POINTER_SIZE_ALIGN returns the value aligned as a pointer.
#define POINTER_SIZE_ALIGN(value) \
  (((value) + kPointerAlignmentMask) & ~kPointerAlignmentMask)

// MAP_SIZE_ALIGN returns the value aligned as a map pointer.
#define MAP_SIZE_ALIGN(value) \
  (((value) + kMapAlignmentMask) & ~kMapAlignmentMask)

// The expression OFFSET_OF(type, field) computes the byte-offset
// of the specified field relative to the containing type. This
// corresponds to 'offsetof' (in stddef.h), except that it doesn't
// use 0 or NULL, which causes a problem with the compiler warnings
// we have enabled (which is also why 'offsetof' doesn't seem to work).
// Here we simply use the non-zero value 4, which seems to work.
#define OFFSET_OF(type, field) \
  (reinterpret_cast<intptr_t>(&(reinterpret_cast<type*>(4)->field)) - 4)


// The expression ARRAY_SIZE(a) is a compile-time constant of type
// size_t which represents the number of elements of the given
// array. You should only use ARRAY_SIZE on statically allocated
// arrays. (The division by the remainder check forces a divide-by-zero
// compile error if 'a' is a pointer rather than an array.)
#define ARRAY_SIZE(a)                               \
  ((sizeof(a) / sizeof(*(a))) /                     \
   static_cast<size_t>(!(sizeof(a) % sizeof(*(a)))))
502
503
// The USE(x) template is used to silence C++ compiler warnings
// issued for (yet) unused variables (typically parameters).
template <typename T>
static inline void USE(T) { }


// FUNCTION_ADDR(f) gets the address of a C function f.
#define FUNCTION_ADDR(f) \
  (reinterpret_cast<v8::internal::Address>(reinterpret_cast<intptr_t>(f)))
513
514
515 // FUNCTION_CAST<F>(addr) casts an address into a function
516 // of type F. Used to invoke generated code from within C.
517 template <typename F>
FUNCTION_CAST(Address addr)518 F FUNCTION_CAST(Address addr) {
519 return reinterpret_cast<F>(reinterpret_cast<intptr_t>(addr));
520 }
521
522
523 // A macro to disallow the evil copy constructor and operator= functions
524 // This should be used in the private: declarations for a class
525 #define DISALLOW_COPY_AND_ASSIGN(TypeName) \
526 TypeName(const TypeName&); \
527 void operator=(const TypeName&)
528
529
530 // A macro to disallow all the implicit constructors, namely the
531 // default constructor, copy constructor and operator= functions.
532 //
533 // This should be used in the private: declarations for a class
534 // that wants to prevent anyone from instantiating it. This is
535 // especially useful for classes containing only static methods.
536 #define DISALLOW_IMPLICIT_CONSTRUCTORS(TypeName) \
537 TypeName(); \
538 DISALLOW_COPY_AND_ASSIGN(TypeName)
539
540
541 // Support for tracking C++ memory allocation. Insert TRACK_MEMORY("Fisk")
542 // inside a C++ class and new and delete will be overloaded so logging is
543 // performed.
544 // This file (globals.h) is included before log.h, so we use direct calls to
545 // the Logger rather than the LOG macro.
546 #ifdef DEBUG
547 #define TRACK_MEMORY(name) \
548 void* operator new(size_t size) { \
549 void* result = ::operator new(size); \
550 Logger::NewEvent(name, result, size); \
551 return result; \
552 } \
553 void operator delete(void* object) { \
554 Logger::DeleteEvent(name, object); \
555 ::operator delete(object); \
556 }
557 #else
558 #define TRACK_MEMORY(name)
559 #endif
560
561 // define used for helping GCC to make better inlining. Don't bother for debug
562 // builds. On GCC 3.4.5 using __attribute__((always_inline)) causes compilation
563 // errors in debug build.
564 #if defined(__GNUC__) && !defined(DEBUG)
565 #if (__GNUC__ >= 4)
566 #define INLINE(header) inline header __attribute__((always_inline))
567 #else
568 #define INLINE(header) inline __attribute__((always_inline)) header
569 #endif
570 #else
571 #define INLINE(header) inline header
572 #endif
573
// The type-based aliasing rule allows the compiler to assume that pointers of
// different types (for some definition of different) never alias each other.
// Thus the following code does not work:
//
// float f = foo();
// int fbits = *(int*)(&f);
//
// The compiler 'knows' that the int pointer can't refer to f since the types
// don't match, so the compiler may cache f in a register, leaving random data
// in fbits.  Using C++ style casts makes no difference, however a pointer to
// char data is assumed to alias any other pointer.  This is the 'memcpy
// exception'.
//
// Bit_cast uses the memcpy exception to move the bits from a variable of one
// type of a variable of another type.  Of course the end result is likely to
// be implementation dependent.  Most compilers (gcc-4.2 and MSVC 2005)
// will completely optimize bit_cast away.
//
// There is an additional use for bit_cast.
// Recent gccs will warn when they see casts that may result in breakage due to
// the type-based aliasing rule.  If you have checked that there is no breakage
// you can use bit_cast to cast one pointer type to another.  This confuses gcc
// enough that it can no longer see that you have cast one pointer type to
// another thus avoiding the warning.
template <class Dest, class Source>
inline Dest bit_cast(const Source& source) {
  // Compile time assertion: sizeof(Dest) == sizeof(Source)
  // A compile error here means your Dest and Source have different sizes.
  typedef char VerifySizesAreEqual[sizeof(Dest) == sizeof(Source) ? 1 : -1];

  Dest dest;
  memcpy(&dest, &source, sizeof(dest));
  return dest;
}
608
609
// Feature flags bit positions. They are mostly based on the CPUID spec.
// (We assign CPUID itself to one of the currently reserved bits --
// feel free to change this if needed.)
enum CpuFeature { SSE3 = 32,   // x86
                  SSE2 = 26,   // x86
                  CMOV = 15,   // x86
                  RDTSC = 4,   // x86
                  CPUID = 10,  // x86
                  VFP3 = 1,    // ARM
                  ARMv7 = 2,   // ARM
                  SAHF = 0};   // x86
621
622 } } // namespace v8::internal
623
624 #endif // V8_GLOBALS_H_
625