• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 // Copyright 2006-2008 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
4 // met:
5 //
6 //     * Redistributions of source code must retain the above copyright
7 //       notice, this list of conditions and the following disclaimer.
8 //     * Redistributions in binary form must reproduce the above
9 //       copyright notice, this list of conditions and the following
10 //       disclaimer in the documentation and/or other materials provided
11 //       with the distribution.
12 //     * Neither the name of Google Inc. nor the names of its
13 //       contributors may be used to endorse or promote products derived
14 //       from this software without specific prior written permission.
15 //
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 
28 #ifndef V8_UTILS_H_
29 #define V8_UTILS_H_
30 
31 #include <stdlib.h>
32 #include <string.h>
33 
34 #include "globals.h"
35 #include "checks.h"
36 #include "allocation.h"
37 
38 namespace v8 {
39 namespace internal {
40 
41 // ----------------------------------------------------------------------------
42 // General helper functions
43 
44 #define IS_POWER_OF_TWO(x) (((x) & ((x) - 1)) == 0)
45 
46 // Returns true iff x is a power of 2 (or zero). Cannot be used with the
47 // maximally negative value of the type T (the -1 overflows).
template <typename T>
static inline bool IsPowerOf2(T x) {
  // A power of two (or zero) shares no set bits with its predecessor.
  return (x & (x - 1)) == 0;
}
52 
53 
54 // X must be a power of 2.  Returns the number of trailing zeros.
55 template <typename T>
WhichPowerOf2(T x)56 static inline int WhichPowerOf2(T x) {
57   ASSERT(IsPowerOf2(x));
58   ASSERT(x != 0);
59   if (x < 0) return 31;
60   int bits = 0;
61 #ifdef DEBUG
62   int original_x = x;
63 #endif
64   if (x >= 0x10000) {
65     bits += 16;
66     x >>= 16;
67   }
68   if (x >= 0x100) {
69     bits += 8;
70     x >>= 8;
71   }
72   if (x >= 0x10) {
73     bits += 4;
74     x >>= 4;
75   }
76   switch (x) {
77     default: UNREACHABLE();
78     case 8: bits++;  // Fall through.
79     case 4: bits++;  // Fall through.
80     case 2: bits++;  // Fall through.
81     case 1: break;
82   }
83   ASSERT_EQ(1 << bits, original_x);
84   return bits;
85   return 0;
86 }
87 
88 
89 // The C++ standard leaves the semantics of '>>' undefined for
90 // negative signed operands. Most implementations do the right thing,
91 // though.
static inline int ArithmeticShiftRight(int x, int s) {
  // Relies on the (near-universal) implementation choice of an arithmetic
  // shift for negative signed left operands.
  const int shifted = x >> s;
  return shifted;
}
95 
96 
97 // Compute the 0-relative offset of some absolute value x of type T.
98 // This allows conversion of Addresses and integral types into
99 // 0-relative int offsets.
template <typename T>
static inline intptr_t OffsetFrom(T x) {
  // Subtracting the zero value of T yields an integral offset; this works
  // uniformly for pointer (Address) and integral types.
  const T zero = static_cast<T>(0);
  return x - zero;
}
104 
105 
106 // Compute the absolute value of type T for some 0-relative offset x.
107 // This allows conversion of 0-relative int offsets into Addresses and
108 // integral types.
template <typename T>
static inline T AddressFrom(intptr_t x) {
  // Adding the offset to the zero value of T is the inverse of OffsetFrom;
  // the outer cast restores T for integral instantiations.
  const T base = static_cast<T>(0);
  return static_cast<T>(base + x);
}
113 
114 
115 // Return the largest multiple of m which is <= x.
116 template <typename T>
RoundDown(T x,int m)117 static inline T RoundDown(T x, int m) {
118   ASSERT(IsPowerOf2(m));
119   return AddressFrom<T>(OffsetFrom(x) & -m);
120 }
121 
122 
123 // Return the smallest multiple of m which is >= x.
template <typename T>
static inline T RoundUp(T x, int m) {
  // Bias upward so that rounding down lands on the next multiple of m.
  const T biased = x + m - 1;
  return RoundDown(biased, m);
}
128 
129 
// Three-way comparison (qsort-style): negative, zero or positive.
template <typename T>
static int Compare(const T& a, const T& b) {
  if (a == b) return 0;
  return (a < b) ? -1 : 1;
}
139 
140 
// Three-way comparison of the pointed-to values (not the addresses).
template <typename T>
static int PointerValueCompare(const T* a, const T* b) {
  const T& lhs = *a;
  const T& rhs = *b;
  if (lhs == rhs) return 0;
  return (lhs < rhs) ? -1 : 1;
}
145 
146 
147 // Returns the smallest power of two which is >= x. If you pass in a
148 // number that is already a power of two, it is returned as is.
149 // Implementation is from "Hacker's Delight" by Henry S. Warren, Jr.,
150 // figure 3-3, page 48, where the function is called clp2.
RoundUpToPowerOf2(uint32_t x)151 static inline uint32_t RoundUpToPowerOf2(uint32_t x) {
152   ASSERT(x <= 0x80000000u);
153   x = x - 1;
154   x = x | (x >> 1);
155   x = x | (x >> 2);
156   x = x | (x >> 4);
157   x = x | (x >> 8);
158   x = x | (x >> 16);
159   return x + 1;
160 }
161 
162 
163 
template <typename T>
static inline bool IsAligned(T value, T alignment) {
  ASSERT(IsPowerOf2(alignment));
  // An aligned value has none of the low bits of the alignment mask set.
  const T low_bits_mask = alignment - 1;
  return (value & low_bits_mask) == 0;
}
169 
170 
171 // Returns true if (addr + offset) is aligned.
IsAddressAligned(Address addr,intptr_t alignment,int offset)172 static inline bool IsAddressAligned(Address addr,
173                                     intptr_t alignment,
174                                     int offset) {
175   intptr_t offs = OffsetFrom(addr + offset);
176   return IsAligned(offs, alignment);
177 }
178 
179 
180 // Returns the maximum of the two parameters.
template <typename T>
static T Max(T a, T b) {
  // When the arguments compare equal, 'a' is returned (same choice as the
  // conditional form 'a < b ? b : a').
  if (a < b) return b;
  return a;
}
185 
186 
187 // Returns the minimum of the two parameters.
template <typename T>
static T Min(T a, T b) {
  // When the arguments compare equal, 'b' is returned (same choice as the
  // conditional form 'a < b ? a : b').
  if (a < b) return a;
  return b;
}
192 
193 
StrLength(const char * string)194 inline int StrLength(const char* string) {
195   size_t length = strlen(string);
196   ASSERT(length == static_cast<size_t>(static_cast<int>(length)));
197   return static_cast<int>(length);
198 }
199 
200 
201 // ----------------------------------------------------------------------------
202 // BitField is a help template for encoding and decode bitfield with
203 // unsigned content.
template<class T, int shift, int size>
class BitField {
 public:
  // Tells whether the provided value fits into the bit field.
  static bool is_valid(T value) {
    // The value fits iff it has no bits set above the field's 'size' bits.
    return (static_cast<uint32_t>(value) & ~((1U << (size)) - 1)) == 0;
  }

  // Returns a uint32_t mask of bit field.
  static uint32_t mask() {
    // To use all bits of a uint32 in a bitfield without compiler warnings we
    // have to compute 2^32 without using a shift count of 32.
    return ((1U << shift) << size) - (1U << shift);
  }

  // Returns a uint32_t with the bit field value encoded.
  static uint32_t encode(T value) {
    ASSERT(is_valid(value));
    return static_cast<uint32_t>(value) << shift;
  }

  // Extracts the bit field from the value.
  static T decode(uint32_t value) {
    // Mask first so neighbouring fields in 'value' do not leak in.
    return static_cast<T>((value & mask()) >> shift);
  }

  // Value for the field with all bits set.
  static T max() {
    return decode(mask());
  }
};
235 
236 
237 // ----------------------------------------------------------------------------
238 // Hash function.
239 
240 // Thomas Wang, Integer Hash Functions.
241 // http://www.concentric.net/~Ttwang/tech/inthash.htm
static inline uint32_t ComputeIntegerHash(uint32_t key) {
  // Thomas Wang's 32-bit integer mix.  Each step below is written in the
  // algebraically equivalent form noted in the original comments:
  //   ~h + (h << 15)  ==  (h << 15) - h - 1      (mod 2^32)
  //   h * 2057        ==  (h + (h << 3)) + (h << 11)
  uint32_t hash = key;
  hash = (hash << 15) - hash - 1;
  hash ^= hash >> 12;
  hash += hash << 2;
  hash ^= hash >> 4;
  hash = (hash + (hash << 3)) + (hash << 11);
  hash ^= hash >> 16;
  return hash;
}
252 
253 
254 // ----------------------------------------------------------------------------
255 // Miscellaneous
256 
257 // A static resource holds a static instance that can be reserved in
258 // a local scope using an instance of Access.  Attempts to re-reserve
259 // the instance will cause an error.
template <typename T>
class StaticResource {
 public:
  StaticResource() : is_reserved_(false)  {}

 private:
  // Only Access<T> may reserve the resource and touch 'instance_'.
  template <typename S> friend class Access;
  T instance_;        // The shared static instance.
  bool is_reserved_;  // True while an Access object holds the instance.
};
270 
271 
272 // Locally scoped access to a static resource.
template <typename T>
class Access {
 public:
  // Reserves the resource for the lifetime of this object.  Re-reserving
  // an already reserved resource is an error (checked in debug mode).
  explicit Access(StaticResource<T>* resource)
    : resource_(resource)
    , instance_(&resource->instance_) {
    ASSERT(!resource->is_reserved_);
    resource->is_reserved_ = true;
  }

  // Releases the reservation.  Pointers are cleared so that use after
  // release fails fast rather than silently touching the freed reservation.
  ~Access() {
    resource_->is_reserved_ = false;
    resource_ = NULL;
    instance_ = NULL;
  }

  T* value()  { return instance_; }
  T* operator -> ()  { return instance_; }

 private:
  StaticResource<T>* resource_;  // The resource currently reserved.
  T* instance_;                  // Cached pointer to resource_->instance_.
};
296 
297 
// A length-tagged view over an array of T.  A Vector does not own its
// backing store; ownership is managed explicitly via New()/Dispose().
template <typename T>
class Vector {
 public:
  Vector() : start_(NULL), length_(0) {}
  Vector(T* data, int length) : start_(data), length_(length) {
    ASSERT(length == 0 || (length > 0 && data != NULL));
  }

  // Allocates a fresh backing store of 'length' elements; the caller is
  // responsible for calling Dispose() eventually.
  static Vector<T> New(int length) {
    return Vector<T>(NewArray<T>(length), length);
  }

  // Returns a vector using the same backing storage as this one,
  // spanning from and including 'from', to but not including 'to'.
  Vector<T> SubVector(int from, int to) {
    ASSERT(to <= length_);
    ASSERT(from < to);
    ASSERT(0 <= from);
    return Vector<T>(start() + from, to - from);
  }

  // Returns the length of the vector.
  int length() const { return length_; }

  // Returns whether or not the vector is empty.
  bool is_empty() const { return length_ == 0; }

  // Returns the pointer to the start of the data in the vector.
  T* start() const { return start_; }

  // Access individual vector elements - checks bounds in debug mode.
  T& operator[](int index) const {
    ASSERT(0 <= index && index < length_);
    return start_[index];
  }

  // Read-only alias for operator[].
  const T& at(int index) const { return operator[](index); }

  // Unchecked access to the first/last elements; the vector must be
  // non-empty.
  T& first() { return start_[0]; }

  T& last() { return start_[length_ - 1]; }

  // Returns a clone of this vector with a new backing store.
  Vector<T> Clone() const {
    T* result = NewArray<T>(length_);
    for (int i = 0; i < length_; i++) result[i] = start_[i];
    return Vector<T>(result, length_);
  }

  // In-place sort using a typed element comparator.
  // NOTE(review): casting the typed comparator to qsort's void* signature
  // relies on function-pointer representation compatibility — longstanding
  // practice here, but not guaranteed by the standard.
  void Sort(int (*cmp)(const T*, const T*)) {
    typedef int (*RawComparer)(const void*, const void*);
    qsort(start(),
          length(),
          sizeof(T),
          reinterpret_cast<RawComparer>(cmp));
  }

  // Sorts by the elements' own ordering (via PointerValueCompare).
  void Sort() {
    Sort(PointerValueCompare<T>);
  }

  // Shrinks the visible length; does not free any storage.
  void Truncate(int length) {
    ASSERT(length <= length_);
    length_ = length;
  }

  // Releases the array underlying this vector. Once disposed the
  // vector is empty.
  void Dispose() {
    DeleteArray(start_);
    start_ = NULL;
    length_ = 0;
  }

  // Returns a view with the first 'offset' elements dropped.
  inline Vector<T> operator+(int offset) {
    ASSERT(offset < length_);
    return Vector<T>(start_ + offset, length_ - offset);
  }

  // Factory method for creating empty vectors.
  static Vector<T> empty() { return Vector<T>(NULL, 0); }

  // Reinterprets the backing store as elements of T; the byte length is
  // preserved (any trailing remainder is truncated).
  template<typename S>
  static Vector<T> cast(Vector<S> input) {
    return Vector<T>(reinterpret_cast<T*>(input.start()),
                     input.length() * sizeof(S) / sizeof(T));
  }

 protected:
  // For subclasses that relocate the backing store (e.g. EmbeddedVector's
  // copy operations).
  void set_start(T* start) { start_ = start; }

 private:
  T* start_;
  int length_;
};
393 
394 
395 // A pointer that can only be set once and doesn't allow NULL values.
396 template<typename T>
397 class SetOncePointer {
398  public:
SetOncePointer()399   SetOncePointer() : pointer_(NULL) { }
400 
is_set()401   bool is_set() const { return pointer_ != NULL; }
402 
get()403   T* get() const {
404     ASSERT(pointer_ != NULL);
405     return pointer_;
406   }
407 
set(T * value)408   void set(T* value) {
409     ASSERT(pointer_ == NULL && value != NULL);
410     pointer_ = value;
411   }
412 
413  private:
414   T* pointer_;
415 };
416 
417 
418 template <typename T, int kSize>
419 class EmbeddedVector : public Vector<T> {
420  public:
EmbeddedVector()421   EmbeddedVector() : Vector<T>(buffer_, kSize) { }
422 
EmbeddedVector(T initial_value)423   explicit EmbeddedVector(T initial_value) : Vector<T>(buffer_, kSize) {
424     for (int i = 0; i < kSize; ++i) {
425       buffer_[i] = initial_value;
426     }
427   }
428 
429   // When copying, make underlying Vector to reference our buffer.
EmbeddedVector(const EmbeddedVector & rhs)430   EmbeddedVector(const EmbeddedVector& rhs)
431       : Vector<T>(rhs) {
432     memcpy(buffer_, rhs.buffer_, sizeof(T) * kSize);
433     set_start(buffer_);
434   }
435 
436   EmbeddedVector& operator=(const EmbeddedVector& rhs) {
437     if (this == &rhs) return *this;
438     Vector<T>::operator=(rhs);
439     memcpy(buffer_, rhs.buffer_, sizeof(T) * kSize);
440     this->set_start(buffer_);
441     return *this;
442   }
443 
444  private:
445   T buffer_[kSize];
446 };
447 
448 
// A Vector that allocates its backing store on construction and frees it
// on destruction (simple RAII ownership).
template <typename T>
class ScopedVector : public Vector<T> {
 public:
  explicit ScopedVector(int length) : Vector<T>(NewArray<T>(length), length) { }
  ~ScopedVector() {
    DeleteArray(this->start());
  }

 private:
  // Copying would double-free the backing store, so it is disallowed.
  DISALLOW_IMPLICIT_CONSTRUCTORS(ScopedVector);
};
460 
461 
CStrVector(const char * data)462 inline Vector<const char> CStrVector(const char* data) {
463   return Vector<const char>(data, StrLength(data));
464 }
465 
MutableCStrVector(char * data)466 inline Vector<char> MutableCStrVector(char* data) {
467   return Vector<char>(data, StrLength(data));
468 }
469 
MutableCStrVector(char * data,int max)470 inline Vector<char> MutableCStrVector(char* data, int max) {
471   int length = StrLength(data);
472   return Vector<char>(data, (length < max) ? length : max);
473 }
474 
475 
476 /*
477  * A class that collects values into a backing store.
478  * Specialized versions of the class can allow access to the backing store
479  * in different ways.
480  * There is no guarantee that the backing store is contiguous (and, as a
481  * consequence, no guarantees that consecutively added elements are adjacent
482  * in memory). The collector may move elements unless it has guaranteed not
483  * to.
484  */
template <typename T, int growth_factor = 2, int max_growth = 1 * MB>
class Collector {
 public:
  explicit Collector(int initial_capacity = kMinCapacity)
      : index_(0), size_(0) {
    if (initial_capacity < kMinCapacity) {
      initial_capacity = kMinCapacity;
    }
    current_chunk_ = Vector<T>::New(initial_capacity);
  }

  virtual ~Collector() {
    // Free backing store (in reverse allocation order).
    current_chunk_.Dispose();
    for (int i = chunks_.length() - 1; i >= 0; i--) {
      chunks_.at(i).Dispose();
    }
  }

  // Add a single element.
  inline void Add(T value) {
    if (index_ >= current_chunk_.length()) {
      Grow(1);
    }
    current_chunk_[index_] = value;
    index_++;
    size_++;
  }

  // Add a block of contiguous elements and return a Vector backed by the
  // memory area.
  // A basic Collector will keep this vector valid as long as the Collector
  // is alive.
  inline Vector<T> AddBlock(int size, T initial_value) {
    ASSERT(size > 0);
    if (size > current_chunk_.length() - index_) {
      Grow(size);
    }
    // After Grow, the current chunk has room for all 'size' elements, so
    // the returned vector is contiguous.
    T* position = current_chunk_.start() + index_;
    index_ += size;
    size_ += size;
    for (int i = 0; i < size; i++) {
      position[i] = initial_value;
    }
    return Vector<T>(position, size);
  }


  // Add a contiguous block of elements and return a vector backed
  // by the added block.
  // A basic Collector will keep this vector valid as long as the Collector
  // is alive.
  inline Vector<T> AddBlock(Vector<const T> source) {
    if (source.length() > current_chunk_.length() - index_) {
      Grow(source.length());
    }
    T* position = current_chunk_.start() + index_;
    index_ += source.length();
    size_ += source.length();
    for (int i = 0; i < source.length(); i++) {
      position[i] = source[i];
    }
    return Vector<T>(position, source.length());
  }


  // Write the contents of the collector into the provided vector.
  void WriteTo(Vector<T> destination) {
    ASSERT(size_ <= destination.length());
    int position = 0;
    // Older (retired) chunks first, then the partially filled current chunk,
    // preserving insertion order.
    for (int i = 0; i < chunks_.length(); i++) {
      Vector<T> chunk = chunks_.at(i);
      for (int j = 0; j < chunk.length(); j++) {
        destination[position] = chunk[j];
        position++;
      }
    }
    for (int i = 0; i < index_; i++) {
      destination[position] = current_chunk_[i];
      position++;
    }
  }

  // Allocate a single contiguous vector, copy all the collected
  // elements to the vector, and return it.
  // The caller is responsible for freeing the memory of the returned
  // vector (e.g., using Vector::Dispose).
  Vector<T> ToVector() {
    Vector<T> new_store = Vector<T>::New(size_);
    WriteTo(new_store);
    return new_store;
  }

  // Resets the collector to be empty.
  virtual void Reset() {
    for (int i = chunks_.length() - 1; i >= 0; i--) {
      chunks_.at(i).Dispose();
    }
    chunks_.Rewind(0);
    index_ = 0;
    size_ = 0;
  }

  // Total number of elements added to collector so far.
  inline int size() { return size_; }

 protected:
  static const int kMinCapacity = 16;
  List<Vector<T> > chunks_;  // Retired (full) chunks, in allocation order.
  Vector<T> current_chunk_;  // Block of memory currently being written into.
  int index_;  // Current index in current chunk.
  int size_;  // Total number of elements in collector.

  // Creates a new current chunk, and stores the old chunk in the chunks_ list.
  void Grow(int min_capacity) {
    ASSERT(growth_factor > 1);
    // Geometric growth, with the per-step increment capped at max_growth.
    int growth = current_chunk_.length() * (growth_factor - 1);
    if (growth > max_growth) {
      growth = max_growth;
    }
    int new_capacity = current_chunk_.length() + growth;
    if (new_capacity < min_capacity) {
      // Ensure the new chunk can hold the requested block in one piece.
      new_capacity = min_capacity + growth;
    }
    Vector<T> new_chunk = Vector<T>::New(new_capacity);
    int new_index = PrepareGrow(new_chunk);
    if (index_ > 0) {
      // Retire only the used prefix of the old chunk; SubVector(0, index_)
      // shares (and later Disposes) the original allocation.
      chunks_.Add(current_chunk_.SubVector(0, index_));
    } else {
      // Can happen if the call to PrepareGrow moves everything into
      // the new chunk.
      current_chunk_.Dispose();
    }
    current_chunk_ = new_chunk;
    index_ = new_index;
    ASSERT(index_ + min_capacity <= current_chunk_.length());
  }

  // Before replacing the current chunk, give a subclass the option to move
  // some of the current data into the new chunk. The function may update
  // the current index_ value to represent data no longer in the current chunk.
  // Returns the initial index of the new chunk (after copied data).
  virtual int PrepareGrow(Vector<T> new_chunk)  {
    return 0;
  }
};
631 
632 
633 /*
634  * A collector that allows sequences of values to be guaranteed to
635  * stay consecutive.
636  * If the backing store grows while a sequence is active, the current
637  * sequence might be moved, but after the sequence is ended, it will
638  * not move again.
639  * NOTICE: Blocks allocated using Collector::AddBlock(int) can move
640  * as well, if inside an active sequence where another element is added.
641  */
template <typename T, int growth_factor = 2, int max_growth = 1 * MB>
class SequenceCollector : public Collector<T, growth_factor, max_growth> {
 public:
  explicit SequenceCollector(int initial_capacity)
      : Collector<T, growth_factor, max_growth>(initial_capacity),
        sequence_start_(kNoSequence) { }

  virtual ~SequenceCollector() {}

  // Marks the start of a sequence; only one sequence may be active at a
  // time (checked in debug mode).
  void StartSequence() {
    ASSERT(sequence_start_ == kNoSequence);
    sequence_start_ = this->index_;
  }

  // Ends the active sequence and returns a vector over its elements.
  // An empty sequence yields a default (empty) vector.
  Vector<T> EndSequence() {
    ASSERT(sequence_start_ != kNoSequence);
    int sequence_start = sequence_start_;
    sequence_start_ = kNoSequence;
    if (sequence_start == this->index_) return Vector<T>();
    return this->current_chunk_.SubVector(sequence_start, this->index_);
  }

  // Drops the currently added sequence, and all collected elements in it.
  void DropSequence() {
    ASSERT(sequence_start_ != kNoSequence);
    int sequence_length = this->index_ - sequence_start_;
    this->index_ = sequence_start_;
    this->size_ -= sequence_length;
    sequence_start_ = kNoSequence;
  }

  virtual void Reset() {
    sequence_start_ = kNoSequence;
    this->Collector<T, growth_factor, max_growth>::Reset();
  }

 private:
  static const int kNoSequence = -1;
  int sequence_start_;  // Index of the active sequence, or kNoSequence.

  // Move the currently active sequence to the new chunk.
  virtual int PrepareGrow(Vector<T> new_chunk) {
    if (sequence_start_ != kNoSequence) {
      int sequence_length = this->index_ - sequence_start_;
      // The new chunk is always larger than the current chunk, so there
      // is room for the copy.
      ASSERT(sequence_length < new_chunk.length());
      for (int i = 0; i < sequence_length; i++) {
        new_chunk[i] = this->current_chunk_[sequence_start_ + i];
      }
      // The moved elements now live at the start of the new chunk.
      this->index_ = sequence_start_;
      sequence_start_ = 0;
      return sequence_length;
    }
    return 0;
  }
};
699 
700 
701 // Compare ASCII/16bit chars to ASCII/16bit chars.
// Lexicographically compares 'chars' characters of two strings whose
// character types may differ (e.g. ASCII vs. 16-bit).  Returns <0, 0 or >0.
template <typename lchar, typename rchar>
static inline int CompareChars(const lchar* lhs, const rchar* rhs, int chars) {
  const lchar* limit = lhs + chars;
#ifdef V8_HOST_CAN_READ_UNALIGNED
  // Fast path: when both strings have the same character width, compare a
  // machine word at a time.
  if (sizeof(*lhs) == sizeof(*rhs)) {
    // Number of characters in a uintptr_t.
    static const int kStepSize = sizeof(uintptr_t) / sizeof(*lhs);  // NOLINT
    while (lhs <= limit - kStepSize) {
      if (*reinterpret_cast<const uintptr_t*>(lhs) !=
          *reinterpret_cast<const uintptr_t*>(rhs)) {
        // A word differs; fall through to the scalar loop below to find the
        // exact position and sign of the difference.
        break;
      }
      lhs += kStepSize;
      rhs += kStepSize;
    }
  }
#endif
  // Scalar tail (and slow path): compare character by character.
  while (lhs < limit) {
    int r = static_cast<int>(*lhs) - static_cast<int>(*rhs);
    if (r != 0) return r;
    ++lhs;
    ++rhs;
  }
  return 0;
}
727 
728 
729 // Calculate 10^exponent.
TenToThe(int exponent)730 static inline int TenToThe(int exponent) {
731   ASSERT(exponent <= 9);
732   ASSERT(exponent >= 1);
733   int answer = 10;
734   for (int i = 1; i < exponent; i++) answer *= 10;
735   return answer;
736 }
737 
738 
739 // The type-based aliasing rule allows the compiler to assume that pointers of
740 // different types (for some definition of different) never alias each other.
741 // Thus the following code does not work:
742 //
743 // float f = foo();
744 // int fbits = *(int*)(&f);
745 //
746 // The compiler 'knows' that the int pointer can't refer to f since the types
747 // don't match, so the compiler may cache f in a register, leaving random data
748 // in fbits.  Using C++ style casts makes no difference, however a pointer to
749 // char data is assumed to alias any other pointer.  This is the 'memcpy
750 // exception'.
751 //
752 // Bit_cast uses the memcpy exception to move the bits from a variable of one
753 // type of a variable of another type.  Of course the end result is likely to
754 // be implementation dependent.  Most compilers (gcc-4.2 and MSVC 2005)
755 // will completely optimize BitCast away.
756 //
757 // There is an additional use for BitCast.
758 // Recent gccs will warn when they see casts that may result in breakage due to
759 // the type-based aliasing rule.  If you have checked that there is no breakage
760 // you can use BitCast to cast one pointer type to another.  This confuses gcc
761 // enough that it can no longer see that you have cast one pointer type to
762 // another thus avoiding the warning.
763 
764 // We need different implementations of BitCast for pointer and non-pointer
765 // values. We use partial specialization of auxiliary struct to work around
766 // issues with template functions overloading.
// Generic case: copy the object representation byte-for-byte via memcpy
// (the "memcpy exception" to the aliasing rule described above).
template <class Dest, class Source>
struct BitCastHelper {
  STATIC_ASSERT(sizeof(Dest) == sizeof(Source));

  INLINE(static Dest cast(const Source& source)) {
    Dest dest;
    memcpy(&dest, &source, sizeof(dest));
    return dest;
  }
};
777 
// Partial specialization for pointer sources: go through uintptr_t so the
// generic memcpy-based helper sees an integral value of pointer size.
template <class Dest, class Source>
struct BitCastHelper<Dest, Source*> {
  INLINE(static Dest cast(Source* source)) {
    return BitCastHelper<Dest, uintptr_t>::
        cast(reinterpret_cast<uintptr_t>(source));
  }
};
785 
// Public entry point: dispatches to the appropriate BitCastHelper
// specialization (pointer vs. non-pointer sources).
template <class Dest, class Source>
INLINE(Dest BitCast(const Source& source));

template <class Dest, class Source>
inline Dest BitCast(const Source& source) {
  return BitCastHelper<Dest, Source>::cast(source);
}
793 
794 } }  // namespace v8::internal
795 
796 #endif  // V8_UTILS_H_
797