• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1  /*
2   * Copyright 2006 The Android Open Source Project
3   *
4   * Use of this source code is governed by a BSD-style license that can be
5   * found in the LICENSE file.
6   */
7  
8  #ifndef SkTypes_DEFINED
9  #define SkTypes_DEFINED
10  
11  // IWYU pragma: begin_exports
12  #include "SkPreConfig.h"
13  #include "SkUserConfig.h"
14  #include "SkPostConfig.h"
15  #include <stddef.h>
16  #include <stdint.h>
17  
18  #if defined(SK_ARM_HAS_NEON)
19      #include <arm_neon.h>
20  #elif SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE2
21      #include <immintrin.h>
22  #endif
23  // IWYU pragma: end_exports
24  
25  #include <string.h>
26  
/**
 *  sk_careful_memcpy() is just like memcpy(), but guards against undefined behavior.
 *
 * It is undefined behavior to call memcpy() with null dst or src, even if len is 0.
 * If an optimizer is "smart" enough, it can exploit this to do unexpected things.
 *     memcpy(dst, src, 0);
 *     if (src) {
 *         printf("%x\n", *src);
 *     }
 * In this code the compiler can assume src is not null and omit the if (src) {...} check,
 * unconditionally running the printf, crashing the program if src really is null.
 * Of the compilers we pay attention to only GCC performs this optimization in practice.
 */
static inline void* sk_careful_memcpy(void* dst, const void* src, size_t len) {
    // A zero-length copy is skipped entirely: handing (possibly null) pointers
    // to memcpy() with len == 0 is exactly the undefined behavior we avoid.
    // With len > 0 the caller must already be passing valid pointers.
    if (len == 0) {
        return dst;
    }
    memcpy(dst, src, len);
    return dst;
}
48  
49  /** \file SkTypes.h
50  */
51  
52  /** See SkGraphics::GetVersion() to retrieve these at runtime
53   */
54  #define SKIA_VERSION_MAJOR  1
55  #define SKIA_VERSION_MINOR  0
56  #define SKIA_VERSION_PATCH  0
57  
58  /*
59      memory wrappers to be implemented by the porting layer (platform)
60  */
61  
62  /** Called internally if we run out of memory. The platform implementation must
63      not return, but should either throw an exception or otherwise exit.
64  */
65  SK_API extern void sk_out_of_memory(void);
66  /** Called internally if we hit an unrecoverable error.
67      The platform implementation must not return, but should either throw
68      an exception or otherwise exit.
69  */
70  SK_API extern void sk_abort_no_print(void);
71  
72  enum {
73      SK_MALLOC_TEMP  = 0x01, //!< hint to sk_malloc that the requested memory will be freed in the scope of the stack frame
74      SK_MALLOC_THROW = 0x02  //!< instructs sk_malloc to call sk_throw if the memory cannot be allocated.
75  };
76  /** Return a block of memory (at least 4-byte aligned) of at least the
77      specified size. If the requested memory cannot be returned, either
78      return null (if SK_MALLOC_TEMP bit is clear) or throw an exception
79      (if SK_MALLOC_TEMP bit is set). To free the memory, call sk_free().
80  */
81  SK_API extern void* sk_malloc_flags(size_t size, unsigned flags);
82  /** Same as sk_malloc(), but hard coded to pass SK_MALLOC_THROW as the flag
83  */
84  SK_API extern void* sk_malloc_throw(size_t size);
85  /** Same as standard realloc(), but this one never returns null on failure. It will throw
86      an exception if it fails.
87  */
88  SK_API extern void* sk_realloc_throw(void* buffer, size_t size);
89  /** Free memory returned by sk_malloc(). It is safe to pass null.
90  */
91  SK_API extern void sk_free(void*);
92  
93  /** Much like calloc: returns a pointer to at least size zero bytes, or NULL on failure.
94   */
95  SK_API extern void* sk_calloc(size_t size);
96  
97  /** Same as sk_calloc, but throws an exception instead of returning NULL on failure.
98   */
99  SK_API extern void* sk_calloc_throw(size_t size);
100  
// bzero is safer than memset, but we can't rely on it, so... sk_bzero()
static inline void sk_bzero(void* buffer, size_t size) {
    // As with sk_careful_memcpy: memset(null, 0, 0) is undefined behavior,
    // so bail out before touching memset when there is nothing to clear.
    if (size == 0) {
        return;
    }
    memset(buffer, 0, size);
}
108  
109  ///////////////////////////////////////////////////////////////////////////////
110  
111  #ifdef override_GLOBAL_NEW
112  #include <new>
113  
// Replacement global operator new routed through Skia's allocator.
// sk_malloc_throw never returns null (it throws or aborts on failure, per
// its declaration above), which satisfies operator new's non-null contract.
inline void* operator new(size_t size) {
    return sk_malloc_throw(size);
}
117  
// Matching replacement global operator delete: releases through sk_free,
// which is documented above as safe to call with null.
inline void operator delete(void* p) {
    sk_free(p);
}
121  #endif
122  
123  ///////////////////////////////////////////////////////////////////////////////
124  
125  #define SK_INIT_TO_AVOID_WARNING    = 0
126  
127  #ifndef SkDebugf
128      SK_API void SkDebugf(const char format[], ...);
129  #endif
130  
131  #define SkASSERT_RELEASE(cond)          if(!(cond)) { SK_ABORT(#cond); }
132  
133  #ifdef SK_DEBUG
134      #define SkASSERT(cond)              SkASSERT_RELEASE(cond)
135      #define SkDEBUGFAIL(message)        SkASSERT(false && message)
136      #define SkDEBUGFAILF(fmt, ...)      SkASSERTF(false, fmt, ##__VA_ARGS__)
137      #define SkDEBUGCODE(code)           code
138      #define SkDECLAREPARAM(type, var)   , type var
139      #define SkPARAM(var)                , var
140  //  #define SkDEBUGF(args       )       SkDebugf##args
141      #define SkDEBUGF(args       )       SkDebugf args
142      #define SkAssertResult(cond)        SkASSERT(cond)
143  #else
144      #define SkASSERT(cond)
145      #define SkDEBUGFAIL(message)
146      #define SkDEBUGCODE(code)
147      #define SkDEBUGF(args)
148      #define SkDECLAREPARAM(type, var)
149      #define SkPARAM(var)
150  
151      // unlike SkASSERT, this guy executes its condition in the non-debug build
152      #define SkAssertResult(cond)        cond
153  #endif
154  
155  // Legacy macro names for SK_ABORT
156  #define SkFAIL(message)                 SK_ABORT(message)
157  #define sk_throw()                      SK_ABORT("sk_throw")
158  
159  // We want to evaluate cond only once, and inside the SkASSERT somewhere so we see its string form.
160  // So we use the comma operator to make an SkDebugf that always returns false: we'll evaluate cond,
161  // and if it's true the assert passes; if it's false, we'll print the message and the assert fails.
162  #define SkASSERTF(cond, fmt, ...)       SkASSERT((cond) || (SkDebugf(fmt"\n", __VA_ARGS__), false))
163  
164  #ifdef SK_DEVELOPER
165      #define SkDEVCODE(code)             code
166  #else
167      #define SkDEVCODE(code)
168  #endif
169  
170  #ifdef SK_IGNORE_TO_STRING
171      #define SK_TO_STRING_NONVIRT()
172      #define SK_TO_STRING_VIRT()
173      #define SK_TO_STRING_PUREVIRT()
174      #define SK_TO_STRING_OVERRIDE()
175  #else
176      class SkString;
177      // the 'toString' helper functions convert Sk* objects to human-readable
178      // form in developer mode
179      #define SK_TO_STRING_NONVIRT() void toString(SkString* str) const;
180      #define SK_TO_STRING_VIRT() virtual void toString(SkString* str) const;
181      #define SK_TO_STRING_PUREVIRT() virtual void toString(SkString* str) const = 0;
182      #define SK_TO_STRING_OVERRIDE() void toString(SkString* str) const override;
183  #endif
184  
185  /*
186   *  Usage:  SK_MACRO_CONCAT(a, b)   to construct the symbol ab
187   *
188   *  SK_MACRO_CONCAT_IMPL_PRIV just exists to make this work. Do not use directly
189   *
190   */
191  #define SK_MACRO_CONCAT(X, Y)           SK_MACRO_CONCAT_IMPL_PRIV(X, Y)
192  #define SK_MACRO_CONCAT_IMPL_PRIV(X, Y)  X ## Y
193  
194  /*
195   *  Usage: SK_MACRO_APPEND_LINE(foo)    to make foo123, where 123 is the current
196   *                                      line number. Easy way to construct
197   *                                      unique names for local functions or
198   *                                      variables.
199   */
200  #define SK_MACRO_APPEND_LINE(name)  SK_MACRO_CONCAT(name, __LINE__)
201  
202  /**
203   * For some classes, it's almost always an error to instantiate one without a name, e.g.
204   *   {
205   *       SkAutoMutexAcquire(&mutex);
206   *       <some code>
207   *   }
208   * In this case, the writer meant to hold mutex while the rest of the code in the block runs,
209   * but instead the mutex is acquired and then immediately released.  The correct usage is
210   *   {
211   *       SkAutoMutexAcquire lock(&mutex);
212   *       <some code>
213   *   }
214   *
215   * To prevent callers from instantiating your class without a name, use SK_REQUIRE_LOCAL_VAR
216   * like this:
217   *   class classname {
218   *       <your class>
219   *   };
220   *   #define classname(...) SK_REQUIRE_LOCAL_VAR(classname)
221   *
222   * This won't work with templates, and you must inline the class' constructors and destructors.
223   * Take a look at SkAutoFree and SkAutoMalloc in this file for examples.
224   */
225  #define SK_REQUIRE_LOCAL_VAR(classname) \
226      static_assert(false, "missing name for " #classname)
227  
228  ///////////////////////////////////////////////////////////////////////
229  
230  /**
231   *  Fast type for signed 8 bits. Use for parameter passing and local variables,
232   *  not for storage.
233   */
234  typedef int S8CPU;
235  
236  /**
237   *  Fast type for unsigned 8 bits. Use for parameter passing and local
238   *  variables, not for storage
239   */
240  typedef unsigned U8CPU;
241  
242  /**
243   *  Fast type for signed 16 bits. Use for parameter passing and local variables,
244   *  not for storage
245   */
246  typedef int S16CPU;
247  
248  /**
249   *  Fast type for unsigned 16 bits. Use for parameter passing and local
250   *  variables, not for storage
251   */
252  typedef unsigned U16CPU;
253  
254  /**
255   *  Meant to be a small version of bool, for storage purposes. Will be 0 or 1
256   */
257  typedef uint8_t SkBool8;
258  
259  #ifdef SK_DEBUG
260      SK_API int8_t      SkToS8(intmax_t);
261      SK_API uint8_t     SkToU8(uintmax_t);
262      SK_API int16_t     SkToS16(intmax_t);
263      SK_API uint16_t    SkToU16(uintmax_t);
264      SK_API int32_t     SkToS32(intmax_t);
265      SK_API uint32_t    SkToU32(uintmax_t);
266      SK_API int         SkToInt(intmax_t);
267      SK_API unsigned    SkToUInt(uintmax_t);
268      SK_API size_t      SkToSizeT(uintmax_t);
269  #else
270      #define SkToS8(x)   ((int8_t)(x))
271      #define SkToU8(x)   ((uint8_t)(x))
272      #define SkToS16(x)  ((int16_t)(x))
273      #define SkToU16(x)  ((uint16_t)(x))
274      #define SkToS32(x)  ((int32_t)(x))
275      #define SkToU32(x)  ((uint32_t)(x))
276      #define SkToInt(x)  ((int)(x))
277      #define SkToUInt(x) ((unsigned)(x))
278      #define SkToSizeT(x) ((size_t)(x))
279  #endif
280  
281  /** Returns 0 or 1 based on the condition
282  */
283  #define SkToBool(cond)  ((cond) != 0)
284  
285  #define SK_MaxS16   32767
286  #define SK_MinS16   -32767
287  #define SK_MaxU16   0xFFFF
288  #define SK_MinU16   0
289  #define SK_MaxS32   0x7FFFFFFF
290  #define SK_MinS32   -SK_MaxS32
291  #define SK_MaxU32   0xFFFFFFFF
292  #define SK_MinU32   0
293  #define SK_NaN32    (1 << 31)
294  
/** Returns true if the value can be represented with signed 16 bits.
 */
static inline bool SkIsS16(long x) {
    const int16_t truncated = (int16_t)x;
    return truncated == x;
}
300  
/** Returns true if the value can be represented with unsigned 16 bits.
 */
static inline bool SkIsU16(long x) {
    const uint16_t truncated = (uint16_t)x;
    return truncated == x;
}
306  
/** Left-shift a 32-bit value without undefined behavior: shifting a negative
    signed value left is UB, so the shift is performed in the unsigned domain
    and the bit pattern converted back. */
static inline int32_t SkLeftShift(int32_t value, int32_t shift) {
    const uint32_t bits = (uint32_t)value;
    return (int32_t)(bits << shift);
}
310  
/** 64-bit flavor of SkLeftShift: shift in the unsigned domain to avoid the
    undefined behavior of left-shifting a negative signed value. */
static inline int64_t SkLeftShift(int64_t value, int32_t shift) {
    const uint64_t bits = (uint64_t)value;
    return (int64_t)(bits << shift);
}
314  
315  //////////////////////////////////////////////////////////////////////////////
316  
317  /** Returns the number of entries in an array (not a pointer) */
318  template <typename T, size_t N> char (&SkArrayCountHelper(T (&array)[N]))[N];
319  #define SK_ARRAY_COUNT(array) (sizeof(SkArrayCountHelper(array)))
320  
321  // Can be used to bracket data types that must be dense, e.g. hash keys.
322  #if defined(__clang__)  // This should work on GCC too, but GCC diagnostic pop didn't seem to work!
323      #define SK_BEGIN_REQUIRE_DENSE _Pragma("GCC diagnostic push") \
324                                     _Pragma("GCC diagnostic error \"-Wpadded\"")
325      #define SK_END_REQUIRE_DENSE   _Pragma("GCC diagnostic pop")
326  #else
327      #define SK_BEGIN_REQUIRE_DENSE
328      #define SK_END_REQUIRE_DENSE
329  #endif
330  
331  #define SkAlign2(x)     (((x) + 1) >> 1 << 1)
332  #define SkIsAlign2(x)   (0 == ((x) & 1))
333  
334  #define SkAlign4(x)     (((x) + 3) >> 2 << 2)
335  #define SkIsAlign4(x)   (0 == ((x) & 3))
336  
337  #define SkAlign8(x)     (((x) + 7) >> 3 << 3)
338  #define SkIsAlign8(x)   (0 == ((x) & 7))
339  
340  #define SkAlignPtr(x)   (sizeof(void*) == 8 ?   SkAlign8(x) :   SkAlign4(x))
341  #define SkIsAlignPtr(x) (sizeof(void*) == 8 ? SkIsAlign8(x) : SkIsAlign4(x))
342  
343  typedef uint32_t SkFourByteTag;
344  #define SkSetFourByteTag(a, b, c, d)    (((a) << 24) | ((b) << 16) | ((c) << 8) | (d))
345  
346  /** 32 bit integer to hold a unicode value
347  */
348  typedef int32_t SkUnichar;
349  /** 32 bit value to hold a millisecond count
350  */
351  typedef uint32_t SkMSec;
352  /** 1 second measured in milliseconds
353  */
354  #define SK_MSec1 1000
355  /** maximum representable milliseconds
356  */
357  #define SK_MSecMax 0x7FFFFFFF
358  /** Returns a < b for milliseconds, correctly handling wrap-around from 0xFFFFFFFF to 0
359  */
360  #define SkMSec_LT(a, b)     ((int32_t)(a) - (int32_t)(b) < 0)
361  /** Returns a <= b for milliseconds, correctly handling wrap-around from 0xFFFFFFFF to 0
362  */
363  #define SkMSec_LE(a, b)     ((int32_t)(a) - (int32_t)(b) <= 0)
364  
/** The generation IDs in Skia reserve 0 as an invalid marker.
 */
367  #define SK_InvalidGenID     0
/** The unique IDs in Skia reserve 0 as an invalid marker.
 */
370  #define SK_InvalidUniqueID  0
371  
372  /****************************************************************************
373      The rest of these only build with C++
374  */
375  #ifdef __cplusplus
376  
/** Faster than SkToBool for integral conditions. Returns 0 or 1.
 */
static inline int Sk32ToBool(uint32_t n) {
    // Any non-zero n maps to 1; compilers lower this ternary to the same
    // branch-free code as the classic (n | (0-n)) >> 31 trick.
    return n ? 1 : 0;
}
382  
/** Generic swap: exchanges a and b through a temporary copy. Classes with a
    cheaper swap should specialize this function; SkTSort relies on it. */
template <typename T> inline void SkTSwap(T& a, T& b) {
    T tmp(a);
    a = b;
    b = tmp;
}
390  
SkAbs32(int32_t value)391  static inline int32_t SkAbs32(int32_t value) {
392      SkASSERT(value != SK_NaN32);  // The most negative int32_t can't be negated.
393      if (value < 0) {
394          value = -value;
395      }
396      return value;
397  }
398  
/** Returns |value| for any type supporting operator< against 0 and unary
    minus. As with SkAbs32, the most negative value of a signed integer type
    cannot be negated. */
template <typename T> inline T SkTAbs(T value) {
    return (value < 0) ? -value : value;
}
405  
/** Returns the larger of a and b. */
static inline int32_t SkMax32(int32_t a, int32_t b) {
    return (a < b) ? b : a;
}
411  
/** Returns the smaller of a and b. */
static inline int32_t SkMin32(int32_t a, int32_t b) {
    return (a > b) ? b : a;
}
417  
/** Returns a reference to the smaller of a and b (b when they are
    equivalent, matching std::min's tie-break). */
template <typename T> const T& SkTMin(const T& a, const T& b) {
    if (a < b) {
        return a;
    }
    return b;
}
421  
/** Returns a reference to the larger of a and b (b when they are
    equivalent). */
template <typename T> const T& SkTMax(const T& a, const T& b) {
    if (b < a) {
        return a;
    }
    return b;
}
425  
/** Returns -1, 0, or +1 according to the sign of a.
 *
 *  The previous branch-free form, (a >> 31) | ((unsigned) -a >> 31),
 *  negated a in the signed domain, which is signed-overflow undefined
 *  behavior when a is the most negative int32_t. The comparison form below
 *  is well-defined for every input (including INT32_MIN) and compilers
 *  lower it to branchless code anyway.
 */
static inline int32_t SkSign32(int32_t a) {
    return (int32_t)(a > 0) - (int32_t)(a < 0);
}
429  
/** Returns value clamped from above by max, i.e. min(value, max). */
static inline int32_t SkFastMin32(int32_t value, int32_t max) {
    return (value > max) ? max : value;
}
436  
/** Returns value pinned between min and max, inclusively.
    Requires min <= max. Note that the result is a reference to one of the
    three arguments, so it is only valid while they are alive. */
template <typename T> static inline const T& SkTPin(const T& value, const T& min, const T& max) {
    return SkTMax(SkTMin(value, max), min);
}
441  
442  
443  ///////////////////////////////////////////////////////////////////////////////
444  
/**
 *  Indicates whether an allocation should count against a cache budget.
 *  Backed by bool, so kNo/kYes convert to false/true directly.
 */
enum class SkBudgeted : bool {
    kNo  = false,
    kYes = true
};
452  
453  ///////////////////////////////////////////////////////////////////////////////
454  
/** Use to combine multiple bits in a bitmask in a type safe way.
 */
template <typename T>
T SkTBitOr(T a, T b) {
    return static_cast<T>(a | b);
}
461  
/**
 *  Use to cast a pointer to a different type while maintaining
 *  strict-aliasing safety: the value is laundered through a union rather
 *  than dereferenced through an incompatible pointer type.
 *  NOTE(review): Dst is expected to be a pointer-sized type (typically a
 *  pointer); if sizeof(Dst) exceeds sizeof(const void*), reading data.dst
 *  would read uninitialized bytes -- confirm at call sites.
 */
template <typename Dst> Dst SkTCast(const void* ptr) {
    union {
        const void* src;
        Dst dst;
    } data;
    data.src = ptr;
    return data.dst;
}
473  
474  //////////////////////////////////////////////////////////////////////////////
475  
476  /** \class SkNoncopyable
477  
478  SkNoncopyable is the base class for objects that do not want to
479  be copied. It hides its copy-constructor and its assignment-operator.
480  */
481  class SK_API SkNoncopyable {
482  public:
SkNoncopyable()483      SkNoncopyable() {}
484  
485  private:
486      SkNoncopyable(const SkNoncopyable&);
487      SkNoncopyable& operator=(const SkNoncopyable&);
488  };
489  
/**
 *  RAII owner of a block obtained from sk_malloc: frees the block with
 *  sk_free() when the SkAutoFree goes out of scope, unless ownership was
 *  released via detach().
 */
class SkAutoFree : SkNoncopyable {
public:
    SkAutoFree() : fPtr(NULL) {}
    // Takes ownership of ptr, which must have come from sk_malloc (or be null).
    explicit SkAutoFree(void* ptr) : fPtr(ptr) {}
    ~SkAutoFree() { sk_free(fPtr); }

    /** Return the currently allocated buffer, or null.
    */
    void* get() const { return fPtr; }

    /** Assign a new ptr allocated with sk_malloc (or null), and return the
        previous ptr. Note it is the caller's responsibility to sk_free the
        returned ptr.
    */
    void* set(void* ptr) {
        void* prev = fPtr;
        fPtr = ptr;
        return prev;
    }

    /** Transfer ownership of the current ptr to the caller, setting the
        internal reference to null. Note the caller is responsible for calling
        sk_free on the returned address.
    */
    void* detach() { return this->set(NULL); }

    /** Free the current buffer, and set the internal reference to NULL. Same
        as calling sk_free(detach())
    */
    void free() {
        sk_free(fPtr);
        fPtr = NULL;
    }

private:
    void* fPtr;
    // NOTE(review): these suppressed copy operations are redundant with the
    // SkNoncopyable base class, which already blocks copying.
    SkAutoFree(const SkAutoFree&);
    SkAutoFree& operator=(const SkAutoFree&);
};
530  #define SkAutoFree(...) SK_REQUIRE_LOCAL_VAR(SkAutoFree)
531  
/**
 *  Manage an allocated block of heap memory. This object is the sole manager of
 *  the lifetime of the block, so the caller must not call sk_free() or delete
 *  on the block, unless detach() was called.
 */
class SkAutoMalloc : SkNoncopyable {
public:
    /** Allocate size bytes via sk_malloc_throw (which does not return on
        failure); size == 0 leaves the pointer null. */
    explicit SkAutoMalloc(size_t size = 0) {
        fPtr = size ? sk_malloc_throw(size) : NULL;
        fSize = size;
    }

    ~SkAutoMalloc() {
        sk_free(fPtr);
    }

    /**
     *  Passed to reset to specify what happens if the requested size is smaller
     *  than the current size (and the current block was dynamically allocated).
     */
    enum OnShrink {
        /**
         *  If the requested size is smaller than the current size, and the
         *  current block is dynamically allocated, free the old block and
         *  malloc a new block of the smaller size.
         */
        kAlloc_OnShrink,

        /**
         *  If the requested size is smaller than the current size, and the
         *  current block is dynamically allocated, just return the old
         *  block.
         */
        kReuse_OnShrink
    };

    /**
     *  Reallocates the block to a new size. The ptr may or may not change.
     *  The old contents are NOT preserved (this frees and reallocates; it is
     *  not realloc). If didChangeAlloc is non-null it is set to whether a new
     *  allocation was made.
     */
    void* reset(size_t size, OnShrink shrink = kAlloc_OnShrink,  bool* didChangeAlloc = NULL) {
        if (size == fSize || (kReuse_OnShrink == shrink && size < fSize)) {
            // Same size, or shrinking under kReuse policy: keep the block.
            if (didChangeAlloc) {
                *didChangeAlloc = false;
            }
            return fPtr;
        }

        sk_free(fPtr);
        fPtr = size ? sk_malloc_throw(size) : NULL;
        fSize = size;
        if (didChangeAlloc) {
            *didChangeAlloc = true;
        }

        return fPtr;
    }

    /**
     *  Releases the block back to the heap
     */
    void free() {
        this->reset(0);
    }

    /**
     *  Return the allocated block.
     */
    void* get() { return fPtr; }
    const void* get() const { return fPtr; }

   /** Transfer ownership of the current ptr to the caller, setting the
       internal reference to null. Note the caller is responsible for calling
       sk_free on the returned address.
    */
    void* detach() {
        void* ptr = fPtr;
        fPtr = NULL;
        fSize = 0;
        return ptr;
    }

private:
    void*   fPtr;
    size_t  fSize;  // can be larger than the requested size (see kReuse)
};
617  #define SkAutoMalloc(...) SK_REQUIRE_LOCAL_VAR(SkAutoMalloc)
618  
/**
 *  Manage an allocated block of memory. If the requested size is <= kSizeRequested (or slightly
 *  more), then the allocation will come from the stack rather than the heap. This object is the
 *  sole manager of the lifetime of the block, so the caller must not call sk_free() or delete on
 *  the block.
 */
template <size_t kSizeRequested> class SkAutoSMalloc : SkNoncopyable {
public:
    /**
     *  Creates initially empty storage. get() returns a ptr, but it is to a zero-byte allocation.
     *  Must call reset(size) to return an allocated block.
     */
    SkAutoSMalloc() {
        fPtr = fStorage;
        fSize = kSize;
    }

    /**
     *  Allocate a block of the specified size. If size <= kSizeRequested (or slightly more), then
     *  the allocation will come from the stack, otherwise it will be dynamically allocated.
     */
    explicit SkAutoSMalloc(size_t size) {
        fPtr = fStorage;
        fSize = kSize;
        this->reset(size);
    }

    /**
     *  Free the allocated block (if any). If the block was small enough to have been allocated on
     *  the stack, then this does nothing.
     */
    ~SkAutoSMalloc() {
        if (fPtr != (void*)fStorage) {
            sk_free(fPtr);
        }
    }

    /**
     *  Return the allocated block. May return non-null even if the block is of zero size. Since
     *  this may be on the stack or dynamically allocated, the caller must not call sk_free() on it,
     *  but must rely on SkAutoSMalloc to manage it.
     */
    void* get() const { return fPtr; }

    /**
     *  Return a new block of the requested size, freeing (as necessary) any previously allocated
     *  block. As with the constructor, if size <= kSizeRequested (or slightly more) then the return
     *  block may be allocated locally, rather than from the heap. Previous contents are NOT
     *  preserved across a reallocation.
     */
    void* reset(size_t size,
                SkAutoMalloc::OnShrink shrink = SkAutoMalloc::kAlloc_OnShrink,
                bool* didChangeAlloc = NULL) {
        // Never report less capacity than the always-present stack buffer.
        size = (size < kSize) ? kSize : size;
        bool alloc = size != fSize && (SkAutoMalloc::kAlloc_OnShrink == shrink || size > fSize);
        if (didChangeAlloc) {
            *didChangeAlloc = alloc;
        }
        if (alloc) {
            if (fPtr != (void*)fStorage) {
                sk_free(fPtr);
            }

            if (size == kSize) {
                // Request fits in the stack buffer; reuse it.
                SkASSERT(fPtr != fStorage); // otherwise we lied when setting didChangeAlloc.
                fPtr = fStorage;
            } else {
                fPtr = sk_malloc_flags(size, SK_MALLOC_THROW | SK_MALLOC_TEMP);
            }

            fSize = size;
        }
        SkASSERT(fSize >= size && fSize >= kSize);
        SkASSERT((fPtr == fStorage) || fSize > kSize);
        return fPtr;
    }

private:
    // Round the requested size up to a 4-byte multiple (fStorage is uint32_t).
    static const size_t kSizeAlign4 = SkAlign4(kSizeRequested);
#if defined(GOOGLE3)
    // Stack frame size is limited for GOOGLE3. 4k is less than the actual max, but some functions
    // have multiple large stack allocations.
    static const size_t kMaxBytes = 4 * 1024;
    static const size_t kSize = kSizeRequested > kMaxBytes ? kMaxBytes : kSizeAlign4;
#else
    static const size_t kSize = kSizeAlign4;
#endif

    void*       fPtr;
    size_t      fSize;  // can be larger than the requested size (see kReuse)
    uint32_t    fStorage[kSize >> 2];
};
711  // Can't guard the constructor because it's a template class.
712  
713  #endif /* C++ */
714  
715  #endif
716