/*
 * Copyright 2006 The Android Open Source Project
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */


#ifndef SkTypes_DEFINED
#define SkTypes_DEFINED

#include "SkPreConfig.h"
#include "SkUserConfig.h"
#include "SkPostConfig.h"

#ifndef SK_IGNORE_STDINT_DOT_H
    #include <stdint.h>
#endif

#include <stdio.h>
#include <string.h>     // for memset(), used by sk_bzero() below

/** \file SkTypes.h
*/

/** See SkGraphics::GetVersion() to retrieve these at runtime
 */
#define SKIA_VERSION_MAJOR  1
#define SKIA_VERSION_MINOR  0
#define SKIA_VERSION_PATCH  0

/*
    memory wrappers to be implemented by the porting layer (platform)
*/

/** Called internally if we run out of memory. The platform implementation must
    not return, but should either throw an exception or otherwise exit.
*/
SK_API extern void sk_out_of_memory(void);
/** Called internally if we hit an unrecoverable error.
    The platform implementation must not return, but should either throw
    an exception or otherwise exit.
*/
SK_API extern void sk_throw(void);

enum {
    SK_MALLOC_TEMP  = 0x01, //!< hint to sk_malloc that the requested memory will be freed in the scope of the stack frame
    SK_MALLOC_THROW = 0x02  //!< instructs sk_malloc to call sk_throw if the memory cannot be allocated.
};
/** Return a block of memory (at least 4-byte aligned) of at least the
    specified size. If the requested memory cannot be returned, either
    return null (if the SK_MALLOC_THROW bit is clear) or call sk_throw()
    (if the SK_MALLOC_THROW bit is set). To free the memory, call sk_free().
*/
SK_API extern void* sk_malloc_flags(size_t size, unsigned flags);
/** Same as sk_malloc(), but hard coded to pass SK_MALLOC_THROW as the flag
*/
SK_API extern void* sk_malloc_throw(size_t size);
/** Same as standard realloc(), but this one never returns null on failure. It will throw
    an exception if it fails.
*/
SK_API extern void* sk_realloc_throw(void* buffer, size_t size);
/** Free memory returned by sk_malloc(). It is safe to pass null.
*/
SK_API extern void sk_free(void*);
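
/*  Illustrative usage sketch for the allocator entry points above (the variable
    names here are hypothetical, not part of the API):

        void* scratch = sk_malloc_flags(512, SK_MALLOC_TEMP);  // may return null
        if (scratch) {
            // ... use the 512-byte scratch buffer ...
            sk_free(scratch);
        }

        void* block = sk_malloc_throw(1024);    // never returns null
        block = sk_realloc_throw(block, 4096);  // grows the block, throws on failure
        sk_free(block);
*/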

// bzero is safer than memset, but we can't rely on it, so... sk_bzero()
static inline void sk_bzero(void* buffer, size_t size) {
    memset(buffer, 0, size);
}

///////////////////////////////////////////////////////////////////////////////

#ifdef SK_OVERRIDE_GLOBAL_NEW
#include <new>

inline void* operator new(size_t size) {
    return sk_malloc_throw(size);
}

inline void operator delete(void* p) {
    sk_free(p);
}
#endif

///////////////////////////////////////////////////////////////////////////////

#define SK_INIT_TO_AVOID_WARNING    = 0

#ifndef SkDebugf
    void SkDebugf(const char format[], ...);
#endif

#ifdef SK_DEBUG
    #define SkASSERT(cond)              SK_DEBUGBREAK(cond)
    #define SkDEBUGFAIL(message)        SkASSERT(false && message)
    #define SkDEBUGCODE(code)           code
    #define SkDECLAREPARAM(type, var)   , type var
    #define SkPARAM(var)                , var
//  #define SkDEBUGF(args       )       SkDebugf##args
    #define SkDEBUGF(args       )       SkDebugf args
    #define SkAssertResult(cond)        SkASSERT(cond)
#else
    #define SkASSERT(cond)
    #define SkDEBUGFAIL(message)
    #define SkDEBUGCODE(code)
    #define SkDEBUGF(args)
    #define SkDECLAREPARAM(type, var)
    #define SkPARAM(var)

    // unlike SkASSERT, this macro evaluates its condition even in the non-debug build
    #define SkAssertResult(cond)        cond
#endif

#ifdef SK_DEVELOPER
    #define SkDEVCODE(code)             code
    // the 'toString' helper functions convert Sk* objects to human-readable
    // form in developer mode
    #define SK_DEVELOPER_TO_STRING()    virtual void toString(SkString* str) const SK_OVERRIDE;
#else
    #define SkDEVCODE(code)
    #define SK_DEVELOPER_TO_STRING()
#endif

template <bool>
struct SkCompileAssert {
};

#define SK_COMPILE_ASSERT(expr, msg) \
    typedef SkCompileAssert<(bool(expr))> msg[bool(expr) ? 1 : -1]

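/*  Illustrative usage sketch for SK_COMPILE_ASSERT (the struct and message
    names are hypothetical): when the expression is false, the macro forms an
    array type of size -1, which fails to compile.

        struct SkExamplePair { int32_t fA; int32_t fB; };
        SK_COMPILE_ASSERT(sizeof(SkExamplePair) == 8, SkExamplePair_size_is_8);
*/
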
/*
 *  Usage:  SK_MACRO_CONCAT(a, b)   to construct the symbol ab
 *
 *  SK_MACRO_CONCAT_IMPL_PRIV just exists to make this work. Do not use directly
 *
 */
#define SK_MACRO_CONCAT(X, Y)           SK_MACRO_CONCAT_IMPL_PRIV(X, Y)
#define SK_MACRO_CONCAT_IMPL_PRIV(X, Y)  X ## Y

/*
 *  Usage: SK_MACRO_APPEND_LINE(foo)    to make foo123, where 123 is the current
 *                                      line number. Easy way to construct
 *                                      unique names for local functions or
 *                                      variables.
 */
#define SK_MACRO_APPEND_LINE(name)  SK_MACRO_CONCAT(name, __LINE__)

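/*  Illustrative expansion sketch (the name 'tmp' is hypothetical): if the
    following appears on line 123 of a file,

        int SK_MACRO_APPEND_LINE(tmp) = 0;

    it expands to

        int tmp123 = 0;

    because SK_MACRO_CONCAT expands __LINE__ before the token paste.
*/
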
///////////////////////////////////////////////////////////////////////

/**
 *  Fast type for signed 8 bits. Use for parameter passing and local variables,
 *  not for storage.
 */
typedef int S8CPU;

/**
 *  Fast type for unsigned 8 bits. Use for parameter passing and local
 *  variables, not for storage
 */
typedef unsigned U8CPU;

/**
 *  Fast type for signed 16 bits. Use for parameter passing and local variables,
 *  not for storage
 */
typedef int S16CPU;

/**
 *  Fast type for unsigned 16 bits. Use for parameter passing and local
 *  variables, not for storage
 */
typedef unsigned U16CPU;

/**
 *  Meant to be faster than bool (doesn't promise to be 0 or 1,
 *  just 0 or non-zero)
 */
typedef int SkBool;

/**
 *  Meant to be a small version of bool, for storage purposes. Will be 0 or 1
 */
typedef uint8_t SkBool8;

#ifdef SK_DEBUG
    SK_API int8_t      SkToS8(long);
    SK_API uint8_t     SkToU8(size_t);
    SK_API int16_t     SkToS16(long);
    SK_API uint16_t    SkToU16(size_t);
    SK_API int32_t     SkToS32(long);
    SK_API uint32_t    SkToU32(size_t);
#else
    #define SkToS8(x)   ((int8_t)(x))
    #define SkToU8(x)   ((uint8_t)(x))
    #define SkToS16(x)  ((int16_t)(x))
    #define SkToU16(x)  ((uint16_t)(x))
    #define SkToS32(x)  ((int32_t)(x))
    #define SkToU32(x)  ((uint32_t)(x))
#endif

/** Returns 0 or 1 based on the condition
*/
#define SkToBool(cond)  ((cond) != 0)

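/*  Illustrative usage sketch (the values are hypothetical): in debug builds the
    SkTo* routines assert that the value fits in the narrower type; in release
    builds they are plain casts.

        size_t byteCount = 200;
        uint8_t packed = SkToU8(byteCount);    // ok: 200 fits in 8 bits
        bool hasBytes  = SkToBool(byteCount);  // true for any non-zero value
*/
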
#define SK_MaxS16   32767
#define SK_MinS16   -32767
#define SK_MaxU16   0xFFFF
#define SK_MinU16   0
#define SK_MaxS32   0x7FFFFFFF
#define SK_MinS32   -SK_MaxS32
#define SK_MaxU32   0xFFFFFFFF
#define SK_MinU32   0
#define SK_NaN32    (1 << 31)

/** Returns true if the value can be represented with signed 16bits
 */
static inline bool SkIsS16(long x) {
    return (int16_t)x == x;
}

/** Returns true if the value can be represented with unsigned 16bits
 */
static inline bool SkIsU16(long x) {
    return (uint16_t)x == x;
}

//////////////////////////////////////////////////////////////////////////////
#ifndef SK_OFFSETOF
    #define SK_OFFSETOF(type, field)    (size_t)((char*)&(((type*)1)->field) - (char*)1)
#endif

/** Returns the number of entries in an array (not a pointer)
*/
#define SK_ARRAY_COUNT(array)       (sizeof(array) / sizeof(array[0]))

#define SkAlign2(x)     (((x) + 1) >> 1 << 1)
#define SkIsAlign2(x)   (0 == ((x) & 1))

#define SkAlign4(x)     (((x) + 3) >> 2 << 2)
#define SkIsAlign4(x)   (0 == ((x) & 3))

#define SkAlign8(x)     (((x) + 7) >> 3 << 3)
#define SkIsAlign8(x)   (0 == ((x) & 7))

typedef uint32_t SkFourByteTag;
#define SkSetFourByteTag(a, b, c, d)    (((a) << 24) | ((b) << 16) | ((c) << 8) | (d))

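/*  Illustrative sketch of the helpers above (the values are hypothetical):

        SkAlign4(10)   // == 12, rounded up to the next multiple of 4
        SkIsAlign4(12) // true

        SkFourByteTag tag = SkSetFourByteTag('s', 'k', 'i', 'a');
        // packs the four bytes into one uint32_t, first argument in the high byte
*/
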
/** 32 bit integer to hold a unicode value
*/
typedef int32_t SkUnichar;
/** 32 bit value to hold a millisecond count
*/
typedef uint32_t SkMSec;
/** 1 second measured in milliseconds
*/
#define SK_MSec1 1000
/** maximum representable milliseconds
*/
#define SK_MSecMax 0x7FFFFFFF
/** Returns a < b for milliseconds, correctly handling wrap-around from 0xFFFFFFFF to 0
*/
#define SkMSec_LT(a, b)     ((int32_t)(a) - (int32_t)(b) < 0)
/** Returns a <= b for milliseconds, correctly handling wrap-around from 0xFFFFFFFF to 0
*/
#define SkMSec_LE(a, b)     ((int32_t)(a) - (int32_t)(b) <= 0)

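/*  Illustrative sketch of why the subtraction form above is used (the values
    are hypothetical): comparing SkMSec values with a plain '<' breaks when the
    32-bit counter wraps, but the signed difference stays correct as long as the
    two times are less than 2^31 ms (about 24.8 days) apart.

        SkMSec before = 0xFFFFFFF0;   // just before wrap-around
        SkMSec after  = 0x00000010;   // just after wrap-around
        SkMSec_LT(before, after);     // true: the int32_t difference is negative
*/
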
/****************************************************************************
    The rest of these only build with C++
*/
#ifdef __cplusplus

/** Faster than SkToBool for integral conditions. Returns 0 or 1
*/
static inline int Sk32ToBool(uint32_t n) {
    // n | (0 - n) has its sign bit set exactly when n is non-zero
    return (n | (0-n)) >> 31;
}

template <typename T> inline void SkTSwap(T& a, T& b) {
    T c(a);
    a = b;
    b = c;
}

static inline int32_t SkAbs32(int32_t value) {
#ifdef SK_CPU_HAS_CONDITIONAL_INSTR
    if (value < 0)
        value = -value;
    return value;
#else
    // branchless: mask is all 1s for negative values, 0 otherwise
    int32_t mask = value >> 31;
    return (value ^ mask) - mask;
#endif
}

template <typename T> inline T SkTAbs(T value) {
    if (value < 0) {
        value = -value;
    }
    return value;
}

static inline int32_t SkMax32(int32_t a, int32_t b) {
    if (a < b)
        a = b;
    return a;
}

static inline int32_t SkMin32(int32_t a, int32_t b) {
    if (a > b)
        a = b;
    return a;
}

// returns -1, 0, or +1 depending on the sign of a
static inline int32_t SkSign32(int32_t a) {
    return (a >> 31) | ((unsigned) -a >> 31);
}

static inline int32_t SkFastMin32(int32_t value, int32_t max) {
#ifdef SK_CPU_HAS_CONDITIONAL_INSTR
    if (value > max)
        value = max;
    return value;
#else
    int diff = max - value;
    // clear diff if it is negative (clear if value > max)
    diff &= (diff >> 31);
    return value + diff;
#endif
}

/** Returns signed 32 bit value pinned between min and max, inclusively
*/
static inline int32_t SkPin32(int32_t value, int32_t min, int32_t max) {
#ifdef SK_CPU_HAS_CONDITIONAL_INSTR
    if (value < min)
        value = min;
    if (value > max)
        value = max;
#else
    if (value < min)
        value = min;
    else if (value > max)
        value = max;
#endif
    return value;
}

static inline uint32_t SkSetClearShift(uint32_t bits, bool cond,
                                       unsigned shift) {
    SkASSERT((int)cond == 0 || (int)cond == 1);
    return (bits & ~(1 << shift)) | ((int)cond << shift);
}

static inline uint32_t SkSetClearMask(uint32_t bits, bool cond,
                                      uint32_t mask) {
    return cond ? bits | mask : bits & ~mask;
}

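/*  Illustrative usage sketch for the bit helpers above (the flag values are
    hypothetical):

        uint32_t flags = 0;
        flags = SkSetClearMask(flags, true, 0x04);   // sets the 0x04 bit
        flags = SkSetClearShift(flags, false, 2);    // clears bit 2 again -> 0
*/
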
///////////////////////////////////////////////////////////////////////////////

/** Use to combine multiple bits in a bitmask in a type safe way.
 */
template <typename T>
T SkTBitOr(T a, T b) {
    return (T)(a | b);
}

/**
 *  Use to cast a pointer to a different type, while maintaining strict-aliasing
 */
template <typename Dst> Dst SkTCast(const void* ptr) {
    union {
        const void* src;
        Dst dst;
    } data;
    data.src = ptr;
    return data.dst;
}

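/*  Illustrative usage sketch for SkTCast (the variable names are hypothetical):
    the union avoids the strict-aliasing issues a plain reinterpret_cast can
    trigger.

        uint32_t bits = 0x3F800000;               // IEEE-754 bit pattern of 1.0f
        float* asFloat = SkTCast<float*>(&bits);  // view the same storage as float
*/
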
//////////////////////////////////////////////////////////////////////////////

/** \class SkNoncopyable

SkNoncopyable is the base class for objects that do not want to
be copied. It hides its copy-constructor and its assignment-operator.
*/
class SK_API SkNoncopyable {
public:
    SkNoncopyable() {}

private:
    SkNoncopyable(const SkNoncopyable&);
    SkNoncopyable& operator=(const SkNoncopyable&);
};

class SkAutoFree : SkNoncopyable {
public:
    SkAutoFree() : fPtr(NULL) {}
    explicit SkAutoFree(void* ptr) : fPtr(ptr) {}
    ~SkAutoFree() { sk_free(fPtr); }

    /** Return the currently allocated buffer, or null
    */
    void* get() const { return fPtr; }

    /** Assign a new ptr allocated with sk_malloc (or null), and return the
        previous ptr. Note it is the caller's responsibility to sk_free the
        returned ptr.
    */
    void* set(void* ptr) {
        void* prev = fPtr;
        fPtr = ptr;
        return prev;
    }

    /** Transfer ownership of the current ptr to the caller, setting the
        internal reference to null. Note the caller is responsible for calling
        sk_free on the returned address.
    */
    void* detach() { return this->set(NULL); }

    /** Free the current buffer, and set the internal reference to NULL. Same
        as calling sk_free(detach())
    */
    void free() {
        sk_free(fPtr);
        fPtr = NULL;
    }

private:
    void* fPtr;
    // illegal
    SkAutoFree(const SkAutoFree&);
    SkAutoFree& operator=(const SkAutoFree&);
};
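
/*  Illustrative usage sketch (the names are hypothetical): SkAutoFree ties a
    sk_malloc'd block to a stack scope so early returns cannot leak it.

        {
            SkAutoFree autoBuffer(sk_malloc_throw(256));
            void* buffer = autoBuffer.get();
            // ... use buffer; sk_free() runs automatically at the end of the scope
        }
*/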

/**
 *  Manage an allocated block of heap memory. This object is the sole manager of
 *  the lifetime of the block, so the caller must not call sk_free() or delete
 *  on the block, unless detach() was called.
 */
class SkAutoMalloc : public SkNoncopyable {
public:
    explicit SkAutoMalloc(size_t size = 0) {
        fPtr = size ? sk_malloc_throw(size) : NULL;
        fSize = size;
    }

    ~SkAutoMalloc() {
        sk_free(fPtr);
    }

    /**
     *  Passed to reset to specify what happens if the requested size is smaller
     *  than the current size (and the current block was dynamically allocated).
     */
    enum OnShrink {
        /**
         *  If the requested size is smaller than the current size, and the
         *  current block is dynamically allocated, free the old block and
         *  malloc a new block of the smaller size.
         */
        kAlloc_OnShrink,

        /**
         *  If the requested size is smaller than the current size, and the
         *  current block is dynamically allocated, just return the old
         *  block.
         */
        kReuse_OnShrink
    };

    /**
     *  Reallocates the block to a new size. The ptr may or may not change.
     */
    void* reset(size_t size, OnShrink shrink = kAlloc_OnShrink) {
        if (size == fSize || (kReuse_OnShrink == shrink && size < fSize)) {
            return fPtr;
        }

        sk_free(fPtr);
        fPtr = size ? sk_malloc_throw(size) : NULL;
        fSize = size;

        return fPtr;
    }

    /**
     *  Releases the block back to the heap
     */
    void free() {
        this->reset(0);
    }

    /**
     *  Return the allocated block.
     */
    void* get() { return fPtr; }
    const void* get() const { return fPtr; }

   /** Transfer ownership of the current ptr to the caller, setting the
       internal reference to null. Note the caller is responsible for calling
       sk_free on the returned address.
    */
    void* detach() {
        void* ptr = fPtr;
        fPtr = NULL;
        fSize = 0;
        return ptr;
    }

private:
    void*   fPtr;
    size_t  fSize;  // can be larger than the requested size (see kReuse)
};
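
/*  Illustrative usage sketch (the sizes are hypothetical):

        SkAutoMalloc storage(64);                              // heap block, freed in the destructor
        void* p = storage.get();
        p = storage.reset(32, SkAutoMalloc::kReuse_OnShrink);  // keeps the original 64-byte block
        p = storage.reset(128);                                // frees it and allocates a larger one
*/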

/**
 *  Manage an allocated block of memory. If the requested size is <= kSize, then
 *  the allocation will come from the stack rather than the heap. This object
 *  is the sole manager of the lifetime of the block, so the caller must not
 *  call sk_free() or delete on the block.
 */
template <size_t kSize> class SkAutoSMalloc : SkNoncopyable {
public:
    /**
     *  Creates initially empty storage. get() returns a ptr, but it is to
     *  a zero-byte allocation. Must call reset(size) to return an allocated
     *  block.
     */
    SkAutoSMalloc() {
        fPtr = fStorage;
        fSize = 0;
    }

    /**
     *  Allocate a block of the specified size. If size <= kSize, then the
     *  allocation will come from the stack, otherwise it will be dynamically
     *  allocated.
     */
    explicit SkAutoSMalloc(size_t size) {
        fPtr = fStorage;
        fSize = 0;
        this->reset(size);
    }

    /**
     *  Free the allocated block (if any). If the block was small enough to
     *  have been allocated on the stack (size <= kSize) then this does nothing.
     */
    ~SkAutoSMalloc() {
        if (fPtr != (void*)fStorage) {
            sk_free(fPtr);
        }
    }

    /**
     *  Return the allocated block. May return non-null even if the block is
     *  of zero size. Since this may be on the stack or dynamically allocated,
     *  the caller must not call sk_free() on it, but must rely on SkAutoSMalloc
     *  to manage it.
     */
    void* get() const { return fPtr; }

    /**
     *  Return a new block of the requested size, freeing (as necessary) any
     *  previously allocated block. As with the constructor, if size <= kSize
     *  then the return block may be allocated locally, rather than from the
     *  heap.
     */
    void* reset(size_t size,
                SkAutoMalloc::OnShrink shrink = SkAutoMalloc::kAlloc_OnShrink) {
        if (size == fSize || (SkAutoMalloc::kReuse_OnShrink == shrink &&
                              size < fSize)) {
            return fPtr;
        }

        if (fPtr != (void*)fStorage) {
            sk_free(fPtr);
        }

        if (size <= kSize) {
            fPtr = fStorage;
        } else {
            fPtr = sk_malloc_flags(size, SK_MALLOC_THROW | SK_MALLOC_TEMP);
        }
        return fPtr;
    }

private:
    void*       fPtr;
    size_t      fSize;  // can be larger than the requested size (see kReuse)
    uint32_t    fStorage[(kSize + 3) >> 2];
};
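
/*  Illustrative usage sketch (the sizes are hypothetical): with kSize == 64,
    small requests live in the embedded fStorage array and only larger requests
    touch the heap.

        SkAutoSMalloc<64> storage(48);   // satisfied from the stack-side storage
        void* p = storage.reset(256);    // too big: falls back to sk_malloc_flags()
*/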

#endif /* C++ */

#endif