
/*
 * Copyright 2006 The Android Open Source Project
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */


#ifndef SkTypes_DEFINED
#define SkTypes_DEFINED

#include "SkPreConfig.h"
#include "SkUserConfig.h"
#include "SkPostConfig.h"

#ifndef SK_IGNORE_STDINT_DOT_H
    #include <stdint.h>
#endif

#include <stdio.h>

/** \file SkTypes.h
*/

/** See SkGraphics::GetVersion() to retrieve these at runtime
 */
#define SKIA_VERSION_MAJOR  1
#define SKIA_VERSION_MINOR  0
#define SKIA_VERSION_PATCH  0

/*
    memory wrappers to be implemented by the porting layer (platform)
*/

/** Called internally if we run out of memory. The platform implementation must
    not return, but should either throw an exception or otherwise exit.
*/
SK_API extern void sk_out_of_memory(void);
/** Called internally if we hit an unrecoverable error.
    The platform implementation must not return, but should either throw
    an exception or otherwise exit.
*/
SK_API extern void sk_throw(void);

enum {
    SK_MALLOC_TEMP  = 0x01, //!< hint to sk_malloc that the requested memory will be freed in the scope of the stack frame
    SK_MALLOC_THROW = 0x02  //!< instructs sk_malloc to call sk_throw if the memory cannot be allocated.
};
/** Return a block of memory (at least 4-byte aligned) of at least the
    specified size. If the requested memory cannot be returned, either
    return null (if the SK_MALLOC_THROW bit is clear) or call sk_throw()
    (if the SK_MALLOC_THROW bit is set). To free the memory, call sk_free().
*/
SK_API extern void* sk_malloc_flags(size_t size, unsigned flags);
/** Same as sk_malloc(), but hard coded to pass SK_MALLOC_THROW as the flag
*/
SK_API extern void* sk_malloc_throw(size_t size);
/** Same as standard realloc(), but this one never returns null on failure. It will throw
    an exception if it fails.
*/
SK_API extern void* sk_realloc_throw(void* buffer, size_t size);
/** Free memory returned by sk_malloc(). It is safe to pass null.
*/
SK_API extern void sk_free(void*);
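
/*  Usage sketch for the allocator entry points above (illustrative only;
    byteCount is a caller-supplied size, not part of this header):

        // returns null on failure because SK_MALLOC_THROW is not passed
        void* scratch = sk_malloc_flags(byteCount, SK_MALLOC_TEMP);
        if (scratch) {
            // ... use the block ...
            sk_free(scratch);
        }

        // never returns null; calls sk_throw() on failure
        void* storage = sk_malloc_throw(byteCount);
        storage = sk_realloc_throw(storage, 2 * byteCount);
        sk_free(storage);
*/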

// bzero is safer than memset, but we can't rely on it, so... sk_bzero()
static inline void sk_bzero(void* buffer, size_t size) {
    memset(buffer, 0, size);
}

///////////////////////////////////////////////////////////////////////////////

#ifdef SK_OVERRIDE_GLOBAL_NEW
#include <new>

inline void* operator new(size_t size) {
    return sk_malloc_throw(size);
}

inline void operator delete(void* p) {
    sk_free(p);
}
#endif

///////////////////////////////////////////////////////////////////////////////

#define SK_INIT_TO_AVOID_WARNING    = 0

#ifndef SkDebugf
    void SkDebugf(const char format[], ...);
#endif

#ifdef SK_DEBUG
    #define SkASSERT(cond)              SK_DEBUGBREAK(cond)
    #define SkDEBUGFAIL(message)        SkASSERT(false && message)
    #define SkDEBUGCODE(code)           code
    #define SkDECLAREPARAM(type, var)   , type var
    #define SkPARAM(var)                , var
//  #define SkDEBUGF(args)              SkDebugf##args
    #define SkDEBUGF(args)              SkDebugf args
    #define SkAssertResult(cond)        SkASSERT(cond)
#else
    #define SkASSERT(cond)
    #define SkDEBUGFAIL(message)
    #define SkDEBUGCODE(code)
    #define SkDEBUGF(args)
    #define SkDECLAREPARAM(type, var)
    #define SkPARAM(var)

    // unlike SkASSERT, this guy executes its condition in the non-debug build
    #define SkAssertResult(cond)        cond
#endif
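
/*  Typical use of the assert/debug macros above (illustrative sketch; count,
    saveCount, and doSomething() are hypothetical names for the example):

        SkASSERT(count >= 0);                   // compiled out unless SK_DEBUG
        SkDEBUGCODE(int saveCount = count;)     // declaration exists only in debug builds
        SkDEBUGF(("count=%d\n", count));        // note the double parentheses
        SkAssertResult(doSomething());          // doSomething() still runs in release builds
*/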

namespace {

template <bool>
struct SkCompileAssert {
};

}  // namespace

#define SK_COMPILE_ASSERT(expr, msg) \
    typedef SkCompileAssert<(bool(expr))> msg[bool(expr) ? 1 : -1]
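
/*  Example (illustrative): the msg argument doubles as the typedef name, so it
    must be a valid identifier that describes the failure.

        SK_COMPILE_ASSERT(sizeof(int32_t) == 4, int32_t_is_not_4_bytes);

    If the expression is false, the array size becomes -1 and compilation stops.
*/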

///////////////////////////////////////////////////////////////////////

/**
 *  Fast type for signed 8 bits. Use for parameter passing and local variables,
 *  not for storage.
 */
typedef int S8CPU;

/**
 *  Fast type for unsigned 8 bits. Use for parameter passing and local
 *  variables, not for storage.
 */
typedef unsigned U8CPU;

/**
 *  Fast type for signed 16 bits. Use for parameter passing and local variables,
 *  not for storage.
 */
typedef int S16CPU;

/**
 *  Fast type for unsigned 16 bits. Use for parameter passing and local
 *  variables, not for storage.
 */
typedef unsigned U16CPU;

/**
 *  Meant to be faster than bool (doesn't promise to be 0 or 1, just 0 or
 *  non-zero).
 */
typedef int SkBool;

/**
 *  Meant to be a small version of bool, for storage purposes. Will be 0 or 1.
 */
typedef uint8_t SkBool8;

#ifdef SK_DEBUG
    SK_API int8_t      SkToS8(long);
    SK_API uint8_t     SkToU8(size_t);
    SK_API int16_t     SkToS16(long);
    SK_API uint16_t    SkToU16(size_t);
    SK_API int32_t     SkToS32(long);
    SK_API uint32_t    SkToU32(size_t);
#else
    #define SkToS8(x)   ((int8_t)(x))
    #define SkToU8(x)   ((uint8_t)(x))
    #define SkToS16(x)  ((int16_t)(x))
    #define SkToU16(x)  ((uint16_t)(x))
    #define SkToS32(x)  ((int32_t)(x))
    #define SkToU32(x)  ((uint32_t)(x))
#endif

/** Returns 0 or 1 based on the condition
*/
#define SkToBool(cond)  ((cond) != 0)
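
/*  Usage sketch (illustrative): in debug builds the SkTo* helpers verify that
    the value fits in the narrower type; in release builds they are plain casts.
    n is a hypothetical value for the example.

        size_t n = someContainerSize;
        uint16_t small = SkToU16(n);        // checked against 0xFFFF under SK_DEBUG
        bool nonEmpty = SkToBool(n);        // 0 -> false, anything else -> true
*/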

#define SK_MaxS16   32767
#define SK_MinS16   -32767
#define SK_MaxU16   0xFFFF
#define SK_MinU16   0
#define SK_MaxS32   0x7FFFFFFF
#define SK_MinS32   0x80000001
#define SK_MaxU32   0xFFFFFFFF
#define SK_MinU32   0
#define SK_NaN32    0x80000000

/** Returns true if the value can be represented with signed 16bits
 */
static inline bool SkIsS16(long x) {
    return (int16_t)x == x;
}

/** Returns true if the value can be represented with unsigned 16bits
 */
static inline bool SkIsU16(long x) {
    return (uint16_t)x == x;
}

//////////////////////////////////////////////////////////////////////////////
#ifndef SK_OFFSETOF
    #define SK_OFFSETOF(type, field)    ((char*)&(((type*)1)->field) - (char*)1)
#endif

/** Returns the number of entries in an array (not a pointer)
*/
#define SK_ARRAY_COUNT(array)       (sizeof(array) / sizeof(array[0]))
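
/*  Example (illustrative; Pair and kValues are hypothetical names):

        static const int kValues[] = { 1, 2, 3, 5, 8 };
        for (size_t i = 0; i < SK_ARRAY_COUNT(kValues); ++i) {
            // SK_ARRAY_COUNT is 5 here; it only works on true arrays,
            // not on pointers that have decayed from arrays.
        }

        struct Pair { int32_t fA, fB; };
        size_t offset = SK_OFFSETOF(Pair, fB);  // byte offset of fB within Pair
*/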

/** Returns x rounded up to a multiple of 2
*/
#define SkAlign2(x)     (((x) + 1) >> 1 << 1)
/** Returns x rounded up to a multiple of 4
*/
#define SkAlign4(x)     (((x) + 3) >> 2 << 2)

#define SkIsAlign4(x)   (((x) & 3) == 0)
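
/*  Example (illustrative): rounding byte counts up for aligned storage.

        size_t rowBytes = SkAlign4(15);     // 15 -> 16
        SkASSERT(SkIsAlign4(rowBytes));
        size_t pairBytes = SkAlign2(7);     // 7 -> 8
*/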

typedef uint32_t SkFourByteTag;
#define SkSetFourByteTag(a, b, c, d)    (((a) << 24) | ((b) << 16) | ((c) << 8) | (d))
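
/*  Example (illustrative): build the tag 'true'.

        const SkFourByteTag kTag = SkSetFourByteTag('t', 'r', 'u', 'e');
        // kTag == 0x74727565 regardless of endianness, because the macro
        // packs the bytes explicitly.
*/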

/** 32 bit integer to hold a unicode value
*/
typedef int32_t SkUnichar;
/** 32 bit value to hold a millisecond count
*/
typedef uint32_t SkMSec;
/** 1 second measured in milliseconds
*/
#define SK_MSec1 1000
/** maximum representable milliseconds
*/
#define SK_MSecMax 0x7FFFFFFF
/** Returns a < b for milliseconds, correctly handling wrap-around from 0xFFFFFFFF to 0
*/
#define SkMSec_LT(a, b)     ((int32_t)(a) - (int32_t)(b) < 0)
/** Returns a <= b for milliseconds, correctly handling wrap-around from 0xFFFFFFFF to 0
*/
#define SkMSec_LE(a, b)     ((int32_t)(a) - (int32_t)(b) <= 0)
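
/*  Example (illustrative): the subtraction-based comparison stays correct even
    when the millisecond counter wraps past 0xFFFFFFFF. The timestamps below
    are hypothetical.

        SkMSec start = 0xFFFFFF00;
        SkMSec later = 0x00000100;          // counter has wrapped around
        bool ok = SkMSec_LT(start, later);  // true: (int32_t)(a) - (int32_t)(b) is negative
*/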

/****************************************************************************
    The rest of these only build with C++
*/
#ifdef __cplusplus

/** Faster than SkToBool for integral conditions. Returns 0 or 1
*/
static inline int Sk32ToBool(uint32_t n) {
    return (n | (0-n)) >> 31;
}

template <typename T> inline void SkTSwap(T& a, T& b) {
    T c(a);
    a = b;
    b = c;
}

static inline int32_t SkAbs32(int32_t value) {
#ifdef SK_CPU_HAS_CONDITIONAL_INSTR
    if (value < 0)
        value = -value;
    return value;
#else
    int32_t mask = value >> 31;
    return (value ^ mask) - mask;
#endif
}
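
/*  The branch-free path above relies on the two's-complement identity
    |v| == (v ^ mask) - mask, where mask = v >> 31 is all ones for negative v
    and all zeros otherwise. Illustrative check:

        SkASSERT(SkAbs32(-5) == 5);
        SkASSERT(SkAbs32(7) == 7);
        // Note: the most negative 32-bit value has no positive counterpart,
        // so it is not a meaningful input.
*/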

static inline int32_t SkMax32(int32_t a, int32_t b) {
    if (a < b)
        a = b;
    return a;
}

static inline int32_t SkMin32(int32_t a, int32_t b) {
    if (a > b)
        a = b;
    return a;
}

static inline int32_t SkSign32(int32_t a) {
    return (a >> 31) | ((unsigned) -a >> 31);
}

static inline int32_t SkFastMin32(int32_t value, int32_t max) {
#ifdef SK_CPU_HAS_CONDITIONAL_INSTR
    if (value > max)
        value = max;
    return value;
#else
    int diff = max - value;
    // clear diff if it is negative (clear if value > max)
    diff &= (diff >> 31);
    return value + diff;
#endif
}
/** Returns signed 32 bit value pinned between min and max, inclusive
*/
static inline int32_t SkPin32(int32_t value, int32_t min, int32_t max) {
#ifdef SK_CPU_HAS_CONDITIONAL_INSTR
    if (value < min)
        value = min;
    if (value > max)
        value = max;
#else
    if (value < min)
        value = min;
    else if (value > max)
        value = max;
#endif
    return value;
}

static inline uint32_t SkSetClearShift(uint32_t bits, bool cond,
                                       unsigned shift) {
    SkASSERT((int)cond == 0 || (int)cond == 1);
    return (bits & ~(1 << shift)) | ((int)cond << shift);
}

static inline uint32_t SkSetClearMask(uint32_t bits, bool cond,
                                      uint32_t mask) {
    return cond ? bits | mask : bits & ~mask;
}
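
/*  Example (illustrative): conditionally setting or clearing flag bits. The
    enum names below are hypothetical.

        enum { kExampleFlag_Shift = 3, kExampleFlag_Mask = 1 << 3 };
        uint32_t flags = 0;
        flags = SkSetClearShift(flags, true,  kExampleFlag_Shift);  // sets bit 3
        flags = SkSetClearMask (flags, false, kExampleFlag_Mask);   // clears bit 3
*/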

///////////////////////////////////////////////////////////////////////////////

/** Use to combine multiple bits in a bitmask in a type safe way.
*/
template <typename T>
T SkTBitOr(T a, T b) {
    return (T)(a | b);
}

/**
 *  Use to cast a pointer to a different type, while maintaining strict-aliasing
 */
template <typename Dst> Dst SkTCast(const void* ptr) {
    union {
        const void* src;
        Dst dst;
    } data;
    data.src = ptr;
    return data.dst;
}
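
/*  Example (illustrative): reinterpret the bits of a float without tripping
    strict-aliasing rules, by going through the union inside SkTCast.

        float value = 1.0f;
        const uint32_t* bits = SkTCast<const uint32_t*>(&value);
        // *bits == 0x3F800000 for IEEE-754 single precision
*/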

//////////////////////////////////////////////////////////////////////////////

/** \class SkNoncopyable

    SkNoncopyable is the base class for objects that do not want to
    be copied. It hides its copy-constructor and its assignment-operator.
*/
class SK_API SkNoncopyable {
public:
    SkNoncopyable() {}

private:
    SkNoncopyable(const SkNoncopyable&);
    SkNoncopyable& operator=(const SkNoncopyable&);
};

class SkAutoFree : SkNoncopyable {
public:
    SkAutoFree() : fPtr(NULL) {}
    explicit SkAutoFree(void* ptr) : fPtr(ptr) {}
    ~SkAutoFree() { sk_free(fPtr); }

    /** Return the currently allocated buffer, or null
    */
    void* get() const { return fPtr; }

    /** Assign a new ptr allocated with sk_malloc (or null), and return the
        previous ptr. Note it is the caller's responsibility to sk_free the
        returned ptr.
    */
    void* set(void* ptr) {
        void* prev = fPtr;
        fPtr = ptr;
        return prev;
    }

    /** Transfer ownership of the current ptr to the caller, setting the
        internal reference to null. Note the caller is responsible for calling
        sk_free on the returned address.
    */
    void* detach() { return this->set(NULL); }

    /** Free the current buffer, and set the internal reference to NULL. Same
        as calling sk_free(detach())
    */
    void free() {
        sk_free(fPtr);
        fPtr = NULL;
    }

private:
    void* fPtr;
    // illegal
    SkAutoFree(const SkAutoFree&);
    SkAutoFree& operator=(const SkAutoFree&);
};
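
/*  Usage sketch (illustrative): let SkAutoFree release a sk_malloc'd block when
    the scope exits, or hand ownership back with detach(). src and byteCount are
    hypothetical.

        SkAutoFree autoBuffer(sk_malloc_throw(byteCount));
        memcpy(autoBuffer.get(), src, byteCount);
        void* keep = autoBuffer.detach();   // caller now owns keep and must sk_free() it
*/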

/**
 *  Manage an allocated block of heap memory. This object is the sole manager of
 *  the lifetime of the block, so the caller must not call sk_free() or delete
 *  on the block, unless detach() was called.
 */
class SkAutoMalloc : public SkNoncopyable {
public:
    explicit SkAutoMalloc(size_t size = 0) {
        fPtr = size ? sk_malloc_throw(size) : NULL;
        fSize = size;
    }

    ~SkAutoMalloc() {
        sk_free(fPtr);
    }

    /**
     *  Passed to reset to specify what happens if the requested size is smaller
     *  than the current size (and the current block was dynamically allocated).
     */
    enum OnShrink {
        /**
         *  If the requested size is smaller than the current size, and the
         *  current block is dynamically allocated, free the old block and
         *  malloc a new block of the smaller size.
         */
        kAlloc_OnShrink,

        /**
         *  If the requested size is smaller than the current size, and the
         *  current block is dynamically allocated, just return the old
         *  block.
         */
        kReuse_OnShrink,
    };

    /**
     *  Reallocates the block to a new size. The ptr may or may not change.
     */
    void* reset(size_t size, OnShrink shrink = kAlloc_OnShrink) {
        if (size == fSize || (kReuse_OnShrink == shrink && size < fSize)) {
            return fPtr;
        }

        sk_free(fPtr);
        fPtr = size ? sk_malloc_throw(size) : NULL;
        fSize = size;

        return fPtr;
    }

    /**
     *  Releases the block back to the heap
     */
    void free() {
        this->reset(0);
    }

    /**
     *  Return the allocated block.
     */
    void* get() { return fPtr; }
    const void* get() const { return fPtr; }

    /** Transfer ownership of the current ptr to the caller, setting the
        internal reference to null. Note the caller is responsible for calling
        sk_free on the returned address.
    */
    void* detach() {
        void* ptr = fPtr;
        fPtr = NULL;
        fSize = 0;
        return ptr;
    }

private:
    void*   fPtr;
    size_t  fSize;  // can be larger than the requested size (see kReuse)
};
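
/*  Usage sketch (illustrative; firstSize and secondSize are hypothetical):

        SkAutoMalloc storage(firstSize);
        void* buffer = storage.get();
        // Grow (or shrink) later; with kReuse_OnShrink a smaller request keeps
        // the existing block instead of reallocating.
        buffer = storage.reset(secondSize, SkAutoMalloc::kReuse_OnShrink);
        // storage frees the block in its destructor unless detach() was called.
*/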

/**
 *  Manage an allocated block of memory. If the requested size is <= kSize, then
 *  the allocation will come from the stack rather than the heap. This object
 *  is the sole manager of the lifetime of the block, so the caller must not
 *  call sk_free() or delete on the block.
 */
template <size_t kSize> class SkAutoSMalloc : SkNoncopyable {
public:
    /**
     *  Creates initially empty storage. get() returns a ptr, but it is to
     *  a zero-byte allocation. Must call reset(size) to return an allocated
     *  block.
     */
    SkAutoSMalloc() {
        fPtr = fStorage;
        fSize = 0;
    }

    /**
     *  Allocate a block of the specified size. If size <= kSize, then the
     *  allocation will come from the stack, otherwise it will be dynamically
     *  allocated.
     */
    explicit SkAutoSMalloc(size_t size) {
        fPtr = fStorage;
        fSize = 0;
        this->reset(size);
    }

    /**
     *  Free the allocated block (if any). If the block was small enough to
     *  have been allocated on the stack (size <= kSize) then this does nothing.
     */
    ~SkAutoSMalloc() {
        if (fPtr != (void*)fStorage) {
            sk_free(fPtr);
        }
    }

    /**
     *  Return the allocated block. May return non-null even if the block is
     *  of zero size. Since this may be on the stack or dynamically allocated,
     *  the caller must not call sk_free() on it, but must rely on SkAutoSMalloc
     *  to manage it.
     */
    void* get() const { return fPtr; }

    /**
     *  Return a new block of the requested size, freeing (as necessary) any
     *  previously allocated block. As with the constructor, if size <= kSize
     *  then the return block may be allocated locally, rather than from the
     *  heap.
     */
    void* reset(size_t size,
                SkAutoMalloc::OnShrink shrink = SkAutoMalloc::kAlloc_OnShrink) {
        if (size == fSize || (SkAutoMalloc::kReuse_OnShrink == shrink &&
                              size < fSize)) {
            return fPtr;
        }

        if (fPtr != (void*)fStorage) {
            sk_free(fPtr);
        }

        if (size <= kSize) {
            fPtr = fStorage;
        } else {
            fPtr = sk_malloc_flags(size, SK_MALLOC_THROW | SK_MALLOC_TEMP);
        }
        return fPtr;
    }

private:
    void*       fPtr;
    size_t      fSize;  // can be larger than the requested size (see kReuse)
    uint32_t    fStorage[(kSize + 3) >> 2];
};
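
/*  Usage sketch (illustrative; bytesNeeded is hypothetical): small requests use
    the internal stack storage, larger ones fall back to the heap.

        SkAutoSMalloc<1024> storage(bytesNeeded);
        void* buffer = storage.get();   // stack memory if bytesNeeded <= 1024
        buffer = storage.reset(4096);   // now heap-allocated; any previous heap block is freed
*/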

#endif /* C++ */

#endif