//----------------------------------------------------------------------------
// Anti-Grain Geometry - Version 2.3
// Copyright (C) 2002-2005 Maxim Shemanarev (http://www.antigrain.com)
//
// Permission to copy, use, modify, sell and distribute this software
// is granted provided this copyright notice appears in all copies.
// This software is provided "as is" without express or implied
// warranty, and with no claim as to its suitability for any purpose.
//
//----------------------------------------------------------------------------
// Contact: mcseem@antigrain.com
//          mcseemagg@yahoo.com
//          http://www.antigrain.com
//----------------------------------------------------------------------------
#ifndef AGG_ARRAY_INCLUDED
#define AGG_ARRAY_INCLUDED

#include "agg_basics.h"
#include "core/fxcrt/fx_memory.h"  // For FXSYS_* macros.

namespace agg
{
// Dynamically sized POD buffer with a vector-like interface.
// Elements live in one contiguous FX_Alloc'd buffer and are moved
// around with memcpy/memset, so T must be trivially copyable:
// element constructors and destructors are never invoked.
template <class T>
class pod_array {
public:
    typedef T value_type;
    ~pod_array()
    {
        FX_Free(m_array);
    }
    pod_array() : m_size(0), m_capacity(0), m_array(0) {}
    // Reserves cap + extra_tail elements (definition below).
    pod_array(unsigned cap, unsigned extra_tail = 0);
    pod_array(const pod_array<T>&);
    pod_array<T>& operator = (const pod_array<T>&);
    // Discards contents and reserves cap + extra_tail elements.
    void capacity(unsigned cap, unsigned extra_tail = 0);
    unsigned capacity() const
    {
        return m_capacity;
    }
    // Reserves size + extra_tail elements and marks `size` in use
    // (contents uninitialized).
    void allocate(unsigned size, unsigned extra_tail = 0);
    // Grows storage (logical size unchanged) or shrinks logical size.
    void resize(unsigned new_size);
    // Zero-fills only the in-use portion, not the full capacity.
    void zero() { memset(m_array, 0, sizeof(T) * m_size); }
    // Appends without bounds checking: the caller must have reserved
    // enough capacity beforehand.
    void add(const T& v)
    {
        m_array[m_size++] = v;
    }
    // Claims `size` more elements as in use; no allocation, no checks.
    void inc_size(unsigned size)
    {
        m_size += size;
    }
    unsigned size() const
    {
        return m_size;
    }
    unsigned byte_size() const
    {
        return m_size * sizeof(T);
    }
    // All element accessors below are unchecked; i must be < size().
    const T& operator [] (unsigned i) const
    {
        return m_array[i];
    }
    T& operator [] (unsigned i)
    {
        return m_array[i];
    }
    // at() is unchecked here too, unlike std::vector::at().
    const T& at(unsigned i) const
    {
        return m_array[i];
    }
    T& at(unsigned i)
    {
        return m_array[i];
    }
    T value_at(unsigned i) const
    {
        return m_array[i];
    }
    const T* data() const
    {
        return m_array;
    }
    T* data()
    {
        return m_array;
    }
    // Logical clear; capacity is retained.
    void remove_all()
    {
        m_size = 0;
    }
    // Truncates to num elements; never grows.
    void cut_at(unsigned num)
    {
        if(num < m_size) {
            m_size = num;
        }
    }
private:
    unsigned m_size;      // Elements in use.
    unsigned m_capacity;  // Elements allocated.
    T* m_array;           // FX_Alloc'd buffer (null when capacity is 0).
};
103 template<class T>
capacity(unsigned cap,unsigned extra_tail)104 void pod_array<T>::capacity(unsigned cap, unsigned extra_tail)
105 {
106 m_size = 0;
107 unsigned full_cap = cap + extra_tail;
108 if(full_cap < cap) {
109 FX_Free(m_array);
110 m_array = 0;
111 m_capacity = 0;
112 } else if(full_cap > m_capacity) {
113 FX_Free(m_array);
114 m_array = FX_Alloc(T, full_cap);
115 m_capacity = full_cap;
116 }
117 }
// Reserves room for size + extra_tail elements and marks `size` of
// them as in use.  Element contents are left uninitialized.
template<class T>
void pod_array<T>::allocate(unsigned size, unsigned extra_tail)
{
    capacity(size, extra_tail);
    m_size = size;
}
124 template<class T>
resize(unsigned new_size)125 void pod_array<T>::resize(unsigned new_size)
126 {
127 if(new_size > m_size) {
128 if(new_size > m_capacity) {
129 T* data = FX_Alloc(T, new_size);
130 memcpy(data, m_array, m_size * sizeof(T));
131 FX_Free(m_array);
132 m_array = data;
133 }
134 } else {
135 m_size = new_size;
136 }
137 }
pod_array(unsigned cap,unsigned extra_tail)138 template<class T> pod_array<T>::pod_array(unsigned cap, unsigned extra_tail) :
139 m_size(0), m_capacity(cap + extra_tail), m_array(FX_Alloc(T, m_capacity)) {}
// Copy construction: duplicates v's capacity and in-use elements.
template<class T> pod_array<T>::pod_array(const pod_array<T>& v) :
    m_size(v.m_size),
    m_capacity(v.m_capacity),
    m_array(v.m_capacity ? FX_Alloc(T, v.m_capacity) : 0)
{
    // Guard the copy: when v is empty both pointers may be null, and
    // memcpy with a null pointer is undefined behavior even for a
    // zero-byte count.  This also matches operator=, which guards.
    if(v.m_size) {
        memcpy(m_array, v.m_array, sizeof(T) * v.m_size);
    }
}
147 template<class T> pod_array<T>&
148 pod_array<T>::operator = (const pod_array<T>&v)
149 {
150 allocate(v.m_size);
151 if(v.m_size) {
152 memcpy(m_array, v.m_array, sizeof(T) * v.m_size);
153 }
154 return *this;
155 }
// Block-based, back-growable container ("deque" in name only: elements
// are appended/removed at the back, never the front).  Storage is a
// table of fixed-size blocks of 2^S elements each; existing blocks
// never move, so pointers and references into the container stay valid
// while it grows.  Elements are copied with memcpy, so T must be POD.
template<class T, unsigned S = 6> class pod_deque
{
public:
    enum block_scale_e {
        block_shift = S,              // log2(elements per block)
        block_size = 1 << block_shift,
        block_mask = block_size - 1   // mask for the index within a block
    };
    typedef T value_type;
    ~pod_deque();
    pod_deque();
    // block_ptr_inc: growth step (in entries) for the block-pointer
    // table.
    pod_deque(unsigned block_ptr_inc);
    pod_deque(const pod_deque<T, S>& v);
    pod_deque<T, S>& operator = (const pod_deque<T, S>& v);
    // Logical clear; all data blocks are kept for reuse.
    void remove_all()
    {
        m_size = 0;
    }
    // Releases every data block (the pointer table is kept).
    void free_all()
    {
        free_tail(0);
    }
    void free_tail(unsigned size);
    void add(const T& val);
    void modify_last(const T& val);
    void remove_last();
    // Reserves num_elements contiguous slots within a single block and
    // returns the index of the first one, or -1 when num_elements can
    // never fit in one block (>= block_size).
    int allocate_continuous_block(unsigned num_elements);
    // Appends num_elem elements from a plain array.
    void add_array(const T* ptr, unsigned num_elem)
    {
        while(num_elem--) {
            add(*ptr++);
        }
    }
    // Appends from any accessor exposing size(), operator* and ++.
    template<class DataAccessor> void add_data(DataAccessor& data)
    {
        while(data.size()) {
            add(*data);
            ++data;
        }
    }
    // Truncates to `size` elements; never grows, keeps blocks.
    void cut_at(unsigned size)
    {
        if(size < m_size) {
            m_size = size;
        }
    }
    unsigned size() const
    {
        return m_size;
    }
    // All element accessors below are unchecked; i must be < size().
    const T& operator [] (unsigned i) const
    {
        return m_blocks[i >> block_shift][i & block_mask];
    }
    T& operator [] (unsigned i)
    {
        return m_blocks[i >> block_shift][i & block_mask];
    }
    // at()/value_at() are unchecked too, unlike std::vector::at().
    const T& at(unsigned i) const
    {
        return m_blocks[i >> block_shift][i & block_mask];
    }
    T& at(unsigned i)
    {
        return m_blocks[i >> block_shift][i & block_mask];
    }
    T value_at(unsigned i) const
    {
        return m_blocks[i >> block_shift][i & block_mask];
    }
    const T& curr(unsigned idx) const
    {
        return (*this)[idx];
    }
    T& curr(unsigned idx)
    {
        return (*this)[idx];
    }
    // prev()/next() wrap around modulo size(); the container must be
    // non-empty, otherwise "% m_size" divides by zero.
    const T& prev(unsigned idx) const
    {
        return (*this)[(idx + m_size - 1) % m_size];
    }
    T& prev(unsigned idx)
    {
        return (*this)[(idx + m_size - 1) % m_size];
    }
    const T& next(unsigned idx) const
    {
        return (*this)[(idx + 1) % m_size];
    }
    T& next(unsigned idx)
    {
        return (*this)[(idx + 1) % m_size];
    }
    // last() requires a non-empty container.
    const T& last() const
    {
        return (*this)[m_size - 1];
    }
    T& last()
    {
        return (*this)[m_size - 1];
    }
    unsigned byte_size() const;
    // Raw pointer to data block nb (unchecked).
    const T* block(unsigned nb) const
    {
        return m_blocks[nb];
    }
public:
    // NOTE(review): the members below are public in this agg copy for
    // historical reasons but are implementation details; treat them as
    // private.
    void allocate_block(unsigned nb);
    T* data_ptr();
    unsigned m_size;           // Elements in use.
    unsigned m_num_blocks;     // Data blocks allocated.
    unsigned m_max_blocks;     // Capacity of the m_blocks table.
    T** m_blocks;              // Table of data-block pointers.
    unsigned m_block_ptr_inc;  // Growth step for the m_blocks table.
};
~pod_deque()272 template<class T, unsigned S> pod_deque<T, S>::~pod_deque()
273 {
274 if(m_num_blocks) {
275 T** blk = m_blocks + m_num_blocks - 1;
276 while(m_num_blocks--) {
277 FX_Free(*blk);
278 --blk;
279 }
280 FX_Free(m_blocks);
281 }
282 }
283 template<class T, unsigned S>
free_tail(unsigned size)284 void pod_deque<T, S>::free_tail(unsigned size)
285 {
286 if(size < m_size) {
287 unsigned nb = (size + block_mask) >> block_shift;
288 while(m_num_blocks > nb) {
289 FX_Free(m_blocks[--m_num_blocks]);
290 }
291 m_size = size;
292 }
293 }
// Default construction: empty, no blocks allocated; the block-pointer
// table will grow in steps of block_size entries.
template<class T, unsigned S> pod_deque<T, S>::pod_deque() :
    m_size(0),
    m_num_blocks(0),
    m_max_blocks(0),
    m_blocks(0),
    m_block_ptr_inc(block_size)
{
}
// Same as the default constructor, but with a caller-chosen growth
// step for the block-pointer table.
template<class T, unsigned S>
pod_deque<T, S>::pod_deque(unsigned block_ptr_inc) :
    m_size(0),
    m_num_blocks(0),
    m_max_blocks(0),
    m_blocks(0),
    m_block_ptr_inc(block_ptr_inc)
{
}
311 template<class T, unsigned S>
pod_deque(const pod_deque<T,S> & v)312 pod_deque<T, S>::pod_deque(const pod_deque<T, S>& v) :
313 m_size(v.m_size),
314 m_num_blocks(v.m_num_blocks),
315 m_max_blocks(v.m_max_blocks),
316 m_blocks(v.m_max_blocks ? FX_Alloc(T*, v.m_max_blocks) : 0),
317 m_block_ptr_inc(v.m_block_ptr_inc)
318 {
319 unsigned i;
320 for(i = 0; i < v.m_num_blocks; ++i) {
321 m_blocks[i] = FX_Alloc(T, block_size);
322 memcpy(m_blocks[i], v.m_blocks[i], block_size * sizeof(T));
323 }
324 }
325 template<class T, unsigned S>
326 pod_deque<T, S>& pod_deque<T, S>::operator = (const pod_deque<T, S>& v)
327 {
328 unsigned i;
329 for(i = m_num_blocks; i < v.m_num_blocks; ++i) {
330 allocate_block(i);
331 }
332 for(i = 0; i < v.m_num_blocks; ++i) {
333 memcpy(m_blocks[i], v.m_blocks[i], block_size * sizeof(T));
334 }
335 m_size = v.m_size;
336 return *this;
337 }
338 template<class T, unsigned S>
allocate_block(unsigned nb)339 void pod_deque<T, S>::allocate_block(unsigned nb)
340 {
341 if(nb >= m_max_blocks) {
342 T** new_blocks = FX_Alloc(T*, m_max_blocks + m_block_ptr_inc);
343 if(m_blocks) {
344 memcpy(new_blocks, m_blocks, m_num_blocks * sizeof(T*));
345 FX_Free(m_blocks);
346 }
347 m_blocks = new_blocks;
348 m_max_blocks += m_block_ptr_inc;
349 }
350 m_blocks[nb] = FX_Alloc(T, block_size);
351 m_num_blocks++;
352 }
353 template<class T, unsigned S>
data_ptr()354 inline T* pod_deque<T, S>::data_ptr()
355 {
356 unsigned nb = m_size >> block_shift;
357 if(nb >= m_num_blocks) {
358 allocate_block(nb);
359 }
360 return m_blocks[nb] + (m_size & block_mask);
361 }
362 template<class T, unsigned S>
add(const T & val)363 inline void pod_deque<T, S>::add(const T& val)
364 {
365 *data_ptr() = val;
366 ++m_size;
367 }
368 template<class T, unsigned S>
remove_last()369 inline void pod_deque<T, S>::remove_last()
370 {
371 if(m_size) {
372 --m_size;
373 }
374 }
375 template<class T, unsigned S>
modify_last(const T & val)376 void pod_deque<T, S>::modify_last(const T& val)
377 {
378 remove_last();
379 add(val);
380 }
381 template<class T, unsigned S>
allocate_continuous_block(unsigned num_elements)382 int pod_deque<T, S>::allocate_continuous_block(unsigned num_elements)
383 {
384 if(num_elements < block_size) {
385 data_ptr();
386 unsigned rest = block_size - (m_size & block_mask);
387 unsigned index;
388 if(num_elements <= rest) {
389 index = m_size;
390 m_size += num_elements;
391 return index;
392 }
393 m_size += rest;
394 data_ptr();
395 index = m_size;
396 m_size += num_elements;
397 return index;
398 }
399 return -1;
400 }
// Bytes occupied by the elements in use (block slack not counted).
template<class T, unsigned S>
unsigned pod_deque<T, S>::byte_size() const
{
    return m_size * sizeof(T);
}
// Arena-style byte allocator: hands out chunks carved from blocks of
// (at least) m_block_size bytes obtained via FX_Alloc.  Individual
// allocations cannot be freed; remove_all() releases everything.
class pod_allocator
{
public:
    // Releases all blocks and resets the allocator to its initial
    // empty state.
    void remove_all()
    {
        if(m_num_blocks) {
            int8u** blk = m_blocks + m_num_blocks - 1;
            while(m_num_blocks--) {
                FX_Free(*blk);
                --blk;
            }
            FX_Free(m_blocks);
        }
        m_num_blocks = 0;
        m_max_blocks = 0;
        m_blocks = 0;
        m_buf_ptr = 0;
        m_rest = 0;
    }
    ~pod_allocator()
    {
        remove_all();
    }
    // block_size: preferred size of each arena block, in bytes.
    // block_ptr_inc: growth step for the table of block pointers.
    pod_allocator(unsigned block_size, unsigned block_ptr_inc = 256 - 8) :
        m_block_size(block_size),
        m_block_ptr_inc(block_ptr_inc),
        m_num_blocks(0),
        m_max_blocks(0),
        m_blocks(0),
        m_buf_ptr(0),
        m_rest(0)
    {
    }
    // Returns `size` uninitialized bytes whose address is a multiple
    // of `alignment` (alignment == 1 means no alignment requirement),
    // or null when size == 0.  The memory lives until remove_all() or
    // destruction.
    int8u* allocate(unsigned size, unsigned alignment = 1)
    {
        if(size == 0) {
            return 0;
        }
        if(size <= m_rest) {
            int8u* ptr = m_buf_ptr;
            if(alignment > 1) {
                // Padding needed so ptr becomes alignment-aligned.
                unsigned align = (alignment - unsigned((size_t)ptr) % alignment) % alignment;
                size += align;  // Charge the padding to this request.
                ptr += align;
                if(size <= m_rest) {
                    m_rest -= size;
                    m_buf_ptr += size;
                    return ptr;
                }
                // Padded request no longer fits: open a new block and
                // retry with the unpadded size.  NOTE(review): this
                // relies on a fresh FX_Alloc block being suitably
                // aligned so the retry's padding is zero — confirm
                // against fx_memory's allocator guarantees.
                allocate_block(size);
                return allocate(size - align, alignment);
            }
            m_rest -= size;
            m_buf_ptr += size;
            return ptr;
        }
        // Current block too small: open one sized for the worst-case
        // padding, then retry (recursion terminates on the new block).
        allocate_block(size + alignment - 1);
        return allocate(size, alignment);
    }
private:
    // Makes a fresh block of max(size, m_block_size) bytes the current
    // one.  Any unused tail of the previous block is abandoned (it is
    // still freed by remove_all()).
    void allocate_block(unsigned size)
    {
        if(size < m_block_size) {
            size = m_block_size;
        }
        if(m_num_blocks >= m_max_blocks) {
            // Grow the block-pointer table by m_block_ptr_inc entries.
            int8u** new_blocks = FX_Alloc(int8u*, m_max_blocks + m_block_ptr_inc);
            if(m_blocks) {
                memcpy(new_blocks, m_blocks, m_num_blocks * sizeof(int8u*));
                FX_Free(m_blocks);
            }
            m_blocks = new_blocks;
            m_max_blocks += m_block_ptr_inc;
        }
        m_blocks[m_num_blocks] = m_buf_ptr = FX_Alloc(int8u, size);
        m_num_blocks++;
        m_rest = size;
    }
    unsigned m_block_size;     // Preferred bytes per arena block.
    unsigned m_block_ptr_inc;  // Growth step for the pointer table.
    unsigned m_num_blocks;     // Blocks allocated so far.
    unsigned m_max_blocks;     // Capacity of the m_blocks table.
    int8u** m_blocks;          // Table of FX_Alloc'd blocks.
    int8u* m_buf_ptr;          // Next free byte in the current block.
    unsigned m_rest;           // Bytes remaining in the current block.
};
// Partition-size cutoff shared by agg's sorting routines defined
// elsewhere; this header only declares the constant (the routines
// presumably switch to a simpler sort below it — see their code).
enum quick_sort_threshold_e {
    quick_sort_threshold = 9
};
// Exchanges a and b through a temporary copy (POD-oriented; does not
// participate in ADL swap).
template<class T> inline void swap_elements(T& a, T& b)
{
    T tmp(a);
    a = b;
    b = tmp;
}
}
#endif