//----------------------------------------------------------------------------
// Anti-Grain Geometry - Version 2.3
// Copyright (C) 2002-2005 Maxim Shemanarev (http://www.antigrain.com)
//
// Permission to copy, use, modify, sell and distribute this software
// is granted provided this copyright notice appears in all copies.
// This software is provided "as is" without express or implied
// warranty, and with no claim as to its suitability for any purpose.
//
//----------------------------------------------------------------------------
// Contact: mcseem@antigrain.com
//          mcseemagg@yahoo.com
//          http://www.antigrain.com
//----------------------------------------------------------------------------
#ifndef AGG_ARRAY_INCLUDED
#define AGG_ARRAY_INCLUDED
#include "agg_basics.h"
namespace agg
{
// A simple contiguous array of POD elements. Memory is managed with
// FX_Alloc/FX_Free and elements are copied with raw memcpy, so T must be a
// plain-old-data type.
template<class T> class pod_array
{
public:
    typedef T value_type;
    ~pod_array()
    {
        FX_Free(m_array);
    }
    pod_array() : m_size(0), m_capacity(0), m_array(0) {}
    pod_array(unsigned cap, unsigned extra_tail = 0);
    pod_array(const pod_array<T>&);
    const pod_array<T>& operator = (const pod_array<T>&);
    void capacity(unsigned cap, unsigned extra_tail = 0);
    unsigned capacity() const
    {
        return m_capacity;
    }
    void allocate(unsigned size, unsigned extra_tail = 0);
    void resize(unsigned new_size);
    void zero()
    {
        FXSYS_memset(m_array, 0, sizeof(T) * m_size);
    }
    void add(const T& v)
    {
        m_array[m_size++] = v;
    }
    void inc_size(unsigned size)
    {
        m_size += size;
    }
    unsigned size() const
    {
        return m_size;
    }
    unsigned byte_size() const
    {
        return m_size * sizeof(T);
    }
    const T& operator [] (unsigned i) const
    {
        return m_array[i];
    }
    T& operator [] (unsigned i)
    {
        return m_array[i];
    }
    const T& at(unsigned i) const
    {
        return m_array[i];
    }
    T& at(unsigned i)
    {
        return m_array[i];
    }
    T value_at(unsigned i) const
    {
        return m_array[i];
    }
    const T* data() const
    {
        return m_array;
    }
    T* data()
    {
        return m_array;
    }
    void remove_all()
    {
        m_size = 0;
    }
    void cut_at(unsigned num)
    {
        if(num < m_size) {
            m_size = num;
        }
    }
private:
    unsigned m_size;
    unsigned m_capacity;
    T* m_array;
};
template<class T>
void pod_array<T>::capacity(unsigned cap, unsigned extra_tail)
{
    m_size = 0;
    unsigned full_cap = cap + extra_tail;
    if(full_cap < cap) {
        // cap + extra_tail overflowed; release everything rather than under-allocate.
        FX_Free(m_array);
        m_array = 0;
        m_capacity = 0;
    } else if(full_cap > m_capacity) {
        FX_Free(m_array);
        m_array = FX_Alloc(T, full_cap);
        m_capacity = full_cap;
    }
}
template<class T>
void pod_array<T>::allocate(unsigned size, unsigned extra_tail)
{
    capacity(size, extra_tail);
    m_size = size;
}
template<class T>
void pod_array<T>::resize(unsigned new_size)
{
    if(new_size > m_size) {
        if(new_size > m_capacity) {
            T* data = FX_Alloc(T, new_size);
            FXSYS_memcpy(data, m_array, m_size * sizeof(T));
            FX_Free(m_array);
            m_array = data;
            m_capacity = new_size;  // keep capacity() in sync with the reallocated buffer
        }
    } else {
        m_size = new_size;
    }
}
template<class T> pod_array<T>::pod_array(unsigned cap, unsigned extra_tail) :
    m_size(0), m_capacity(cap + extra_tail), m_array(FX_Alloc(T, m_capacity)) {}
template<class T> pod_array<T>::pod_array(const pod_array<T>& v) :
    m_size(v.m_size),
    m_capacity(v.m_capacity),
    m_array(v.m_capacity ? FX_Alloc(T, v.m_capacity) : 0)
{
    FXSYS_memcpy(m_array, v.m_array, sizeof(T) * v.m_size);
}
template<class T> const pod_array<T>&
pod_array<T>::operator = (const pod_array<T>& v)
{
    allocate(v.m_size);
    if(v.m_size) {
        FXSYS_memcpy(m_array, v.m_array, sizeof(T) * v.m_size);
    }
    return *this;
}
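// Usage sketch (illustrative only, not part of the original AGG API): fills a
// pod_array and reads it back. It assumes the FX_Alloc/FX_Free/FXSYS_* helpers
// pulled in via agg_basics.h are available, as the container itself does.
inline unsigned pod_array_usage_sketch()
{
    pod_array<int> arr(8);      // one contiguous buffer with room for 8 ints
    arr.add(10);
    arr.add(20);
    arr.add(30);
    arr.zero();                 // memsets the 3 stored elements back to 0
    return arr.size();          // 3; the destructor releases the buffer
}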
// A vector-like container that keeps POD elements in fixed-size blocks of
// 2^S entries. Growing allocates new blocks but never moves elements already
// stored, so references into the container stay valid while it grows.
template<class T, unsigned S = 6> class pod_deque
{
public:
    enum block_scale_e {
        block_shift = S,
        block_size = 1 << block_shift,
        block_mask = block_size - 1
    };
    typedef T value_type;
    ~pod_deque();
    pod_deque();
    pod_deque(unsigned block_ptr_inc);
    pod_deque(const pod_deque<T, S>& v);
    const pod_deque<T, S>& operator = (const pod_deque<T, S>& v);
    void remove_all()
    {
        m_size = 0;
    }
    void free_all()
    {
        free_tail(0);
    }
    void free_tail(unsigned size);
    void add(const T& val);
    void modify_last(const T& val);
    void remove_last();
    int allocate_continuous_block(unsigned num_elements);
    void add_array(const T* ptr, unsigned num_elem)
    {
        while(num_elem--) {
            add(*ptr++);
        }
    }
    template<class DataAccessor> void add_data(DataAccessor& data)
    {
        while(data.size()) {
            add(*data);
            ++data;
        }
    }
    void cut_at(unsigned size)
    {
        if(size < m_size) {
            m_size = size;
        }
    }
    unsigned size() const
    {
        return m_size;
    }
    const T& operator [] (unsigned i) const
    {
        return m_blocks[i >> block_shift][i & block_mask];
    }
    T& operator [] (unsigned i)
    {
        return m_blocks[i >> block_shift][i & block_mask];
    }
    const T& at(unsigned i) const
    {
        return m_blocks[i >> block_shift][i & block_mask];
    }
    T& at(unsigned i)
    {
        return m_blocks[i >> block_shift][i & block_mask];
    }
    T value_at(unsigned i) const
    {
        return m_blocks[i >> block_shift][i & block_mask];
    }
    const T& curr(unsigned idx) const
    {
        return (*this)[idx];
    }
    T& curr(unsigned idx)
    {
        return (*this)[idx];
    }
    const T& prev(unsigned idx) const
    {
        return (*this)[(idx + m_size - 1) % m_size];
    }
    T& prev(unsigned idx)
    {
        return (*this)[(idx + m_size - 1) % m_size];
    }
    const T& next(unsigned idx) const
    {
        return (*this)[(idx + 1) % m_size];
    }
    T& next(unsigned idx)
    {
        return (*this)[(idx + 1) % m_size];
    }
    const T& last() const
    {
        return (*this)[m_size - 1];
    }
    T& last()
    {
        return (*this)[m_size - 1];
    }
    unsigned byte_size() const;
    const T* block(unsigned nb) const
    {
        return m_blocks[nb];
    }
public:
    void allocate_block(unsigned nb);
    T* data_ptr();
    unsigned m_size;
    unsigned m_num_blocks;
    unsigned m_max_blocks;
    T** m_blocks;
    unsigned m_block_ptr_inc;
};
template<class T, unsigned S> pod_deque<T, S>::~pod_deque()
{
    if(m_num_blocks) {
        T** blk = m_blocks + m_num_blocks - 1;
        while(m_num_blocks--) {
            FX_Free(*blk);
            --blk;
        }
        FX_Free(m_blocks);
    }
}
template<class T, unsigned S>
void pod_deque<T, S>::free_tail(unsigned size)
{
    if(size < m_size) {
        unsigned nb = (size + block_mask) >> block_shift;
        while(m_num_blocks > nb) {
            FX_Free(m_blocks[--m_num_blocks]);
        }
        m_size = size;
    }
}
template<class T, unsigned S> pod_deque<T, S>::pod_deque() :
    m_size(0),
    m_num_blocks(0),
    m_max_blocks(0),
    m_blocks(0),
    m_block_ptr_inc(block_size)
{
}
template<class T, unsigned S>
pod_deque<T, S>::pod_deque(unsigned block_ptr_inc) :
    m_size(0),
    m_num_blocks(0),
    m_max_blocks(0),
    m_blocks(0),
    m_block_ptr_inc(block_ptr_inc)
{
}
template<class T, unsigned S>
pod_deque<T, S>::pod_deque(const pod_deque<T, S>& v) :
    m_size(v.m_size),
    m_num_blocks(v.m_num_blocks),
    m_max_blocks(v.m_max_blocks),
    m_blocks(v.m_max_blocks ? FX_Alloc(T*, v.m_max_blocks) : 0),
    m_block_ptr_inc(v.m_block_ptr_inc)
{
    unsigned i;
    for(i = 0; i < v.m_num_blocks; ++i) {
        m_blocks[i] = FX_Alloc(T, block_size);
        FXSYS_memcpy(m_blocks[i], v.m_blocks[i], block_size * sizeof(T));
    }
}
template<class T, unsigned S>
const pod_deque<T, S>& pod_deque<T, S>::operator = (const pod_deque<T, S>& v)
{
    unsigned i;
    for(i = m_num_blocks; i < v.m_num_blocks; ++i) {
        allocate_block(i);
    }
    for(i = 0; i < v.m_num_blocks; ++i) {
        FXSYS_memcpy(m_blocks[i], v.m_blocks[i], block_size * sizeof(T));
    }
    m_size = v.m_size;
    return *this;
}
template<class T, unsigned S>
void pod_deque<T, S>::allocate_block(unsigned nb)
{
    if(nb >= m_max_blocks) {
        // Grow the block pointer table; the element blocks themselves never move.
        T** new_blocks = FX_Alloc(T*, m_max_blocks + m_block_ptr_inc);
        if(m_blocks) {
            FXSYS_memcpy(new_blocks, m_blocks, m_num_blocks * sizeof(T*));
            FX_Free(m_blocks);
        }
        m_blocks = new_blocks;
        m_max_blocks += m_block_ptr_inc;
    }
    m_blocks[nb] = FX_Alloc(T, block_size);
    m_num_blocks++;
}
template<class T, unsigned S>
inline T* pod_deque<T, S>::data_ptr()
{
    // Pointer to the first unused slot, allocating a fresh block when needed.
    unsigned nb = m_size >> block_shift;
    if(nb >= m_num_blocks) {
        allocate_block(nb);
    }
    return m_blocks[nb] + (m_size & block_mask);
}
template<class T, unsigned S>
inline void pod_deque<T, S>::add(const T& val)
{
    *data_ptr() = val;
    ++m_size;
}
template<class T, unsigned S>
inline void pod_deque<T, S>::remove_last()
{
    if(m_size) {
        --m_size;
    }
}
template<class T, unsigned S>
void pod_deque<T, S>::modify_last(const T& val)
{
    remove_last();
    add(val);
}
template<class T, unsigned S>
int pod_deque<T, S>::allocate_continuous_block(unsigned num_elements)
{
    // Reserve num_elements contiguous slots within a single block and return
    // the index of the first slot, or -1 if the request cannot fit in one block.
    if(num_elements < block_size) {
        data_ptr();
        unsigned rest = block_size - (m_size & block_mask);
        unsigned index;
        if(num_elements <= rest) {
            index = m_size;
            m_size += num_elements;
            return index;
        }
        // Not enough room at the tail of the current block; skip to the next one.
        m_size += rest;
        data_ptr();
        index = m_size;
        m_size += num_elements;
        return index;
    }
    return -1;
}
template<class T, unsigned S>
unsigned pod_deque<T, S>::byte_size() const
{
    return m_size * sizeof(T);
}
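// Usage sketch (illustrative only, not part of the original AGG API): element i
// of a pod_deque lives at m_blocks[i >> block_shift][i & block_mask], so with
// the default S = 6 each block holds 64 elements and index 70 maps to block 1,
// offset 6.
inline int pod_deque_usage_sketch()
{
    pod_deque<int> d;                            // default S = 6: 64-element blocks
    for(int i = 0; i < 100; i++) {
        d.add(i);                                // spills from block 0 into block 1
    }
    int run = d.allocate_continuous_block(10);   // 10 adjacent slots in one block, or -1
    if(run >= 0) {
        for(int i = 0; i < 10; i++) {
            d[run + i] = -1;
        }
    }
    return d[70];                                // block 1, offset 6: value 70
}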
// A simple bump allocator for raw POD memory: allocate() hands out optionally
// aligned chunks carved from larger blocks. Individual chunks cannot be freed;
// the whole pool is released by remove_all() or the destructor.
class pod_allocator
{
public:
    void remove_all()
    {
        if(m_num_blocks) {
            int8u** blk = m_blocks + m_num_blocks - 1;
            while(m_num_blocks--) {
                FX_Free(*blk);
                --blk;
            }
            FX_Free(m_blocks);
        }
        m_num_blocks = 0;
        m_max_blocks = 0;
        m_blocks = 0;
        m_buf_ptr = 0;
        m_rest = 0;
    }
    ~pod_allocator()
    {
        remove_all();
    }
    pod_allocator(unsigned block_size, unsigned block_ptr_inc = 256 - 8) :
        m_block_size(block_size),
        m_block_ptr_inc(block_ptr_inc),
        m_num_blocks(0),
        m_max_blocks(0),
        m_blocks(0),
        m_buf_ptr(0),
        m_rest(0)
    {
    }
    int8u* allocate(unsigned size, unsigned alignment = 1)
    {
        if(size == 0) {
            return 0;
        }
        if(size <= m_rest) {
            int8u* ptr = m_buf_ptr;
            if(alignment > 1) {
                // Round the bump pointer up to the requested alignment.
                unsigned align = (alignment - unsigned((size_t)ptr) % alignment) % alignment;
                size += align;
                ptr += align;
                if(size <= m_rest) {
                    m_rest -= size;
                    m_buf_ptr += size;
                    return ptr;
                }
                allocate_block(size);
                return allocate(size - align, alignment);
            }
            m_rest -= size;
            m_buf_ptr += size;
            return ptr;
        }
        allocate_block(size + alignment - 1);
        return allocate(size, alignment);
    }
private:
    void allocate_block(unsigned size)
    {
        if(size < m_block_size) {
            size = m_block_size;
        }
        if(m_num_blocks >= m_max_blocks) {
            int8u** new_blocks = FX_Alloc(int8u*, m_max_blocks + m_block_ptr_inc);
            if(m_blocks) {
                FXSYS_memcpy(new_blocks, m_blocks, m_num_blocks * sizeof(int8u*));
                FX_Free(m_blocks);
            }
            m_blocks = new_blocks;
            m_max_blocks += m_block_ptr_inc;
        }
        m_blocks[m_num_blocks] = m_buf_ptr = FX_Alloc(int8u, size);
        m_num_blocks++;
        m_rest = size;
    }
    unsigned m_block_size;
    unsigned m_block_ptr_inc;
    unsigned m_num_blocks;
    unsigned m_max_blocks;
    int8u** m_blocks;
    int8u* m_buf_ptr;
    unsigned m_rest;
};
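// Usage sketch (illustrative only, not part of the original AGG API): memory
// handed out by pod_allocator stays valid until remove_all() or destruction.
// Assumes FXSYS_memset from the fxcrt layer is available, as used above.
inline unsigned pod_allocator_usage_sketch()
{
    pod_allocator alloc(4096);              // carve allocations out of 4 KiB blocks
    int8u* p = alloc.allocate(100, 8);      // 100 bytes on an 8-byte boundary
    FXSYS_memset(p, 0, 100);
    return unsigned((size_t)p % 8);         // 0: the pointer honours the alignment
}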
enum quick_sort_threshold_e {
    quick_sort_threshold = 9
};
template<class T> inline void swap_elements(T& a, T& b)
{
    T temp = a;
    a = b;
    b = temp;
}
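// Usage sketch (illustrative only): swap_elements is the exchange primitive
// that upstream AGG's sorting helpers build on; quick_sort_threshold is the
// range length below which upstream AGG's quick_sort falls back to a simple
// insertion sort. Neither sorting routine is present in this trimmed header.
inline void swap_elements_sketch()
{
    int a = 1;
    int b = 2;
    swap_elements(a, b);    // a == 2, b == 1 afterwards
}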
}
#endif