/*
 * Mesa 3-D graphics library
 *
 * Copyright 2007-2008 VMware, Inc.
 * Copyright (C) 2010 LunarG Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */
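
/*
 * The vsplit ("vertex split") front end splits incoming primitives into
 * segments that fit within the middle end's vertex limit.  Within a
 * segment, a small hash cache deduplicates vertex fetches so each unique
 * element is fetched once and referenced through ushort draw elements.
 */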

#include "util/u_math.h"
#include "util/u_memory.h"

#include "draw/draw_context.h"
#include "draw/draw_private.h"
#include "draw/draw_pt.h"

#define SEGMENT_SIZE 1024
#define MAP_SIZE 256

/* The largest possible index within an index buffer */
#define MAX_ELT_IDX 0xffffffff

struct vsplit_frontend {
   struct draw_pt_front_end base;
   struct draw_context *draw;

   unsigned prim;

   struct draw_pt_middle_end *middle;

   unsigned max_vertices;
   ushort segment_size;

   /* buffers for splitting */
   unsigned fetch_elts[SEGMENT_SIZE];
   ushort draw_elts[SEGMENT_SIZE];
   ushort identity_draw_elts[SEGMENT_SIZE];

   struct {
      /* map a fetch element to a draw element */
      unsigned fetches[MAP_SIZE];
      ushort draws[MAP_SIZE];
      boolean has_max_fetch;

      ushort num_fetch_elts;
      ushort num_draw_elts;
   } cache;
};


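/**
 * Reset the fetch/draw element cache to its empty state.
 */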
static void
vsplit_clear_cache(struct vsplit_frontend *vsplit)
{
   memset(vsplit->cache.fetches, 0xff, sizeof(vsplit->cache.fetches));
   vsplit->cache.has_max_fetch = FALSE;
   vsplit->cache.num_fetch_elts = 0;
   vsplit->cache.num_draw_elts = 0;
}

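/**
 * Feed the accumulated fetch and draw elements to the middle end.
 */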
static void
vsplit_flush_cache(struct vsplit_frontend *vsplit, unsigned flags)
{
   vsplit->middle->run(vsplit->middle,
                       vsplit->fetch_elts, vsplit->cache.num_fetch_elts,
                       vsplit->draw_elts, vsplit->cache.num_draw_elts, flags);
}

/**
 * Add a fetch element and append the corresponding draw element.
 */
static inline void
vsplit_add_cache(struct vsplit_frontend *vsplit, unsigned fetch)
{
   unsigned hash;

   hash = fetch % MAP_SIZE;

   /* Add a new fetch element if the value isn't in the cache, or if the
    * caller forced a miss because of an overflow due to the element bias.
    */
   if (vsplit->cache.fetches[hash] != fetch) {
      /* update cache */
      vsplit->cache.fetches[hash] = fetch;
      vsplit->cache.draws[hash] = vsplit->cache.num_fetch_elts;

      /* add fetch */
      assert(vsplit->cache.num_fetch_elts < vsplit->segment_size);
      vsplit->fetch_elts[vsplit->cache.num_fetch_elts++] = fetch;
   }

   vsplit->draw_elts[vsplit->cache.num_draw_elts++] = vsplit->cache.draws[hash];
}

/**
 * Return the base index into the elements array.  The addition is checked
 * for integer overflow (though it is not clear that overflow can actually
 * happen here).
 */
static inline unsigned
vsplit_get_base_idx(unsigned start, unsigned fetch)
{
   return draw_overflow_uadd(start, fetch, MAX_ELT_IDX);
}

/*
 * The final element index is just the element index plus the element bias.
 * This declares and initializes a local "elt_idx" in the enclosing function.
 */
#define VSPLIT_CREATE_IDX(elts, start, fetch, elt_bias)   \
   unsigned elt_idx;                                      \
   elt_idx = vsplit_get_base_idx(start, fetch);           \
   elt_idx = (unsigned)((int)(DRAW_GET_IDX(elts, elt_idx)) + (int)elt_bias);


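/**
 * Add a fetch element (ubyte index) and append the corresponding draw
 * element.
 */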
static inline void
vsplit_add_cache_ubyte(struct vsplit_frontend *vsplit, const ubyte *elts,
                       unsigned start, unsigned fetch, int elt_bias)
{
   struct draw_context *draw = vsplit->draw;
   VSPLIT_CREATE_IDX(elts, start, fetch, elt_bias);
   /* unlike the uint case this can only happen with elt_bias */
   if (elt_bias && elt_idx == DRAW_MAX_FETCH_IDX && !vsplit->cache.has_max_fetch) {
      unsigned hash = fetch % MAP_SIZE;
      vsplit->cache.fetches[hash] = 0;
      vsplit->cache.has_max_fetch = TRUE;
   }
   vsplit_add_cache(vsplit, elt_idx);
}

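/**
 * Add a fetch element (ushort index) and append the corresponding draw
 * element.
 */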
static inline void
vsplit_add_cache_ushort(struct vsplit_frontend *vsplit, const ushort *elts,
                        unsigned start, unsigned fetch, int elt_bias)
{
   struct draw_context *draw = vsplit->draw;
   VSPLIT_CREATE_IDX(elts, start, fetch, elt_bias);
   /* unlike the uint case this can only happen with elt_bias */
   if (elt_bias && elt_idx == DRAW_MAX_FETCH_IDX && !vsplit->cache.has_max_fetch) {
      unsigned hash = fetch % MAP_SIZE;
      vsplit->cache.fetches[hash] = 0;
      vsplit->cache.has_max_fetch = TRUE;
   }
   vsplit_add_cache(vsplit, elt_idx);
}


/**
 * Add a fetch element and append the corresponding draw element.  The fetch
 * element is in full range (uint).
 */
static inline void
vsplit_add_cache_uint(struct vsplit_frontend *vsplit, const uint *elts,
                      unsigned start, unsigned fetch, int elt_bias)
{
   struct draw_context *draw = vsplit->draw;
   VSPLIT_CREATE_IDX(elts, start, fetch, elt_bias);
   /* Take care of DRAW_MAX_FETCH_IDX (since the cache is initialized to -1). */
   if (elt_idx == DRAW_MAX_FETCH_IDX && !vsplit->cache.has_max_fetch) {
      unsigned hash = fetch % MAP_SIZE;
      /* force an update - any value will do except DRAW_MAX_FETCH_IDX */
      vsplit->cache.fetches[hash] = 0;
      vsplit->cache.has_max_fetch = TRUE;
   }
   vsplit_add_cache(vsplit, elt_idx);
}


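/*
 * Instantiate the run callbacks from the template.  FUNC names the
 * generated function, ELT_TYPE the index type, and ADD_CACHE the helper
 * used to translate a single index; the linear variant uses no index
 * buffer and therefore defines only FUNC.
 */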
#define FUNC vsplit_run_linear
#include "draw_pt_vsplit_tmp.h"

#define FUNC vsplit_run_ubyte
#define ELT_TYPE ubyte
#define ADD_CACHE(vsplit, ib, start, fetch, bias) vsplit_add_cache_ubyte(vsplit, ib, start, fetch, bias)
#include "draw_pt_vsplit_tmp.h"

#define FUNC vsplit_run_ushort
#define ELT_TYPE ushort
#define ADD_CACHE(vsplit, ib, start, fetch, bias) vsplit_add_cache_ushort(vsplit, ib, start, fetch, bias)
#include "draw_pt_vsplit_tmp.h"

#define FUNC vsplit_run_uint
#define ELT_TYPE uint
#define ADD_CACHE(vsplit, ib, start, fetch, bias) vsplit_add_cache_uint(vsplit, ib, start, fetch, bias)
#include "draw_pt_vsplit_tmp.h"


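/**
 * Select the run callback for the index size in use and prepare the
 * middle end.  The segment size is clamped to the number of vertices
 * the middle end can handle per run.
 */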
static void vsplit_prepare(struct draw_pt_front_end *frontend,
                           unsigned in_prim,
                           struct draw_pt_middle_end *middle,
                           unsigned opt)
{
   struct vsplit_frontend *vsplit = (struct vsplit_frontend *) frontend;

   switch (vsplit->draw->pt.user.eltSize) {
   case 0:
      vsplit->base.run = vsplit_run_linear;
      break;
   case 1:
      vsplit->base.run = vsplit_run_ubyte;
      break;
   case 2:
      vsplit->base.run = vsplit_run_ushort;
      break;
   case 4:
      vsplit->base.run = vsplit_run_uint;
      break;
   default:
      assert(0);
      break;
   }

   /* split only */
   vsplit->prim = in_prim;

   vsplit->middle = middle;
   middle->prepare(middle, vsplit->prim, opt, &vsplit->max_vertices);

   vsplit->segment_size = MIN2(SEGMENT_SIZE, vsplit->max_vertices);
}


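/**
 * Flush the front end.  On a state change, finish and detach the
 * middle end.
 */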
static void vsplit_flush(struct draw_pt_front_end *frontend, unsigned flags)
{
   struct vsplit_frontend *vsplit = (struct vsplit_frontend *) frontend;

   if (flags & DRAW_FLUSH_STATE_CHANGE) {
      vsplit->middle->finish(vsplit->middle);
      vsplit->middle = NULL;
   }
}


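/**
 * Destroy the front end.
 */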
static void vsplit_destroy(struct draw_pt_front_end *frontend)
{
   FREE(frontend);
}


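/**
 * Create the vsplit front end and initialize the identity draw elements.
 */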
struct draw_pt_front_end *draw_pt_vsplit(struct draw_context *draw)
{
   struct vsplit_frontend *vsplit = CALLOC_STRUCT(vsplit_frontend);
   ushort i;

   if (!vsplit)
      return NULL;

   vsplit->base.prepare = vsplit_prepare;
   vsplit->base.run = NULL;
   vsplit->base.flush = vsplit_flush;
   vsplit->base.destroy = vsplit_destroy;
   vsplit->draw = draw;

   for (i = 0; i < SEGMENT_SIZE; i++)
      vsplit->identity_draw_elts[i] = i;

   return &vsplit->base;
}