/*
 * Mesa 3-D graphics library
 *
 * Copyright 2007-2008 VMware, Inc.
 * Copyright (C) 2010 LunarG Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include "util/u_math.h"
#include "util/u_memory.h"

#include "draw/draw_context.h"
#include "draw/draw_private.h"
#include "draw/draw_pt.h"

#define SEGMENT_SIZE 1024
#define MAP_SIZE 256

/* The largest possible index within an index buffer */
#define MAX_ELT_IDX 0xffffffff

struct vsplit_frontend {
   struct draw_pt_front_end base;
   struct draw_context *draw;

   unsigned prim;

   struct draw_pt_middle_end *middle;

   unsigned max_vertices;
   ushort segment_size;

   /* buffers for splitting */
   unsigned fetch_elts[SEGMENT_SIZE];
   ushort draw_elts[SEGMENT_SIZE];
   ushort identity_draw_elts[SEGMENT_SIZE];

   struct {
      /* map a fetch element to a draw element */
      unsigned fetches[MAP_SIZE];
      ushort draws[MAP_SIZE];
      boolean has_max_fetch;

      ushort num_fetch_elts;
      ushort num_draw_elts;
   } cache;
};


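/**
 * Reset the fetch/draw element cache so that no fetch element is
 * considered cached.
 */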
static void
vsplit_clear_cache(struct vsplit_frontend *vsplit)
{
   memset(vsplit->cache.fetches, 0xff, sizeof(vsplit->cache.fetches));
   vsplit->cache.has_max_fetch = FALSE;
   vsplit->cache.num_fetch_elts = 0;
   vsplit->cache.num_draw_elts = 0;
}

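/**
 * Hand the accumulated fetch and draw elements over to the middle end.
 */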
static void
vsplit_flush_cache(struct vsplit_frontend *vsplit, unsigned flags)
{
   vsplit->middle->run(vsplit->middle,
                       vsplit->fetch_elts, vsplit->cache.num_fetch_elts,
                       vsplit->draw_elts, vsplit->cache.num_draw_elts, flags);
}

/**
 * Add a fetch element to the cache and append the corresponding draw element.
 */
static inline void
vsplit_add_cache(struct vsplit_frontend *vsplit, unsigned fetch)
{
   unsigned hash;

   hash = fetch % MAP_SIZE;

   /* If the value isn't in the cache, or it's an overflow caused by the
    * element bias, update the cache and add a new fetch element.
    */
   if (vsplit->cache.fetches[hash] != fetch) {
      /* update cache */
      vsplit->cache.fetches[hash] = fetch;
      vsplit->cache.draws[hash] = vsplit->cache.num_fetch_elts;

      /* add fetch */
      assert(vsplit->cache.num_fetch_elts < vsplit->segment_size);
      vsplit->fetch_elts[vsplit->cache.num_fetch_elts++] = fetch;
   }

   vsplit->draw_elts[vsplit->cache.num_draw_elts++] = vsplit->cache.draws[hash];
}

/**
 * Returns the base index to the elements array.
 * The value is checked for integer overflow (not sure it can happen?).
 */
static inline unsigned
vsplit_get_base_idx(unsigned start, unsigned fetch)
{
   return draw_overflow_uadd(start, fetch, MAX_ELT_IDX);
}


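/**
 * Fetch a ubyte element, apply the element bias, and add the resulting
 * index to the cache.
 */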
static inline void
vsplit_add_cache_ubyte(struct vsplit_frontend *vsplit, const ubyte *elts,
                       unsigned start, unsigned fetch, int elt_bias)
{
   struct draw_context *draw = vsplit->draw;
   unsigned elt_idx;
   elt_idx = vsplit_get_base_idx(start, fetch);
   elt_idx = (unsigned)((int)(DRAW_GET_IDX(elts, elt_idx)) + elt_bias);
   /* unlike the uint case this can only happen with elt_bias */
   if (elt_bias && elt_idx == DRAW_MAX_FETCH_IDX && !vsplit->cache.has_max_fetch) {
      unsigned hash = elt_idx % MAP_SIZE;
      vsplit->cache.fetches[hash] = 0;
      vsplit->cache.has_max_fetch = TRUE;
   }
   vsplit_add_cache(vsplit, elt_idx);
}

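/**
 * Fetch a ushort element, apply the element bias, and add the resulting
 * index to the cache.
 */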
static inline void
vsplit_add_cache_ushort(struct vsplit_frontend *vsplit, const ushort *elts,
                        unsigned start, unsigned fetch, int elt_bias)
{
   struct draw_context *draw = vsplit->draw;
   unsigned elt_idx;
   elt_idx = vsplit_get_base_idx(start, fetch);
   elt_idx = (unsigned)((int)(DRAW_GET_IDX(elts, elt_idx)) + elt_bias);
   /* unlike the uint case this can only happen with elt_bias */
   if (elt_bias && elt_idx == DRAW_MAX_FETCH_IDX && !vsplit->cache.has_max_fetch) {
      unsigned hash = elt_idx % MAP_SIZE;
      vsplit->cache.fetches[hash] = 0;
      vsplit->cache.has_max_fetch = TRUE;
   }
   vsplit_add_cache(vsplit, elt_idx);
}


/**
 * Add a fetch element to the cache and append the corresponding draw element.
 * The fetch element is in full range (uint).
 */
static inline void
vsplit_add_cache_uint(struct vsplit_frontend *vsplit, const uint *elts,
                      unsigned start, unsigned fetch, int elt_bias)
{
   struct draw_context *draw = vsplit->draw;
   unsigned elt_idx;
   /*
    * The final element index is just the element index plus the element bias.
    */
   elt_idx = vsplit_get_base_idx(start, fetch);
   elt_idx = (unsigned)((int)(DRAW_GET_IDX(elts, elt_idx)) + elt_bias);
   /* Take care of DRAW_MAX_FETCH_IDX (since the cache is initialized to -1). */
   if (elt_idx == DRAW_MAX_FETCH_IDX && !vsplit->cache.has_max_fetch) {
      unsigned hash = elt_idx % MAP_SIZE;
      /* force an update - any value will do except DRAW_MAX_FETCH_IDX */
      vsplit->cache.fetches[hash] = 0;
      vsplit->cache.has_max_fetch = TRUE;
   }
   vsplit_add_cache(vsplit, elt_idx);
}


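/*
 * Instantiate the run() callbacks by including the vsplit template once
 * per index element type (linear, ubyte, ushort and uint).
 */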
#define FUNC vsplit_run_linear
#include "draw_pt_vsplit_tmp.h"

#define FUNC vsplit_run_ubyte
#define ELT_TYPE ubyte
#define ADD_CACHE(vsplit, ib, start, fetch, bias) vsplit_add_cache_ubyte(vsplit, ib, start, fetch, bias)
#include "draw_pt_vsplit_tmp.h"

#define FUNC vsplit_run_ushort
#define ELT_TYPE ushort
#define ADD_CACHE(vsplit, ib, start, fetch, bias) vsplit_add_cache_ushort(vsplit, ib, start, fetch, bias)
#include "draw_pt_vsplit_tmp.h"

#define FUNC vsplit_run_uint
#define ELT_TYPE uint
#define ADD_CACHE(vsplit, ib, start, fetch, bias) vsplit_add_cache_uint(vsplit, ib, start, fetch, bias)
#include "draw_pt_vsplit_tmp.h"


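/**
 * Prepare the frontend: pick the run() callback matching the index
 * element size, prepare the middle end and derive the segment size.
 */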
static void vsplit_prepare(struct draw_pt_front_end *frontend,
                           unsigned in_prim,
                           struct draw_pt_middle_end *middle,
                           unsigned opt)
{
   struct vsplit_frontend *vsplit = (struct vsplit_frontend *) frontend;

   switch (vsplit->draw->pt.user.eltSize) {
   case 0:
      vsplit->base.run = vsplit_run_linear;
      break;
   case 1:
      vsplit->base.run = vsplit_run_ubyte;
      break;
   case 2:
      vsplit->base.run = vsplit_run_ushort;
      break;
   case 4:
      vsplit->base.run = vsplit_run_uint;
      break;
   default:
      assert(0);
      break;
   }

   /* split only */
   vsplit->prim = in_prim;

   vsplit->middle = middle;
   middle->prepare(middle, vsplit->prim, opt, &vsplit->max_vertices);

   vsplit->segment_size = MIN2(SEGMENT_SIZE, vsplit->max_vertices);
}


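/**
 * Flush the frontend.  On a state change, finish and drop the middle end.
 */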
static void vsplit_flush(struct draw_pt_front_end *frontend, unsigned flags)
{
   struct vsplit_frontend *vsplit = (struct vsplit_frontend *) frontend;

   if (flags & DRAW_FLUSH_STATE_CHANGE) {
      vsplit->middle->finish(vsplit->middle);
      vsplit->middle = NULL;
   }
}


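/**
 * Destroy the frontend and release its storage.
 */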
static void vsplit_destroy(struct draw_pt_front_end *frontend)
{
   FREE(frontend);
}


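/**
 * Create the vsplit frontend and initialize the identity draw elements.
 */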
struct draw_pt_front_end *draw_pt_vsplit(struct draw_context *draw)
{
   struct vsplit_frontend *vsplit = CALLOC_STRUCT(vsplit_frontend);
   ushort i;

   if (!vsplit)
      return NULL;

   vsplit->base.prepare = vsplit_prepare;
   vsplit->base.run = NULL;
   vsplit->base.flush = vsplit_flush;
   vsplit->base.destroy = vsplit_destroy;
   vsplit->draw = draw;

   for (i = 0; i < SEGMENT_SIZE; i++)
      vsplit->identity_draw_elts[i] = i;

   return &vsplit->base;
}