/**********************************************************
 * Copyright 2008-2009 VMware, Inc. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person
 * obtaining a copy of this software and associated documentation
 * files (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy,
 * modify, merge, publish, distribute, sublicense, and/or sell copies
 * of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 **********************************************************/

#include "util/u_inlines.h"
#include "util/u_memory.h"
#include "pipe/p_defines.h"
#include "util/u_math.h"

#include "svga_resource_texture.h"
#include "svga_sampler_view.h"
#include "svga_winsys.h"
#include "svga_context.h"
#include "svga_shader.h"
#include "svga_state.h"
#include "svga_cmd.h"


/**
 * Called when tearing down a context to release the sampler view and
 * resource references held in the hardware draw state.
 */
void
svga_cleanup_tss_binding(struct svga_context *svga)
{
   const enum pipe_shader_type shader = PIPE_SHADER_FRAGMENT;
   unsigned i;

   for (i = 0; i < ARRAY_SIZE(svga->state.hw_draw.views); i++) {
      struct svga_hw_view_state *view = &svga->state.hw_draw.views[i];
      if (view) {
         svga_sampler_view_reference(&view->v, NULL);
         pipe_sampler_view_reference(&svga->curr.sampler_views[shader][i],
                                     NULL);
         pipe_resource_reference(&view->texture, NULL);
         view->dirty = TRUE;
      }
   }
}


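/*
 * A queue of (texture unit, view) pairs collected during state update so
 * that all SVGA3D_TS_BIND_TEXTURE bindings can be emitted with a single
 * SVGA3D_BeginSetTextureState command.
 */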
struct bind_queue {
   struct {
      unsigned unit;
      struct svga_hw_view_state *view;
   } bind[PIPE_MAX_SAMPLERS];

   unsigned bind_count;
};


/**
 * Update the texture binding for one texture unit.
 */
static void
emit_tex_binding_unit(struct svga_context *svga,
                      unsigned unit,
                      const struct svga_sampler_state *s,
                      const struct pipe_sampler_view *sv,
                      struct svga_hw_view_state *view,
                      boolean reemit,
                      struct bind_queue *queue)
{
   struct pipe_resource *texture = NULL;
   unsigned last_level, min_lod, max_lod;

   /* get min max lod */
   if (sv && s) {
      if (s->mipfilter == SVGA3D_TEX_FILTER_NONE) {
         /* just use the base level image */
         min_lod = max_lod = sv->u.tex.first_level;
      }
      else {
         last_level = MIN2(sv->u.tex.last_level, sv->texture->last_level);
         min_lod = s->view_min_lod + sv->u.tex.first_level;
         min_lod = MIN2(min_lod, last_level);
         max_lod = MIN2(s->view_max_lod + sv->u.tex.first_level, last_level);
      }
      texture = sv->texture;
   }
   else {
      min_lod = 0;
      max_lod = 0;
   }

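   /*
    * If the underlying resource or the LOD range changed, drop the old
    * SVGA sampler view and create a new one covering the requested range.
    */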
   if (view->texture != texture ||
       view->min_lod != min_lod ||
       view->max_lod != max_lod) {

      svga_sampler_view_reference(&view->v, NULL);
      pipe_resource_reference(&view->texture, texture);

      view->dirty = TRUE;
      view->min_lod = min_lod;
      view->max_lod = max_lod;

      if (texture) {
         view->v = svga_get_tex_sampler_view(&svga->pipe,
                                             texture,
                                             min_lod,
                                             max_lod);
      }
   }

   /*
    * We need to reemit non-null texture bindings, even when they are not
    * dirty, to ensure that the resources are paged in.
    */
   if (view->dirty || (reemit && view->v)) {
      queue->bind[queue->bind_count].unit = unit;
      queue->bind[queue->bind_count].view = view;
      queue->bind_count++;
   }

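   /*
    * Non-dirty views are still validated, presumably to keep any backing
    * copy of the texture in sync with the source before it is sampled.
    */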
   if (!view->dirty && view->v) {
      svga_validate_sampler_view(svga, view->v);
   }
}


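/**
 * Emit SVGA3D_TS_BIND_TEXTURE state for all dirty fragment texture units
 * (and the polygon stipple unit, when enabled), binding the surfaces of
 * the current sampler views.
 */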
static enum pipe_error
update_tss_binding(struct svga_context *svga, uint64_t dirty)
{
   const enum pipe_shader_type shader = PIPE_SHADER_FRAGMENT;
   boolean reemit = svga->rebind.flags.texture_samplers;
   unsigned i;
   unsigned count = MAX2(svga->curr.num_sampler_views[shader],
                         svga->state.hw_draw.num_views);

   struct bind_queue queue;

   assert(!svga_have_vgpu10(svga));

   queue.bind_count = 0;

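   /*
    * Walk the union of the previously-bound and currently-bound view
    * ranges so that units which no longer have a sampler view are
    * explicitly rebound to NULL.
    */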
   for (i = 0; i < count; i++) {
      emit_tex_binding_unit(svga, i,
                            svga->curr.sampler[shader][i],
                            svga->curr.sampler_views[shader][i],
                            &svga->state.hw_draw.views[i],
                            reemit,
                            &queue);
   }

   svga->state.hw_draw.num_views = svga->curr.num_sampler_views[shader];

   /* Polygon stipple */
   if (svga->curr.rast->templ.poly_stipple_enable) {
      const unsigned unit =
         svga_fs_variant(svga->state.hw_draw.fs)->pstipple_sampler_unit;
      emit_tex_binding_unit(svga, unit,
                            svga->polygon_stipple.sampler,
                            &svga->polygon_stipple.sampler_view->base,
                            &svga->state.hw_draw.views[unit],
                            reemit,
                            &queue);
   }

   svga->state.hw_draw.num_backed_views = 0;

   if (queue.bind_count) {
      SVGA3dTextureState *ts;

      if (SVGA3D_BeginSetTextureState(svga->swc, &ts,
                                      queue.bind_count) != PIPE_OK)
         goto fail;

      for (i = 0; i < queue.bind_count; i++) {
         struct svga_winsys_surface *handle;
         struct svga_hw_view_state *view = queue.bind[i].view;

         ts[i].stage = queue.bind[i].unit;
         ts[i].name = SVGA3D_TS_BIND_TEXTURE;

         if (view->v) {
            handle = view->v->handle;

            /* Keep track of number of views with a backing copy
             * of texture.
             */
            if (handle != svga_texture(view->texture)->handle)
               svga->state.hw_draw.num_backed_views++;
         }
         else {
            handle = NULL;
         }
         svga->swc->surface_relocation(svga->swc,
                                       &ts[i].value,
                                       NULL,
                                       handle,
                                       SVGA_RELOC_READ);

         queue.bind[i].view->dirty = FALSE;
      }

      SVGA_FIFOCommitAll(svga->swc);
   }

   svga->rebind.flags.texture_samplers = FALSE;

   return PIPE_OK;

fail:
   return PIPE_ERROR_OUT_OF_MEMORY;
}


/*
 * Rebind textures.
 *
 * Similar to update_tss_binding, but without any state checking/update.
 *
 * Called at the beginning of every new command buffer to ensure that
 * non-dirty textures are properly paged-in.
 */
enum pipe_error
svga_reemit_tss_bindings(struct svga_context *svga)
{
   unsigned i;
   enum pipe_error ret;
   struct bind_queue queue;

   assert(!svga_have_vgpu10(svga));
   assert(svga->rebind.flags.texture_samplers);

   queue.bind_count = 0;

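   /* Re-queue every view that is still bound so that its surface gets
    * referenced (and thus paged in) by the new command buffer.
    */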
   for (i = 0; i < svga->state.hw_draw.num_views; i++) {
      struct svga_hw_view_state *view = &svga->state.hw_draw.views[i];

      if (view->v) {
         queue.bind[queue.bind_count].unit = i;
         queue.bind[queue.bind_count].view = view;
         queue.bind_count++;
      }
   }

   /* Polygon stipple */
   if (svga->curr.rast && svga->curr.rast->templ.poly_stipple_enable) {
      const unsigned unit =
         svga_fs_variant(svga->state.hw_draw.fs)->pstipple_sampler_unit;
      struct svga_hw_view_state *view = &svga->state.hw_draw.views[unit];

      if (view->v) {
         queue.bind[queue.bind_count].unit = unit;
         queue.bind[queue.bind_count].view = view;
         queue.bind_count++;
      }
   }

   if (queue.bind_count) {
      SVGA3dTextureState *ts;

      ret = SVGA3D_BeginSetTextureState(svga->swc, &ts, queue.bind_count);
      if (ret != PIPE_OK) {
         return ret;
      }

      for (i = 0; i < queue.bind_count; i++) {
         struct svga_winsys_surface *handle;

         ts[i].stage = queue.bind[i].unit;
         ts[i].name = SVGA3D_TS_BIND_TEXTURE;

         assert(queue.bind[i].view->v);
         handle = queue.bind[i].view->v->handle;
         svga->swc->surface_relocation(svga->swc,
                                       &ts[i].value,
                                       NULL,
                                       handle,
                                       SVGA_RELOC_READ);
      }

      SVGA_FIFOCommitAll(svga->swc);
   }

   svga->rebind.flags.texture_samplers = FALSE;

   return PIPE_OK;
}

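/*
 * Tracked-state atom: re-emits texture bindings whenever the framebuffer,
 * bound textures, polygon stipple state, or samplers change.
 */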
struct svga_tracked_state svga_hw_tss_binding = {
   "texture binding emit",
   SVGA_NEW_FRAME_BUFFER |
   SVGA_NEW_TEXTURE_BINDING |
   SVGA_NEW_STIPPLE |
   SVGA_NEW_SAMPLER,
   update_tss_binding
};



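/*
 * A queue of texture stage state tokens, flushed to the device with a
 * single SVGA3D_BeginSetTextureState command.
 */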
struct ts_queue {
   unsigned ts_count;
   SVGA3dTextureState ts[PIPE_MAX_SAMPLERS*SVGA3D_TS_MAX];
};


static inline void
svga_queue_tss(struct ts_queue *q, unsigned unit, unsigned tss, unsigned value)
{
   assert(q->ts_count < ARRAY_SIZE(q->ts));
   q->ts[q->ts_count].stage = unit;
   q->ts[q->ts_count].name = tss;
   q->ts[q->ts_count].value = value;
   q->ts_count++;
}

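/*
 * Queue a texture stage state update only if the value differs from the
 * one cached in svga->state.hw_draw.ts, then update the cache.  The FLOAT
 * variant converts the float to its bit pattern with fui(), since TSS
 * values are stored as 32-bit integers.
 */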
#define EMIT_TS(svga, unit, val, token) \
do { \
   assert(unit < ARRAY_SIZE(svga->state.hw_draw.ts)); \
   STATIC_ASSERT(SVGA3D_TS_##token < ARRAY_SIZE(svga->state.hw_draw.ts[unit])); \
   if (svga->state.hw_draw.ts[unit][SVGA3D_TS_##token] != val) { \
      svga_queue_tss(queue, unit, SVGA3D_TS_##token, val); \
      svga->state.hw_draw.ts[unit][SVGA3D_TS_##token] = val; \
   } \
} while (0)

#define EMIT_TS_FLOAT(svga, unit, fvalue, token) \
do { \
   unsigned val = fui(fvalue); \
   assert(unit < ARRAY_SIZE(svga->state.hw_draw.ts)); \
   STATIC_ASSERT(SVGA3D_TS_##token < ARRAY_SIZE(svga->state.hw_draw.ts[unit])); \
   if (svga->state.hw_draw.ts[unit][SVGA3D_TS_##token] != val) { \
      svga_queue_tss(queue, unit, SVGA3D_TS_##token, val); \
      svga->state.hw_draw.ts[unit][SVGA3D_TS_##token] = val; \
   } \
} while (0)

/**
 * Emit texture sampler state (tss) for one texture unit.
 */
static void
emit_tss_unit(struct svga_context *svga, unsigned unit,
              const struct svga_sampler_state *state,
              struct ts_queue *queue)
{
   EMIT_TS(svga, unit, state->mipfilter, MIPFILTER);
   EMIT_TS(svga, unit, state->min_lod, TEXTURE_MIPMAP_LEVEL);
   EMIT_TS(svga, unit, state->magfilter, MAGFILTER);
   EMIT_TS(svga, unit, state->minfilter, MINFILTER);
   EMIT_TS(svga, unit, state->aniso_level, TEXTURE_ANISOTROPIC_LEVEL);
   EMIT_TS_FLOAT(svga, unit, state->lod_bias, TEXTURE_LOD_BIAS);
   EMIT_TS(svga, unit, state->addressu, ADDRESSU);
   EMIT_TS(svga, unit, state->addressw, ADDRESSW);
   EMIT_TS(svga, unit, state->bordercolor, BORDERCOLOR);
   // TEXCOORDINDEX -- hopefully not needed

   if (svga->curr.tex_flags.flag_1d & (1 << unit))
      EMIT_TS(svga, unit, SVGA3D_TEX_ADDRESS_WRAP, ADDRESSV);
   else
      EMIT_TS(svga, unit, state->addressv, ADDRESSV);

   if (svga->curr.tex_flags.flag_srgb & (1 << unit))
      EMIT_TS_FLOAT(svga, unit, 2.2f, GAMMA);
   else
      EMIT_TS_FLOAT(svga, unit, 1.0f, GAMMA);
}

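/**
 * Emit texture sampler state for each bound fragment sampler, plus the
 * polygon stipple sampler when stippling is enabled.
 */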
static enum pipe_error
update_tss(struct svga_context *svga, uint64_t dirty)
{
   const enum pipe_shader_type shader = PIPE_SHADER_FRAGMENT;
   unsigned i;
   struct ts_queue queue;

   assert(!svga_have_vgpu10(svga));

   queue.ts_count = 0;
   for (i = 0; i < svga->curr.num_samplers[shader]; i++) {
      if (svga->curr.sampler[shader][i]) {
         const struct svga_sampler_state *curr = svga->curr.sampler[shader][i];
         emit_tss_unit(svga, i, curr, &queue);
      }
   }

   /* polygon stipple sampler */
   if (svga->curr.rast->templ.poly_stipple_enable) {
      emit_tss_unit(svga,
                    svga_fs_variant(svga->state.hw_draw.fs)->pstipple_sampler_unit,
                    svga->polygon_stipple.sampler,
                    &queue);
   }

   if (queue.ts_count) {
      SVGA3dTextureState *ts;

      if (SVGA3D_BeginSetTextureState(svga->swc, &ts, queue.ts_count) != PIPE_OK)
         goto fail;

      memcpy(ts, queue.ts, queue.ts_count * sizeof queue.ts[0]);

      SVGA_FIFOCommitAll(svga->swc);
   }

   return PIPE_OK;

fail:
   /* XXX: need to poison cached hardware state on failure to ensure
    * dirty state gets re-emitted.  Fix this by re-instating partial
    * FIFOCommit command and only updating cached hw state once the
    * initial allocation has succeeded.
    */
   memset(svga->state.hw_draw.ts, 0xcd, sizeof(svga->state.hw_draw.ts));

   return PIPE_ERROR_OUT_OF_MEMORY;
}


struct svga_tracked_state svga_hw_tss = {
   "texture state emit",
   (SVGA_NEW_SAMPLER |
    SVGA_NEW_STIPPLE |
    SVGA_NEW_TEXTURE_FLAGS),
   update_tss
};