/*
 * Copyright 2010 Christoph Bumiller
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include "pipe/p_defines.h"
#include "util/u_framebuffer.h"
#include "util/u_upload_mgr.h"

#include "nvc0/nvc0_context.h"
#include "nvc0/nvc0_screen.h"
#include "nvc0/nvc0_resource.h"


#include "xf86drm.h"
#include "nouveau_drm.h"

static void
nvc0_svm_migrate(struct pipe_context *pipe, unsigned num_ptrs,
                 const void* const* ptrs, const size_t *sizes,
                 bool to_device, bool mem_undefined)
{
   struct nvc0_context *nvc0 = nvc0_context(pipe);
   struct nouveau_screen *screen = &nvc0->screen->base;
   int fd = screen->drm->fd;
   unsigned i;

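   /* Issue one SVM_BIND ioctl per pointer range, asking the kernel to
    * migrate the backing pages between system memory and VRAM.
    */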
   for (i = 0; i < num_ptrs; i++) {
      struct drm_nouveau_svm_bind args;
      uint64_t cmd, prio, target;

      args.va_start = (uint64_t)(uintptr_t)ptrs[i];
      if (sizes && sizes[i]) {
         args.va_end = (uint64_t)(uintptr_t)ptrs[i] + sizes[i];
         args.npages = DIV_ROUND_UP(args.va_end - args.va_start, 0x1000);
      } else {
         args.va_end = 0;
         args.npages = 0;
      }
      args.stride = 0;

      args.reserved0 = 0;
      args.reserved1 = 0;

      prio = 0;
      cmd = NOUVEAU_SVM_BIND_COMMAND__MIGRATE;
      target = to_device ? NOUVEAU_SVM_BIND_TARGET__GPU_VRAM : 0;

      args.header = cmd << NOUVEAU_SVM_BIND_COMMAND_SHIFT;
      args.header |= prio << NOUVEAU_SVM_BIND_PRIORITY_SHIFT;
      args.header |= target << NOUVEAU_SVM_BIND_TARGET_SHIFT;

      /* This is best effort, so no guarantee whatsoever */
      drmCommandWrite(fd, DRM_NOUVEAU_SVM_BIND,
                      &args, sizeof(args));
   }
}


static void
nvc0_flush(struct pipe_context *pipe,
           struct pipe_fence_handle **fence,
           unsigned flags)
{
   struct nvc0_context *nvc0 = nvc0_context(pipe);
   struct nouveau_screen *screen = &nvc0->screen->base;

   if (fence)
      nouveau_fence_ref(screen->fence.current, (struct nouveau_fence **)fence);

   PUSH_KICK(nvc0->base.pushbuf); /* fencing handled in kick_notify */

   nouveau_context_update_frame_stats(&nvc0->base);
}

static void
nvc0_texture_barrier(struct pipe_context *pipe, unsigned flags)
{
   struct nouveau_pushbuf *push = nvc0_context(pipe)->base.pushbuf;

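   /* Wait for previous rendering to complete, then invalidate the texture
    * cache so subsequent texturing sees the new data.
    */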
   IMMED_NVC0(push, NVC0_3D(SERIALIZE), 0);
   IMMED_NVC0(push, NVC0_3D(TEX_CACHE_CTL), 0);
}

static void
nvc0_memory_barrier(struct pipe_context *pipe, unsigned flags)
{
   struct nvc0_context *nvc0 = nvc0_context(pipe);
   struct nouveau_pushbuf *push = nvc0->base.pushbuf;
   int i, s;

   if (!(flags & ~PIPE_BARRIER_UPDATE))
      return;

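   /* For persistently mapped buffers the CPU may have written new data while
    * the buffer was bound; flag the affected vertex and constant buffer
    * bindings so they get revalidated on the next draw.
    */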
   if (flags & PIPE_BARRIER_MAPPED_BUFFER) {
      for (i = 0; i < nvc0->num_vtxbufs; ++i) {
         if (!nvc0->vtxbuf[i].buffer.resource && !nvc0->vtxbuf[i].is_user_buffer)
            continue;
         if (nvc0->vtxbuf[i].buffer.resource->flags & PIPE_RESOURCE_FLAG_MAP_PERSISTENT)
            nvc0->base.vbo_dirty = true;
      }

      for (s = 0; s < 5 && !nvc0->cb_dirty; ++s) {
         uint32_t valid = nvc0->constbuf_valid[s];

         while (valid && !nvc0->cb_dirty) {
            const unsigned i = ffs(valid) - 1;
            struct pipe_resource *res;

            valid &= ~(1 << i);
            if (nvc0->constbuf[s][i].user)
               continue;

            res = nvc0->constbuf[s][i].u.buf;
            if (!res)
               continue;

            if (res->flags & PIPE_RESOURCE_FLAG_MAP_PERSISTENT)
               nvc0->cb_dirty = true;
         }
      }
   } else {
      /* Pretty much any write by shaders needs a SERIALIZE after it,
       * especially when moving between the 3D and compute pipelines, but
       * even without that.
       */
      IMMED_NVC0(push, NVC0_3D(SERIALIZE), 0);
   }

   /* If we're going to texture from a buffer/image written by a shader, we
    * must flush the texture cache.
    */
   if (flags & PIPE_BARRIER_TEXTURE)
      IMMED_NVC0(push, NVC0_3D(TEX_CACHE_CTL), 0);

   if (flags & PIPE_BARRIER_CONSTANT_BUFFER)
      nvc0->cb_dirty = true;
   if (flags & (PIPE_BARRIER_VERTEX_BUFFER | PIPE_BARRIER_INDEX_BUFFER))
      nvc0->base.vbo_dirty = true;
}

static void
nvc0_emit_string_marker(struct pipe_context *pipe, const char *str, int len)
{
   struct nouveau_pushbuf *push = nvc0_context(pipe)->base.pushbuf;
   int string_words = len / 4;
   int data_words;

   if (len <= 0)
      return;
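   /* A single NIC0 packet can carry at most NV04_PFIFO_MAX_PACKET_LEN data
    * words; longer strings are silently truncated.
    */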
   string_words = MIN2(string_words, NV04_PFIFO_MAX_PACKET_LEN);
   if (string_words == NV04_PFIFO_MAX_PACKET_LEN)
      data_words = string_words;
   else
      data_words = string_words + !!(len & 3);
   BEGIN_NIC0(push, SUBC_3D(NV04_GRAPH_NOP), data_words);
   if (string_words)
      PUSH_DATAp(push, str, string_words);
   if (string_words != data_words) {
      int data = 0;
      memcpy(&data, &str[string_words * 4], len & 3);
      PUSH_DATA (push, data);
   }
}

static enum pipe_reset_status
nvc0_get_device_reset_status(struct pipe_context *pipe)
{
   return PIPE_NO_RESET;
}

static void
nvc0_context_unreference_resources(struct nvc0_context *nvc0)
{
   unsigned s, i;

   nouveau_bufctx_del(&nvc0->bufctx_3d);
   nouveau_bufctx_del(&nvc0->bufctx);
   nouveau_bufctx_del(&nvc0->bufctx_cp);

   util_unreference_framebuffer_state(&nvc0->framebuffer);

   for (i = 0; i < nvc0->num_vtxbufs; ++i)
      pipe_vertex_buffer_unreference(&nvc0->vtxbuf[i]);

   for (s = 0; s < 6; ++s) {
      for (i = 0; i < nvc0->num_textures[s]; ++i)
         pipe_sampler_view_reference(&nvc0->textures[s][i], NULL);

      for (i = 0; i < NVC0_MAX_PIPE_CONSTBUFS; ++i)
         if (!nvc0->constbuf[s][i].user)
            pipe_resource_reference(&nvc0->constbuf[s][i].u.buf, NULL);

      for (i = 0; i < NVC0_MAX_BUFFERS; ++i)
         pipe_resource_reference(&nvc0->buffers[s][i].buffer, NULL);

      for (i = 0; i < NVC0_MAX_IMAGES; ++i) {
         pipe_resource_reference(&nvc0->images[s][i].resource, NULL);
         if (nvc0->screen->base.class_3d >= GM107_3D_CLASS)
            pipe_sampler_view_reference(&nvc0->images_tic[s][i], NULL);
      }
   }

   for (s = 0; s < 2; ++s) {
      for (i = 0; i < NVC0_MAX_SURFACE_SLOTS; ++i)
         pipe_surface_reference(&nvc0->surfaces[s][i], NULL);
   }

   for (i = 0; i < nvc0->num_tfbbufs; ++i)
      pipe_so_target_reference(&nvc0->tfbbuf[i], NULL);

   for (i = 0; i < nvc0->global_residents.size / sizeof(struct pipe_resource *);
        ++i) {
      struct pipe_resource **res = util_dynarray_element(
         &nvc0->global_residents, struct pipe_resource *, i);
      pipe_resource_reference(res, NULL);
   }
   util_dynarray_fini(&nvc0->global_residents);

   if (nvc0->tcp_empty)
      nvc0->base.pipe.delete_tcs_state(&nvc0->base.pipe, nvc0->tcp_empty);
}

static void
nvc0_destroy(struct pipe_context *pipe)
{
   struct nvc0_context *nvc0 = nvc0_context(pipe);

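   /* Save the current state so that a subsequently created context can
    * inherit it (see nvc0_create).
    */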
   if (nvc0->screen->cur_ctx == nvc0) {
      nvc0->screen->cur_ctx = NULL;
      nvc0->screen->save_state = nvc0->state;
      nvc0->screen->save_state.tfb = NULL;
   }

   if (nvc0->base.pipe.stream_uploader)
      u_upload_destroy(nvc0->base.pipe.stream_uploader);

   /* Unset the bufctx; we don't want to revalidate any resources after the
    * flush. Other contexts will always set their bufctx again on action
    * calls.
    */
   nouveau_pushbuf_bufctx(nvc0->base.pushbuf, NULL);
   nouveau_pushbuf_kick(nvc0->base.pushbuf, nvc0->base.pushbuf->channel);

   nvc0_context_unreference_resources(nvc0);
   nvc0_blitctx_destroy(nvc0);

   list_for_each_entry_safe(struct nvc0_resident, pos, &nvc0->tex_head, list) {
      list_del(&pos->list);
      free(pos);
   }

   list_for_each_entry_safe(struct nvc0_resident, pos, &nvc0->img_head, list) {
      list_del(&pos->list);
      free(pos);
   }

   nouveau_context_destroy(&nvc0->base);
}

void
nvc0_default_kick_notify(struct nouveau_pushbuf *push)
{
   struct nvc0_screen *screen = push->user_priv;

   if (screen) {
      nouveau_fence_next(&screen->base);
      nouveau_fence_update(&screen->base, true);
      if (screen->cur_ctx)
         screen->cur_ctx->state.flushed = true;
      NOUVEAU_DRV_STAT(&screen->base, pushbuf_count, 1);
   }
}

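/* Flag all bindings of @res for revalidation, decrementing @ref for each
 * binding found; returns 0 as soon as all known references are accounted
 * for, which lets the caller stop early.
 */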
static int
nvc0_invalidate_resource_storage(struct nouveau_context *ctx,
                                 struct pipe_resource *res,
                                 int ref)
{
   struct nvc0_context *nvc0 = nvc0_context(&ctx->pipe);
   unsigned s, i;

   if (res->bind & PIPE_BIND_RENDER_TARGET) {
      for (i = 0; i < nvc0->framebuffer.nr_cbufs; ++i) {
         if (nvc0->framebuffer.cbufs[i] &&
             nvc0->framebuffer.cbufs[i]->texture == res) {
            nvc0->dirty_3d |= NVC0_NEW_3D_FRAMEBUFFER;
            nouveau_bufctx_reset(nvc0->bufctx_3d, NVC0_BIND_3D_FB);
            if (!--ref)
               return ref;
         }
      }
   }
   if (res->bind & PIPE_BIND_DEPTH_STENCIL) {
      if (nvc0->framebuffer.zsbuf &&
          nvc0->framebuffer.zsbuf->texture == res) {
         nvc0->dirty_3d |= NVC0_NEW_3D_FRAMEBUFFER;
         nouveau_bufctx_reset(nvc0->bufctx_3d, NVC0_BIND_3D_FB);
         if (!--ref)
            return ref;
      }
   }

   if (res->target == PIPE_BUFFER) {
      for (i = 0; i < nvc0->num_vtxbufs; ++i) {
         if (nvc0->vtxbuf[i].buffer.resource == res) {
            nvc0->dirty_3d |= NVC0_NEW_3D_ARRAYS;
            nouveau_bufctx_reset(nvc0->bufctx_3d, NVC0_BIND_3D_VTX);
            if (!--ref)
               return ref;
         }
      }

      for (s = 0; s < 6; ++s) {
         for (i = 0; i < nvc0->num_textures[s]; ++i) {
            if (nvc0->textures[s][i] &&
                nvc0->textures[s][i]->texture == res) {
               nvc0->textures_dirty[s] |= 1 << i;
               if (unlikely(s == 5)) {
                  nvc0->dirty_cp |= NVC0_NEW_CP_TEXTURES;
                  nouveau_bufctx_reset(nvc0->bufctx_cp, NVC0_BIND_CP_TEX(i));
               } else {
                  nvc0->dirty_3d |= NVC0_NEW_3D_TEXTURES;
                  nouveau_bufctx_reset(nvc0->bufctx_3d, NVC0_BIND_3D_TEX(s, i));
               }
               if (!--ref)
                  return ref;
            }
         }
      }

      for (s = 0; s < 6; ++s) {
         for (i = 0; i < NVC0_MAX_PIPE_CONSTBUFS; ++i) {
            if (!(nvc0->constbuf_valid[s] & (1 << i)))
               continue;
            if (!nvc0->constbuf[s][i].user &&
                nvc0->constbuf[s][i].u.buf == res) {
               nvc0->constbuf_dirty[s] |= 1 << i;
               if (unlikely(s == 5)) {
                  nvc0->dirty_cp |= NVC0_NEW_CP_CONSTBUF;
                  nouveau_bufctx_reset(nvc0->bufctx_cp, NVC0_BIND_CP_CB(i));
               } else {
                  nvc0->dirty_3d |= NVC0_NEW_3D_CONSTBUF;
                  nouveau_bufctx_reset(nvc0->bufctx_3d, NVC0_BIND_3D_CB(s, i));
               }
               if (!--ref)
                  return ref;
            }
         }
      }

      for (s = 0; s < 6; ++s) {
         for (i = 0; i < NVC0_MAX_BUFFERS; ++i) {
            if (nvc0->buffers[s][i].buffer == res) {
               nvc0->buffers_dirty[s] |= 1 << i;
               if (unlikely(s == 5)) {
                  nvc0->dirty_cp |= NVC0_NEW_CP_BUFFERS;
                  nouveau_bufctx_reset(nvc0->bufctx_cp, NVC0_BIND_CP_BUF);
               } else {
                  nvc0->dirty_3d |= NVC0_NEW_3D_BUFFERS;
                  nouveau_bufctx_reset(nvc0->bufctx_3d, NVC0_BIND_3D_BUF);
               }
               if (!--ref)
                  return ref;
            }
         }
      }

      for (s = 0; s < 6; ++s) {
         for (i = 0; i < NVC0_MAX_IMAGES; ++i) {
            if (nvc0->images[s][i].resource == res) {
               nvc0->images_dirty[s] |= 1 << i;
               if (unlikely(s == 5)) {
                  nvc0->dirty_cp |= NVC0_NEW_CP_SURFACES;
                  nouveau_bufctx_reset(nvc0->bufctx_cp, NVC0_BIND_CP_SUF);
               } else {
                  nvc0->dirty_3d |= NVC0_NEW_3D_SURFACES;
                  nouveau_bufctx_reset(nvc0->bufctx_3d, NVC0_BIND_3D_SUF);
               }
               /* Only count down @ref on an actual match, like the loops
                * above; decrementing unconditionally would return too early.
                */
               if (!--ref)
                  return ref;
            }
         }
      }
   }

   return ref;
}

static void
nvc0_context_get_sample_position(struct pipe_context *, unsigned, unsigned,
                                 float *);

struct pipe_context *
nvc0_create(struct pipe_screen *pscreen, void *priv, unsigned ctxflags)
{
   struct nvc0_screen *screen = nvc0_screen(pscreen);
   struct nvc0_context *nvc0;
   struct pipe_context *pipe;
   int ret;
   uint32_t flags;

   nvc0 = CALLOC_STRUCT(nvc0_context);
   if (!nvc0)
      return NULL;
   pipe = &nvc0->base.pipe;

   if (!nvc0_blitctx_create(nvc0))
      goto out_err;

   nvc0->base.pushbuf = screen->base.pushbuf;
   nvc0->base.client = screen->base.client;

   ret = nouveau_bufctx_new(screen->base.client, 2, &nvc0->bufctx);
   if (!ret)
      ret = nouveau_bufctx_new(screen->base.client, NVC0_BIND_3D_COUNT,
                               &nvc0->bufctx_3d);
   if (!ret)
      ret = nouveau_bufctx_new(screen->base.client, NVC0_BIND_CP_COUNT,
                               &nvc0->bufctx_cp);
   if (ret)
      goto out_err;

   nvc0->screen = screen;
   nvc0->base.screen = &screen->base;

   pipe->screen = pscreen;
   pipe->priv = priv;
   pipe->stream_uploader = u_upload_create_default(pipe);
   if (!pipe->stream_uploader)
      goto out_err;
   pipe->const_uploader = pipe->stream_uploader;

   pipe->destroy = nvc0_destroy;

   pipe->draw_vbo = nvc0_draw_vbo;
   pipe->clear = nvc0_clear;
   pipe->launch_grid = (nvc0->screen->base.class_3d >= NVE4_3D_CLASS) ?
      nve4_launch_grid : nvc0_launch_grid;

   pipe->svm_migrate = nvc0_svm_migrate;

   pipe->flush = nvc0_flush;
   pipe->texture_barrier = nvc0_texture_barrier;
   pipe->memory_barrier = nvc0_memory_barrier;
   pipe->get_sample_position = nvc0_context_get_sample_position;
   pipe->emit_string_marker = nvc0_emit_string_marker;
   pipe->get_device_reset_status = nvc0_get_device_reset_status;

   nouveau_context_init(&nvc0->base);
   nvc0_init_query_functions(nvc0);
   nvc0_init_surface_functions(nvc0);
   nvc0_init_state_functions(nvc0);
   nvc0_init_transfer_functions(nvc0);
   nvc0_init_resource_functions(pipe);
   if (nvc0->screen->base.class_3d >= NVE4_3D_CLASS)
      nvc0_init_bindless_functions(pipe);

   list_inithead(&nvc0->tex_head);
   list_inithead(&nvc0->img_head);

   nvc0->base.invalidate_resource_storage = nvc0_invalidate_resource_storage;

   pipe->create_video_codec = nvc0_create_decoder;
   pipe->create_video_buffer = nvc0_video_buffer_create;

   /* shader builtin library is per-screen, but we need a context for m2mf */
   nvc0_program_library_upload(nvc0);
   nvc0_program_init_tcp_empty(nvc0);
   if (!nvc0->tcp_empty)
      goto out_err;
   /* set the empty tctl prog on next draw in case one is never set */
   nvc0->dirty_3d |= NVC0_NEW_3D_TCTLPROG;

   /* Do not bind the COMPUTE driver constbuf at screen initialization because
    * CBs are aliased between 3D and COMPUTE, but make sure it will be bound if
    * a grid is launched later. */
   nvc0->dirty_cp |= NVC0_NEW_CP_DRIVERCONST;

   /* now that there are no more opportunities for errors, set the current
    * context if there isn't already one.
    */
   if (!screen->cur_ctx) {
      nvc0->state = screen->save_state;
      screen->cur_ctx = nvc0;
      nouveau_pushbuf_bufctx(screen->base.pushbuf, nvc0->bufctx);
   }
   screen->base.pushbuf->kick_notify = nvc0_default_kick_notify;

   /* add permanently resident buffers to bufctxs */

   flags = NV_VRAM_DOMAIN(&screen->base) | NOUVEAU_BO_RD;

   BCTX_REFN_bo(nvc0->bufctx_3d, 3D_SCREEN, flags, screen->uniform_bo);
   BCTX_REFN_bo(nvc0->bufctx_3d, 3D_SCREEN, flags, screen->txc);
   if (screen->compute) {
      BCTX_REFN_bo(nvc0->bufctx_cp, CP_SCREEN, flags, screen->uniform_bo);
      BCTX_REFN_bo(nvc0->bufctx_cp, CP_SCREEN, flags, screen->txc);
   }

   flags = NV_VRAM_DOMAIN(&screen->base) | NOUVEAU_BO_RDWR;

   if (screen->poly_cache)
      BCTX_REFN_bo(nvc0->bufctx_3d, 3D_SCREEN, flags, screen->poly_cache);
   if (screen->compute)
      BCTX_REFN_bo(nvc0->bufctx_cp, CP_SCREEN, flags, screen->tls);

   flags = NOUVEAU_BO_GART | NOUVEAU_BO_WR;

   BCTX_REFN_bo(nvc0->bufctx_3d, 3D_SCREEN, flags, screen->fence.bo);
   BCTX_REFN_bo(nvc0->bufctx, FENCE, flags, screen->fence.bo);
   if (screen->compute)
      BCTX_REFN_bo(nvc0->bufctx_cp, CP_SCREEN, flags, screen->fence.bo);

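   /* 2 MiB of scratch space for temporary data uploads */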
   nvc0->base.scratch.bo_size = 2 << 20;

   memset(nvc0->tex_handles, ~0, sizeof(nvc0->tex_handles));

   util_dynarray_init(&nvc0->global_residents, NULL);

   // Make sure that the first TSC entry has the SRGB conversion bit set,
   // since we use it as a fallback on Fermi for TXF, and on Kepler+
   // generations for FBFETCH handling (which also uses TXF).
   //
   // NOTE: Preliminary testing suggests that this isn't necessary on GM20x
   // at least (untested on Kepler). However, this is ~free, so there's no
   // reason not to do it.
   if (!screen->tsc.entries[0])
      nvc0_upload_tsc0(nvc0);

   // On Fermi, mark samplers dirty so that the proper binding can happen
   if (screen->base.class_3d < NVE4_3D_CLASS) {
      for (int s = 0; s < 6; s++)
         nvc0->samplers_dirty[s] = 1;
      nvc0->dirty_3d |= NVC0_NEW_3D_SAMPLERS;
      nvc0->dirty_cp |= NVC0_NEW_CP_SAMPLERS;
   }

   return pipe;

out_err:
   if (nvc0) {
      if (pipe->stream_uploader)
         u_upload_destroy(pipe->stream_uploader);
      if (nvc0->bufctx_3d)
         nouveau_bufctx_del(&nvc0->bufctx_3d);
      if (nvc0->bufctx_cp)
         nouveau_bufctx_del(&nvc0->bufctx_cp);
      if (nvc0->bufctx)
         nouveau_bufctx_del(&nvc0->bufctx);
      FREE(nvc0->blit);
      FREE(nvc0);
   }
   return NULL;
}

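/* Mark all resources referenced by @bufctx as used, updating their fences
 * via nvc0_resource_validate(); @on_flush selects the bufctx's current list
 * instead of the pending one.
 */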
void
nvc0_bufctx_fence(struct nvc0_context *nvc0, struct nouveau_bufctx *bufctx,
                  bool on_flush)
{
   struct nouveau_list *list = on_flush ? &bufctx->current : &bufctx->pending;
   struct nouveau_list *it;
   NOUVEAU_DRV_STAT_IFD(unsigned count = 0);

   for (it = list->next; it != list; it = it->next) {
      struct nouveau_bufref *ref = (struct nouveau_bufref *)it;
      struct nv04_resource *res = ref->priv;
      if (res)
         nvc0_resource_validate(res, (unsigned)ref->priv_data);
      NOUVEAU_DRV_STAT_IFD(count++);
   }
   NOUVEAU_DRV_STAT(&nvc0->screen->base, resource_validate_count, count);
}

const void *
nvc0_get_sample_locations(unsigned sample_count)
{
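   /* Sample positions in 1/16th pixel units; (0x8, 0x8) is the pixel center
    * (cf. the 0.0625 scale factor in nvc0_context_get_sample_position).
    */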
   static const uint8_t ms1[1][2] = { { 0x8, 0x8 } };
   static const uint8_t ms2[2][2] = {
      { 0x4, 0x4 }, { 0xc, 0xc } }; /* surface coords (0,0), (1,0) */
   static const uint8_t ms4[4][2] = {
      { 0x6, 0x2 }, { 0xe, 0x6 },   /* (0,0), (1,0) */
      { 0x2, 0xa }, { 0xa, 0xe } }; /* (0,1), (1,1) */
   static const uint8_t ms8[8][2] = {
      { 0x1, 0x7 }, { 0x5, 0x3 },   /* (0,0), (1,0) */
      { 0x3, 0xd }, { 0x7, 0xb },   /* (0,1), (1,1) */
      { 0x9, 0x5 }, { 0xf, 0x1 },   /* (2,0), (3,0) */
      { 0xb, 0xf }, { 0xd, 0x9 } }; /* (2,1), (3,1) */
#if 0
   /* NOTE: there are alternative modes for MS2 and MS8, currently not used */
   static const uint8_t ms8_alt[8][2] = {
      { 0x9, 0x5 }, { 0x7, 0xb },   /* (2,0), (1,1) */
      { 0xd, 0x9 }, { 0x5, 0x3 },   /* (3,1), (1,0) */
      { 0x3, 0xd }, { 0x1, 0x7 },   /* (0,1), (0,0) */
      { 0xb, 0xf }, { 0xf, 0x1 } }; /* (2,1), (3,0) */
#endif

   const uint8_t (*ptr)[2];

   switch (sample_count) {
   case 0:
   case 1: ptr = ms1; break;
   case 2: ptr = ms2; break;
   case 4: ptr = ms4; break;
   case 8: ptr = ms8; break;
   default:
      assert(0);
      return NULL; /* bad sample count -> undefined locations */
   }
   return ptr;
}

static void
nvc0_context_get_sample_position(struct pipe_context *pipe,
                                 unsigned sample_count, unsigned sample_index,
                                 float *xy)
{
   const uint8_t (*ptr)[2];

   ptr = nvc0_get_sample_locations(sample_count);
   if (!ptr)
      return;

   xy[0] = ptr[sample_index][0] * 0.0625f;
   xy[1] = ptr[sample_index][1] * 0.0625f;
}