/*
 * Copyright © 2014-2018 NVIDIA Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <inttypes.h>
#include <stdlib.h>

#include "util/u_debug.h"
#include "util/u_draw.h"
#include "util/u_inlines.h"
#include "util/u_upload_mgr.h"

#include "tegra_context.h"
#include "tegra_resource.h"
#include "tegra_screen.h"
static void
tegra_destroy(struct pipe_context *pcontext)
{
   struct tegra_context *context = to_tegra_context(pcontext);

   if (context->base.stream_uploader)
      u_upload_destroy(context->base.stream_uploader);

   context->gpu->destroy(context->gpu);
   free(context);
}

static void
tegra_draw_vbo(struct pipe_context *pcontext,
               const struct pipe_draw_info *pinfo,
               unsigned drawid_offset,
               const struct pipe_draw_indirect_info *pindirect,
               const struct pipe_draw_start_count_bias *draws,
               unsigned num_draws)
{
   if (num_draws > 1) {
      util_draw_multi(pcontext, pinfo, drawid_offset, pindirect, draws, num_draws);
      return;
   }

   if (!pindirect && (!draws[0].count || !pinfo->instance_count))
      return;

   struct tegra_context *context = to_tegra_context(pcontext);
   struct pipe_draw_indirect_info indirect;
   struct pipe_draw_info info;

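   /*
    * Copy the draw parameters before unwrapping so that the caller's
    * structures are never modified; only the local copies are rewritten
    * to point at the raw GPU resources.
    */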
   if (pinfo && ((pindirect && pindirect->buffer) || pinfo->index_size)) {
      memcpy(&info, pinfo, sizeof(info));

      if (pindirect && pindirect->buffer) {
         memcpy(&indirect, pindirect, sizeof(indirect));
         indirect.buffer = tegra_resource_unwrap(pindirect->buffer);
         indirect.indirect_draw_count = tegra_resource_unwrap(pindirect->indirect_draw_count);
         pindirect = &indirect;
      }

      if (pinfo->index_size && !pinfo->has_user_indices)
         info.index.resource = tegra_resource_unwrap(info.index.resource);

      pinfo = &info;
   }

   context->gpu->draw_vbo(context->gpu, pinfo, drawid_offset, pindirect, draws, num_draws);
}

static void
tegra_render_condition(struct pipe_context *pcontext,
                       struct pipe_query *query,
                       bool condition,
                       unsigned int mode)
{
   struct tegra_context *context = to_tegra_context(pcontext);

   context->gpu->render_condition(context->gpu, query, condition, mode);
}

static struct pipe_query *
tegra_create_query(struct pipe_context *pcontext, unsigned int query_type,
                   unsigned int index)
{
   struct tegra_context *context = to_tegra_context(pcontext);

   return context->gpu->create_query(context->gpu, query_type, index);
}

static struct pipe_query *
tegra_create_batch_query(struct pipe_context *pcontext,
                         unsigned int num_queries,
                         unsigned int *queries)
{
   struct tegra_context *context = to_tegra_context(pcontext);

   return context->gpu->create_batch_query(context->gpu, num_queries,
                                           queries);
}

static void
tegra_destroy_query(struct pipe_context *pcontext, struct pipe_query *query)
{
   struct tegra_context *context = to_tegra_context(pcontext);

   context->gpu->destroy_query(context->gpu, query);
}

static bool
tegra_begin_query(struct pipe_context *pcontext, struct pipe_query *query)
{
   struct tegra_context *context = to_tegra_context(pcontext);

   return context->gpu->begin_query(context->gpu, query);
}

static bool
tegra_end_query(struct pipe_context *pcontext, struct pipe_query *query)
{
   struct tegra_context *context = to_tegra_context(pcontext);

   return context->gpu->end_query(context->gpu, query);
}

static bool
tegra_get_query_result(struct pipe_context *pcontext,
                       struct pipe_query *query,
                       bool wait,
                       union pipe_query_result *result)
{
   struct tegra_context *context = to_tegra_context(pcontext);

   return context->gpu->get_query_result(context->gpu, query, wait,
                                         result);
}

static void
tegra_get_query_result_resource(struct pipe_context *pcontext,
                                struct pipe_query *query,
                                enum pipe_query_flags flags,
                                enum pipe_query_value_type result_type,
                                int index,
                                struct pipe_resource *resource,
                                unsigned int offset)
{
   struct tegra_context *context = to_tegra_context(pcontext);

   context->gpu->get_query_result_resource(context->gpu, query, flags,
                                           result_type, index, resource,
                                           offset);
}

static void
tegra_set_active_query_state(struct pipe_context *pcontext, bool enable)
{
   struct tegra_context *context = to_tegra_context(pcontext);

   context->gpu->set_active_query_state(context->gpu, enable);
}

static void *
tegra_create_blend_state(struct pipe_context *pcontext,
                         const struct pipe_blend_state *cso)
{
   struct tegra_context *context = to_tegra_context(pcontext);

   return context->gpu->create_blend_state(context->gpu, cso);
}

static void
tegra_bind_blend_state(struct pipe_context *pcontext, void *so)
{
   struct tegra_context *context = to_tegra_context(pcontext);

   context->gpu->bind_blend_state(context->gpu, so);
}

static void
tegra_delete_blend_state(struct pipe_context *pcontext, void *so)
{
   struct tegra_context *context = to_tegra_context(pcontext);

   context->gpu->delete_blend_state(context->gpu, so);
}

static void *
tegra_create_sampler_state(struct pipe_context *pcontext,
                           const struct pipe_sampler_state *cso)
{
   struct tegra_context *context = to_tegra_context(pcontext);

   return context->gpu->create_sampler_state(context->gpu, cso);
}

static void
tegra_bind_sampler_states(struct pipe_context *pcontext, enum pipe_shader_type shader,
                          unsigned start_slot, unsigned num_samplers,
                          void **samplers)
{
   struct tegra_context *context = to_tegra_context(pcontext);

   context->gpu->bind_sampler_states(context->gpu, shader, start_slot,
                                     num_samplers, samplers);
}

static void
tegra_delete_sampler_state(struct pipe_context *pcontext, void *so)
{
   struct tegra_context *context = to_tegra_context(pcontext);

   context->gpu->delete_sampler_state(context->gpu, so);
}

static void *
tegra_create_rasterizer_state(struct pipe_context *pcontext,
                              const struct pipe_rasterizer_state *cso)
{
   struct tegra_context *context = to_tegra_context(pcontext);

   return context->gpu->create_rasterizer_state(context->gpu, cso);
}

static void
tegra_bind_rasterizer_state(struct pipe_context *pcontext, void *so)
{
   struct tegra_context *context = to_tegra_context(pcontext);

   context->gpu->bind_rasterizer_state(context->gpu, so);
}

static void
tegra_delete_rasterizer_state(struct pipe_context *pcontext, void *so)
{
   struct tegra_context *context = to_tegra_context(pcontext);

   context->gpu->delete_rasterizer_state(context->gpu, so);
}

static void *
tegra_create_depth_stencil_alpha_state(struct pipe_context *pcontext,
                                       const struct pipe_depth_stencil_alpha_state *cso)
{
   struct tegra_context *context = to_tegra_context(pcontext);

   return context->gpu->create_depth_stencil_alpha_state(context->gpu, cso);
}

static void
tegra_bind_depth_stencil_alpha_state(struct pipe_context *pcontext, void *so)
{
   struct tegra_context *context = to_tegra_context(pcontext);

   context->gpu->bind_depth_stencil_alpha_state(context->gpu, so);
}

static void
tegra_delete_depth_stencil_alpha_state(struct pipe_context *pcontext, void *so)
{
   struct tegra_context *context = to_tegra_context(pcontext);

   context->gpu->delete_depth_stencil_alpha_state(context->gpu, so);
}

static void *
tegra_create_fs_state(struct pipe_context *pcontext,
                      const struct pipe_shader_state *cso)
{
   struct tegra_context *context = to_tegra_context(pcontext);

   return context->gpu->create_fs_state(context->gpu, cso);
}

static void
tegra_bind_fs_state(struct pipe_context *pcontext, void *so)
{
   struct tegra_context *context = to_tegra_context(pcontext);

   context->gpu->bind_fs_state(context->gpu, so);
}

static void
tegra_delete_fs_state(struct pipe_context *pcontext, void *so)
{
   struct tegra_context *context = to_tegra_context(pcontext);

   context->gpu->delete_fs_state(context->gpu, so);
}

static void *
tegra_create_vs_state(struct pipe_context *pcontext,
                      const struct pipe_shader_state *cso)
{
   struct tegra_context *context = to_tegra_context(pcontext);

   return context->gpu->create_vs_state(context->gpu, cso);
}

static void
tegra_bind_vs_state(struct pipe_context *pcontext, void *so)
{
   struct tegra_context *context = to_tegra_context(pcontext);

   context->gpu->bind_vs_state(context->gpu, so);
}

static void
tegra_delete_vs_state(struct pipe_context *pcontext, void *so)
{
   struct tegra_context *context = to_tegra_context(pcontext);

   context->gpu->delete_vs_state(context->gpu, so);
}

static void *
tegra_create_gs_state(struct pipe_context *pcontext,
                      const struct pipe_shader_state *cso)
{
   struct tegra_context *context = to_tegra_context(pcontext);

   return context->gpu->create_gs_state(context->gpu, cso);
}

static void
tegra_bind_gs_state(struct pipe_context *pcontext, void *so)
{
   struct tegra_context *context = to_tegra_context(pcontext);

   context->gpu->bind_gs_state(context->gpu, so);
}

static void
tegra_delete_gs_state(struct pipe_context *pcontext, void *so)
{
   struct tegra_context *context = to_tegra_context(pcontext);

   context->gpu->delete_gs_state(context->gpu, so);
}

static void *
tegra_create_tcs_state(struct pipe_context *pcontext,
                       const struct pipe_shader_state *cso)
{
   struct tegra_context *context = to_tegra_context(pcontext);

   return context->gpu->create_tcs_state(context->gpu, cso);
}

static void
tegra_bind_tcs_state(struct pipe_context *pcontext, void *so)
{
   struct tegra_context *context = to_tegra_context(pcontext);

   context->gpu->bind_tcs_state(context->gpu, so);
}

static void
tegra_delete_tcs_state(struct pipe_context *pcontext, void *so)
{
   struct tegra_context *context = to_tegra_context(pcontext);

   context->gpu->delete_tcs_state(context->gpu, so);
}

static void *
tegra_create_tes_state(struct pipe_context *pcontext,
                       const struct pipe_shader_state *cso)
{
   struct tegra_context *context = to_tegra_context(pcontext);

   return context->gpu->create_tes_state(context->gpu, cso);
}

static void
tegra_bind_tes_state(struct pipe_context *pcontext, void *so)
{
   struct tegra_context *context = to_tegra_context(pcontext);

   context->gpu->bind_tes_state(context->gpu, so);
}

static void
tegra_delete_tes_state(struct pipe_context *pcontext, void *so)
{
   struct tegra_context *context = to_tegra_context(pcontext);

   context->gpu->delete_tes_state(context->gpu, so);
}

static void *
tegra_create_vertex_elements_state(struct pipe_context *pcontext,
                                   unsigned num_elements,
                                   const struct pipe_vertex_element *elements)
{
   struct tegra_context *context = to_tegra_context(pcontext);

   return context->gpu->create_vertex_elements_state(context->gpu,
                                                     num_elements,
                                                     elements);
}

static void
tegra_bind_vertex_elements_state(struct pipe_context *pcontext, void *so)
{
   struct tegra_context *context = to_tegra_context(pcontext);

   context->gpu->bind_vertex_elements_state(context->gpu, so);
}

static void
tegra_delete_vertex_elements_state(struct pipe_context *pcontext, void *so)
{
   struct tegra_context *context = to_tegra_context(pcontext);

   context->gpu->delete_vertex_elements_state(context->gpu, so);
}

static void
tegra_set_blend_color(struct pipe_context *pcontext,
                      const struct pipe_blend_color *color)
{
   struct tegra_context *context = to_tegra_context(pcontext);

   context->gpu->set_blend_color(context->gpu, color);
}

static void
tegra_set_stencil_ref(struct pipe_context *pcontext,
                      const struct pipe_stencil_ref ref)
{
   struct tegra_context *context = to_tegra_context(pcontext);

   context->gpu->set_stencil_ref(context->gpu, ref);
}

static void
tegra_set_sample_mask(struct pipe_context *pcontext, unsigned int mask)
{
   struct tegra_context *context = to_tegra_context(pcontext);

   context->gpu->set_sample_mask(context->gpu, mask);
}

static void
tegra_set_min_samples(struct pipe_context *pcontext, unsigned int samples)
{
   struct tegra_context *context = to_tegra_context(pcontext);

   context->gpu->set_min_samples(context->gpu, samples);
}

static void
tegra_set_clip_state(struct pipe_context *pcontext,
                     const struct pipe_clip_state *state)
{
   struct tegra_context *context = to_tegra_context(pcontext);

   context->gpu->set_clip_state(context->gpu, state);
}

static void
tegra_set_constant_buffer(struct pipe_context *pcontext, enum pipe_shader_type shader,
                          unsigned int index, bool take_ownership,
                          const struct pipe_constant_buffer *buf)
{
   struct tegra_context *context = to_tegra_context(pcontext);
   struct pipe_constant_buffer buffer;

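   /* copy the descriptor so that the wrapped resource can be unwrapped
    * without modifying the caller's buffer */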
   if (buf && buf->buffer) {
      memcpy(&buffer, buf, sizeof(buffer));
      buffer.buffer = tegra_resource_unwrap(buffer.buffer);
      buf = &buffer;
   }

   context->gpu->set_constant_buffer(context->gpu, shader, index, take_ownership, buf);
}

static void
tegra_set_framebuffer_state(struct pipe_context *pcontext,
                            const struct pipe_framebuffer_state *fb)
{
   struct tegra_context *context = to_tegra_context(pcontext);
   struct pipe_framebuffer_state state;
   unsigned i;

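   /*
    * Unwrap each attached color buffer and the depth/stencil buffer into
    * a local copy of the state; color buffer slots beyond nr_cbufs are
    * cleared so the GPU driver never sees stale wrapper pointers.
    */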
   if (fb) {
      memcpy(&state, fb, sizeof(state));

      for (i = 0; i < fb->nr_cbufs; i++)
         state.cbufs[i] = tegra_surface_unwrap(fb->cbufs[i]);

      while (i < PIPE_MAX_COLOR_BUFS)
         state.cbufs[i++] = NULL;

      state.zsbuf = tegra_surface_unwrap(fb->zsbuf);

      fb = &state;
   }

   context->gpu->set_framebuffer_state(context->gpu, fb);
}

static void
tegra_set_polygon_stipple(struct pipe_context *pcontext,
                          const struct pipe_poly_stipple *stipple)
{
   struct tegra_context *context = to_tegra_context(pcontext);

   context->gpu->set_polygon_stipple(context->gpu, stipple);
}

static void
tegra_set_scissor_states(struct pipe_context *pcontext, unsigned start_slot,
                         unsigned num_scissors,
                         const struct pipe_scissor_state *scissors)
{
   struct tegra_context *context = to_tegra_context(pcontext);

   context->gpu->set_scissor_states(context->gpu, start_slot, num_scissors,
                                    scissors);
}

static void
tegra_set_window_rectangles(struct pipe_context *pcontext, bool include,
                            unsigned int num_rectangles,
                            const struct pipe_scissor_state *rectangles)
{
   struct tegra_context *context = to_tegra_context(pcontext);

   context->gpu->set_window_rectangles(context->gpu, include, num_rectangles,
                                       rectangles);
}

static void
tegra_set_viewport_states(struct pipe_context *pcontext, unsigned start_slot,
                          unsigned num_viewports,
                          const struct pipe_viewport_state *viewports)
{
   struct tegra_context *context = to_tegra_context(pcontext);

   context->gpu->set_viewport_states(context->gpu, start_slot, num_viewports,
                                     viewports);
}

static void
tegra_set_sampler_views(struct pipe_context *pcontext, enum pipe_shader_type shader,
                        unsigned start_slot, unsigned num_views,
                        unsigned unbind_num_trailing_slots,
                        bool take_ownership,
                        struct pipe_sampler_view **pviews)
{
   struct pipe_sampler_view *views[PIPE_MAX_SHADER_SAMPLER_VIEWS];
   struct tegra_context *context = to_tegra_context(pcontext);
   struct tegra_sampler_view *view;
   unsigned i;

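   /*
    * Each bind consumes one reference from the wrapper's private pool
    * (see tegra_create_sampler_view()); once the pool runs out, it is
    * refilled by adding a large batch of references to the GPU view in
    * a single atomic operation rather than one atomic per bind.
    */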
   for (i = 0; i < num_views; i++) {
      /* adjust private reference count */
      view = to_tegra_sampler_view(pviews[i]);
      if (view) {
         view->refcount--;
         if (!view->refcount) {
            view->refcount = 100000000;
            p_atomic_add(&view->gpu->reference.count, view->refcount);
         }
      }

      views[i] = tegra_sampler_view_unwrap(pviews[i]);
   }

   context->gpu->set_sampler_views(context->gpu, shader, start_slot,
                                   num_views, unbind_num_trailing_slots,
                                   take_ownership, views);
}

static void
tegra_set_tess_state(struct pipe_context *pcontext,
                     const float default_outer_level[4],
                     const float default_inner_level[2])
{
   struct tegra_context *context = to_tegra_context(pcontext);

   context->gpu->set_tess_state(context->gpu, default_outer_level,
                                default_inner_level);
}

static void
tegra_set_debug_callback(struct pipe_context *pcontext,
                         const struct util_debug_callback *callback)
{
   struct tegra_context *context = to_tegra_context(pcontext);

   context->gpu->set_debug_callback(context->gpu, callback);
}

static void
tegra_set_shader_buffers(struct pipe_context *pcontext, enum pipe_shader_type shader,
                         unsigned start, unsigned count,
                         const struct pipe_shader_buffer *buffers,
                         unsigned writable_bitmask)
{
   struct tegra_context *context = to_tegra_context(pcontext);

   context->gpu->set_shader_buffers(context->gpu, shader, start, count,
                                    buffers, writable_bitmask);
}

static void
tegra_set_shader_images(struct pipe_context *pcontext, enum pipe_shader_type shader,
                        unsigned start, unsigned count,
                        unsigned unbind_num_trailing_slots,
                        const struct pipe_image_view *images)
{
   struct tegra_context *context = to_tegra_context(pcontext);

   context->gpu->set_shader_images(context->gpu, shader, start, count,
                                   unbind_num_trailing_slots, images);
}

static void
tegra_set_vertex_buffers(struct pipe_context *pcontext,
                         unsigned num_buffers,
                         const struct pipe_vertex_buffer *buffers)
{
   struct tegra_context *context = to_tegra_context(pcontext);
   struct pipe_vertex_buffer buf[PIPE_MAX_SHADER_INPUTS];
   unsigned i;

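   /*
    * Copy the array so that wrapped resources can be unwrapped in place;
    * user buffers carry raw CPU pointers and need no unwrapping.
    */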
   if (num_buffers && buffers) {
      memcpy(buf, buffers, num_buffers * sizeof(struct pipe_vertex_buffer));

      for (i = 0; i < num_buffers; i++) {
         if (!buf[i].is_user_buffer)
            buf[i].buffer.resource = tegra_resource_unwrap(buf[i].buffer.resource);
      }

      buffers = buf;
   }

   context->gpu->set_vertex_buffers(context->gpu, num_buffers, buffers);
}

static struct pipe_stream_output_target *
tegra_create_stream_output_target(struct pipe_context *pcontext,
                                  struct pipe_resource *presource,
                                  unsigned buffer_offset,
                                  unsigned buffer_size)
{
   struct tegra_resource *resource = to_tegra_resource(presource);
   struct tegra_context *context = to_tegra_context(pcontext);

   return context->gpu->create_stream_output_target(context->gpu,
                                                    resource->gpu,
                                                    buffer_offset,
                                                    buffer_size);
}

static void
tegra_stream_output_target_destroy(struct pipe_context *pcontext,
                                   struct pipe_stream_output_target *target)
{
   struct tegra_context *context = to_tegra_context(pcontext);

   context->gpu->stream_output_target_destroy(context->gpu, target);
}

static void
tegra_set_stream_output_targets(struct pipe_context *pcontext,
                                unsigned num_targets,
                                struct pipe_stream_output_target **targets,
                                const unsigned *offsets,
                                enum mesa_prim output_prim)
{
   struct tegra_context *context = to_tegra_context(pcontext);

   context->gpu->set_stream_output_targets(context->gpu, num_targets,
                                           targets, offsets, output_prim);
}

static void
tegra_resource_copy_region(struct pipe_context *pcontext,
                           struct pipe_resource *pdst,
                           unsigned int dst_level,
                           unsigned int dstx,
                           unsigned int dsty,
                           unsigned int dstz,
                           struct pipe_resource *psrc,
                           unsigned int src_level,
                           const struct pipe_box *src_box)
{
   struct tegra_context *context = to_tegra_context(pcontext);
   struct tegra_resource *dst = to_tegra_resource(pdst);
   struct tegra_resource *src = to_tegra_resource(psrc);

   context->gpu->resource_copy_region(context->gpu, dst->gpu, dst_level, dstx,
                                      dsty, dstz, src->gpu, src_level,
                                      src_box);
}

static void
tegra_blit(struct pipe_context *pcontext, const struct pipe_blit_info *pinfo)
{
   struct tegra_context *context = to_tegra_context(pcontext);
   struct pipe_blit_info info;

   if (pinfo) {
      memcpy(&info, pinfo, sizeof(info));
      info.dst.resource = tegra_resource_unwrap(info.dst.resource);
      info.src.resource = tegra_resource_unwrap(info.src.resource);
      pinfo = &info;
   }

   context->gpu->blit(context->gpu, pinfo);
}

static void
tegra_clear(struct pipe_context *pcontext, unsigned buffers,
            const struct pipe_scissor_state *scissor_state,
            const union pipe_color_union *color, double depth,
            unsigned stencil)
{
   struct tegra_context *context = to_tegra_context(pcontext);

   context->gpu->clear(context->gpu, buffers, NULL, color, depth, stencil);
}

static void
tegra_clear_render_target(struct pipe_context *pcontext,
                          struct pipe_surface *pdst,
                          const union pipe_color_union *color,
                          unsigned int dstx,
                          unsigned int dsty,
                          unsigned int width,
                          unsigned int height,
                          bool render_condition)
{
   struct tegra_context *context = to_tegra_context(pcontext);
   struct tegra_surface *dst = to_tegra_surface(pdst);

   context->gpu->clear_render_target(context->gpu, dst->gpu, color, dstx,
                                     dsty, width, height, render_condition);
}

static void
tegra_clear_depth_stencil(struct pipe_context *pcontext,
                          struct pipe_surface *pdst,
                          unsigned int flags,
                          double depth,
                          unsigned int stencil,
                          unsigned int dstx,
                          unsigned int dsty,
                          unsigned int width,
                          unsigned int height,
                          bool render_condition)
{
   struct tegra_context *context = to_tegra_context(pcontext);
   struct tegra_surface *dst = to_tegra_surface(pdst);

   context->gpu->clear_depth_stencil(context->gpu, dst->gpu, flags, depth,
                                     stencil, dstx, dsty, width, height,
                                     render_condition);
}

static void
tegra_clear_texture(struct pipe_context *pcontext,
                    struct pipe_resource *presource,
                    unsigned int level,
                    const struct pipe_box *box,
                    const void *data)
{
   struct tegra_resource *resource = to_tegra_resource(presource);
   struct tegra_context *context = to_tegra_context(pcontext);

   context->gpu->clear_texture(context->gpu, resource->gpu, level, box, data);
}

static void
tegra_clear_buffer(struct pipe_context *pcontext,
                   struct pipe_resource *presource,
                   unsigned int offset,
                   unsigned int size,
                   const void *value,
                   int value_size)
{
   struct tegra_resource *resource = to_tegra_resource(presource);
   struct tegra_context *context = to_tegra_context(pcontext);

   context->gpu->clear_buffer(context->gpu, resource->gpu, offset, size,
                              value, value_size);
}

static void
tegra_flush(struct pipe_context *pcontext, struct pipe_fence_handle **fence,
            unsigned flags)
{
   struct tegra_context *context = to_tegra_context(pcontext);

   context->gpu->flush(context->gpu, fence, flags);
}

static void
tegra_create_fence_fd(struct pipe_context *pcontext,
                      struct pipe_fence_handle **fence,
                      int fd, enum pipe_fd_type type)
{
   struct tegra_context *context = to_tegra_context(pcontext);

   assert(type == PIPE_FD_TYPE_NATIVE_SYNC);
   context->gpu->create_fence_fd(context->gpu, fence, fd, type);
}

static void
tegra_fence_server_sync(struct pipe_context *pcontext,
                        struct pipe_fence_handle *fence)
{
   struct tegra_context *context = to_tegra_context(pcontext);

   context->gpu->fence_server_sync(context->gpu, fence);
}

static struct pipe_sampler_view *
tegra_create_sampler_view(struct pipe_context *pcontext,
                          struct pipe_resource *presource,
                          const struct pipe_sampler_view *template)
{
   struct tegra_resource *resource = to_tegra_resource(presource);
   struct tegra_context *context = to_tegra_context(pcontext);
   struct tegra_sampler_view *view;

   view = calloc(1, sizeof(*view));
   if (!view)
      return NULL;

   view->base = *template;
   view->base.context = pcontext;
   /* overwrite to prevent reference from being released */
   view->base.texture = NULL;
   pipe_reference_init(&view->base.reference, 1);
   pipe_resource_reference(&view->base.texture, presource);

   view->gpu = context->gpu->create_sampler_view(context->gpu, resource->gpu,
                                                 template);

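   /*
    * Take a large batch of references on the GPU view up front so that
    * tegra_set_sampler_views() can account for individual binds without
    * an atomic operation per bind; whatever remains of the pool is
    * released again in tegra_sampler_view_destroy().
    */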
   /* use private reference count */
   view->gpu->reference.count += 100000000;
   view->refcount = 100000000;

   return &view->base;
}

static void
tegra_sampler_view_destroy(struct pipe_context *pcontext,
                           struct pipe_sampler_view *pview)
{
   struct tegra_sampler_view *view = to_tegra_sampler_view(pview);

   pipe_resource_reference(&view->base.texture, NULL);
   /* adjust private reference count */
   p_atomic_add(&view->gpu->reference.count, -view->refcount);
   pipe_sampler_view_reference(&view->gpu, NULL);
   free(view);
}

static struct pipe_surface *
tegra_create_surface(struct pipe_context *pcontext,
                     struct pipe_resource *presource,
                     const struct pipe_surface *template)
{
   struct tegra_resource *resource = to_tegra_resource(presource);
   struct tegra_context *context = to_tegra_context(pcontext);
   struct tegra_surface *surface;

   surface = calloc(1, sizeof(*surface));
   if (!surface)
      return NULL;

   surface->gpu = context->gpu->create_surface(context->gpu, resource->gpu,
                                               template);
   if (!surface->gpu) {
      free(surface);
      return NULL;
   }

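   /* mirror the GPU surface's state, but hold a reference to the wrapped
    * texture rather than to the raw GPU resource */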
   memcpy(&surface->base, surface->gpu, sizeof(*surface->gpu));
   /* overwrite to prevent reference from being released */
   surface->base.texture = NULL;

   pipe_reference_init(&surface->base.reference, 1);
   pipe_resource_reference(&surface->base.texture, presource);
   surface->base.context = &context->base;

   return &surface->base;
}

static void
tegra_surface_destroy(struct pipe_context *pcontext,
                      struct pipe_surface *psurface)
{
   struct tegra_surface *surface = to_tegra_surface(psurface);

   pipe_resource_reference(&surface->base.texture, NULL);
   pipe_surface_reference(&surface->gpu, NULL);
   free(surface);
}

static void *
tegra_transfer_map(struct pipe_context *pcontext,
                   struct pipe_resource *presource,
                   unsigned level, unsigned usage,
                   const struct pipe_box *box,
                   struct pipe_transfer **ptransfer)
{
   struct tegra_resource *resource = to_tegra_resource(presource);
   struct tegra_context *context = to_tegra_context(pcontext);
   struct tegra_transfer *transfer;

   transfer = calloc(1, sizeof(*transfer));
   if (!transfer)
      return NULL;

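   /*
    * Buffers and textures use separate map entry points on the GPU
    * context; in both cases the resulting transfer is mirrored into the
    * wrapper, which keeps its own reference to the wrapped resource.
    */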
   if (presource->target == PIPE_BUFFER) {
      transfer->map = context->gpu->buffer_map(context->gpu, resource->gpu,
                                               level, usage, box,
                                               &transfer->gpu);
   } else {
      transfer->map = context->gpu->texture_map(context->gpu, resource->gpu,
                                                level, usage, box,
                                                &transfer->gpu);
   }

   /* bail out cleanly if the GPU driver failed to map the resource */
   if (!transfer->map) {
      free(transfer);
      return NULL;
   }

   memcpy(&transfer->base, transfer->gpu, sizeof(*transfer->gpu));
   transfer->base.resource = NULL;
   pipe_resource_reference(&transfer->base.resource, presource);

   *ptransfer = &transfer->base;

   return transfer->map;
}

static void
tegra_transfer_flush_region(struct pipe_context *pcontext,
                            struct pipe_transfer *ptransfer,
                            const struct pipe_box *box)
{
   struct tegra_transfer *transfer = to_tegra_transfer(ptransfer);
   struct tegra_context *context = to_tegra_context(pcontext);

   context->gpu->transfer_flush_region(context->gpu, transfer->gpu, box);
}

static void
tegra_transfer_unmap(struct pipe_context *pcontext,
                     struct pipe_transfer *ptransfer)
{
   struct tegra_transfer *transfer = to_tegra_transfer(ptransfer);
   struct tegra_context *context = to_tegra_context(pcontext);

   if (ptransfer->resource->target == PIPE_BUFFER)
      context->gpu->buffer_unmap(context->gpu, transfer->gpu);
   else
      context->gpu->texture_unmap(context->gpu, transfer->gpu);

   pipe_resource_reference(&transfer->base.resource, NULL);
   free(transfer);
}

static void
tegra_buffer_subdata(struct pipe_context *pcontext,
                     struct pipe_resource *presource,
                     unsigned usage, unsigned offset,
                     unsigned size, const void *data)
{
   struct tegra_resource *resource = to_tegra_resource(presource);
   struct tegra_context *context = to_tegra_context(pcontext);

   context->gpu->buffer_subdata(context->gpu, resource->gpu, usage, offset,
                                size, data);
}

static void
tegra_texture_subdata(struct pipe_context *pcontext,
                      struct pipe_resource *presource,
                      unsigned level,
                      unsigned usage,
                      const struct pipe_box *box,
                      const void *data,
                      unsigned stride,
                      uintptr_t layer_stride)
{
   struct tegra_resource *resource = to_tegra_resource(presource);
   struct tegra_context *context = to_tegra_context(pcontext);

   context->gpu->texture_subdata(context->gpu, resource->gpu, level, usage,
                                 box, data, stride, layer_stride);
}

static void
tegra_texture_barrier(struct pipe_context *pcontext, unsigned int flags)
{
   struct tegra_context *context = to_tegra_context(pcontext);

   context->gpu->texture_barrier(context->gpu, flags);
}

static void
tegra_memory_barrier(struct pipe_context *pcontext, unsigned int flags)
{
   struct tegra_context *context = to_tegra_context(pcontext);

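   /* nothing needs to be forwarded to the GPU driver for update-only
    * barriers */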
   if (!(flags & ~PIPE_BARRIER_UPDATE))
      return;

   context->gpu->memory_barrier(context->gpu, flags);
}

static struct pipe_video_codec *
tegra_create_video_codec(struct pipe_context *pcontext,
                         const struct pipe_video_codec *template)
{
   struct tegra_context *context = to_tegra_context(pcontext);

   return context->gpu->create_video_codec(context->gpu, template);
}

static struct pipe_video_buffer *
tegra_create_video_buffer(struct pipe_context *pcontext,
                          const struct pipe_video_buffer *template)
{
   struct tegra_context *context = to_tegra_context(pcontext);

   return context->gpu->create_video_buffer(context->gpu, template);
}

static void *
tegra_create_compute_state(struct pipe_context *pcontext,
                           const struct pipe_compute_state *template)
{
   struct tegra_context *context = to_tegra_context(pcontext);

   return context->gpu->create_compute_state(context->gpu, template);
}

static void
tegra_bind_compute_state(struct pipe_context *pcontext, void *so)
{
   struct tegra_context *context = to_tegra_context(pcontext);

   context->gpu->bind_compute_state(context->gpu, so);
}

static void
tegra_delete_compute_state(struct pipe_context *pcontext, void *so)
{
   struct tegra_context *context = to_tegra_context(pcontext);

   context->gpu->delete_compute_state(context->gpu, so);
}

static void
tegra_set_compute_resources(struct pipe_context *pcontext,
                            unsigned int start, unsigned int count,
                            struct pipe_surface **resources)
{
   struct tegra_context *context = to_tegra_context(pcontext);

   /* XXX unwrap resources */

   context->gpu->set_compute_resources(context->gpu, start, count, resources);
}

static void
tegra_set_global_binding(struct pipe_context *pcontext, unsigned int first,
                         unsigned int count, struct pipe_resource **resources,
                         uint32_t **handles)
{
   struct tegra_context *context = to_tegra_context(pcontext);

   /* XXX unwrap resources */

   context->gpu->set_global_binding(context->gpu, first, count, resources,
                                    handles);
}

static void
tegra_launch_grid(struct pipe_context *pcontext,
                  const struct pipe_grid_info *info)
{
   struct tegra_context *context = to_tegra_context(pcontext);

   /* XXX unwrap info->indirect? */

   context->gpu->launch_grid(context->gpu, info);
}

static void
tegra_get_sample_position(struct pipe_context *pcontext, unsigned int count,
                          unsigned int index, float *value)
{
   struct tegra_context *context = to_tegra_context(pcontext);

   context->gpu->get_sample_position(context->gpu, count, index, value);
}

static uint64_t
tegra_get_timestamp(struct pipe_context *pcontext)
{
   struct tegra_context *context = to_tegra_context(pcontext);

   return context->gpu->get_timestamp(context->gpu);
}

static void
tegra_flush_resource(struct pipe_context *pcontext,
                     struct pipe_resource *presource)
{
   struct tegra_resource *resource = to_tegra_resource(presource);
   struct tegra_context *context = to_tegra_context(pcontext);

   context->gpu->flush_resource(context->gpu, resource->gpu);
}

static void
tegra_invalidate_resource(struct pipe_context *pcontext,
                          struct pipe_resource *presource)
{
   struct tegra_resource *resource = to_tegra_resource(presource);
   struct tegra_context *context = to_tegra_context(pcontext);

   context->gpu->invalidate_resource(context->gpu, resource->gpu);
}

static enum pipe_reset_status
tegra_get_device_reset_status(struct pipe_context *pcontext)
{
   struct tegra_context *context = to_tegra_context(pcontext);

   return context->gpu->get_device_reset_status(context->gpu);
}

static void
tegra_set_device_reset_callback(struct pipe_context *pcontext,
                                const struct pipe_device_reset_callback *cb)
{
   struct tegra_context *context = to_tegra_context(pcontext);

   context->gpu->set_device_reset_callback(context->gpu, cb);
}

static void
tegra_dump_debug_state(struct pipe_context *pcontext, FILE *stream,
                       unsigned int flags)
{
   struct tegra_context *context = to_tegra_context(pcontext);

   context->gpu->dump_debug_state(context->gpu, stream, flags);
}

static void
tegra_emit_string_marker(struct pipe_context *pcontext, const char *string,
                         int length)
{
   struct tegra_context *context = to_tegra_context(pcontext);

   context->gpu->emit_string_marker(context->gpu, string, length);
}

static bool
tegra_generate_mipmap(struct pipe_context *pcontext,
                      struct pipe_resource *presource,
                      enum pipe_format format,
                      unsigned int base_level,
                      unsigned int last_level,
                      unsigned int first_layer,
                      unsigned int last_layer)
{
   struct tegra_resource *resource = to_tegra_resource(presource);
   struct tegra_context *context = to_tegra_context(pcontext);

   return context->gpu->generate_mipmap(context->gpu, resource->gpu, format,
                                        base_level, last_level, first_layer,
                                        last_layer);
}

static uint64_t
tegra_create_texture_handle(struct pipe_context *pcontext,
                            struct pipe_sampler_view *view,
                            const struct pipe_sampler_state *state)
{
   struct tegra_context *context = to_tegra_context(pcontext);

   return context->gpu->create_texture_handle(context->gpu, view, state);
}

static void tegra_delete_texture_handle(struct pipe_context *pcontext,
                                        uint64_t handle)
{
   struct tegra_context *context = to_tegra_context(pcontext);

   context->gpu->delete_texture_handle(context->gpu, handle);
}

static void tegra_make_texture_handle_resident(struct pipe_context *pcontext,
                                               uint64_t handle, bool resident)
{
   struct tegra_context *context = to_tegra_context(pcontext);

   context->gpu->make_texture_handle_resident(context->gpu, handle, resident);
}

static uint64_t tegra_create_image_handle(struct pipe_context *pcontext,
                                          const struct pipe_image_view *image)
{
   struct tegra_context *context = to_tegra_context(pcontext);

   return context->gpu->create_image_handle(context->gpu, image);
}

static void tegra_delete_image_handle(struct pipe_context *pcontext,
                                      uint64_t handle)
{
   struct tegra_context *context = to_tegra_context(pcontext);

   context->gpu->delete_image_handle(context->gpu, handle);
}

static void tegra_make_image_handle_resident(struct pipe_context *pcontext,
                                             uint64_t handle, unsigned access,
                                             bool resident)
{
   struct tegra_context *context = to_tegra_context(pcontext);

   context->gpu->make_image_handle_resident(context->gpu, handle, access,
                                            resident);
}

struct pipe_context *
tegra_screen_context_create(struct pipe_screen *pscreen, void *priv,
                            unsigned int flags)
{
   struct tegra_screen *screen = to_tegra_screen(pscreen);
   struct tegra_context *context;

   context = calloc(1, sizeof(*context));
   if (!context)
      return NULL;

   context->gpu = screen->gpu->context_create(screen->gpu, priv, flags);
   if (!context->gpu) {
      debug_error("failed to create GPU context\n");
      goto free;
   }

   context->base.screen = &screen->base;
   context->base.priv = priv;

   /*
    * Create custom stream and const uploaders. Note that technically nouveau
    * already creates uploaders that could be reused, but that would make the
    * resource unwrapping rather complicated. The reason is that both
    * uploaders create resources based on the context that they were created
    * from, which means that nouveau's uploader will use the nouveau context,
    * and therefore its resources must not be unwrapped. So before each
    * resource is unwrapped, the code would need to check that it does not
    * correspond to one of the uploaders' buffers.
    *
    * However, duplicating the uploaders here sounds worse than it is. The
    * default implementation that nouveau uses allocates buffers lazily, and
    * since it is never used, no buffers will ever be allocated and the only
    * memory wasted is that occupied by the nouveau uploader itself.
    */
   context->base.stream_uploader = u_upload_create_default(&context->base);
   if (!context->base.stream_uploader)
      goto destroy;

   context->base.const_uploader = context->base.stream_uploader;

   context->base.destroy = tegra_destroy;

   context->base.draw_vbo = tegra_draw_vbo;

   context->base.render_condition = tegra_render_condition;

   context->base.create_query = tegra_create_query;
   context->base.create_batch_query = tegra_create_batch_query;
   context->base.destroy_query = tegra_destroy_query;
   context->base.begin_query = tegra_begin_query;
   context->base.end_query = tegra_end_query;
   context->base.get_query_result = tegra_get_query_result;
   context->base.get_query_result_resource = tegra_get_query_result_resource;
   context->base.set_active_query_state = tegra_set_active_query_state;

   context->base.create_blend_state = tegra_create_blend_state;
   context->base.bind_blend_state = tegra_bind_blend_state;
   context->base.delete_blend_state = tegra_delete_blend_state;

   context->base.create_sampler_state = tegra_create_sampler_state;
   context->base.bind_sampler_states = tegra_bind_sampler_states;
   context->base.delete_sampler_state = tegra_delete_sampler_state;

   context->base.create_rasterizer_state = tegra_create_rasterizer_state;
   context->base.bind_rasterizer_state = tegra_bind_rasterizer_state;
   context->base.delete_rasterizer_state = tegra_delete_rasterizer_state;

   context->base.create_depth_stencil_alpha_state = tegra_create_depth_stencil_alpha_state;
   context->base.bind_depth_stencil_alpha_state = tegra_bind_depth_stencil_alpha_state;
   context->base.delete_depth_stencil_alpha_state = tegra_delete_depth_stencil_alpha_state;

   context->base.create_fs_state = tegra_create_fs_state;
   context->base.bind_fs_state = tegra_bind_fs_state;
   context->base.delete_fs_state = tegra_delete_fs_state;

   context->base.create_vs_state = tegra_create_vs_state;
   context->base.bind_vs_state = tegra_bind_vs_state;
   context->base.delete_vs_state = tegra_delete_vs_state;

   context->base.create_gs_state = tegra_create_gs_state;
   context->base.bind_gs_state = tegra_bind_gs_state;
   context->base.delete_gs_state = tegra_delete_gs_state;

   context->base.create_tcs_state = tegra_create_tcs_state;
   context->base.bind_tcs_state = tegra_bind_tcs_state;
   context->base.delete_tcs_state = tegra_delete_tcs_state;

   context->base.create_tes_state = tegra_create_tes_state;
   context->base.bind_tes_state = tegra_bind_tes_state;
   context->base.delete_tes_state = tegra_delete_tes_state;

   context->base.create_vertex_elements_state = tegra_create_vertex_elements_state;
   context->base.bind_vertex_elements_state = tegra_bind_vertex_elements_state;
   context->base.delete_vertex_elements_state = tegra_delete_vertex_elements_state;

   context->base.set_blend_color = tegra_set_blend_color;
   context->base.set_stencil_ref = tegra_set_stencil_ref;
   context->base.set_sample_mask = tegra_set_sample_mask;
   context->base.set_min_samples = tegra_set_min_samples;
   context->base.set_clip_state = tegra_set_clip_state;

   context->base.set_constant_buffer = tegra_set_constant_buffer;
   context->base.set_framebuffer_state = tegra_set_framebuffer_state;
   context->base.set_polygon_stipple = tegra_set_polygon_stipple;
   context->base.set_scissor_states = tegra_set_scissor_states;
   context->base.set_window_rectangles = tegra_set_window_rectangles;
   context->base.set_viewport_states = tegra_set_viewport_states;
   context->base.set_sampler_views = tegra_set_sampler_views;
   context->base.set_tess_state = tegra_set_tess_state;

   context->base.set_debug_callback = tegra_set_debug_callback;

   context->base.set_shader_buffers = tegra_set_shader_buffers;
   context->base.set_shader_images = tegra_set_shader_images;
   context->base.set_vertex_buffers = tegra_set_vertex_buffers;

   context->base.create_stream_output_target = tegra_create_stream_output_target;
   context->base.stream_output_target_destroy = tegra_stream_output_target_destroy;
   context->base.set_stream_output_targets = tegra_set_stream_output_targets;

   context->base.resource_copy_region = tegra_resource_copy_region;
   context->base.blit = tegra_blit;
   context->base.clear = tegra_clear;
   context->base.clear_render_target = tegra_clear_render_target;
   context->base.clear_depth_stencil = tegra_clear_depth_stencil;
   context->base.clear_texture = tegra_clear_texture;
   context->base.clear_buffer = tegra_clear_buffer;
   context->base.flush = tegra_flush;

   context->base.create_fence_fd = tegra_create_fence_fd;
   context->base.fence_server_sync = tegra_fence_server_sync;

   context->base.create_sampler_view = tegra_create_sampler_view;
   context->base.sampler_view_destroy = tegra_sampler_view_destroy;

   context->base.create_surface = tegra_create_surface;
   context->base.surface_destroy = tegra_surface_destroy;

   context->base.buffer_map = tegra_transfer_map;
   context->base.texture_map = tegra_transfer_map;
   context->base.transfer_flush_region = tegra_transfer_flush_region;
   context->base.buffer_unmap = tegra_transfer_unmap;
   context->base.texture_unmap = tegra_transfer_unmap;
   context->base.buffer_subdata = tegra_buffer_subdata;
   context->base.texture_subdata = tegra_texture_subdata;

   context->base.texture_barrier = tegra_texture_barrier;
   context->base.memory_barrier = tegra_memory_barrier;

   context->base.create_video_codec = tegra_create_video_codec;
   context->base.create_video_buffer = tegra_create_video_buffer;

   context->base.create_compute_state = tegra_create_compute_state;
   context->base.bind_compute_state = tegra_bind_compute_state;
   context->base.delete_compute_state = tegra_delete_compute_state;
   context->base.set_compute_resources = tegra_set_compute_resources;
   context->base.set_global_binding = tegra_set_global_binding;
   context->base.launch_grid = tegra_launch_grid;
   context->base.get_sample_position = tegra_get_sample_position;
   context->base.get_timestamp = tegra_get_timestamp;

   context->base.flush_resource = tegra_flush_resource;
   context->base.invalidate_resource = tegra_invalidate_resource;

   context->base.get_device_reset_status = tegra_get_device_reset_status;
   context->base.set_device_reset_callback = tegra_set_device_reset_callback;
   context->base.dump_debug_state = tegra_dump_debug_state;
   context->base.emit_string_marker = tegra_emit_string_marker;

   context->base.generate_mipmap = tegra_generate_mipmap;

   context->base.create_texture_handle = tegra_create_texture_handle;
   context->base.delete_texture_handle = tegra_delete_texture_handle;
   context->base.make_texture_handle_resident = tegra_make_texture_handle_resident;
   context->base.create_image_handle = tegra_create_image_handle;
   context->base.delete_image_handle = tegra_delete_image_handle;
   context->base.make_image_handle_resident = tegra_make_image_handle_resident;

   return &context->base;

destroy:
   context->gpu->destroy(context->gpu);
free:
   free(context);
   return NULL;
}