/*
 * Copyright 2010 Jerome Glisse <glisse@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
#include "r600_pipe.h"
#include "r600_public.h"
#include "r600_isa.h"
#include "r600_sfn.h"
#include "evergreen_compute.h"
#include "r600d.h"

#include <errno.h>
#include "pipe/p_shader_tokens.h"
#include "util/u_debug.h"
#include "util/u_endian.h"
#include "util/u_memory.h"
#include "util/u_screen.h"
#include "util/u_simple_shaders.h"
#include "util/u_upload_mgr.h"
#include "util/u_math.h"
#include "vl/vl_decoder.h"
#include "vl/vl_video_buffer.h"
#include "radeon_video.h"
#include "radeon_uvd.h"
#include "util/os_time.h"

static const struct debug_named_value r600_debug_options[] = {
	/* features */
	{ "nocpdma", DBG_NO_CP_DMA, "Disable CP DMA" },

	DEBUG_NAMED_VALUE_END /* must be last */
};

/*
 * pipe_context
 */

static void r600_destroy_context(struct pipe_context *context)
{
	struct r600_context *rctx = (struct r600_context *)context;
	unsigned sh, i;

	r600_isa_destroy(rctx->isa);

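	/* Release the per-shader-stage scratch buffers (Evergreen exposes more
	 * hardware stages than R600/R700). */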
	for (sh = 0; sh < (rctx->b.gfx_level < EVERGREEN ? R600_NUM_HW_STAGES : EG_NUM_HW_STAGES); sh++) {
		r600_resource_reference(&rctx->scratch_buffers[sh].buffer, NULL);
	}
	r600_resource_reference(&rctx->dummy_cmask, NULL);
	r600_resource_reference(&rctx->dummy_fmask, NULL);

	if (rctx->append_fence)
		pipe_resource_reference((struct pipe_resource**)&rctx->append_fence, NULL);
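	/* Unbind the driver-internal buffer-info constant buffer and free the
	 * per-stage driver constants. */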
	for (sh = 0; sh < PIPE_SHADER_TYPES; sh++) {
		rctx->b.b.set_constant_buffer(&rctx->b.b, sh, R600_BUFFER_INFO_CONST_BUFFER, false, NULL);
		free(rctx->driver_consts[sh].constants);
	}

	if (rctx->fixed_func_tcs_shader)
		rctx->b.b.delete_tcs_state(&rctx->b.b, rctx->fixed_func_tcs_shader);

	if (rctx->dummy_pixel_shader) {
		rctx->b.b.delete_fs_state(&rctx->b.b, rctx->dummy_pixel_shader);
	}
	if (rctx->custom_dsa_flush) {
		rctx->b.b.delete_depth_stencil_alpha_state(&rctx->b.b, rctx->custom_dsa_flush);
	}
	if (rctx->custom_blend_resolve) {
		rctx->b.b.delete_blend_state(&rctx->b.b, rctx->custom_blend_resolve);
	}
	if (rctx->custom_blend_decompress) {
		rctx->b.b.delete_blend_state(&rctx->b.b, rctx->custom_blend_decompress);
	}
	if (rctx->custom_blend_fastclear) {
		rctx->b.b.delete_blend_state(&rctx->b.b, rctx->custom_blend_fastclear);
	}
	util_unreference_framebuffer_state(&rctx->framebuffer.state);

	if (rctx->gs_rings.gsvs_ring.buffer)
		pipe_resource_reference(&rctx->gs_rings.gsvs_ring.buffer, NULL);

	if (rctx->gs_rings.esgs_ring.buffer)
		pipe_resource_reference(&rctx->gs_rings.esgs_ring.buffer, NULL);

	for (sh = 0; sh < PIPE_SHADER_TYPES; ++sh)
		for (i = 0; i < PIPE_MAX_CONSTANT_BUFFERS; ++i)
			rctx->b.b.set_constant_buffer(context, sh, i, false, NULL);

	if (rctx->blitter) {
		util_blitter_destroy(rctx->blitter);
	}
	u_suballocator_destroy(&rctx->allocator_fetch_shader);

	r600_release_command_buffer(&rctx->start_cs_cmd);

	FREE(rctx->start_compute_cs_cmd.buf);

	r600_common_context_cleanup(&rctx->b);

	r600_resource_reference(&rctx->trace_buf, NULL);
	r600_resource_reference(&rctx->last_trace_buf, NULL);
	radeon_clear_saved_cs(&rctx->last_gfx);

	switch (rctx->b.gfx_level) {
	case EVERGREEN:
	case CAYMAN:
		for (i = 0; i < EG_MAX_ATOMIC_BUFFERS; ++i)
			pipe_resource_reference(&rctx->atomic_buffer_state.buffer[i].buffer, NULL);
		break;
	default:
		break;
	}

	FREE(rctx);
}

static struct pipe_context *r600_create_context(struct pipe_screen *screen,
                                                void *priv, unsigned flags)
{
	struct r600_context *rctx = CALLOC_STRUCT(r600_context);
	struct r600_screen* rscreen = (struct r600_screen *)screen;
	struct radeon_winsys *ws = rscreen->b.ws;

	if (!rctx)
		return NULL;

	rctx->b.b.screen = screen;
	assert(!priv);
	rctx->b.b.priv = NULL; /* for threaded_context_unwrap_sync */
	rctx->b.b.destroy = r600_destroy_context;
	rctx->b.set_atom_dirty = (void *)r600_set_atom_dirty;

	if (!r600_common_context_init(&rctx->b, &rscreen->b, flags))
		goto fail;

	rctx->screen = rscreen;
	list_inithead(&rctx->texture_buffers);

	r600_init_blit_functions(rctx);

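	/* Use hardware UVD decoding when the kernel exposes a UVD ring;
	 * otherwise fall back to the generic vl decoder. */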
	if (rscreen->b.info.ip[AMD_IP_UVD].num_queues) {
		rctx->b.b.create_video_codec = r600_uvd_create_decoder;
		rctx->b.b.create_video_buffer = r600_video_buffer_create;
	} else {
		rctx->b.b.create_video_codec = vl_create_decoder;
		rctx->b.b.create_video_buffer = vl_video_buffer_create;
	}

	if (getenv("R600_TRACE"))
		rctx->is_debug = true;
	r600_init_common_state_functions(rctx);

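	/* Hardware-generation specific setup: R600/R700 vs. Evergreen/Cayman. */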
	switch (rctx->b.gfx_level) {
	case R600:
	case R700:
		r600_init_state_functions(rctx);
		r600_init_atom_start_cs(rctx);
		rctx->custom_dsa_flush = r600_create_db_flush_dsa(rctx);
		rctx->custom_blend_resolve = rctx->b.gfx_level == R700 ? r700_create_resolve_blend(rctx)
								      : r600_create_resolve_blend(rctx);
		rctx->custom_blend_decompress = r600_create_decompress_blend(rctx);
		rctx->has_vertex_cache = !(rctx->b.family == CHIP_RV610 ||
					   rctx->b.family == CHIP_RV620 ||
					   rctx->b.family == CHIP_RS780 ||
					   rctx->b.family == CHIP_RS880 ||
					   rctx->b.family == CHIP_RV710);
		break;
	case EVERGREEN:
	case CAYMAN:
		evergreen_init_state_functions(rctx);
		evergreen_init_atom_start_cs(rctx);
		evergreen_init_atom_start_compute_cs(rctx);
		rctx->custom_dsa_flush = evergreen_create_db_flush_dsa(rctx);
		rctx->custom_blend_resolve = evergreen_create_resolve_blend(rctx);
		rctx->custom_blend_decompress = evergreen_create_decompress_blend(rctx);
		rctx->custom_blend_fastclear = evergreen_create_fastclear_blend(rctx);
		rctx->has_vertex_cache = !(rctx->b.family == CHIP_CEDAR ||
					   rctx->b.family == CHIP_PALM ||
					   rctx->b.family == CHIP_SUMO ||
					   rctx->b.family == CHIP_SUMO2 ||
					   rctx->b.family == CHIP_CAICOS ||
					   rctx->b.family == CHIP_CAYMAN ||
					   rctx->b.family == CHIP_ARUBA);

		rctx->append_fence = pipe_buffer_create(rctx->b.b.screen, PIPE_BIND_CUSTOM,
							 PIPE_USAGE_DEFAULT, 32);
		break;
	default:
		R600_ERR("Unsupported gfx level %d.\n", rctx->b.gfx_level);
		goto fail;
	}

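	/* Create the GFX command stream and register its flush callback. */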
	ws->cs_create(&rctx->b.gfx.cs, rctx->b.ctx, AMD_IP_GFX,
                      r600_context_gfx_flush, rctx);
	rctx->b.gfx.flush = r600_context_gfx_flush;

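	/* Sub-allocator backing the small fetch-shader binaries. */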
	u_suballocator_init(&rctx->allocator_fetch_shader, &rctx->b.b, 64 * 1024,
                            0, PIPE_USAGE_DEFAULT, 0, false);

	rctx->isa = calloc(1, sizeof(struct r600_isa));
	if (!rctx->isa || r600_isa_init(rctx->b.gfx_level, rctx->isa))
		goto fail;

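	/* Debug option: route all resource copies through the DMA engine. */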
	if (rscreen->b.debug_flags & DBG_FORCE_DMA)
		rctx->b.b.resource_copy_region = rctx->b.dma_copy;

	rctx->blitter = util_blitter_create(&rctx->b.b);
	if (rctx->blitter == NULL)
		goto fail;
	util_blitter_set_texture_multisample(rctx->blitter, rscreen->has_msaa);
	rctx->blitter->draw_rectangle = r600_draw_rectangle;

	r600_begin_new_cs(rctx);

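	/* Create and bind a pass-through fragment shader so a valid PS is
	 * always bound. */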
	rctx->dummy_pixel_shader =
		util_make_fragment_cloneinput_shader(&rctx->b.b, 0,
						     TGSI_SEMANTIC_GENERIC,
						     TGSI_INTERPOLATE_CONSTANT);
	rctx->b.b.bind_fs_state(&rctx->b.b, rctx->dummy_pixel_shader);

	return &rctx->b.b;

fail:
	r600_destroy_context(&rctx->b.b);
	return NULL;
}

/*
 * pipe_screen
 */

static int r600_get_param(struct pipe_screen* pscreen, enum pipe_cap param)
{
	struct r600_screen *rscreen = (struct r600_screen *)pscreen;
	enum radeon_family family = rscreen->b.family;

	switch (param) {
	/* Supported features (boolean caps). */
	case PIPE_CAP_NPOT_TEXTURES:
	case PIPE_CAP_MIXED_FRAMEBUFFER_SIZES:
	case PIPE_CAP_MIXED_COLOR_DEPTH_BITS:
	case PIPE_CAP_ANISOTROPIC_FILTER:
	case PIPE_CAP_OCCLUSION_QUERY:
	case PIPE_CAP_TEXTURE_MIRROR_CLAMP:
	case PIPE_CAP_TEXTURE_MIRROR_CLAMP_TO_EDGE:
	case PIPE_CAP_BLEND_EQUATION_SEPARATE:
	case PIPE_CAP_TEXTURE_SWIZZLE:
	case PIPE_CAP_DEPTH_CLIP_DISABLE:
	case PIPE_CAP_DEPTH_CLIP_DISABLE_SEPARATE:
	case PIPE_CAP_SHADER_STENCIL_EXPORT:
	case PIPE_CAP_VERTEX_ELEMENT_INSTANCE_DIVISOR:
	case PIPE_CAP_FS_COORD_ORIGIN_UPPER_LEFT:
	case PIPE_CAP_FS_COORD_PIXEL_CENTER_HALF_INTEGER:
	case PIPE_CAP_FRAGMENT_SHADER_TEXTURE_LOD:
	case PIPE_CAP_FRAGMENT_SHADER_DERIVATIVES:
	case PIPE_CAP_SEAMLESS_CUBE_MAP:
	case PIPE_CAP_PRIMITIVE_RESTART:
	case PIPE_CAP_PRIMITIVE_RESTART_FIXED_INDEX:
	case PIPE_CAP_CONDITIONAL_RENDER:
	case PIPE_CAP_TEXTURE_BARRIER:
	case PIPE_CAP_VERTEX_COLOR_UNCLAMPED:
	case PIPE_CAP_QUADS_FOLLOW_PROVOKING_VERTEX_CONVENTION:
	case PIPE_CAP_VS_INSTANCEID:
	case PIPE_CAP_VERTEX_BUFFER_OFFSET_4BYTE_ALIGNED_ONLY:
	case PIPE_CAP_VERTEX_BUFFER_STRIDE_4BYTE_ALIGNED_ONLY:
	case PIPE_CAP_VERTEX_ELEMENT_SRC_OFFSET_4BYTE_ALIGNED_ONLY:
	case PIPE_CAP_START_INSTANCE:
	case PIPE_CAP_MAX_DUAL_SOURCE_RENDER_TARGETS:
	case PIPE_CAP_TEXTURE_BUFFER_OBJECTS:
	case PIPE_CAP_QUERY_PIPELINE_STATISTICS:
	case PIPE_CAP_TEXTURE_MULTISAMPLE:
	case PIPE_CAP_VS_WINDOW_SPACE_POSITION:
	case PIPE_CAP_VS_LAYER_VIEWPORT:
	case PIPE_CAP_SAMPLE_SHADING:
	case PIPE_CAP_MEMOBJ:
	case PIPE_CAP_CLIP_HALFZ:
	case PIPE_CAP_POLYGON_OFFSET_CLAMP:
	case PIPE_CAP_CONDITIONAL_RENDER_INVERTED:
	case PIPE_CAP_TEXTURE_FLOAT_LINEAR:
	case PIPE_CAP_TEXTURE_HALF_FLOAT_LINEAR:
	case PIPE_CAP_TEXTURE_QUERY_SAMPLES:
	case PIPE_CAP_COPY_BETWEEN_COMPRESSED_AND_PLAIN_FORMATS:
	case PIPE_CAP_INVALIDATE_BUFFER:
	case PIPE_CAP_SURFACE_REINTERPRET_BLOCKS:
	case PIPE_CAP_QUERY_MEMORY_INFO:
	case PIPE_CAP_FRAMEBUFFER_NO_ATTACHMENT:
	case PIPE_CAP_POLYGON_OFFSET_UNITS_UNSCALED:
	case PIPE_CAP_LEGACY_MATH_RULES:
	case PIPE_CAP_CAN_BIND_CONST_BUFFER_AS_VERTEX:
	case PIPE_CAP_ALLOW_MAPPED_BUFFERS_DURING_EXECUTION:
	case PIPE_CAP_ROBUST_BUFFER_ACCESS_BEHAVIOR:
		return 1;

	case PIPE_CAP_NIR_ATOMICS_AS_DEREF:
	case PIPE_CAP_GL_SPIRV:
		return 1;

	case PIPE_CAP_TEXTURE_TRANSFER_MODES:
		return PIPE_TEXTURE_TRANSFER_BLIT;

	case PIPE_CAP_SHAREABLE_SHADERS:
		return 0;

	case PIPE_CAP_MAX_TEXTURE_UPLOAD_MEMORY_BUDGET:
		/* Optimal number for good TexSubImage performance on Polaris10. */
		return 64 * 1024 * 1024;

	case PIPE_CAP_DEVICE_RESET_STATUS_QUERY:
		return 1;

	case PIPE_CAP_RESOURCE_FROM_USER_MEMORY:
		return !UTIL_ARCH_BIG_ENDIAN && rscreen->b.info.has_userptr;

	case PIPE_CAP_COMPUTE:
		return rscreen->b.gfx_level > R700;

	case PIPE_CAP_TGSI_TEXCOORD:
		return 1;

	case PIPE_CAP_NIR_IMAGES_AS_DEREF:
	case PIPE_CAP_FAKE_SW_MSAA:
		return 0;

	case PIPE_CAP_MAX_TEXEL_BUFFER_ELEMENTS_UINT:
		return MIN2(rscreen->b.info.max_heap_size_kb * 1024ull / 4, INT_MAX);

	case PIPE_CAP_MIN_MAP_BUFFER_ALIGNMENT:
		return R600_MAP_BUFFER_ALIGNMENT;

	case PIPE_CAP_CONSTANT_BUFFER_OFFSET_ALIGNMENT:
		return 256;

	case PIPE_CAP_TEXTURE_BUFFER_OFFSET_ALIGNMENT:
		return 4;
	case PIPE_CAP_GLSL_FEATURE_LEVEL_COMPATIBILITY:
	case PIPE_CAP_GLSL_FEATURE_LEVEL:
		if (family >= CHIP_CEDAR)
			return 450;
		return 330;

	/* Supported except the original R600. */
	case PIPE_CAP_INDEP_BLEND_ENABLE:
	case PIPE_CAP_INDEP_BLEND_FUNC:
		/* R600 doesn't support per-MRT blends */
		return family == CHIP_R600 ? 0 : 1;

	/* Supported on Evergreen. */
	case PIPE_CAP_SEAMLESS_CUBE_MAP_PER_TEXTURE:
	case PIPE_CAP_CUBE_MAP_ARRAY:
	case PIPE_CAP_TEXTURE_GATHER_SM5:
	case PIPE_CAP_TEXTURE_QUERY_LOD:
	case PIPE_CAP_FS_FINE_DERIVATIVE:
	case PIPE_CAP_SAMPLER_VIEW_TARGET:
	case PIPE_CAP_SHADER_PACK_HALF_FLOAT:
	case PIPE_CAP_SHADER_CLOCK:
	case PIPE_CAP_SHADER_ARRAY_COMPONENTS:
	case PIPE_CAP_QUERY_BUFFER_OBJECT:
	case PIPE_CAP_IMAGE_STORE_FORMATTED:
	case PIPE_CAP_ALPHA_TO_COVERAGE_DITHER_CONTROL:
		return family >= CHIP_CEDAR ? 1 : 0;
	case PIPE_CAP_MAX_TEXTURE_GATHER_COMPONENTS:
		return family >= CHIP_CEDAR ? 4 : 0;
	case PIPE_CAP_DRAW_INDIRECT:
		/* kernel command checker support is also required */
		return family >= CHIP_CEDAR;

	case PIPE_CAP_BUFFER_SAMPLER_VIEW_RGBA_ONLY:
		return family >= CHIP_CEDAR ? 0 : 1;

	case PIPE_CAP_MAX_COMBINED_SHADER_OUTPUT_RESOURCES:
		return 8;

	case PIPE_CAP_MAX_GS_INVOCATIONS:
		return 32;

	/* shader buffer objects */
	case PIPE_CAP_MAX_SHADER_BUFFER_SIZE_UINT:
		return 1 << 27;
	case PIPE_CAP_MAX_COMBINED_SHADER_BUFFERS:
		return 8;

	case PIPE_CAP_INT64:
	case PIPE_CAP_DOUBLES:
		if (rscreen->b.family == CHIP_ARUBA ||
		    rscreen->b.family == CHIP_CAYMAN ||
		    rscreen->b.family == CHIP_CYPRESS ||
		    rscreen->b.family == CHIP_HEMLOCK)
			return 1;
		if (rscreen->b.family >= CHIP_CEDAR)
			return 1;
		return 0;

	case PIPE_CAP_TWO_SIDED_COLOR:
		return 0;
	case PIPE_CAP_CULL_DISTANCE:
		return 1;

	case PIPE_CAP_SHADER_BUFFER_OFFSET_ALIGNMENT:
		if (family >= CHIP_CEDAR)
			return 256;
		return 0;

	case PIPE_CAP_MAX_SHADER_PATCH_VARYINGS:
		if (family >= CHIP_CEDAR)
			return 30;
		else
			return 0;
	/* Stream output. */
	case PIPE_CAP_MAX_STREAM_OUTPUT_BUFFERS:
		return rscreen->b.has_streamout ? 4 : 0;
	case PIPE_CAP_STREAM_OUTPUT_PAUSE_RESUME:
	case PIPE_CAP_STREAM_OUTPUT_INTERLEAVE_BUFFERS:
		return rscreen->b.has_streamout ? 1 : 0;
	case PIPE_CAP_MAX_STREAM_OUTPUT_SEPARATE_COMPONENTS:
	case PIPE_CAP_MAX_STREAM_OUTPUT_INTERLEAVED_COMPONENTS:
		return 32*4;

	/* Geometry shader output. */
	case PIPE_CAP_MAX_GEOMETRY_OUTPUT_VERTICES:
		return 1024;
	case PIPE_CAP_MAX_GEOMETRY_TOTAL_OUTPUT_COMPONENTS:
		return 16384;
	case PIPE_CAP_MAX_VERTEX_STREAMS:
		return family >= CHIP_CEDAR ? 4 : 1;

	case PIPE_CAP_MAX_VERTEX_ATTRIB_STRIDE:
		/* Should be 2047, but 2048 is a requirement for GL 4.4 */
		return 2048;

	/* Texturing. */
	case PIPE_CAP_MAX_TEXTURE_2D_SIZE:
		if (family >= CHIP_CEDAR)
			return 16384;
		else
			return 8192;
	case PIPE_CAP_MAX_TEXTURE_CUBE_LEVELS:
		if (family >= CHIP_CEDAR)
			return 15;
		else
			return 14;
	case PIPE_CAP_MAX_TEXTURE_3D_LEVELS:
		/* textures support 8192, but layered rendering supports 2048 */
		return 12;
	case PIPE_CAP_MAX_TEXTURE_ARRAY_LAYERS:
		/* textures support 8192, but layered rendering supports 2048 */
		return 2048;

	/* Render targets. */
	case PIPE_CAP_MAX_RENDER_TARGETS:
		/* XXX some r6xx are buggy and can only do 4 */
		return 8;

	case PIPE_CAP_MAX_VIEWPORTS:
		return R600_MAX_VIEWPORTS;
	case PIPE_CAP_VIEWPORT_SUBPIXEL_BITS:
	case PIPE_CAP_RASTERIZER_SUBPIXEL_BITS:
		return 8;

	/* Timer queries, present when the clock frequency is non zero. */
	case PIPE_CAP_QUERY_TIME_ELAPSED:
	case PIPE_CAP_QUERY_TIMESTAMP:
		return rscreen->b.info.clock_crystal_freq != 0;

	case PIPE_CAP_TIMER_RESOLUTION:
		/* Conversion to nanos from cycles per millisecond */
		return DIV_ROUND_UP(1000000, rscreen->b.info.clock_crystal_freq);

	case PIPE_CAP_MIN_TEXTURE_GATHER_OFFSET:
	case PIPE_CAP_MIN_TEXEL_OFFSET:
		return -8;

	case PIPE_CAP_MAX_TEXTURE_GATHER_OFFSET:
	case PIPE_CAP_MAX_TEXEL_OFFSET:
		return 7;

	case PIPE_CAP_MAX_VARYINGS:
		return 32;

	case PIPE_CAP_TEXTURE_BORDER_COLOR_QUIRK:
		return PIPE_QUIRK_TEXTURE_BORDER_COLOR_SWIZZLE_R600;
	case PIPE_CAP_ENDIANNESS:
		return PIPE_ENDIAN_LITTLE;

	case PIPE_CAP_VENDOR_ID:
		return ATI_VENDOR_ID;
	case PIPE_CAP_DEVICE_ID:
		return rscreen->b.info.pci_id;
	case PIPE_CAP_ACCELERATED:
		return 1;
	case PIPE_CAP_VIDEO_MEMORY:
		return rscreen->b.info.vram_size_kb >> 10;
	case PIPE_CAP_UMA:
		return 0;
	case PIPE_CAP_MULTISAMPLE_Z_RESOLVE:
		return rscreen->b.gfx_level >= R700;
	case PIPE_CAP_PCI_GROUP:
		return rscreen->b.info.pci.domain;
	case PIPE_CAP_PCI_BUS:
		return rscreen->b.info.pci.bus;
	case PIPE_CAP_PCI_DEVICE:
		return rscreen->b.info.pci.dev;
	case PIPE_CAP_PCI_FUNCTION:
		return rscreen->b.info.pci.func;

	case PIPE_CAP_MAX_COMBINED_HW_ATOMIC_COUNTERS:
		if (rscreen->b.family >= CHIP_CEDAR && rscreen->has_atomics)
			return 8;
		return 0;
	case PIPE_CAP_MAX_COMBINED_HW_ATOMIC_COUNTER_BUFFERS:
		if (rscreen->b.family >= CHIP_CEDAR && rscreen->has_atomics)
			return EG_MAX_ATOMIC_BUFFERS;
		return 0;

	case PIPE_CAP_VALIDATE_ALL_DIRTY_STATES:
		return 1;

	default:
		return u_pipe_screen_get_param_defaults(pscreen, param);
	}
}

static int r600_get_shader_param(struct pipe_screen* pscreen,
				 enum pipe_shader_type shader,
				 enum pipe_shader_cap param)
{
	struct r600_screen *rscreen = (struct r600_screen *)pscreen;

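	/* Tessellation and compute shader stages are only exposed on Evergreen
	 * (CEDAR) and newer; older parts report no support for them. */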
	switch(shader)
	{
	case PIPE_SHADER_FRAGMENT:
	case PIPE_SHADER_VERTEX:
		break;
	case PIPE_SHADER_GEOMETRY:
		break;
	case PIPE_SHADER_TESS_CTRL:
	case PIPE_SHADER_TESS_EVAL:
	case PIPE_SHADER_COMPUTE:
		if (rscreen->b.family >= CHIP_CEDAR)
			break;
		FALLTHROUGH;
	default:
		return 0;
	}

	switch (param) {
	case PIPE_SHADER_CAP_MAX_INSTRUCTIONS:
	case PIPE_SHADER_CAP_MAX_ALU_INSTRUCTIONS:
	case PIPE_SHADER_CAP_MAX_TEX_INSTRUCTIONS:
	case PIPE_SHADER_CAP_MAX_TEX_INDIRECTIONS:
		return 16384;
	case PIPE_SHADER_CAP_MAX_CONTROL_FLOW_DEPTH:
		return 32;
	case PIPE_SHADER_CAP_MAX_INPUTS:
		return shader == PIPE_SHADER_VERTEX ? 16 : 32;
	case PIPE_SHADER_CAP_MAX_OUTPUTS:
		return shader == PIPE_SHADER_FRAGMENT ? 8 : 32;
	case PIPE_SHADER_CAP_MAX_TEMPS:
		return 256; /* Max native temporaries. */
	case PIPE_SHADER_CAP_MAX_CONST_BUFFER0_SIZE:
		if (shader == PIPE_SHADER_COMPUTE) {
			uint64_t max_const_buffer_size;
			pscreen->get_compute_param(pscreen, PIPE_SHADER_IR_NIR,
						   PIPE_COMPUTE_CAP_MAX_MEM_ALLOC_SIZE,
						   &max_const_buffer_size);
			return MIN2(max_const_buffer_size, INT_MAX);

		} else {
			return R600_MAX_CONST_BUFFER_SIZE;
		}
	case PIPE_SHADER_CAP_MAX_CONST_BUFFERS:
		return R600_MAX_USER_CONST_BUFFERS;
	case PIPE_SHADER_CAP_CONT_SUPPORTED:
		return 1;
	case PIPE_SHADER_CAP_TGSI_SQRT_SUPPORTED:
		return 1;
	case PIPE_SHADER_CAP_INDIRECT_INPUT_ADDR:
	case PIPE_SHADER_CAP_INDIRECT_OUTPUT_ADDR:
	case PIPE_SHADER_CAP_INDIRECT_TEMP_ADDR:
	case PIPE_SHADER_CAP_INDIRECT_CONST_ADDR:
		return 1;
	case PIPE_SHADER_CAP_SUBROUTINES:
	case PIPE_SHADER_CAP_INT64_ATOMICS:
	case PIPE_SHADER_CAP_FP16:
	case PIPE_SHADER_CAP_FP16_DERIVATIVES:
	case PIPE_SHADER_CAP_FP16_CONST_BUFFERS:
	case PIPE_SHADER_CAP_INT16:
	case PIPE_SHADER_CAP_GLSL_16BIT_CONSTS:
		return 0;
	case PIPE_SHADER_CAP_INTEGERS:
	case PIPE_SHADER_CAP_TGSI_ANY_INOUT_DECL_RANGE:
		return 1;
	case PIPE_SHADER_CAP_MAX_TEXTURE_SAMPLERS:
	case PIPE_SHADER_CAP_MAX_SAMPLER_VIEWS:
		return 16;
	case PIPE_SHADER_CAP_SUPPORTED_IRS: {
		int ir = 0;
		if (shader == PIPE_SHADER_COMPUTE)
			ir = 1 << PIPE_SHADER_IR_NATIVE;
		ir |= 1 << PIPE_SHADER_IR_NIR;
		return ir;
	}
	case PIPE_SHADER_CAP_MAX_SHADER_BUFFERS:
	case PIPE_SHADER_CAP_MAX_SHADER_IMAGES:
		if (rscreen->b.family >= CHIP_CEDAR &&
		    (shader == PIPE_SHADER_FRAGMENT || shader == PIPE_SHADER_COMPUTE))
			return 8;
		return 0;
	case PIPE_SHADER_CAP_MAX_HW_ATOMIC_COUNTERS:
		if (rscreen->b.family >= CHIP_CEDAR && rscreen->has_atomics)
			return 8;
		return 0;
	case PIPE_SHADER_CAP_MAX_HW_ATOMIC_COUNTER_BUFFERS:
		/* having to allocate the atomics out amongst shaders stages is messy,
		   so give compute 8 buffers and all the others one */
		if (rscreen->b.family >= CHIP_CEDAR && rscreen->has_atomics) {
			return EG_MAX_ATOMIC_BUFFERS;
		}
		return 0;
	}
	return 0;
}

static void r600_destroy_screen(struct pipe_screen* pscreen)
{
	struct r600_screen *rscreen = (struct r600_screen *)pscreen;

	if (!rscreen)
		return;

	if (!rscreen->b.ws->unref(rscreen->b.ws))
		return;

	if (rscreen->global_pool) {
		compute_memory_pool_delete(rscreen->global_pool);
	}

	r600_destroy_common_screen(&rscreen->b);
}

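/* Buffers created with PIPE_BIND_GLOBAL (compute global memory) take a
 * dedicated allocation path; everything else uses the common r600 path. */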
static struct pipe_resource *r600_resource_create(struct pipe_screen *screen,
						  const struct pipe_resource *templ)
{
	if (templ->target == PIPE_BUFFER &&
	    (templ->bind & PIPE_BIND_GLOBAL))
		return r600_compute_global_buffer_create(screen, templ);

	return r600_resource_create_common(screen, templ);
}

struct pipe_screen *r600_screen_create(struct radeon_winsys *ws,
				       const struct pipe_screen_config *config)
{
	struct r600_screen *rscreen = CALLOC_STRUCT(r600_screen);

	if (!rscreen) {
		return NULL;
	}

	/* Set functions first. */
	rscreen->b.b.context_create = r600_create_context;
	rscreen->b.b.destroy = r600_destroy_screen;
	rscreen->b.b.get_param = r600_get_param;
	rscreen->b.b.get_shader_param = r600_get_shader_param;
	rscreen->b.b.resource_create = r600_resource_create;

	if (!r600_common_screen_init(&rscreen->b, ws)) {
		FREE(rscreen);
		return NULL;
	}

	if (rscreen->b.info.gfx_level >= EVERGREEN) {
		rscreen->b.b.is_format_supported = evergreen_is_format_supported;
	} else {
		rscreen->b.b.is_format_supported = r600_is_format_supported;
	}

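	/* Pick up additional debug flags from environment variables. */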
	rscreen->b.debug_flags |= debug_get_flags_option("R600_DEBUG", r600_debug_options, 0);
	if (debug_get_bool_option("R600_DEBUG_COMPUTE", false))
		rscreen->b.debug_flags |= DBG_COMPUTE;
	if (debug_get_bool_option("R600_DUMP_SHADERS", false))
		rscreen->b.debug_flags |= DBG_ALL_SHADERS | DBG_FS;
	if (!debug_get_bool_option("R600_HYPERZ", true))
		rscreen->b.debug_flags |= DBG_NO_HYPERZ;

	if (rscreen->b.family == CHIP_UNKNOWN) {
		fprintf(stderr, "r600: Unknown chipset 0x%04X\n", rscreen->b.info.pci_id);
		FREE(rscreen);
		return NULL;
	}

	rscreen->b.b.finalize_nir = r600_finalize_nir;

	rscreen->b.has_streamout = true;

	rscreen->has_msaa = true;

	/* MSAA support. */
	switch (rscreen->b.gfx_level) {
	case R600:
	case R700:
		rscreen->has_compressed_msaa_texturing = false;
		break;
	case EVERGREEN:
		rscreen->has_compressed_msaa_texturing = true;
		break;
	case CAYMAN:
		rscreen->has_compressed_msaa_texturing = true;
		break;
	default:
		rscreen->has_compressed_msaa_texturing = false;
	}

	rscreen->b.has_cp_dma = !(rscreen->b.debug_flags & DBG_NO_CP_DMA);

	rscreen->b.barrier_flags.cp_to_L2 =
		R600_CONTEXT_INV_VERTEX_CACHE |
		R600_CONTEXT_INV_TEX_CACHE |
		R600_CONTEXT_INV_CONST_CACHE;
	rscreen->b.barrier_flags.compute_to_L2 = R600_CONTEXT_CS_PARTIAL_FLUSH | R600_CONTEXT_FLUSH_AND_INV;

	rscreen->global_pool = compute_memory_pool_new(rscreen);

	/* Create the auxiliary context. This must be done last. */
	rscreen->b.aux_context = rscreen->b.b.context_create(&rscreen->b.b, NULL, 0);

	rscreen->has_atomics = true;
#if 0 /* This is for testing whether aux_context and buffer clearing work correctly. */
	struct pipe_resource templ = {};

	templ.width0 = 4;
	templ.height0 = 2048;
	templ.depth0 = 1;
	templ.array_size = 1;
	templ.target = PIPE_TEXTURE_2D;
	templ.format = PIPE_FORMAT_R8G8B8A8_UNORM;
	templ.usage = PIPE_USAGE_DEFAULT;

	struct r600_resource *res = r600_resource(rscreen->screen.resource_create(&rscreen->screen, &templ));
	unsigned char *map = ws->buffer_map(res->buf, NULL, PIPE_MAP_WRITE);

	memset(map, 0, 256);

	r600_screen_clear_buffer(rscreen, &res->b.b, 4, 4, 0xCC);
	r600_screen_clear_buffer(rscreen, &res->b.b, 8, 4, 0xDD);
	r600_screen_clear_buffer(rscreen, &res->b.b, 12, 4, 0xEE);
	r600_screen_clear_buffer(rscreen, &res->b.b, 20, 4, 0xFF);
	r600_screen_clear_buffer(rscreen, &res->b.b, 32, 20, 0x87);

	ws->buffer_wait(res->buf, RADEON_USAGE_WRITE);

	int i;
	for (i = 0; i < 256; i++) {
		printf("%02X", map[i]);
		if (i % 16 == 15)
			printf("\n");
	}
#endif

	if (rscreen->b.debug_flags & DBG_TEST_DMA)
		r600_test_dma(&rscreen->b);

	r600_query_fix_enabled_rb_mask(&rscreen->b);
	return &rscreen->b.b;
}