/*
 * Copyright © 2016 Red Hat
 * based on intel anv code:
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "radv_meta.h"

#include <fcntl.h>
#include <limits.h>
#include <pwd.h>
#include <sys/stat.h>
void
radv_meta_save(struct radv_meta_saved_state *state,
	       struct radv_cmd_buffer *cmd_buffer, uint32_t flags)
{
	VkPipelineBindPoint bind_point =
		flags & RADV_META_SAVE_GRAPHICS_PIPELINE ?
			VK_PIPELINE_BIND_POINT_GRAPHICS :
			VK_PIPELINE_BIND_POINT_COMPUTE;
	struct radv_descriptor_state *descriptors_state =
		radv_get_descriptors_state(cmd_buffer, bind_point);

	assert(flags & (RADV_META_SAVE_GRAPHICS_PIPELINE |
			RADV_META_SAVE_COMPUTE_PIPELINE));

	state->flags = flags;

	if (state->flags & RADV_META_SAVE_GRAPHICS_PIPELINE) {
		assert(!(state->flags & RADV_META_SAVE_COMPUTE_PIPELINE));

		state->old_pipeline = cmd_buffer->state.pipeline;

		/* Save all viewports. */
		state->viewport.count = cmd_buffer->state.dynamic.viewport.count;
		typed_memcpy(state->viewport.viewports,
			     cmd_buffer->state.dynamic.viewport.viewports,
			     MAX_VIEWPORTS);

		/* Save all scissors. */
		state->scissor.count = cmd_buffer->state.dynamic.scissor.count;
		typed_memcpy(state->scissor.scissors,
			     cmd_buffer->state.dynamic.scissor.scissors,
			     MAX_SCISSORS);

		state->cull_mode = cmd_buffer->state.dynamic.cull_mode;
		state->front_face = cmd_buffer->state.dynamic.front_face;

		state->primitive_topology = cmd_buffer->state.dynamic.primitive_topology;

		state->depth_test_enable = cmd_buffer->state.dynamic.depth_test_enable;
		state->depth_write_enable = cmd_buffer->state.dynamic.depth_write_enable;
		state->depth_compare_op = cmd_buffer->state.dynamic.depth_compare_op;
		state->depth_bounds_test_enable = cmd_buffer->state.dynamic.depth_bounds_test_enable;
		state->stencil_test_enable = cmd_buffer->state.dynamic.stencil_test_enable;

		state->stencil_op.front.compare_op = cmd_buffer->state.dynamic.stencil_op.front.compare_op;
		state->stencil_op.front.fail_op = cmd_buffer->state.dynamic.stencil_op.front.fail_op;
		state->stencil_op.front.pass_op = cmd_buffer->state.dynamic.stencil_op.front.pass_op;
		state->stencil_op.front.depth_fail_op = cmd_buffer->state.dynamic.stencil_op.front.depth_fail_op;

		state->stencil_op.back.compare_op = cmd_buffer->state.dynamic.stencil_op.back.compare_op;
		state->stencil_op.back.fail_op = cmd_buffer->state.dynamic.stencil_op.back.fail_op;
		state->stencil_op.back.pass_op = cmd_buffer->state.dynamic.stencil_op.back.pass_op;
		state->stencil_op.back.depth_fail_op = cmd_buffer->state.dynamic.stencil_op.back.depth_fail_op;
	}

	if (state->flags & RADV_META_SAVE_SAMPLE_LOCATIONS) {
		typed_memcpy(&state->sample_location,
			     &cmd_buffer->state.dynamic.sample_location, 1);
	}

	if (state->flags & RADV_META_SAVE_COMPUTE_PIPELINE) {
		assert(!(state->flags & RADV_META_SAVE_GRAPHICS_PIPELINE));

		state->old_pipeline = cmd_buffer->state.compute_pipeline;
	}

	if (state->flags & RADV_META_SAVE_DESCRIPTORS) {
		state->old_descriptor_set0 = descriptors_state->sets[0];
		if (!(descriptors_state->valid & 1) || !state->old_descriptor_set0)
			state->flags &= ~RADV_META_SAVE_DESCRIPTORS;
	}

	if (state->flags & RADV_META_SAVE_CONSTANTS) {
		memcpy(state->push_constants, cmd_buffer->push_constants,
		       MAX_PUSH_CONSTANTS_SIZE);
	}

	if (state->flags & RADV_META_SAVE_PASS) {
		state->pass = cmd_buffer->state.pass;
		state->subpass = cmd_buffer->state.subpass;
		state->framebuffer = cmd_buffer->state.framebuffer;
		state->attachments = cmd_buffer->state.attachments;
		state->render_area = cmd_buffer->state.render_area;
	}
}

void
radv_meta_restore(const struct radv_meta_saved_state *state,
		  struct radv_cmd_buffer *cmd_buffer)
{
	VkPipelineBindPoint bind_point =
		state->flags & RADV_META_SAVE_GRAPHICS_PIPELINE ?
			VK_PIPELINE_BIND_POINT_GRAPHICS :
			VK_PIPELINE_BIND_POINT_COMPUTE;

	if (state->flags & RADV_META_SAVE_GRAPHICS_PIPELINE) {
		radv_CmdBindPipeline(radv_cmd_buffer_to_handle(cmd_buffer),
				     VK_PIPELINE_BIND_POINT_GRAPHICS,
				     radv_pipeline_to_handle(state->old_pipeline));

		cmd_buffer->state.dirty |= RADV_CMD_DIRTY_PIPELINE;

		/* Restore all viewports. */
		cmd_buffer->state.dynamic.viewport.count = state->viewport.count;
		typed_memcpy(cmd_buffer->state.dynamic.viewport.viewports,
			     state->viewport.viewports,
			     MAX_VIEWPORTS);

		/* Restore all scissors. */
		cmd_buffer->state.dynamic.scissor.count = state->scissor.count;
		typed_memcpy(cmd_buffer->state.dynamic.scissor.scissors,
			     state->scissor.scissors,
			     MAX_SCISSORS);

		cmd_buffer->state.dynamic.cull_mode = state->cull_mode;
		cmd_buffer->state.dynamic.front_face = state->front_face;

		cmd_buffer->state.dynamic.primitive_topology = state->primitive_topology;

		cmd_buffer->state.dynamic.depth_test_enable = state->depth_test_enable;
		cmd_buffer->state.dynamic.depth_write_enable = state->depth_write_enable;
		cmd_buffer->state.dynamic.depth_compare_op = state->depth_compare_op;
		cmd_buffer->state.dynamic.depth_bounds_test_enable = state->depth_bounds_test_enable;
		cmd_buffer->state.dynamic.stencil_test_enable = state->stencil_test_enable;

		cmd_buffer->state.dynamic.stencil_op.front.compare_op = state->stencil_op.front.compare_op;
		cmd_buffer->state.dynamic.stencil_op.front.fail_op = state->stencil_op.front.fail_op;
		cmd_buffer->state.dynamic.stencil_op.front.pass_op = state->stencil_op.front.pass_op;
		cmd_buffer->state.dynamic.stencil_op.front.depth_fail_op = state->stencil_op.front.depth_fail_op;

		cmd_buffer->state.dynamic.stencil_op.back.compare_op = state->stencil_op.back.compare_op;
		cmd_buffer->state.dynamic.stencil_op.back.fail_op = state->stencil_op.back.fail_op;
		cmd_buffer->state.dynamic.stencil_op.back.pass_op = state->stencil_op.back.pass_op;
		cmd_buffer->state.dynamic.stencil_op.back.depth_fail_op = state->stencil_op.back.depth_fail_op;

		cmd_buffer->state.dirty |= RADV_CMD_DIRTY_DYNAMIC_VIEWPORT |
					   RADV_CMD_DIRTY_DYNAMIC_SCISSOR |
					   RADV_CMD_DIRTY_DYNAMIC_CULL_MODE |
					   RADV_CMD_DIRTY_DYNAMIC_FRONT_FACE |
					   RADV_CMD_DIRTY_DYNAMIC_PRIMITIVE_TOPOLOGY |
					   RADV_CMD_DIRTY_DYNAMIC_DEPTH_TEST_ENABLE |
					   RADV_CMD_DIRTY_DYNAMIC_DEPTH_WRITE_ENABLE |
					   RADV_CMD_DIRTY_DYNAMIC_DEPTH_COMPARE_OP |
					   RADV_CMD_DIRTY_DYNAMIC_DEPTH_BOUNDS_TEST_ENABLE |
					   RADV_CMD_DIRTY_DYNAMIC_STENCIL_TEST_ENABLE |
					   RADV_CMD_DIRTY_DYNAMIC_STENCIL_OP;
	}

	if (state->flags & RADV_META_SAVE_SAMPLE_LOCATIONS) {
		typed_memcpy(&cmd_buffer->state.dynamic.sample_location.locations,
			     &state->sample_location.locations, 1);

		cmd_buffer->state.dirty |= RADV_CMD_DIRTY_DYNAMIC_SAMPLE_LOCATIONS;
	}

	if (state->flags & RADV_META_SAVE_COMPUTE_PIPELINE) {
		radv_CmdBindPipeline(radv_cmd_buffer_to_handle(cmd_buffer),
				     VK_PIPELINE_BIND_POINT_COMPUTE,
				     radv_pipeline_to_handle(state->old_pipeline));
	}

	if (state->flags & RADV_META_SAVE_DESCRIPTORS) {
		radv_set_descriptor_set(cmd_buffer, bind_point,
					state->old_descriptor_set0, 0);
	}

	if (state->flags & RADV_META_SAVE_CONSTANTS) {
		VkShaderStageFlags stages = VK_SHADER_STAGE_COMPUTE_BIT;

		if (state->flags & RADV_META_SAVE_GRAPHICS_PIPELINE)
			stages |= VK_SHADER_STAGE_ALL_GRAPHICS;

		radv_CmdPushConstants(radv_cmd_buffer_to_handle(cmd_buffer),
				      VK_NULL_HANDLE, stages, 0,
				      MAX_PUSH_CONSTANTS_SIZE,
				      state->push_constants);
	}

	if (state->flags & RADV_META_SAVE_PASS) {
		cmd_buffer->state.pass = state->pass;
		cmd_buffer->state.subpass = state->subpass;
		cmd_buffer->state.framebuffer = state->framebuffer;
		cmd_buffer->state.attachments = state->attachments;
		cmd_buffer->state.render_area = state->render_area;
		if (state->subpass)
			cmd_buffer->state.dirty |= RADV_CMD_DIRTY_FRAMEBUFFER;
	}
}
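
/* A minimal usage sketch (illustrative; the flag set varies per meta op):
 * every meta operation brackets its own binds with a save/restore pair, e.g.
 *
 *   struct radv_meta_saved_state saved_state;
 *
 *   radv_meta_save(&saved_state, cmd_buffer,
 *                  RADV_META_SAVE_COMPUTE_PIPELINE |
 *                  RADV_META_SAVE_DESCRIPTORS |
 *                  RADV_META_SAVE_CONSTANTS);
 *   ... bind the meta pipeline, set descriptors/push constants, dispatch ...
 *   radv_meta_restore(&saved_state, cmd_buffer);
 */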

VkImageViewType
radv_meta_get_view_type(const struct radv_image *image)
{
	switch (image->type) {
	case VK_IMAGE_TYPE_1D: return VK_IMAGE_VIEW_TYPE_1D;
	case VK_IMAGE_TYPE_2D: return VK_IMAGE_VIEW_TYPE_2D;
	case VK_IMAGE_TYPE_3D: return VK_IMAGE_VIEW_TYPE_3D;
	default:
		unreachable("bad VkImageType");
	}
}

/**
 * When creating a destination VkImageView, this function provides the needed
 * VkImageViewCreateInfo::subresourceRange::baseArrayLayer.
 */
uint32_t
radv_meta_get_iview_layer(const struct radv_image *dest_image,
			  const VkImageSubresourceLayers *dest_subresource,
			  const VkOffset3D *dest_offset)
{
	switch (dest_image->type) {
	case VK_IMAGE_TYPE_1D:
	case VK_IMAGE_TYPE_2D:
		return dest_subresource->baseArrayLayer;
	case VK_IMAGE_TYPE_3D:
		/* HACK: Vulkan does not allow attaching a 3D image to a framebuffer,
		 * but meta does it anyway. When doing so, we translate the
		 * destination's z offset into an array offset.
		 */
		return dest_offset->z;
	default:
		assert(!"bad VkImageType");
		return 0;
	}
}
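
/* For example, blitting into z-slice 7 of a 3D image attaches a view whose
 * baseArrayLayer is 7; dest_offset->z doubles as the layer index. (An
 * illustrative case, not taken from a caller in this file.)
 */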

static void *
meta_alloc(void *_device, size_t size, size_t alignment,
           VkSystemAllocationScope allocationScope)
{
	struct radv_device *device = _device;
	return device->vk.alloc.pfnAllocation(device->vk.alloc.pUserData, size, alignment,
					      VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
}

static void *
meta_realloc(void *_device, void *original, size_t size, size_t alignment,
             VkSystemAllocationScope allocationScope)
{
	struct radv_device *device = _device;
	return device->vk.alloc.pfnReallocation(device->vk.alloc.pUserData, original,
						size, alignment,
						VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
}

static void
meta_free(void *_device, void *data)
{
	struct radv_device *device = _device;
	device->vk.alloc.pfnFree(device->vk.alloc.pUserData, data);
}

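/* Builds the on-disk cache path for the built-in meta shaders. The
 * pointer-size suffix keeps 32-bit and 64-bit builds from clobbering each
 * other's caches; e.g. on a 64-bit build with XDG_CACHE_HOME unset this
 * resolves to "$HOME/.cache/radv_builtin_shaders64". (An example derived
 * from the code below, not guaranteed for every configuration.)
 */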
static bool
radv_builtin_cache_path(char *path)
{
	char *xdg_cache_home = getenv("XDG_CACHE_HOME");
	const char *suffix = "/radv_builtin_shaders";
	const char *suffix2 = "/.cache/radv_builtin_shaders";
	struct passwd pwd, *result;
	char path2[PATH_MAX + 1]; /* PATH_MAX is not a real max, but suffices here. */
	int ret;

	if (xdg_cache_home) {
		ret = snprintf(path, PATH_MAX + 1, "%s%s%zd",
			       xdg_cache_home, suffix, sizeof(void *) * 8);
		return ret > 0 && ret < PATH_MAX + 1;
	}

	getpwuid_r(getuid(), &pwd, path2, PATH_MAX - strlen(suffix2), &result);
	if (!result)
		return false;

	strcpy(path, pwd.pw_dir);
	strcat(path, "/.cache");
	if (mkdir(path, 0755) && errno != EEXIST)
		return false;

	ret = snprintf(path, PATH_MAX + 1, "%s%s%zd",
		       pwd.pw_dir, suffix2, sizeof(void *) * 8);
	return ret > 0 && ret < PATH_MAX + 1;
}

static bool
radv_load_meta_pipeline(struct radv_device *device)
{
	char path[PATH_MAX + 1];
	struct stat st;
	void *data = NULL;
	bool ret = false;

	if (!radv_builtin_cache_path(path))
		return false;

	int fd = open(path, O_RDONLY);
	if (fd < 0)
		return false;
	if (fstat(fd, &st))
		goto fail;
	data = malloc(st.st_size);
	if (!data)
		goto fail;
	if (read(fd, data, st.st_size) == -1)
		goto fail;

	ret = radv_pipeline_cache_load(&device->meta_state.cache, data, st.st_size);
fail:
	free(data);
	close(fd);
	return ret;
}

static void
radv_store_meta_pipeline(struct radv_device *device)
{
	char path[PATH_MAX + 1], path2[PATH_MAX + 7];
	size_t size;
	void *data = NULL;

	if (!device->meta_state.cache.modified)
		return;

	if (radv_GetPipelineCacheData(radv_device_to_handle(device),
				      radv_pipeline_cache_to_handle(&device->meta_state.cache),
				      &size, NULL))
		return;

	if (!radv_builtin_cache_path(path))
		return;

	/* Write into a unique temporary file, then atomically rename it over
	 * the real cache so readers never see a partially written file. */
	strcpy(path2, path);
	strcat(path2, "XXXXXX");
	int fd = mkstemp(path2);
	if (fd < 0)
		return;
	data = malloc(size);
	if (!data)
		goto fail;

	if (radv_GetPipelineCacheData(radv_device_to_handle(device),
				      radv_pipeline_cache_to_handle(&device->meta_state.cache),
				      &size, data))
		goto fail;
	if (write(fd, data, size) == -1)
		goto fail;

	rename(path2, path);
fail:
	free(data);
	close(fd);
	unlink(path2);
}

VkResult
radv_device_init_meta(struct radv_device *device)
{
	VkResult result;

	memset(&device->meta_state, 0, sizeof(device->meta_state));

	device->meta_state.alloc = (VkAllocationCallbacks) {
		.pUserData = device,
		.pfnAllocation = meta_alloc,
		.pfnReallocation = meta_realloc,
		.pfnFree = meta_free,
	};

	device->meta_state.cache.alloc = device->meta_state.alloc;
	radv_pipeline_cache_init(&device->meta_state.cache, device);
	bool loaded_cache = radv_load_meta_pipeline(device);
	/* Without a pre-loaded disk cache, build meta pipelines lazily on
	 * first use rather than all up front at device creation. */
	bool on_demand = !loaded_cache;

	mtx_init(&device->meta_state.mtx, mtx_plain);

	result = radv_device_init_meta_clear_state(device, on_demand);
	if (result != VK_SUCCESS)
		goto fail_clear;

	result = radv_device_init_meta_resolve_state(device, on_demand);
	if (result != VK_SUCCESS)
		goto fail_resolve;

	result = radv_device_init_meta_blit_state(device, on_demand);
	if (result != VK_SUCCESS)
		goto fail_blit;

	result = radv_device_init_meta_blit2d_state(device, on_demand);
	if (result != VK_SUCCESS)
		goto fail_blit2d;

	result = radv_device_init_meta_bufimage_state(device);
	if (result != VK_SUCCESS)
		goto fail_bufimage;

	result = radv_device_init_meta_depth_decomp_state(device, on_demand);
	if (result != VK_SUCCESS)
		goto fail_depth_decomp;

	result = radv_device_init_meta_buffer_state(device);
	if (result != VK_SUCCESS)
		goto fail_buffer;

	result = radv_device_init_meta_query_state(device, on_demand);
	if (result != VK_SUCCESS)
		goto fail_query;

	result = radv_device_init_meta_fast_clear_flush_state(device, on_demand);
	if (result != VK_SUCCESS)
		goto fail_fast_clear;

	result = radv_device_init_meta_resolve_compute_state(device, on_demand);
	if (result != VK_SUCCESS)
		goto fail_resolve_compute;

	result = radv_device_init_meta_resolve_fragment_state(device, on_demand);
	if (result != VK_SUCCESS)
		goto fail_resolve_fragment;

	result = radv_device_init_meta_fmask_expand_state(device);
	if (result != VK_SUCCESS)
		goto fail_fmask_expand;

	return VK_SUCCESS;

fail_fmask_expand:
	radv_device_finish_meta_resolve_fragment_state(device);
fail_resolve_fragment:
	radv_device_finish_meta_resolve_compute_state(device);
fail_resolve_compute:
	radv_device_finish_meta_fast_clear_flush_state(device);
fail_fast_clear:
	radv_device_finish_meta_query_state(device);
fail_query:
	radv_device_finish_meta_buffer_state(device);
fail_buffer:
	radv_device_finish_meta_depth_decomp_state(device);
fail_depth_decomp:
	radv_device_finish_meta_bufimage_state(device);
fail_bufimage:
	radv_device_finish_meta_blit2d_state(device);
fail_blit2d:
	radv_device_finish_meta_blit_state(device);
fail_blit:
	radv_device_finish_meta_resolve_state(device);
fail_resolve:
	radv_device_finish_meta_clear_state(device);
fail_clear:
	mtx_destroy(&device->meta_state.mtx);
	radv_pipeline_cache_finish(&device->meta_state.cache);
	return result;
}

void
radv_device_finish_meta(struct radv_device *device)
{
	radv_device_finish_meta_clear_state(device);
	radv_device_finish_meta_resolve_state(device);
	radv_device_finish_meta_blit_state(device);
	radv_device_finish_meta_blit2d_state(device);
	radv_device_finish_meta_bufimage_state(device);
	radv_device_finish_meta_depth_decomp_state(device);
	radv_device_finish_meta_query_state(device);
	radv_device_finish_meta_buffer_state(device);
	radv_device_finish_meta_fast_clear_flush_state(device);
	radv_device_finish_meta_resolve_compute_state(device);
	radv_device_finish_meta_resolve_fragment_state(device);
	radv_device_finish_meta_fmask_expand_state(device);

	radv_store_meta_pipeline(device);
	radv_pipeline_cache_finish(&device->meta_state.cache);
	mtx_destroy(&device->meta_state.mtx);
}

nir_ssa_def *radv_meta_gen_rect_vertices_comp2(nir_builder *vs_b, nir_ssa_def *comp2)
{
	nir_intrinsic_instr *vertex_id = nir_intrinsic_instr_create(vs_b->shader, nir_intrinsic_load_vertex_id_zero_base);
	nir_ssa_dest_init(&vertex_id->instr, &vertex_id->dest, 1, 32, "vertexid");
	nir_builder_instr_insert(vs_b, &vertex_id->instr);

	/* vertex 0: (-1.0, -1.0)
	 * vertex 1: (-1.0,  1.0)
	 * vertex 2: ( 1.0, -1.0)
	 *
	 * So channel 0 is vertex_id != 2 ? -1.0 : 1.0 and
	 * channel 1 is vertex_id != 1 ? -1.0 : 1.0.
	 */
	nir_ssa_def *c0cmp = nir_ine(vs_b, &vertex_id->dest.ssa,
				     nir_imm_int(vs_b, 2));
	nir_ssa_def *c1cmp = nir_ine(vs_b, &vertex_id->dest.ssa,
				     nir_imm_int(vs_b, 1));

	nir_ssa_def *comp[4];
	comp[0] = nir_bcsel(vs_b, c0cmp,
			    nir_imm_float(vs_b, -1.0),
			    nir_imm_float(vs_b, 1.0));

	comp[1] = nir_bcsel(vs_b, c1cmp,
			    nir_imm_float(vs_b, -1.0),
			    nir_imm_float(vs_b, 1.0));
	comp[2] = comp2;
	comp[3] = nir_imm_float(vs_b, 1.0);
	nir_ssa_def *outvec = nir_vec(vs_b, comp, 4);

	return outvec;
}

nir_ssa_def *radv_meta_gen_rect_vertices(nir_builder *vs_b)
{
	return radv_meta_gen_rect_vertices_comp2(vs_b, nir_imm_float(vs_b, 0.0));
}
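
/* Usage note (based on how radv meta passes typically draw; the exact
 * primitive setup lives in the pipeline code, not here): the three vertices
 * above are consumed as a rect-list primitive, where the hardware derives
 * the fourth corner, so a single vkCmdDraw of 3 vertices covers the whole
 * [-1, 1] x [-1, 1] clip-space rectangle.
 */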

/* vertex shader that generates vertices */
nir_shader *
radv_meta_build_nir_vs_generate_vertices(void)
{
	const struct glsl_type *vec4 = glsl_vec4_type();

	nir_builder b;
	nir_variable *v_position;

	nir_builder_init_simple_shader(&b, NULL, MESA_SHADER_VERTEX, NULL);
	b.shader->info.name = ralloc_strdup(b.shader, "meta_vs_gen_verts");

	nir_ssa_def *outvec = radv_meta_gen_rect_vertices(&b);

	v_position = nir_variable_create(b.shader, nir_var_shader_out, vec4,
					 "gl_Position");
	v_position->data.location = VARYING_SLOT_POS;

	nir_store_var(&b, v_position, outvec, 0xf);

	return b.shader;
}

nir_shader *
radv_meta_build_nir_fs_noop(void)
{
	nir_builder b;

	nir_builder_init_simple_shader(&b, NULL, MESA_SHADER_FRAGMENT, NULL);
	b.shader->info.name = ralloc_asprintf(b.shader,
					      "meta_noop_fs");

	return b.shader;
}

void radv_meta_build_resolve_shader_core(nir_builder *b,
					 bool is_integer,
					 int samples,
					 nir_variable *input_img,
					 nir_variable *color,
					 nir_ssa_def *img_coord)
{
	/* do a txf_ms on each sample */
	nir_ssa_def *tmp;
	bool inserted_if = false;

	nir_ssa_def *input_img_deref = &nir_build_deref_var(b, input_img)->dest.ssa;

	nir_tex_instr *tex = nir_tex_instr_create(b->shader, 3);
	tex->sampler_dim = GLSL_SAMPLER_DIM_MS;
	tex->op = nir_texop_txf_ms;
	tex->src[0].src_type = nir_tex_src_coord;
	tex->src[0].src = nir_src_for_ssa(img_coord);
	tex->src[1].src_type = nir_tex_src_ms_index;
	tex->src[1].src = nir_src_for_ssa(nir_imm_int(b, 0));
	tex->src[2].src_type = nir_tex_src_texture_deref;
	tex->src[2].src = nir_src_for_ssa(input_img_deref);
	tex->dest_type = nir_type_float;
	tex->is_array = false;
	tex->coord_components = 2;

	nir_ssa_dest_init(&tex->instr, &tex->dest, 4, 32, "tex");
	nir_builder_instr_insert(b, &tex->instr);

	tmp = &tex->dest.ssa;

	if (!is_integer && samples > 1) {
		nir_tex_instr *tex_all_same = nir_tex_instr_create(b->shader, 2);
		tex_all_same->sampler_dim = GLSL_SAMPLER_DIM_MS;
		tex_all_same->op = nir_texop_samples_identical;
		tex_all_same->src[0].src_type = nir_tex_src_coord;
		tex_all_same->src[0].src = nir_src_for_ssa(img_coord);
		tex_all_same->src[1].src_type = nir_tex_src_texture_deref;
		tex_all_same->src[1].src = nir_src_for_ssa(input_img_deref);
		tex_all_same->dest_type = nir_type_uint;
		tex_all_same->is_array = false;
		tex_all_same->coord_components = 2;

		nir_ssa_dest_init(&tex_all_same->instr, &tex_all_same->dest, 1, 1, "tex");
		nir_builder_instr_insert(b, &tex_all_same->instr);

		/* Note the inverted sense: this is true when the samples are
		 * *not* all identical, i.e. when we actually have to average. */
		nir_ssa_def *all_same = nir_ieq(b, &tex_all_same->dest.ssa, nir_imm_bool(b, false));
		nir_push_if(b, all_same);
		for (int i = 1; i < samples; i++) {
			nir_tex_instr *tex_add = nir_tex_instr_create(b->shader, 3);
			tex_add->sampler_dim = GLSL_SAMPLER_DIM_MS;
			tex_add->op = nir_texop_txf_ms;
			tex_add->src[0].src_type = nir_tex_src_coord;
			tex_add->src[0].src = nir_src_for_ssa(img_coord);
			tex_add->src[1].src_type = nir_tex_src_ms_index;
			tex_add->src[1].src = nir_src_for_ssa(nir_imm_int(b, i));
			tex_add->src[2].src_type = nir_tex_src_texture_deref;
			tex_add->src[2].src = nir_src_for_ssa(input_img_deref);
			tex_add->dest_type = nir_type_float;
			tex_add->is_array = false;
			tex_add->coord_components = 2;

			nir_ssa_dest_init(&tex_add->instr, &tex_add->dest, 4, 32, "tex");
			nir_builder_instr_insert(b, &tex_add->instr);

			tmp = nir_fadd(b, tmp, &tex_add->dest.ssa);
		}

		tmp = nir_fdiv(b, tmp, nir_imm_float(b, samples));
		nir_store_var(b, color, tmp, 0xf);
		nir_push_else(b, NULL);
		inserted_if = true;
	}
	nir_store_var(b, color, &tex->dest.ssa, 0xf);

	if (inserted_if)
		nir_pop_if(b, NULL);
}
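
/* Roughly the shader this builds, written as GLSL for readability (a sketch,
 * not generated source; input_img, img_coord, samples and color correspond
 * to the parameters above):
 *
 *   vec4 s0 = texelFetch(input_img, img_coord, 0);
 *   if (!textureSamplesIdenticalEXT(input_img, img_coord)) {
 *       vec4 sum = s0;
 *       for (int i = 1; i < samples; i++)
 *           sum += texelFetch(input_img, img_coord, i);
 *       color = sum / float(samples);
 *   } else {
 *       color = s0;
 *   }
 */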

nir_ssa_def *
radv_meta_load_descriptor(nir_builder *b, unsigned desc_set, unsigned binding)
{
	nir_intrinsic_instr *rsrc =
		nir_intrinsic_instr_create(b->shader,
					   nir_intrinsic_vulkan_resource_index);

	rsrc->src[0] = nir_src_for_ssa(nir_imm_int(b, 0));
	rsrc->num_components = 2;
	nir_intrinsic_set_desc_set(rsrc, desc_set);
	nir_intrinsic_set_binding(rsrc, binding);
	nir_ssa_dest_init(&rsrc->instr, &rsrc->dest, rsrc->num_components, 32, NULL);
	nir_builder_instr_insert(b, &rsrc->instr);

	return nir_channel(b, &rsrc->dest.ssa, 0);
}
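
/* Typical use (an illustrative sketch, assuming a meta shader with a buffer
 * at set 0, binding 0): the returned resource index feeds a buffer access
 * intrinsic, e.g.
 *
 *   nir_ssa_def *dst_buf = radv_meta_load_descriptor(&b, 0, 0);
 *   ... pass dst_buf as the resource source of a load_ssbo/store_ssbo ...
 */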